code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
self._logger.debug("incoming iq: %r", stanza_obj) if stanza_obj.type_.is_response: # iq response self._logger.debug("iq is response") keys = [(stanza_obj.from_, stanza_obj.id_)] if self._local_jid is not None: # needed for some servers if keys[0][0] == self._local_jid: keys.append((None, keys[0][1])) elif keys[0][0] is None: keys.append((self._local_jid, keys[0][1])) for key in keys: try: self._iq_response_map.unicast(key, stanza_obj) self._logger.debug("iq response delivered to key %r", key) break except KeyError: pass else: self._logger.warning( "unexpected IQ response: from=%r, id=%r", *key) else: # iq request self._logger.debug("iq is request") key = (stanza_obj.type_, type(stanza_obj.payload)) try: coro, with_send_reply = self._iq_request_map[key] except KeyError: self._logger.warning( "unhandleable IQ request: from=%r, type_=%r, payload=%r", stanza_obj.from_, stanza_obj.type_, stanza_obj.payload ) response = stanza_obj.make_reply(type_=structs.IQType.ERROR) response.error = stanza.Error( condition=errors.ErrorCondition.SERVICE_UNAVAILABLE, ) self._enqueue(response) return args = [stanza_obj] if with_send_reply: def send_reply(result=None): nonlocal task, stanza_obj, send_reply_callback if task.done(): raise RuntimeError( "send_reply called after the handler is done") if task.remove_done_callback(send_reply_callback) == 0: raise RuntimeError( "send_reply called more than once") task.add_done_callback(self._iq_request_coro_done_check) self._send_iq_reply(stanza_obj, result) args.append(send_reply) try: awaitable = coro(*args) except Exception as exc: awaitable = asyncio.Future() awaitable.set_exception(exc) task = asyncio.ensure_future(awaitable) send_reply_callback = functools.partial( self._iq_request_coro_done_send_reply, stanza_obj) task.add_done_callback(self._iq_request_coro_done_remove_task) task.add_done_callback(send_reply_callback) self._iq_request_tasks.append(task) self._logger.debug("started task to handle request: %r", task)
def _process_incoming_iq(self, stanza_obj)
Process an incoming IQ stanza `stanza_obj`. Calls the response handler, spawns a request handler coroutine or drops the stanza while logging a warning if no handler can be found.
2.953916
2.887592
1.022969
self._logger.debug("incoming message: %r", stanza_obj) stanza_obj = self.service_inbound_message_filter.filter(stanza_obj) if stanza_obj is None: self._logger.debug("incoming message dropped by service " "filter chain") return stanza_obj = self.app_inbound_message_filter.filter(stanza_obj) if stanza_obj is None: self._logger.debug("incoming message dropped by application " "filter chain") return self.on_message_received(stanza_obj)
def _process_incoming_message(self, stanza_obj)
Process an incoming message stanza `stanza_obj`.
2.670051
2.619293
1.019379
self._logger.debug("incoming presence: %r", stanza_obj) stanza_obj = self.service_inbound_presence_filter.filter(stanza_obj) if stanza_obj is None: self._logger.debug("incoming presence dropped by service filter" " chain") return stanza_obj = self.app_inbound_presence_filter.filter(stanza_obj) if stanza_obj is None: self._logger.debug("incoming presence dropped by application " "filter chain") return self.on_presence_received(stanza_obj)
def _process_incoming_presence(self, stanza_obj)
Process an incoming presence stanza `stanza_obj`.
2.88775
2.825488
1.022036
stanza_obj, exc = queue_entry # first, handle SM stream objects if isinstance(stanza_obj, nonza.SMAcknowledgement): self._logger.debug("received SM ack: %r", stanza_obj) if not self._sm_enabled: self._logger.warning("received SM ack, but SM not enabled") return self.sm_ack(stanza_obj.counter) return elif isinstance(stanza_obj, nonza.SMRequest): self._logger.debug("received SM request: %r", stanza_obj) if not self._sm_enabled: self._logger.warning("received SM request, but SM not enabled") return response = nonza.SMAcknowledgement() response.counter = self._sm_inbound_ctr self._logger.debug("sending SM ack: %r", response) xmlstream.send_xso(response) return # raise if it is not a stanza if not isinstance(stanza_obj, stanza.StanzaBase): raise RuntimeError( "unexpected stanza class: {}".format(stanza_obj)) # now handle stanzas, these always increment the SM counter if self._sm_enabled: self._sm_inbound_ctr += 1 self._sm_inbound_ctr &= 0xffffffff # check if the stanza has errors if exc is not None: self._process_incoming_erroneous_stanza(stanza_obj, exc) return if isinstance(stanza_obj, stanza.IQ): self._process_incoming_iq(stanza_obj) elif isinstance(stanza_obj, stanza.Message): self._process_incoming_message(stanza_obj) elif isinstance(stanza_obj, stanza.Presence): self._process_incoming_presence(stanza_obj)
def _process_incoming(self, xmlstream, queue_entry)
Dispatch to the different methods responsible for the different stanza types or handle a non-stanza stream-level element from `stanza_obj`, which has arrived over the given `xmlstream`.
2.823504
2.724807
1.036222
while True: try: stanza_obj = self._incoming_queue.get_nowait() except asyncio.QueueEmpty: break self._process_incoming(None, stanza_obj)
def flush_incoming(self)
Flush all incoming queues to the respective processing methods. The handlers are called as usual, thus it may require at least one iteration through the asyncio event loop before effects can be seen. The incoming queues are empty after a call to this method. It is legal (but pretty useless) to call this method while the stream is :attr:`running`.
4.30564
4.647993
0.926344
if token.state == StanzaState.ABORTED: return stanza_obj = token.stanza if isinstance(stanza_obj, stanza.Presence): stanza_obj = self.app_outbound_presence_filter.filter( stanza_obj ) if stanza_obj is not None: stanza_obj = self.service_outbound_presence_filter.filter( stanza_obj ) elif isinstance(stanza_obj, stanza.Message): stanza_obj = self.app_outbound_message_filter.filter( stanza_obj ) if stanza_obj is not None: stanza_obj = self.service_outbound_message_filter.filter( stanza_obj ) if stanza_obj is None: token._set_state(StanzaState.DROPPED) self._logger.debug("outgoing stanza %r dropped by filter chain", token.stanza) return self._logger.debug("forwarding stanza to xmlstream: %r", stanza_obj) try: xmlstream.send_xso(stanza_obj) except Exception as exc: self._logger.warning("failed to send stanza", exc_info=True) token._set_state(StanzaState.FAILED, exc) return if self._sm_enabled: token._set_state(StanzaState.SENT) self._sm_unacked_list.append(token) else: token._set_state(StanzaState.SENT_WITHOUT_SM)
def _send_stanza(self, xmlstream, token)
Send a stanza token `token` over the given `xmlstream`. Only sends if the `token` has not been aborted (see :meth:`StanzaToken.abort`). Sends the state of the token acoording to :attr:`sm_enabled`.
2.629092
2.504331
1.049818
self._send_stanza(xmlstream, token) # try to send a bulk while True: try: token = self._active_queue.get_nowait() except asyncio.QueueEmpty: break self._send_stanza(xmlstream, token) if self._sm_enabled: self._logger.debug("sending SM req") xmlstream.send_xso(nonza.SMRequest())
def _process_outgoing(self, xmlstream, token)
Process the current outgoing stanza `token` and also any other outgoing stanza which is currently in the active queue. After all stanzas have been processed, use :meth:`_send_ping` to allow an opportunistic ping to be sent.
7.081892
6.154799
1.150629
self._iq_response_map.add_listener( (from_, id_), callbacks.OneshotAsyncTagListener(cb, loop=self._loop) ) self._logger.debug("iq response callback registered: from=%r, id=%r", from_, id_)
def register_iq_response_callback(self, from_, id_, cb)
Register a callback function `cb` to be called when a IQ stanza with type ``result`` or ``error`` is recieved from the :class:`~aioxmpp.JID` `from_` with the id `id_`. The callback is called at most once. .. note:: In contrast to :meth:`register_iq_response_future`, errors which occur on a level below XMPP stanzas cannot be caught using a callback. If you need notification about other errors and still want to use callbacks, use of a future with :meth:`asyncio.Future.add_done_callback` is recommended.
6.818439
7.459357
0.914079
self._iq_response_map.add_listener( (from_, id_), StanzaErrorAwareListener( callbacks.FutureListener(fut) ) ) self._logger.debug("iq response future registered: from=%r, id=%r", from_, id_)
def register_iq_response_future(self, from_, id_, fut)
Register a future `fut` for an IQ stanza with type ``result`` or ``error`` from the :class:`~aioxmpp.JID` `from_` with the id `id_`. If the type of the IQ stanza is ``result``, the stanza is set as result to the future. If the type of the IQ stanza is ``error``, the stanzas error field is converted to an exception and set as the exception of the future. The future might also receive different exceptions: * :class:`.errors.ErroneousStanza`, if the response stanza received could not be parsed. Note that this exception is not emitted if the ``from`` address of the stanza is unset, because the code cannot determine whether a sender deliberately used an erroneous address to make parsing fail or no sender address was used. In the former case, an attacker could use that to inject a stanza which would be taken as a stanza from the peer server. Thus, the future will never be fulfilled in these cases. Also note that this exception does not derive from :class:`.errors.XMPPError`, as it cannot provide the same attributes. Instead, it dervies from :class:`.errors.StanzaError`, from which :class:`.errors.XMPPError` also derives; to catch all possible stanza errors, catching :class:`.errors.StanzaError` is sufficient and future-proof. * :class:`ConnectionError` if the stream is :meth:`stop`\\ -ped (only if SM is not enabled) or :meth:`close`\\ -ed. * Any :class:`Exception` which may be raised from :meth:`~.protocol.XMLStream.send_xso`, which are generally also :class:`ConnectionError` or at least :class:`OSError` subclasses.
7.854794
7.569354
1.03771
self._iq_response_map.remove_listener((from_, id_)) self._logger.debug("iq response unregistered: from=%r, id=%r", from_, id_)
def unregister_iq_response(self, from_, id_)
Unregister a registered callback or future for the IQ response identified by `from_` and `id_`. See :meth:`register_iq_response_future` or :meth:`register_iq_response_callback` for details on the arguments meanings and how to register futures and callbacks respectively. .. note:: Futures will automatically be unregistered when they are cancelled.
4.403216
5.609985
0.784889
warnings.warn( "register_iq_request_coro is a deprecated alias to " "register_iq_request_handler and will be removed in aioxmpp 1.0", DeprecationWarning, stacklevel=2) return self.register_iq_request_handler(type_, payload_cls, coro)
def register_iq_request_coro(self, type_, payload_cls, coro)
Alias of :meth:`register_iq_request_handler`. .. deprecated:: 0.10 This alias will be removed in version 1.0.
2.459217
2.409811
1.020502
type_ = self._coerce_enum(type_, structs.IQType) del self._iq_request_map[type_, payload_cls] self._logger.debug( "iq request coroutine unregistered: type=%r, payload=%r", type_, payload_cls)
def unregister_iq_request_handler(self, type_, payload_cls)
Unregister a coroutine previously registered with :meth:`register_iq_request_handler`. :param type_: IQ type to react to (must be a request type). :type type_: :class:`~structs.IQType` :param payload_cls: Payload class to react to (subclass of :class:`~xso.XSO`) :type payload_cls: :class:`~.XMLStreamClass` :raises KeyError: if no coroutine has been registered for the given ``(type_, payload_cls)`` pair :raises ValueError: if `type_` is not a valid :class:`~.IQType` (and cannot be cast to a :class:`~.IQType`) The match is solely made using the `type_` and `payload_cls` arguments, which have the same meaning as in :meth:`register_iq_request_coro`. .. versionchanged:: 0.10 Renamed from :meth:`unregister_iq_request_coro`. .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.IQType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently.
5.337592
5.089345
1.048778
if type_ is not None: type_ = self._coerce_enum(type_, structs.MessageType) warnings.warn( "register_message_callback is deprecated; use " "aioxmpp.dispatcher.SimpleMessageDispatcher instead", DeprecationWarning, stacklevel=2 ) self._xxx_message_dispatcher.register_callback( type_, from_, cb, )
def register_message_callback(self, type_, from_, cb)
Register a callback to be called when a message is received. :param type_: Message type to listen for, or :data:`None` for a wildcard match. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None` :param cb: Callback function to call :raises ValueError: if another function is already registered for the same ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) `cb` will be called whenever a message stanza matching the `type_` and `from_` is received, according to the wildcarding rules below. More specific callbacks win over less specific callbacks, and the match on the `from_` address takes precedence over the match on the `type_`. See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact wildcarding rules. .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.register_callback`, except that the latter is not deprecated.
4.444929
3.775554
1.177292
if type_ is not None: type_ = self._coerce_enum(type_, structs.MessageType) warnings.warn( "unregister_message_callback is deprecated; use " "aioxmpp.dispatcher.SimpleMessageDispatcher instead", DeprecationWarning, stacklevel=2 ) self._xxx_message_dispatcher.unregister_callback( type_, from_, )
def unregister_message_callback(self, type_, from_)
Unregister a callback previously registered with :meth:`register_message_callback`. :param type_: Message type to listen for. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for. :type from_: :class:`~aioxmpp.JID` or :data:`None` :raises KeyError: if no function is currently registered for the given ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to both arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering the super-wildcard with both arguments set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.unregister_callback`, except that the latter is not deprecated.
4.843685
3.993384
1.212928
type_ = self._coerce_enum(type_, structs.PresenceType) warnings.warn( "register_presence_callback is deprecated; use " "aioxmpp.dispatcher.SimplePresenceDispatcher or " "aioxmpp.PresenceClient instead", DeprecationWarning, stacklevel=2 ) self._xxx_presence_dispatcher.register_callback( type_, from_, cb, )
def register_presence_callback(self, type_, from_, cb)
Register a callback to be called when a presence stanza is received. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :param cb: Callback function :raises ValueError: if another listener with the same ``(type_, from_)`` pair is already registered :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) `cb` will be called whenever a presence stanza matching the `type_` is received from the specified sender. `from_` may be :data:`None` to indicate a wildcard. Like with :meth:`register_message_callback`, more specific callbacks win over less specific callbacks. The fallback order is identical, except that the ``type_=None`` entries described there do not apply for presence stanzas and are thus omitted. See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact wildcarding rules. .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead.
4.777293
4.195565
1.138653
type_ = self._coerce_enum(type_, structs.PresenceType) warnings.warn( "unregister_presence_callback is deprecated; use " "aioxmpp.dispatcher.SimplePresenceDispatcher or " "aioxmpp.PresenceClient instead", DeprecationWarning, stacklevel=2 ) self._xxx_presence_dispatcher.unregister_callback( type_, from_, )
def unregister_presence_callback(self, type_, from_)
Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead.
5.17855
4.964123
1.043195
if self.running: raise RuntimeError("already started") self._start_prepare(xmlstream, self.recv_stanza) self._closed = False self._start_commit(xmlstream)
def start(self, xmlstream)
Start or resume the stanza stream on the given :class:`aioxmpp.protocol.XMLStream` `xmlstream`. This starts the main broker task, registers stanza classes at the `xmlstream` .
7.868005
7.723447
1.018717
if not self.running: return self._logger.debug("sending stop signal to task") self._task.cancel()
def stop(self)
Send a signal to the main broker task to terminate. You have to check :attr:`running` and possibly wait for it to become :data:`False` --- the task takes at least one loop through the event loop to terminate. It is guarenteed that the task will not attempt to send stanzas over the existing `xmlstream` after a call to :meth:`stop` has been made. It is legal to call :meth:`stop` even if the task is already stopped. It is a no-op in that case.
7.271867
6.095092
1.193069
if not self.running: return self.stop() try: yield from self._task except asyncio.CancelledError: pass
def wait_stop(self)
Stop the stream and wait for it to stop. See :meth:`stop` for the general stopping conditions. You can assume that :meth:`stop` is the first thing this coroutine calls.
5.792554
4.621554
1.253378
exc = DestructionRequested("close() called") if self.running: if self.sm_enabled: self._xmlstream.send_xso(nonza.SMAcknowledgement( counter=self._sm_inbound_ctr )) yield from self._xmlstream.close_and_wait() # does not raise yield from self.wait_stop() # may raise self._closed = True self._xmlstream_exception = exc self._destroy_stream_state(self._xmlstream_exception) if self.sm_enabled: self.stop_sm()
def close(self)
Close the stream and the underlying XML stream (if any is connected). This is essentially a way of saying "I do not want to use this stream anymore" (until the next call to :meth:`start`). If the stream is currently running, the XML stream is closed gracefully (potentially sending an SM ack), the worker is stopped and any Stream Management state is cleaned up. If an error occurs while the stream stops, the error is ignored. After the call to :meth:`close` has started, :meth:`on_failure` will not be emitted, even if the XML stream fails before closure has completed. After a call to :meth:`close`, the stream is stopped, all SM state is discarded and calls to :meth:`enqueue_stanza` raise a :class:`DestructionRequested` ``"close() called"``. Such a :class:`StanzaStream` can be re-started by calling :meth:`start`. .. versionchanged:: 0.8 Before 0.8, an error during a call to :meth:`close` would stop the stream from closing completely, and the exception was re-raised. If SM was enabled, the state would have been kept, allowing for resumption and ensuring that stanzas still enqueued or unacknowledged would get a chance to be sent. If you want to have guarantees that all stanzas sent up to a certain point are sent, you should be using :meth:`send_and_wait_for_sent` with stream management.
11.799036
8.320618
1.418048
self._logger.info("resuming SM stream with remote_ctr=%d", remote_ctr) # remove any acked stanzas self.sm_ack(remote_ctr) # reinsert the remaining stanzas for token in self._sm_unacked_list: self._active_queue.putleft_nowait(token) self._sm_unacked_list.clear()
def _resume_sm(self, remote_ctr)
Version of :meth:`resume_sm` which can be used during slow start.
7.123336
6.780234
1.050603
if self.running: raise RuntimeError("Cannot resume Stream Management while" " StanzaStream is running") self._start_prepare(xmlstream, self.recv_stanza) try: response = yield from protocol.send_and_wait_for( xmlstream, [ nonza.SMResume(previd=self.sm_id, counter=self._sm_inbound_ctr) ], [ nonza.SMResumed, nonza.SMFailed ] ) if isinstance(response, nonza.SMFailed): exc = errors.StreamNegotiationFailure( "Server rejected SM resumption" ) if response.counter is not None: self.sm_ack(response.counter) self._clear_unacked(StanzaState.DISCONNECTED) xmlstream.stanza_parser.remove_class( nonza.SMRequest) xmlstream.stanza_parser.remove_class( nonza.SMAcknowledgement) self.stop_sm() raise exc self._resume_sm(response.counter) except: # NOQA self._start_rollback(xmlstream) raise self._start_commit(xmlstream)
def resume_sm(self, xmlstream)
Resume an SM-enabled stream using the given `xmlstream`. If the server rejects the attempt to resume stream management, a :class:`.errors.StreamNegotiationFailure` is raised. The stream is then in stopped state and stream management has been stopped. .. warning:: This method cannot and does not check whether the server advertised support for stream management. Attempting to negotiate stream management without server support might lead to termination of the stream. If the XML stream dies at any point during the negotiation, the SM state is left unchanged. If no response has been received yet, the exception which caused the stream to die is re-raised. The state of the stream depends on whether the main task already noticed the dead stream. If negotiation succeeds, this coroutine resumes the stream management session and initiates the retransmission of any unacked stanzas. The stream is then in running state. .. versionchanged:: 0.11 Support for using the counter value provided some servers on a failed resumption was added. Stanzas which are covered by the counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas will be marked as :attr:`~StanzaState.DISCONNECTED`. This is in contrast to the behaviour when resumption fails *without* a counter given. In that case, stanzas which have not been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
6.00512
5.060768
1.186603
if not self.sm_enabled: raise RuntimeError("Stream Management is not enabled") self._logger.info("stopping SM stream") self._sm_enabled = False del self._sm_outbound_base del self._sm_inbound_ctr self._clear_unacked(StanzaState.SENT_WITHOUT_SM) del self._sm_unacked_list self._destroy_stream_state(ConnectionError( "stream management disabled" ))
def _stop_sm(self)
Version of :meth:`stop_sm` which can be called during startup.
10.087187
10.041049
1.004595
if not self._sm_enabled: raise RuntimeError("Stream Management is not enabled") self._logger.debug("sm_ack(%d)", remote_ctr) to_drop = (remote_ctr - self._sm_outbound_base) & 0xffffffff self._logger.debug("sm_ack: to drop %d, unacked: %d", to_drop, len(self._sm_unacked_list)) if to_drop > len(self._sm_unacked_list): raise errors.StreamNegotiationFailure( "acked more stanzas than have been sent " "(outbound_base={}, remote_ctr={})".format( self._sm_outbound_base, remote_ctr ) ) acked = self._sm_unacked_list[:to_drop] del self._sm_unacked_list[:to_drop] self._sm_outbound_base = remote_ctr if acked: self._logger.debug("%d stanzas acked by remote", len(acked)) for token in acked: token._set_state(StanzaState.ACKED)
def sm_ack(self, remote_ctr)
Process the remote stanza counter `remote_ctr`. Any acked stanzas are dropped from :attr:`sm_unacked_list` and put into :attr:`StanzaState.ACKED` state and the counters are increased accordingly. If called with an erroneous remote stanza counter :class:`.errors.StreamNegotationFailure` will be raised. Attempting to call this without Stream Management enabled results in a :class:`RuntimeError`.
3.302067
2.517157
1.311824
warnings.warn( r"send_iq_and_wait_for_reply is deprecated and will be removed in" r" 1.0", DeprecationWarning, stacklevel=1, ) return (yield from self.send(iq, timeout=timeout))
def send_iq_and_wait_for_reply(self, iq, *, timeout=None)
Send an IQ stanza `iq` and wait for the response. If `timeout` is not :data:`None`, it must be the time in seconds for which to wait for a response. If the response is a ``"result"`` IQ, the value of the :attr:`~aioxmpp.IQ.payload` attribute is returned. Otherwise, the exception generated from the :attr:`~aioxmpp.IQ.error` attribute is raised. .. seealso:: :meth:`register_iq_response_future` and :meth:`send_and_wait_for_sent` for other cases raising exceptions. .. deprecated:: 0.8 This method will be removed in 1.0. Use :meth:`send` instead. .. versionchanged:: 0.8 On a timeout, :class:`TimeoutError` is now raised instead of :class:`asyncio.TimeoutError`.
3.499982
3.093415
1.13143
warnings.warn( r"send_and_wait_for_sent is deprecated and will be removed in 1.0", DeprecationWarning, stacklevel=1, ) yield from self._enqueue(stanza)
def send_and_wait_for_sent(self, stanza)
Send the given `stanza` over the given :class:`StanzaStream` `stream`. .. deprecated:: 0.8 This method will be removed in 1.0. Use :meth:`send` instead.
4.253453
3.851065
1.104487
stanza.autoset_id() self._logger.debug("sending %r and waiting for it to be sent", stanza) if not isinstance(stanza, stanza_.IQ) or stanza.type_.is_response: if cb is not None: raise ValueError( "cb not supported with non-IQ non-request stanzas" ) yield from self._enqueue(stanza) return # we use the long way with a custom listener instead of a future here # to ensure that the callback is called synchronously from within the # queue handling loop. # we need that to ensure that the strong ordering guarantees reach the # `cb` function. fut = asyncio.Future() def nested_cb(task): nonlocal fut if task.exception() is None: fut.set_result(task.result()) else: fut.set_exception(task.exception()) def handler_ok(stanza): nonlocal fut if fut.cancelled(): return if cb is not None: try: nested_fut = cb(stanza) except Exception as exc: fut.set_exception(exc) else: if nested_fut is not None: nested_fut.add_done_callback(nested_cb) return # we can’t even use StanzaErrorAwareListener because we want to # forward error stanzas to the cb too... if stanza.type_.is_error: fut.set_exception(stanza.error.to_exception()) else: fut.set_result(stanza.payload) def handler_error(exc): nonlocal fut if fut.cancelled(): return fut.set_exception(exc) listener = callbacks.OneshotTagListener( handler_ok, handler_error, ) listener_tag = (stanza.to, stanza.id_) self._iq_response_map.add_listener( listener_tag, listener, ) try: yield from self._enqueue(stanza) except Exception: listener.cancel() raise try: if not timeout: reply = yield from fut else: try: reply = yield from asyncio.wait_for( fut, timeout=timeout ) except asyncio.TimeoutError: raise TimeoutError finally: try: self._iq_response_map.remove_listener(listener_tag) except KeyError: pass return reply
def _send_immediately(self, stanza, *, timeout=None, cb=None)
Send a stanza without waiting for the stream to be ready to send stanzas. This is only useful from within :class:`aioxmpp.node.Client` before the stream is fully established.
3.830621
3.806132
1.006434
parts = [ feature.encode("utf-8")+b"\x1f" for feature in features ] parts.sort() return b"".join(parts)+b"\x1c"
def _process_features(features)
Generate the `Features String` from an iterable of features. :param features: The features to generate the features string from. :type features: :class:`~collections.abc.Iterable` of :class:`str` :return: The `Features String` :rtype: :class:`bytes` Generate the `Features String` from the given `features` as specified in :xep:`390`.
4.803125
4.819849
0.99653
parts = [ _process_identity(identity) for identity in identities ] parts.sort() return b"".join(parts)+b"\x1c"
def _process_identities(identities)
Generate the `Identities String` from an iterable of identities. :param identities: The identities to generate the features string from. :type identities: :class:`~collections.abc.Iterable` of :class:`~.disco.xso.Identity` :return: The `Identities String` :rtype: :class:`bytes` Generate the `Identities String` from the given `identities` as specified in :xep:`390`.
6.086969
5.620315
1.08303
parts = [ _process_form(form) for form in exts ] parts.sort() return b"".join(parts)+b"\x1c"
def _process_extensions(exts)
Generate the `Extensions String` from an iterable of data forms. :param exts: The data forms to generate the extensions string from. :type exts: :class:`~collections.abc.Iterable` of :class:`~.forms.xso.Data` :return: The `Extensions String` :rtype: :class:`bytes` Generate the `Extensions String` from the given `exts` as specified in :xep:`390`.
7.861825
6.660774
1.180317
def set_toplevel_object(self, instance, class_=None):
    """
    Set the toplevel object to return from :meth:`get_toplevel_object`
    when asked for `class_` to `instance`.

    If `class_` is :data:`None`, the :func:`type` of the `instance` is
    used.
    """
    key = type(instance) if class_ is None else class_
    self._toplevels[key] = instance
3.433911
4.59158
0.747871
def eval_bool(self, expr):
    """
    Evaluate the expression `expr` and return the truthness of its
    result.

    A result of an expression is said to be true if it contains at least
    one value. It has the same semantics as :func:`bool` on sequences.
    """
    values = iter(expr.eval(self))
    try:
        try:
            next(values)
        except StopIteration:
            # empty result sequence -> false
            return False
        return True
    finally:
        # generators and similar iterators need to be closed so that
        # their cleanup code runs
        close = getattr(values, "close", None)
        if close is not None:
            close()
3.087175
2.948435
1.047055
def rewind(self, new_state):
    """
    Roll the state machine back to `new_state`.

    Rewind can be used as an exceptional way to roll back the state of a
    :class:`OrderedStateMachine`. Rewinding is not the usual use case for
    an :class:`OrderedStateMachine`. Usually, if the current state `A` is
    greater than any given state `B`, it is assumed that state `B` cannot
    be reached anymore (which makes :meth:`wait_for` raise).

    It may make sense to go backwards though, and in cases where the
    ability to go backwards is sensible even if routines which previously
    attempted to wait for the state you are going backwards to failed,
    using a :class:`OrderedStateMachine` is still a good idea.

    :raises ValueError: if `new_state` is greater than the current state
        (rewinding must never move forward).
    """
    current = self._state
    if current < new_state:
        raise ValueError("cannot forward using rewind "
                         "({} > {})".format(new_state, current))
    self._state = new_state
4.628192
4.721847
0.980166
def wait_for(self, new_state):
    """
    Wait for an exact state `new_state` to be reached by the state
    machine.

    If the state is skipped, that is, if a state which is greater than
    `new_state` is written to :attr:`state`, the coroutine raises
    :class:`OrderedStateSkipped` exception as it is not possible anymore
    that it can return successfully (see :attr:`state`).
    """
    # already there: return immediately
    if self._state == new_state:
        return
    # states are strictly ordered; a state smaller than the current one
    # cannot be reached anymore
    if self._state > new_state:
        raise OrderedStateSkipped(new_state)
    # NOTE(review): the explicit loop= argument was removed from
    # asyncio.Future in Python 3.10 — confirm the supported Python
    # versions before upgrading.
    fut = asyncio.Future(loop=self.loop)
    self._exact_waiters.append((new_state, fut))
    yield from fut
4.982695
3.523314
1.414207
def wait_for_at_least(self, new_state):
    """
    Wait for a state to be entered which is greater than or equal to
    `new_state` and return.
    """
    # the condition is already satisfied: return immediately
    if not (self._state < new_state):
        return
    # NOTE(review): the explicit loop= argument was removed from
    # asyncio.Future in Python 3.10 — confirm the supported Python
    # versions before upgrading.
    fut = asyncio.Future(loop=self.loop)
    self._least_waiters.append((new_state, fut))
    yield from fut
6.006816
5.549409
1.082424
def as_payload_class(cls):
    """
    Register the given class `cls` as Publish-Subscribe payload on both
    :class:`Item` and :class:`EventItem`.

    Return the class, to allow this to be used as decorator.
    """
    for container in (Item, EventItem):
        container.register_child(
            container.registered_payload,
            cls,
        )
    return cls
7.464141
4.846298
1.540174
def get_most_available_stanza(self, peer_jid):
    """
    Obtain the stanza describing the most-available presence of the
    contact.

    :param peer_jid: Bare JID of the contact.
    :type peer_jid: :class:`aioxmpp.JID`
    :rtype: :class:`aioxmpp.Presence` or :data:`None`
    :return: The presence stanza of the most available resource or
        :data:`None` if there is no available resource.

    The "most available" resource is the one whose presence state orders
    highest according to :class:`~aioxmpp.PresenceState`. If there is no
    available resource for a given `peer_jid`, :data:`None` is returned.
    """
    ranked = sorted(
        self.get_peer_resources(peer_jid).items(),
        key=lambda item: aioxmpp.structs.PresenceState.from_stanza(item[1])
    )
    if not ranked:
        return None
    # the last element sorts highest, i.e. is most available
    _, stanza = ranked[-1]
    return stanza
4.766711
4.19465
1.136379
def get_peer_resources(self, peer_jid):
    """
    Return a dict mapping resources of the given bare `peer_jid` to the
    presence state last received for that resource.

    Unavailable presence states are not included. If the bare JID is in a
    error state (i.e. an error presence stanza has been received), the
    returned mapping is empty.
    """
    try:
        # copy so that callers cannot mutate our internal bookkeeping
        available = dict(self._presences[peer_jid])
    except KeyError:
        return {}
    # the None key carries the bare-JID/error entry, not a resource
    available.pop(None, None)
    return available
5.197275
4.250421
1.222767
def get_stanza(self, peer_jid):
    """
    Return the last presence received for the given bare or full
    `peer_jid`. If the last presence was unavailable, the return value is
    :data:`None`, as if no presence was ever received.

    If no presence was ever received for the given bare JID, :data:`None`
    is returned.
    """
    resources = self._presences.get(peer_jid.bare(), {})
    try:
        # prefer the entry for the exact resource
        return resources[peer_jid.resource]
    except KeyError:
        # fall back to the bare-JID entry (key None), if any
        return resources.get(None)
2.925603
2.877553
1.016698
def make_stanza(self):
    """
    Create and return a presence stanza with the current settings.

    :return: Presence stanza
    :rtype: :class:`aioxmpp.Presence`
    """
    result = aioxmpp.Presence()
    # stamp the configured availability/show onto the fresh stanza …
    self._state.apply_to_stanza(result)
    # … and copy the configured status texts
    result.status.update(self._status)
    return result
7.006313
5.696356
1.229964
def set_presence(self, state, status={}, priority=0):
    """
    Change the presence broadcast by the client.

    :param state: New presence state to broadcast
    :type state: :class:`aioxmpp.PresenceState`
    :param status: New status information to broadcast
    :type status: :class:`dict` or :class:`str`
    :param priority: New priority for the resource
    :type priority: :class:`int`
    :return: Stanza token of the presence stanza or :data:`None` if the
        presence is unchanged or the stream is not connected.
    :rtype: :class:`~.stream.StanzaToken`

    If the client is currently connected, the new presence is broadcast
    immediately.

    `status` must be either a string or something which can be passed to
    the :class:`dict` constructor. If it is a string, it is wrapped into
    a dict using ``{None: status}``. The mapping must map
    :class:`~.LanguageTag` objects (or :data:`None`) to strings. The
    information will be used to generate internationalised presence
    status information. If you do not need internationalisation, simply
    use the string version of the argument.
    """
    if not isinstance(priority, numbers.Integral):
        raise TypeError(
            "invalid priority: got {}, expected integer".format(
                type(priority)
            )
        )
    if not isinstance(state, aioxmpp.PresenceState):
        raise TypeError(
            "invalid state: got {}, expected aioxmpp.PresenceState".format(
                type(state),
            )
        )

    # normalise status into a language-tag mapping; a bare string maps
    # from the None (no language) tag
    status_map = {None: status} if isinstance(status, str) else dict(status)
    priority_int = int(priority)

    state_changed = self._state != state
    anything_changed = (
        state_changed or
        self._priority != priority_int or
        self._status != status_map
    )

    self._state = state
    self._status = status_map
    self._priority = priority_int

    if state_changed:
        self.on_presence_state_changed()
    if anything_changed:
        self.on_presence_changed()

    return self.resend_presence()
2.556511
2.386725
1.071138
def resend_presence(self):
    """
    Re-send the currently configured presence.

    :return: Stanza token of the presence stanza or :data:`None` if the
        stream is not established.
    :rtype: :class:`~.stream.StanzaToken`

    .. note::

        :meth:`set_presence` automatically broadcasts the new presence if
        any of the parameters changed.
    """
    if not self.client.established:
        # nothing to do while the stream is down
        return None
    return self.client.enqueue(self.make_stanza())
14.351249
12.564834
1.142176
def extract_python_dict_from_x509(x509):
    """
    Extract a python dictionary similar to the return value of
    :meth:`ssl.SSLSocket.getpeercert` from the given
    :class:`OpenSSL.crypto.X509` `x509` object.

    Note that by far not all attributes are included; only those
    required to use :func:`ssl.match_hostname` are extracted and put in
    the result.

    In the future, more attributes may be added.
    """
    # subject commonName, in the nested tuple layout getpeercert() uses
    result = {
        "subject": (
            (("commonName", x509.get_subject().commonName),),
        )
    }

    for ext_idx in range(x509.get_extension_count()):
        ext = x509.get_extension(ext_idx)
        sn = ext.get_short_name()
        # only the subjectAltName extension is relevant for hostname
        # matching
        if sn != b"subjectAltName":
            continue

        # decode the DER-encoded extension payload into a pyasn1
        # SubjectAltName structure ([0] drops the rest-of-substrate)
        data = pyasn1.codec.der.decoder.decode(
            ext.get_data(),
            asn1Spec=pyasn1_modules.rfc2459.SubjectAltName())[0]
        for name in data:
            # component position 2 of GeneralName is dNSName
            dNSName = name.getComponentByPosition(2)
            if dNSName is None:
                continue

            # newer pyasn1 versions expose isValue to distinguish unset
            # schema objects from actual values
            if hasattr(dNSName, "isValue") and not dNSName.isValue:
                continue

            result.setdefault("subjectAltName", []).append(
                ("DNS", str(dNSName))
            )

    return result
2.710394
2.750087
0.985566
def blob_to_pyasn1(blob):
    """
    Convert an ASN.1 encoded certificate (such as obtained from
    :func:`extract_blob`) to a :mod:`pyasn1` structure and return the
    result.
    """
    # decode() returns (value, remaining-substrate); only the value is
    # of interest
    decoded, _ = pyasn1.codec.der.decoder.decode(
        blob,
        asn1Spec=pyasn1_modules.rfc2459.Certificate()
    )
    return decoded
3.066635
2.939644
1.043199
def extract_pk_blob_from_pyasn1(pyasn1_struct):
    """
    Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
    structure (which must represent a certificate).
    """
    tbs_certificate = pyasn1_struct.getComponentByName("tbsCertificate")
    public_key_info = tbs_certificate.getComponentByName(
        "subjectPublicKeyInfo"
    )
    return pyasn1.codec.der.encoder.encode(public_key_info)
3.346926
3.08438
1.085121
def check_x509_hostname(x509, hostname):
    """
    Check whether the given :class:`OpenSSL.crypto.X509` certificate
    `x509` matches the given `hostname`.

    Return :data:`True` if the name matches and :data:`False` otherwise.

    This uses :func:`ssl.match_hostname` and
    :func:`extract_python_dict_from_x509`.
    """
    # NOTE(review): ssl.match_hostname is deprecated since Python 3.7
    # and removed in 3.12 — confirm the supported Python versions.
    try:
        ssl.match_hostname(
            extract_python_dict_from_x509(x509),
            hostname,
        )
    except ssl.CertificateError:
        return False
    else:
        return True
4.243701
2.652637
1.599805
def default_ssl_context():
    """
    Return a sensibly configured :class:`OpenSSL.SSL.Context` context.

    The context has SSLv2 and SSLv3 disabled, and supports TLS 1.0+
    (depending on the version of the SSL library).

    Tries to negotiate an XMPP c2s connection via ALPN (:rfc:`7301`).
    """
    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
    ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2 | OpenSSL.SSL.OP_NO_SSLv3)
    ctx.set_verify(OpenSSL.SSL.VERIFY_PEER, default_verify_callback)
    # fix: the docstring promises ALPN negotiation, but no protocol list
    # was ever offered; advertise the XMPP client-to-server protocol ID
    # from RFC 7301 (see also XEP-0368)
    ctx.set_alpn_protos([b"xmpp-client"])
    return ctx
1.900849
1.773227
1.071972
def negotiate_sasl(transport, xmlstream, sasl_providers,
                   negotiation_timeout, jid, features):
    """
    Perform SASL authentication on the given
    :class:`.protocol.XMLStream` `xmlstream`.

    `transport` must be the :class:`asyncio.Transport` over which the
    stream runs. It is used to detect whether TLS is used and may be
    required by some SASL mechanisms.

    `sasl_providers` must be an iterable of :class:`SASLProvider`
    objects. They will be tried in iteration order to authenticate
    against the server.

    If one of the `sasl_providers` fails with a
    :class:`aiosasl.AuthenticationFailure` exception, the other providers
    are still tried; only if all providers fail, the last
    :class:`aiosasl.AuthenticationFailure` exception is re-raised.

    If no mechanism was able to authenticate but not due to
    authentication failures (other failures include no matching mechanism
    on the server side), :class:`aiosasl.SASLUnavailable` is raised.

    Return the :class:`.nonza.StreamFeatures` obtained after resetting
    the stream after successful SASL authentication.

    .. deprecated:: 0.10

        The `negotiation_timeout` argument is ignored. The timeout is
        controlled using the :attr:`~.XMLStream.deadtime_hard_limit`
        timeout of the stream.
    """
    # the providers only receive the transport if it is TLS-secured; a
    # plaintext transport is signalled as None
    if not transport.get_extra_info("sslcontext"):
        transport = None

    last_auth_error = None
    for sasl_provider in sasl_providers:
        try:
            result = yield from sasl_provider.execute(
                jid, features, xmlstream, transport)
        except ValueError as err:
            raise errors.StreamNegotiationFailure(
                "invalid credentials: {}".format(err)
            ) from err
        except aiosasl.AuthenticationFailure as err:
            # remember the failure, but give the remaining providers a
            # chance before giving up
            last_auth_error = err
            continue

        if result:
            # successful SASL negotiation requires a stream reset
            features = yield from protocol.reset_stream_and_get_features(
                xmlstream
            )
            break
    else:
        # no provider succeeded
        if last_auth_error:
            raise last_auth_error
        else:
            raise errors.SASLUnavailable("No common mechanisms")

    return features
4.11834
3.329257
1.237015
def security_layer(tls_provider, sasl_providers):
    """
    .. deprecated:: 0.6

       Replaced by :class:`SecurityLayer`.

    Return a configured :class:`SecurityLayer`. `tls_provider` must be a
    :class:`STARTTLSProvider`.

    The return value can be passed to the constructor of
    :class:`~.node.Client`.

    Some very basic checking on the input is also performed.
    """
    providers = tuple(sasl_providers)
    if not providers:
        raise ValueError("At least one SASL provider must be given.")
    for provider in providers:
        provider.execute  # check that sasl_provider has execute method

    return SecurityLayer(
        tls_provider.ssl_context_factory,
        tls_provider.certificate_verifier_factory,
        tls_provider.tls_required,
        providers,
    )
3.748642
3.747392
1.000334
def tls_with_password_based_authentication(
        password_provider,
        ssl_context_factory=default_ssl_context,
        max_auth_attempts=3,
        certificate_verifier_factory=PKIXCertificateVerifier):
    """
    Produce a commonly used :class:`SecurityLayer`, which uses TLS and
    password-based SASL authentication.

    If `ssl_context_factory` is not provided, an SSL context with TLSv1+
    is used.

    `password_provider` must be a coroutine which is called with the jid
    as first and the number of attempt as second argument. It must return
    the password to use, or :data:`None` to abort.

    Return a :class:`SecurityLayer` instance.

    .. deprecated:: 0.7

        Use :func:`make` instead.
    """
    # fix: a ``tls_kwargs`` dict used to be assembled here but was never
    # passed anywhere; removed as dead code — the verifier factory is
    # handed to SecurityLayer directly.
    return SecurityLayer(
        ssl_context_factory,
        certificate_verifier_factory,
        True,  # TLS is mandatory for password-based authentication
        (
            PasswordSASLProvider(
                password_provider,
                max_auth_attempts=max_auth_attempts),
        )
    )
3.67453
3.799212
0.967182
def pin(self, hostname, x509):
    """
    Pin an :class:`OpenSSL.crypto.X509` object `x509` for use with the
    given `hostname`.

    Which information exactly is used to identify the certificate
    depends :meth:`_x509_key`.
    """
    pins = self._storage.setdefault(hostname, set())
    pins.add(self._x509_key(x509))
5.794683
3.90324
1.484583
def query(self, hostname, x509):
    """
    Return true if the given :class:`OpenSSL.crypto.X509` object `x509`
    has previously been pinned for use with the given `hostname` and
    :data:`None` otherwise.

    Returning :data:`None` allows this method to be used with
    :class:`PinningPKIXCertificateVerifier`.
    """
    pins = self._storage.get(hostname)
    if pins is None:
        # no pins at all for this hostname -> undecided
        return None
    if self._x509_key(x509) in pins:
        return True
    return None
4.754226
3.829011
1.241633
def export_to_json(self):
    """
    Return a JSON dictionary which contains all the pins stored in this
    store.
    """
    result = {}
    for hostname, pins in self._storage.items():
        # sort for a deterministic, diff-friendly serialisation
        result[hostname] = sorted(self._encode_key(key) for key in pins)
    return result
13.682096
7.723949
1.771386
def import_from_json(self, data, *, override=False):
    """
    Import a JSON dictionary which must have the same format as exported
    by :meth:`export_to_json`.

    If `override` is true, the existing data in the pin store will be
    overridden with the data from `data`. Otherwise, the `data` will be
    merged into the store.
    """
    decoded = {
        hostname: set(self._decode_key(key) for key in pins)
        for hostname, pins in data.items()
    }
    if override:
        # replace the whole store
        self._storage = decoded
        return
    # merge: union the decoded pins into any existing entries
    for hostname, pins in decoded.items():
        self._storage.setdefault(hostname, set()).update(pins)
3.132041
3.153871
0.993078
def _find_supported(self, features, mechanism_classes):
    """
    Find the first mechanism class which supports a mechanism announced
    in the given stream features.

    :param features: Current XMPP stream features
    :type features: :class:`~.nonza.StreamFeatures`
    :param mechanism_classes: SASL mechanism classes to use
    :type mechanism_classes: iterable of :class:`SASLMechanism`
        sub\\ *classes*
    :raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
        SASL support
    :return: the :class:`SASLMechanism` subclass to use and a token, or
        ``(None, None)`` if no mechanism matched
    :rtype: pair

    The token is an opaque value which must be passed to the `token`
    argument of :meth:`_execute` or
    :meth:`aiosasl.SASLMechanism.authenticate`.
    """
    try:
        mechanisms = features[SASLMechanisms]
    except KeyError:
        logger.error("No sasl mechanisms: %r", list(features))
        raise errors.SASLUnavailable(
            "Remote side does not support SASL") from None

    announced = mechanisms.get_mechanism_list()
    for candidate_class in mechanism_classes:
        token = candidate_class.any_supported(announced)
        if token is not None:
            return candidate_class, token
    return None, None
4.929173
3.694458
1.334208
def _execute(self, intf, mechanism, token):
    """
    Execute a SASL authentication process.

    :param intf: SASL interface to use
    :type intf: :class:`~.sasl.SASLXMPPInterface`
    :param mechanism: SASL mechanism to use
    :type mechanism: :class:`aiosasl.SASLMechanism`
    :param token: The opaque token argument for the mechanism
    :type token: not :data:`None`
    :raises aiosasl.AuthenticationFailure: if authentication failed due
        to bad credentials
    :raises aiosasl.SASLFailure: on other SASL error conditions (such as
        protocol violations)
    :return: true if authentication succeeded, false if the mechanism has
        to be disabled
    :rtype: :class:`bool`

    The more specific exceptions are generated by inspecting the
    :attr:`aiosasl.SASLFailure.opaque_error` on exceptions raised from
    the :class:`~.sasl.SASLXMPPInterface`. Other
    :class:`aiosasl.SASLFailure` exceptions are re-raised without
    modification.
    """
    sm = aiosasl.SASLStateMachine(intf)
    try:
        yield from mechanism.authenticate(sm, token)
        return True
    except aiosasl.SASLFailure as err:
        # bad credentials: surface as a dedicated exception type
        if err.opaque_error in self.AUTHENTICATION_FAILURES:
            raise aiosasl.AuthenticationFailure(
                opaque_error=err.opaque_error,
                text=err.text)
        # mechanism rejected by the server: caller should disable it and
        # try the next one
        elif err.opaque_error in self.MECHANISM_REJECTED_FAILURES:
            return False
        raise
4.940531
3.37153
1.465368
def connect(self, loop, metadata, domain: str, host, port,
            negotiation_timeout, base_logger=None):
    """
    .. seealso::

        :meth:`BaseConnector.connect`
            For general information on the :meth:`connect` method.

    Connect to `host` at TCP port number `port`. The
    :class:`aioxmpp.security_layer.SecurityLayer` object `metadata` is
    used to determine the parameters of the TLS connection.

    First, a normal TCP connection is opened and the stream header is
    sent. The stream features are waited for, and then STARTTLS is
    negotiated if possible.

    :attr:`~.security_layer.SecurityLayer.tls_required` is honoured: if
    it is true and TLS negotiation fails, :class:`~.errors.TLSUnavailable`
    is raised. TLS negotiation is always attempted if
    :attr:`~.security_layer.SecurityLayer.tls_required` is true, even if
    the server does not advertise a STARTTLS stream feature.

    :attr:`~.security_layer.SecurityLayer.ssl_context_factory` and
    :attr:`~.security_layer.SecurityLayer.certificate_verifier_factory`
    are used to configure the TLS connection.

    .. versionchanged:: 0.10

        The `negotiation_timeout` is set as
        :attr:`~.XMLStream.deadtime_hard_limit` on the returned XML
        stream.
    """
    # NOTE(review): asyncio.Future(loop=...) was removed in Python 3.10
    # — confirm the supported Python versions before upgrading.
    features_future = asyncio.Future(loop=loop)

    stream = protocol.XMLStream(
        to=domain,
        features_future=features_future,
        base_logger=base_logger,
    )

    if base_logger is not None:
        logger = base_logger.getChild(type(self).__name__)
    else:
        logger = logging.getLogger(".".join([
            __name__,
            type(self).__qualname__,
        ]))

    try:
        transport, _ = yield from ssl_transport.create_starttls_connection(
            loop,
            lambda: stream,
            host=host,
            port=port,
            peer_hostname=host,
            server_hostname=to_ascii(domain),
            use_starttls=True,
        )
    except:  # NOQA
        # ensure the half-constructed stream does not linger
        stream.abort()
        raise

    stream.deadtime_hard_limit = timedelta(seconds=negotiation_timeout)

    features = yield from features_future

    try:
        features[nonza.StartTLSFeature]
    except KeyError:
        # STARTTLS is not announced; only give up if TLS is optional
        if not metadata.tls_required:
            return transport, stream, (yield from features_future)
        logger.debug(
            "attempting STARTTLS despite not announced since it is"
            " required")

    try:
        response = yield from protocol.send_and_wait_for(
            stream,
            [
                nonza.StartTLS(),
            ],
            [
                nonza.StartTLSFailure,
                nonza.StartTLSProceed,
            ]
        )
    except errors.StreamError:
        raise errors.TLSUnavailable(
            "STARTTLS not supported by server, but required by client"
        )

    if not isinstance(response, nonza.StartTLSProceed):
        # the server answered with StartTLSFailure
        if metadata.tls_required:
            message = (
                "server failed to STARTTLS"
            )
            protocol.send_stream_error_and_close(
                stream,
                condition=errors.StreamErrorCondition.POLICY_VIOLATION,
                text=message,
            )
            raise errors.TLSUnavailable(message)
        # TLS optional and refused: carry on in plaintext
        return transport, stream, (yield from features_future)

    # server agreed to STARTTLS: verify the certificate and upgrade the
    # transport
    verifier = metadata.certificate_verifier_factory()
    yield from verifier.pre_handshake(
        domain,
        host,
        port,
        metadata,
    )

    ssl_context = metadata.ssl_context_factory()
    verifier.setup_context(ssl_context, transport)

    yield from stream.starttls(
        ssl_context=ssl_context,
        post_handshake_callback=verifier.post_handshake,
    )

    # TLS negotiation mandates a stream reset; fetch the fresh features
    features_future = yield from protocol.reset_stream_and_get_features(
        stream,
        timeout=negotiation_timeout,
    )

    return transport, stream, features_future
4.0748
3.529445
1.154516
def connect(self, loop, metadata, domain, host, port,
            negotiation_timeout, base_logger=None):
    """
    .. seealso::

        :meth:`BaseConnector.connect`
            For general information on the :meth:`connect` method.

    Connect to `host` at TCP port number `port`. The
    :class:`aioxmpp.security_layer.SecurityLayer` object `metadata` is
    used to determine the parameters of the TLS connection.

    The connector connects to the server by directly establishing TLS;
    no XML stream is started before TLS negotiation, in accordance to
    :xep:`368` and how legacy SSL was handled in the past.

    :attr:`~.security_layer.SecurityLayer.ssl_context_factory` and
    :attr:`~.security_layer.SecurityLayer.certificate_verifier_factory`
    are used to configure the TLS connection.

    .. versionchanged:: 0.10

        The `negotiation_timeout` is set as
        :attr:`~.XMLStream.deadtime_hard_limit` on the returned XML
        stream.
    """
    # NOTE(review): asyncio.Future(loop=...) was removed in Python 3.10
    # — confirm the supported Python versions before upgrading.
    features_future = asyncio.Future(loop=loop)

    stream = protocol.XMLStream(
        to=domain,
        features_future=features_future,
        base_logger=base_logger,
    )

    if base_logger is not None:
        logger = base_logger.getChild(type(self).__name__)
    else:
        logger = logging.getLogger(".".join([
            __name__,
            type(self).__qualname__,
        ]))

    # certificate verification must be prepared before the handshake,
    # since TLS is negotiated immediately on connect
    verifier = metadata.certificate_verifier_factory()
    yield from verifier.pre_handshake(
        domain,
        host,
        port,
        metadata,
    )

    context_factory = self._context_factory_factory(logger, metadata,
                                                    verifier)

    try:
        transport, _ = yield from ssl_transport.create_starttls_connection(
            loop,
            lambda: stream,
            host=host,
            port=port,
            peer_hostname=host,
            server_hostname=to_ascii(domain),
            post_handshake_callback=verifier.post_handshake,
            ssl_context_factory=context_factory,
            # TLS-first: do not wait for a STARTTLS negotiation
            use_starttls=False,
        )
    except:  # NOQA
        # ensure the half-constructed stream does not linger
        stream.abort()
        raise

    stream.deadtime_hard_limit = timedelta(seconds=negotiation_timeout)

    return transport, stream, (yield from features_future)
4.406449
3.848299
1.145038
def normalize_tag(tag):
    """
    Normalize an XML element tree `tag` into the tuple format.

    The following input formats are accepted:

    * ElementTree namespaced string, e.g. ``{uri:bar}foo``
    * Unnamespaced tags, e.g. ``foo``
    * Two-tuples consisting of `namespace_uri` and `localpart`;
      `namespace_uri` may be :data:`None` if the tag is supposed to be
      namespaceless. Otherwise it must be, like `localpart`, a
      :class:`str`.

    :raises ValueError: if a string tag is malformed, a sequence tag does
        not have exactly two elements, or the localname is :data:`None`.
    :raises TypeError: if a sequence tag contains something which is
        neither :class:`str` nor :data:`None`.
    :return: A two-tuple consisting of ``(namespace_uri, localpart)``.
    """
    if isinstance(tag, str):
        # ElementTree format: "{uri}local" or a bare "local"
        namespace_uri, sep, localname = tag.partition("}")
        if sep:
            if not namespace_uri.startswith("{"):
                raise ValueError("not a valid etree-format tag")
            namespace_uri = namespace_uri[1:]
        else:
            localname = namespace_uri
            namespace_uri = None
        return (namespace_uri, localname)
    elif len(tag) != 2:
        raise ValueError("not a valid tuple-format tag")
    else:
        if any(part is not None and not isinstance(part, str)
               for part in tag):
            raise TypeError("tuple-format tags must only contain str "
                            "and None")
        if tag[1] is None:
            raise ValueError("tuple-format localname must not be None")
        # fix: always return an actual tuple; previously a 2-element
        # list (or other sequence) was returned unchanged, violating the
        # documented 2-tuple return format
        return tuple(tag)
3.073649
3.027082
1.015383
def send_message(self, body):
    """
    Send a message to the conversation.

    :param body: The message to send.
    :type body: :class:`aioxmpp.Message`
    :return: The stanza token obtained from sending.
    :rtype: :class:`~aioxmpp.stream.StanzaToken`

    This default implementation delegates to
    :meth:`send_message_tracked` and immediately cancels the tracking
    object, returning only the stanza token.

    There is no need to provide proper address attributes on the
    message; implementations will override those with values appropriate
    for the conversation.

    Subclasses which do not provide tracked message sending **must**
    override this method to provide untracked message sending.

    .. seealso::

        The corresponding feature is
        :attr:`.ConversationFeature.SEND_MESSAGE`. See :attr:`features`
        for details.
    """
    token, tracking = self.send_message_tracked(body)
    # tracking is not wanted here; discard it right away
    tracking.cancel()
    return token
19.872051
9.48411
2.0953
def invite(self, address, text=None, *,
           mode=InviteMode.DIRECT, allow_upgrade=False):
    """
    Invite another entity to the conversation.

    :param address: The address of the entity to invite.
    :type address: :class:`aioxmpp.JID`
    :param text: A reason/accompanying text for the invitation.
    :param mode: The invitation mode to use.
    :type mode: :class:`~.im.InviteMode`
    :param allow_upgrade: Whether to allow creating a new conversation
        to satisfy the invitation.
    :type allow_upgrade: :class:`bool`
    :raises NotImplementedError: if the requested `mode` is not
        supported
    :raises ValueError: if `allow_upgrade` is false, but a new
        conversation is required.
    :return: The stanza token for the invitation and the possibly new
        conversation object
    :rtype: tuple of :class:`~.StanzaToken` and
        :class:`~.AbstractConversation`

    .. seealso::

        The corresponding feature for this method is
        :attr:`.ConversationFeature.INVITE`. See :attr:`features` for
        details on the semantics of features.
    """
    # invitations are an optional conversation feature; this base
    # implementation unconditionally signals lack of support
    raise self._not_implemented_error("inviting entities")
36.90168
60.720695
0.607728
async def stdout_writer():
    """
    Return an object suitable for writing to stdout asynchronously.

    This is a bit complex, as stdout can be a pipe or a file. If it is a
    file, we cannot use
    :meth:`asyncio.BaseEventLoop.connect_write_pipe`.
    """
    if sys.stdout.seekable():
        # it’s a file: just use the raw buffered file object directly,
        # connect_write_pipe does not work on regular files
        return sys.stdout.buffer.raw

    if os.isatty(sys.stdin.fileno()):
        # it’s a tty, use fd 0
        # NOTE(review): fd 0 (stdin) is used for *writing* here; on a
        # tty, fds 0 and 1 usually refer to the same terminal, but
        # confirm this is intentional and not a typo for
        # sys.stdout.fileno().
        fd_to_use = 0
    else:
        fd_to_use = 1

    # wrap the fd in a flow-controlled write transport/protocol pair;
    # `loop` is expected to be the module-level event loop
    twrite, pwrite = await loop.connect_write_pipe(
        asyncio.streams.FlowControlMixin,
        os.fdopen(fd_to_use, "wb"),
    )
    swrite = asyncio.StreamWriter(
        twrite,
        pwrite,
        None,
        loop,
    )

    return swrite
3.751824
3.506458
1.069975
def hash_from_algo(algo):
    """
    Return a :mod:`hashlib` hash given the :xep:`300` `algo`.

    :param algo: The algorithm identifier as defined in :xep:`300`.
    :type algo: :class:`str`
    :raises NotImplementedError: if the hash algorithm is not supported
        by :mod:`hashlib`.
    :raises ValueError: if the hash algorithm MUST NOT be supported.
    :return: A hash object from :mod:`hashlib` or compatible.
    """
    try:
        enabled, (fun_name, fun_args, fun_kwargs) = _HASH_ALGO_MAP[algo]
    except KeyError:
        raise NotImplementedError(
            "hash algorithm {!r} unknown".format(algo)
        ) from None

    # some algorithms are known but forbidden by the XEP
    if not enabled:
        raise ValueError(
            "support of {} in XMPP is forbidden".format(algo)
        )

    try:
        constructor = getattr(hashlib, fun_name)
    except AttributeError as exc:
        raise NotImplementedError(
            "{} not supported by hashlib".format(algo)
        ) from exc

    return constructor(*fun_args, **fun_kwargs)
3.681801
3.316121
1.110273
def algo_of_hash(h):
    """
    Return a :xep:`300` `algo` from a given :mod:`hashlib` hash.

    :param h: Hash object from :mod:`hashlib`.
    :raises ValueError: if `h` does not have a defined `algo` value.
    :raises ValueError: if the hash function MUST NOT be supported.
    :return: The `algo` value for the given hash.
    :rtype: :class:`str`

    .. warning::

        Use with caution for :func:`hashlib.blake2b` hashes.
        :func:`algo_of_hash` cannot safely determine whether blake2b was
        initialised with a salt, personality, key or other non-default
        :xep:`300` mode. In such a case, the return value will be the
        matching ``blake2b-*`` `algo`, but the digest will not be
        compatible with the results of other implementations.
    """
    entry = _HASH_ALGO_REVERSE_MAP.get(h.name)
    if entry is not None:
        enabled, algo = entry
        if not enabled:
            raise ValueError("support of {} in XMPP is forbidden".format(
                algo
            ))
        return algo

    # blake2b digests of arbitrary size share the single hashlib name;
    # derive the algo from the digest size in bits
    if h.name == "blake2b":
        return "blake2b-{}".format(h.digest_size * 8)

    raise ValueError(
        "unknown hash implementation: {!r}".format(h)
    )
4.719816
3.95676
1.192849
def select_common_hashes(self, other_entity):
    """
    Return the set of hash algorithms supported by us and `other_entity`.

    The algorithms are represented by their :xep:`300` URNs
    (`urn:xmpp:hash-function-text-names:...`).

    :param other_entity: the address of another entity
    :type other_entity: :class:`aioxmpp.JID`
    :returns: the identifiers of the hash algorithms supported by both
        us and the other entity
    :rtype: :class:`set`
    :raises RuntimeError: if the other entity neither supports the
        :xep:`300` feature nor publishes hash function URNs we support.

    Note: This assumes the protocol is supported if valid hash function
    features are detected, even if `urn:xmpp:hashes:2` is not listed as
    a feature.
    """
    disco_info = yield from self._disco_client.query_info(other_entity)
    # set intersection of the peer's features with the algorithms we
    # support
    intersection = disco_info.features & SUPPORTED_HASH_FEATURES
    # only treat this as an error if the peer neither shares an
    # algorithm with us nor announces the hashes feature at all
    if (not intersection and
            namespaces.xep0300_hashes2 not in disco_info.features):
        raise RuntimeError(
            "Remote does not support the urn:xmpp:hashes:2 feature.")
    return intersection
11.555995
5.808732
1.989418
def first_signal(*signals):
    """
    Connect to multiple signals and wait for the first to emit.

    :param signals: Signals to connect to.
    :type signals: :class:`AdHocSignal`
    :return: An awaitable for the first signal to emit.

    The awaitable returns the first argument passed to the signal. If
    the first argument is an exception, the exception is re-raised from
    the awaitable.

    .. warning::

        Only works with signals which emit with zero or one argument;
        others are silently ignored (a side-effect of
        :meth:`AdHocSignal.AUTO_FUTURE`).

    .. note::

        Does not work with coroutine signals
        (:class:`SyncAdHocSignal`).
    """
    result = asyncio.Future()
    # AUTO_FUTURE completes the future from the signal's first argument
    for sig in signals:
        sig.connect(result, sig.AUTO_FUTURE)
    return result
9.539585
5.834167
1.635124
def connect(self, f, mode=None):
    """Connect object `f` to the signal, wrapped by `mode`.

    :param f: The (usually callable) object to connect.
    :param mode: Decorator applied to `f`; defaults to :attr:`STRONG`.
    :return: An opaque token usable with :meth:`disconnect`.
    """
    chosen_mode = mode or self.STRONG
    self.logger.debug("connecting %r with mode %r", f, chosen_mode)
    wrapped = chosen_mode(f)
    return self._connect(wrapped)
7.965405
6.497786
1.225865
def fire(self, *args, **kwargs):
    """Emit the signal, calling all connected wrappers in order.

    A wrapper that raises is logged and removed; a wrapper returning a
    false value is removed as well. Exceptions never propagate to the
    caller of the signal.
    """
    snapshot = list(self._connections.items())
    for token, wrapper in snapshot:
        try:
            keep = wrapper(args, kwargs)
        except Exception:
            self.logger.exception("listener attached to signal raised")
            keep = False
        if not keep:
            del self._connections[token]
7.380926
6.511399
1.133539
def future(self):
    """Return a fresh :class:`asyncio.Future` connected to this signal
    via :attr:`AUTO_FUTURE`.

    To detach the future from the signal, simply cancel it.
    """
    result = asyncio.Future()
    self.connect(result, self.AUTO_FUTURE)
    return result
11.344247
5.556413
2.041649
def connect(self, coro):
    """Connect the coroutine `coro` to the signal.

    The coroutine must return a true value to stay connected.

    :return: A token usable with :meth:`disconnect`.
    """
    self.logger.debug("connecting %r", coro)
    token = self._connect(coro)
    return token
5.575232
7.138677
0.780989
def fire(self, *args, **kwargs):
    """Emit the signal, awaiting every connected coroutine in
    registration order.

    A coroutine returning a false value is disconnected. This is itself
    a coroutine.
    """
    for key, listener in list(self._connections.items()):
        keep = yield from listener(*args, **kwargs)
        if not keep:
            del self._connections[key]
6.038152
5.689381
1.061302
def register(self, func, order):
    """Add `func` to the filter chain at sort position `order`.

    Functions with equal `order` keep insertion order (the sort is
    stable and new entries are appended before sorting).

    :return: Token usable with :meth:`unregister`.
    """
    handle = self.Token()
    entry = (order, handle, func)
    self._filter_order.append(entry)
    self._filter_order.sort(key=lambda item: item[0])
    return handle
4.824193
5.436595
0.887356
def filter(self, obj, *args, **kwargs):
    """Pass `obj` through the filter chain.

    Each registered function receives the previous result plus `args`
    and `kwargs`. If any function returns :data:`None`, filtering stops
    and :data:`None` is returned.
    """
    current = obj
    for _, _, func in self._filter_order:
        current = func(current, *args, **kwargs)
        if current is None:
            return None
    return current
4.122118
4.489673
0.918133
def unregister(self, token_to_remove):
    """Remove the filter function registered under `token_to_remove`.

    :raises ValueError: if no filter with that token is registered.
    """
    for index, (_, candidate, _) in enumerate(self._filter_order):
        if candidate == token_to_remove:
            del self._filter_order[index]
            return
    raise ValueError("unregistered token: {!r}".format(
        token_to_remove))
3.680784
3.313857
1.110725
def context_register(self, func, *args):
    """Context manager which temporarily registers the filter `func`.

    Extra positional `args` (usually the `order`) are forwarded to
    :meth:`register`; the filter is unregistered on exit.

    NOTE(review): presumably wrapped with ``@contextlib.contextmanager``
    at the definition site (decorator not visible here).
    """
    handle = self.register(func, *args)
    try:
        yield
    finally:
        self.unregister(handle)
4.581184
6.205893
0.738199
def set_timeout(self, timeout):
    """Schedule :meth:`close` to run after `timeout`.

    :param timeout: Delay in seconds or as :class:`datetime.timedelta`.

    The timeout starts immediately and cannot be cancelled.
    """
    if isinstance(timeout, timedelta):
        seconds = timeout.total_seconds()
    else:
        seconds = timeout
    loop = asyncio.get_event_loop()
    loop.call_later(seconds, self.close)
3.049834
3.908183
0.780371
def _set_state(self, new_state, response=None):
    """Set the tracker state and emit :meth:`on_state_changed`.

    :param new_state: The new state of the tracker.
    :type new_state: :class:`~.MessageState` member
    :param response: A stanza related to the new state.
    :type response: :class:`~.StanzaBase` or :data:`None`
    :raise ValueError: if a forbidden state transition is attempted.
    :raise RuntimeError: if the tracker is closed.

    :attr:`response` is overwritten unconditionally, even with
    :data:`None`. The closed check happens before transition validation.

    NOTE(review): the class docs claim ERROR -> any state is forbidden,
    but the condition below only rejects ERROR -> DELIVERED_TO_SERVER
    and ERROR -> ABORTED — confirm which is intended.
    """
    # closed-ness is checked first, regardless of transition validity
    if self._closed:
        raise RuntimeError("message tracker is closed")

    # reject some transitions as documented
    if (self._state == MessageState.ABORTED or
            new_state == MessageState.IN_TRANSIT or
            (self._state == MessageState.ERROR and
             new_state == MessageState.DELIVERED_TO_SERVER) or
            (self._state == MessageState.ERROR and
             new_state == MessageState.ABORTED) or
            (self._state == MessageState.DELIVERED_TO_RECIPIENT and
             new_state == MessageState.DELIVERED_TO_SERVER) or
            (self._state == MessageState.SEEN_BY_RECIPIENT and
             new_state == MessageState.DELIVERED_TO_SERVER) or
            (self._state == MessageState.SEEN_BY_RECIPIENT and
             new_state == MessageState.DELIVERED_TO_RECIPIENT)):
        raise ValueError(
            "message tracker transition from {} to {} not allowed".format(
                self._state,
                new_state
            )
        )

    self._state = new_state
    self._response = response
    self.on_state_changed(self._state, self._response)
2.3595
1.785826
1.321237
def send_tracked(self, stanza, tracker):
    """Enqueue `stanza` for sending and attach `tracker` to its token.

    :param stanza: Message stanza to send.
    :param tracker: Message tracker to use (may be :data:`None`, in
        which case :meth:`attach_tracker` creates one).
    :return: The stanza token from the stream.
    """
    stanza_token = self.client.enqueue(stanza)
    self.attach_tracker(stanza, tracker, stanza_token)
    return stanza_token
8.978313
11.570053
0.775996
def attach_tracker(self, stanza, tracker=None, token=None):
    """Configure tracking for `stanza` without sending it.

    :param stanza: Message stanza to track.
    :param tracker: Existing tracker, or :data:`None` to create one.
    :param token: Optional stanza token; its completion updates the
        tracker state.
    :return: The message tracker.
    """
    if tracker is None:
        tracker = MessageTracker()
    stanza.autoset_id()
    key = stanza.to.bare(), stanza.id_
    self._trackers[key] = tracker
    # drop the map entry once the tracker is closed
    on_closed_cb = functools.partial(self._tracker_closed, key)
    tracker.on_closed.connect(on_closed_cb)
    if token is not None:
        sent_cb = functools.partial(
            self._stanza_sent,
            tracker,
            token,
        )
        token.future.add_done_callback(sent_cb)
    return tracker
4.477367
4.082073
1.096837
def start(self):
    """(Re)start the pinger coroutine on the configured loop.

    Behaves as if :meth:`stop` was called immediately before.
    """
    self.stop()
    pinger_coro = self._pinger()
    self._task = asyncio.ensure_future(pinger_coro, loop=self._loop)
5.813272
4.736115
1.227435
def _interpret_result(self, task):
    """Interpret the finished ping `task`.

    No exception, ``service-unavailable`` or ``feature-not-implemented``
    count as positive (fresh); ``item-not-found`` is inconclusive; any
    other :class:`XMPPError` is negative (exited). Non-XMPP exceptions
    (e.g. timeouts) are inconclusive and trigger nothing.
    """
    exc = task.exception()
    if exc is None:
        self._on_fresh()
        return
    if isinstance(exc, aioxmpp.errors.XMPPError):
        positive_conditions = (
            aioxmpp.errors.ErrorCondition.SERVICE_UNAVAILABLE,
            aioxmpp.errors.ErrorCondition.FEATURE_NOT_IMPLEMENTED,
        )
        if exc.condition in positive_conditions:
            self._on_fresh()
            return
        if exc.condition == aioxmpp.errors.ErrorCondition.ITEM_NOT_FOUND:
            return
        self._on_exited()
3.227417
2.665186
1.210954
def reset(self):
    """Reset the monitor.

    Resets the aliveness timeouts, stops the pinger and clears the
    stale state (`on_fresh` fires if the monitor was stale).
    """
    # treat the reset as evidence of liveness
    self._monitor.notify_received()
    self._pinger.stop()
    self._mark_fresh()
25.194536
13.764102
1.830453
def compose_receipt(message):
    """Compose a :xep:`184` delivery receipt for `message`.

    :param message: The message to acknowledge.
    :raises ValueError: for error messages, receipts, or id-less
        messages.
    :return: A reply message carrying the receipt, addressed to the
        sender's bare JID.
    """
    # guards, in documented precedence order
    if message.type_ == aioxmpp.MessageType.ERROR:
        raise ValueError("receipts cannot be generated for error messages")
    if message.xep0184_received:
        raise ValueError("receipts cannot be generated for receipts")
    if message.id_ is None:
        raise ValueError("receipts cannot be generated for id-less messages")

    receipt = message.make_reply()
    receipt.to = receipt.to.bare()
    receipt.xep0184_received = xso.Received(message.id_)
    return receipt
5.291995
3.874257
1.365938
def attach_tracker(self, stanza, tracker=None):
    """Attach (or create) a tracker driven by :xep:`184` receipts.

    :param stanza: Message stanza to track.
    :param tracker: Existing tracker, or :data:`None` to create one.
    :raises ValueError: for error messages and for receipts themselves.
    :return: The message tracker.

    The stanza gets a receipt request attached; a matching receipt later
    advances the tracker state.
    """
    if stanza.xep0184_received is not None:
        raise ValueError(
            "requesting delivery receipts for delivery receipts is not "
            "allowed"
        )
    if stanza.type_ == aioxmpp.MessageType.ERROR:
        raise ValueError(
            "requesting delivery receipts for errors is not supported"
        )
    if tracker is None:
        tracker = aioxmpp.tracking.MessageTracker()

    stanza.xep0184_request_receipt = True
    stanza.autoset_id()
    map_key = stanza.to, stanza.id_
    self._bare_jid_maps[map_key] = tracker
    return tracker
5.618946
4.635147
1.212248
def get_private_xml(self, query_xso):
    """Retrieve the private XML data matching `query_xso`.

    :param query_xso: Empty query XSO of the wanted namespace/type,
        registered as private XML Query payload.
    :returns: the stored private XML data.
    """
    request = aioxmpp.IQ(
        type_=aioxmpp.IQType.GET,
        payload=private_xml_xso.Query(query_xso)
    )
    response = yield from self.client.send(request)
    return response
7.29904
5.049274
1.445562
def set_private_xml(self, xso):
    """Store `xso` as the private XML data for its namespace.

    :param xso: the XSO whose serialization is sent as private XML data.
    """
    request = aioxmpp.IQ(
        type_=aioxmpp.IQType.SET,
        payload=private_xml_xso.Query(xso)
    )
    yield from self.client.send(request)
10.59125
9.309352
1.1377
def gather_reraise_multi(*fut_or_coros, message="gather_reraise_multi"):
    """Wait for all given futures/coroutines to finish or fail.

    :param fut_or_coros: futures or coroutines to wait for
    :param message: message for the raised :class:`GatherError`
    :returns: list of results, in argument order
    :raises aioxmpp.errors.GatherError: bundling all raised exceptions
        if any of the awaitables failed.

    Unlike :func:`asyncio.gather`, failures are never returned in-band.
    """
    tasks = [asyncio.ensure_future(item) for item in fut_or_coros]
    if not tasks:
        return []
    yield from asyncio.wait(tasks)
    failures = [task.exception() for task in tasks
                if task.exception() is not None]
    if failures:
        raise aioxmpp.errors.GatherError(message, failures)
    return [task.result() for task in tasks]
2.43234
2.434556
0.99909
def to_nmtoken(rand_token):
    """Convert a raw token given as :class:`bytes` or :class:`int` to a
    valid XML NMTOKEN.

    The encoding is injective: integers are prefixed with ``:`` (not a
    base64 character) and empty bytes become ``.``, so distinct inputs
    of one kind never collide.

    :param rand_token: the raw token
    :type rand_token: :class:`bytes` or :class:`int`
    :raises TypeError: if `rand_token` is neither bytes nor int.
    :rtype: :class:`str`
    """
    if isinstance(rand_token, int):
        # little-endian, minimal width; 0 encodes as zero bytes
        raw = rand_token.to_bytes(
            (rand_token.bit_length() + 7) // 8,
            "little"
        )
        encoded = base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")
        return ":" + encoded

    if isinstance(rand_token, bytes):
        encoded = base64.urlsafe_b64encode(
            rand_token
        ).rstrip(b"=").decode("ascii")
        # an NMTOKEN must be non-empty
        return encoded or "."

    # fixed typo in the error message ("musst" -> "must")
    raise TypeError("rand_token must be a bytes or int instance")
2.396914
2.31334
1.036127
def localize(self, formatter, translator):
    """Return the localized `localizable_string`.

    The string is formatted with `formatter` and `translator` plus the
    `args`/`kwargs` given to the constructor.
    """
    localized = self.localizable_string.localize(
        formatter,
        translator,
        *self.args,
        **self.kwargs
    )
    return localized
4.90948
3.033066
1.618653
def _get_bookmarks(self):
    """Fetch the bookmark list from the server's private XML storage.

    :returns: a list of bookmarks
    """
    storage = yield from self._private_xml.get_private_xml(
        bookmark_xso.Storage()
    )
    return storage.registered_payload.bookmarks
43.368954
43.282703
1.001993
def _set_bookmarks(self, bookmarks):
    """Replace the server-side bookmark list with `bookmarks`."""
    payload = bookmark_xso.Storage()
    payload.bookmarks[:] = bookmarks
    yield from self._private_xml.set_private_xml(payload)
25.435946
21.795998
1.167001
def _diff_emit_update(self, new_bookmarks):
    """Diff the cached bookmarks against `new_bookmarks`, emit the
    matching added/removed/changed signals and replace the cache.

    Bookmarks are first grouped by ``(type, primary)``; only entries in
    the same group may be reported as "changed" into one another.
    Ambiguous groups are recursively disambiguated by the entries'
    ``secondary`` key tuple.
    """
    self.logger.debug("diffing %s, %s", self._bookmark_cache,
                      new_bookmarks)

    def subdivide(level, old, new):
        # Recursively partition `old`/`new` by secondary[level];
        # returns the (old, new) entries that could not be matched up.
        if len(old) == len(new) == 1:
            old_entry = old.pop()
            new_entry = new.pop()
            if old_entry == new_entry:
                pass
            else:
                self.on_bookmark_changed(old_entry, new_entry)
            return ([], [])
        elif len(old) == 0:
            return ([], new)
        elif len(new) == 0:
            return (old, [])
        else:
            try:
                groups = {}
                for entry in old:
                    group = groups.setdefault(
                        entry.secondary[level],
                        ([], [])
                    )
                    group[0].append(entry)
                for entry in new:
                    group = groups.setdefault(
                        entry.secondary[level],
                        ([], [])
                    )
                    group[1].append(entry)
            except IndexError:
                # the classification is exhausted, this means
                # all entries in this bin are equal by the
                # definition of bookmark equivalence!
                common = min(len(old), len(new))
                assert old[:common] == new[:common]
                return (old[common:], new[common:])

            old_unhandled, new_unhandled = [], []
            for old, new in groups.values():
                unhandled = subdivide(level+1, old, new)
                old_unhandled += unhandled[0]
                new_unhandled += unhandled[1]

            # match up unhandleds as changes as early as possible
            i = -1
            for i, (old_entry, new_entry) in enumerate(
                    zip(old_unhandled, new_unhandled)):
                self.logger.debug("changed %s -> %s",
                                  old_entry, new_entry)
                self.on_bookmark_changed(old_entry, new_entry)
            i += 1
            return old_unhandled[i:], new_unhandled[i:]

    # group the bookmarks into groups whose elements may transform
    # among one another by on_bookmark_changed events. This information
    # is given by the type of the bookmark and the .primary property
    changable_groups = {}

    for item in self._bookmark_cache:
        group = changable_groups.setdefault(
            (type(item), item.primary),
            ([], [])
        )
        group[0].append(item)

    for item in new_bookmarks:
        group = changable_groups.setdefault(
            (type(item), item.primary),
            ([], [])
        )
        group[1].append(item)

    for old, new in changable_groups.values():
        # the first branches are fast paths which should catch
        # most cases – especially all cases where each bare jid of
        # a conference bookmark or each url of an url bookmark is
        # only used in one bookmark
        if len(old) == len(new) == 1:
            old_entry = old.pop()
            new_entry = new.pop()
            if old_entry == new_entry:
                # the bookmark is unchanged, do not emit an event
                pass
            else:
                self.logger.debug("changed %s -> %s",
                                  old_entry, new_entry)
                self.on_bookmark_changed(old_entry, new_entry)
        elif len(new) == 0:
            for removed in old:
                self.logger.debug("removed %s", removed)
                self.on_bookmark_removed(removed)
        elif len(old) == 0:
            for added in new:
                self.logger.debug("added %s", added)
                self.on_bookmark_added(added)
        else:
            old, new = subdivide(0, old, new)
            # after subdivision at most one side has leftovers
            assert len(old) == 0 or len(new) == 0
            for removed in old:
                self.logger.debug("removed %s", removed)
                self.on_bookmark_removed(removed)
            for added in new:
                self.logger.debug("added %s", added)
                self.on_bookmark_added(added)

    self._bookmark_cache = new_bookmarks
2.79608
2.761167
1.012644
def get_bookmarks(self):
    """Fetch the bookmarks from the server, emitting change signals.

    :returns: a list of bookmarks
    """
    with (yield from self._lock):
        result = yield from self._get_bookmarks()
        self._diff_emit_update(result)
        return result
9.319599
12.63365
0.737681
def set_bookmarks(self, bookmarks):
    """Overwrite the server-side bookmark list, emitting change signals.

    .. note:: Prefer :meth:`add_bookmark` / :meth:`discard_bookmark` /
       :meth:`update_bookmark`; wholesale replacement races with other
       clients and may lose data.
    """
    with (yield from self._lock):
        yield from self._set_bookmarks(bookmarks)
        self._diff_emit_update(bookmarks)
11.640615
11.982989
0.971428
def add_bookmark(self, new_bookmark, *, max_retries=3):
    """Add a bookmark and verify it was stored, retrying on failure.

    :param new_bookmark: the bookmark to add
    :type new_bookmark: an instance of :class:`~bookmark_xso.Bookmark`
    :param max_retries: retries if the bookmark did not appear
    :type max_retries: :class:`int`
    :raises RuntimeError: if the bookmark is still missing after
        `max_retries` retries.

    An already existing equal bookmark is not added twice.
    """
    with (yield from self._lock):
        bookmarks = yield from self._get_bookmarks()
        try:
            # first attempt: append unless already present
            modified_bookmarks = list(bookmarks)
            if new_bookmark not in bookmarks:
                modified_bookmarks.append(new_bookmark)
            yield from self._set_bookmarks(modified_bookmarks)

            # verify, re-adding up to max_retries times
            retries = 0
            bookmarks = yield from self._get_bookmarks()
            while retries < max_retries:
                if new_bookmark in bookmarks:
                    break
                modified_bookmarks = list(bookmarks)
                modified_bookmarks.append(new_bookmark)
                yield from self._set_bookmarks(modified_bookmarks)
                bookmarks = yield from self._get_bookmarks()
                retries += 1
            if new_bookmark not in bookmarks:
                raise RuntimeError("Could not add bookmark")
        finally:
            # reconcile cache and emit signals for whatever we last saw
            self._diff_emit_update(bookmarks)
2.260977
2.39847
0.942675
def discard_bookmark(self, bookmark_to_remove, *, max_retries=3):
    """Remove one occurrence of `bookmark_to_remove` and verify removal.

    :param bookmark_to_remove: the bookmark to remove
    :param max_retries: how often to retry the removal
    :type max_retries: :class:`int`
    :raises RuntimeError: if the occurrence count did not decrease after
        `max_retries` retries.

    Does nothing if no equal bookmark exists; with duplicates exactly
    one occurrence is removed.
    """
    with (yield from self._lock):
        bookmarks = yield from self._get_bookmarks()
        occurences = bookmarks.count(bookmark_to_remove)

        try:
            if not occurences:
                return

            modified_bookmarks = list(bookmarks)
            modified_bookmarks.remove(bookmark_to_remove)
            yield from self._set_bookmarks(modified_bookmarks)

            # verify: success means the count went down by at least one
            retries = 0
            bookmarks = yield from self._get_bookmarks()
            new_occurences = bookmarks.count(bookmark_to_remove)
            while retries < max_retries:
                if new_occurences < occurences:
                    break
                modified_bookmarks = list(bookmarks)
                modified_bookmarks.remove(bookmark_to_remove)
                yield from self._set_bookmarks(modified_bookmarks)
                bookmarks = yield from self._get_bookmarks()
                new_occurences = bookmarks.count(bookmark_to_remove)
                retries += 1

            if new_occurences >= occurences:
                raise RuntimeError("Could not remove bookmark")
        finally:
            # reconcile cache and emit signals regardless of outcome
            self._diff_emit_update(bookmarks)
2.102671
2.17794
0.965441
def update_bookmark(self, old, new, *, max_retries=3):
    """Replace bookmark `old` with `new` and verify, retrying on
    failure.

    If no bookmark equal to `old` exists, `new` is simply added.

    :param old: the bookmark to replace
    :param new: the replacement bookmark
    :param max_retries: how often to retry the update
    :type max_retries: :class:`int`
    :raises RuntimeError: if `new` is not stored after `max_retries`
        retries.

    .. note:: Pass a copy as `new`; mutating a bookmark obtained from
       signals or :meth:`get_bookmarks` corrupts the cache.
    """
    def replace_bookmark(bookmarks, old, new):
        # replace in place when found, else append
        modified_bookmarks = list(bookmarks)
        try:
            i = bookmarks.index(old)
            modified_bookmarks[i] = new
        except ValueError:
            modified_bookmarks.append(new)
        return modified_bookmarks

    with (yield from self._lock):
        bookmarks = yield from self._get_bookmarks()
        try:
            yield from self._set_bookmarks(
                replace_bookmark(bookmarks, old, new)
            )

            # verify, re-applying up to max_retries times
            retries = 0
            bookmarks = yield from self._get_bookmarks()
            while retries < max_retries:
                if new in bookmarks:
                    break
                yield from self._set_bookmarks(
                    replace_bookmark(bookmarks, old, new)
                )
                bookmarks = yield from self._get_bookmarks()
                retries += 1

            if new not in bookmarks:
                # fixed typo: message previously read "Cold not ..."
                raise RuntimeError("Could not update bookmark")
        finally:
            # reconcile cache and emit signals regardless of outcome
            self._diff_emit_update(bookmarks)
2.672424
2.598312
1.028523
def iter_identities(self, stanza=None):
    """Yield ``(category, type, lang, name)`` tuples for each declared
    identity.

    :param stanza: The IQ request stanza, or :data:`None` outside a
        request context.

    Identities without any names yield a single tuple with lang and
    name set to :data:`None`.
    """
    for (category, type_), names in self._identities.items():
        if names:
            for lang, name in names.items():
                yield category, type_, lang, name
        else:
            yield category, type_, None, None
5.522493
3.604571
1.532081
def iter_features(self, stanza=None):
    """Return an iterator over the node's features.

    :param stanza: The IQ request stanza, or :data:`None` outside a
        request context.

    The mandatory :xep:`30` features come first, followed by the
    registered ones.
    """
    return itertools.chain(
        self.STATIC_FEATURES,
        self._features,
    )
11.290795
15.399117
0.733211
def register_feature(self, var):
    """Register the feature `var` with this node.

    :raises ValueError: if `var` is already registered or part of the
        static :xep:`30` feature set.

    Emits :meth:`on_info_changed` on success.
    """
    already_claimed = (var in self._features or
                       var in self.STATIC_FEATURES)
    if already_claimed:
        raise ValueError("feature already claimed: {!r}".format(var))
    self._features.add(var)
    self.on_info_changed()
6.200716
6.228513
0.995537