Dataset schema (column: dtype, observed range):
body: string, lengths 26 to 98.2k
body_hash: int64, values -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, lengths 1 to 16.8k
path: string, lengths 5 to 230
name: string, lengths 1 to 96
repository_name: string, lengths 7 to 89
lang: string, 1 distinct value
body_without_docstring: string, lengths 20 to 98.2k
async def pin(self, *, reason: Optional[str]=None) -> None: '|coro|\n\n Pins the message.\n\n You must have the :attr:`~Permissions.manage_messages` permission to do\n this in a non-private channel context.\n\n Parameters\n -----------\n reason: Optional[:class:`str`]\n The reason for pinning the message. Shows up on the audit log.\n\n .. versionadded:: 1.4\n\n Raises\n -------\n Forbidden\n You do not have permissions to pin the message.\n NotFound\n The message or channel was not found or deleted.\n HTTPException\n Pinning the message failed, probably due to the channel\n having more than 50 pinned messages.\n ' (await self._state.http.pin_message(self.channel.id, self.id, reason=reason)) self.pinned = True
1,276,763,971,700,800,000
|coro| Pins the message. You must have the :attr:`~Permissions.manage_messages` permission to do this in a non-private channel context. Parameters ----------- reason: Optional[:class:`str`] The reason for pinning the message. Shows up on the audit log. .. versionadded:: 1.4 Raises ------- Forbidden You do not have permissions to pin the message. NotFound The message or channel was not found or deleted. HTTPException Pinning the message failed, probably due to the channel having more than 50 pinned messages.
discord/message.py
pin
NQN-Discord/discord.py
python
async def pin(self, *, reason: Optional[str]=None) -> None: '|coro|\n\n Pins the message.\n\n You must have the :attr:`~Permissions.manage_messages` permission to do\n this in a non-private channel context.\n\n Parameters\n -----------\n reason: Optional[:class:`str`]\n The reason for pinning the message. Shows up on the audit log.\n\n .. versionadded:: 1.4\n\n Raises\n -------\n Forbidden\n You do not have permissions to pin the message.\n NotFound\n The message or channel was not found or deleted.\n HTTPException\n Pinning the message failed, probably due to the channel\n having more than 50 pinned messages.\n ' (await self._state.http.pin_message(self.channel.id, self.id, reason=reason)) self.pinned = True
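A minimal usage sketch for the pin entry above (assuming a connected discord.py 2.x client and an already-fetched discord.Message; the helper name and audit-log reason are illustrative):

import discord

async def pin_with_audit_reason(message: discord.Message) -> None:
    # Requires manage_messages outside DMs; channels cap out at 50 pinned messages.
    try:
        await message.pin(reason="Keeping the server rules visible")
    except discord.Forbidden:
        print("Missing the manage_messages permission.")
    except discord.HTTPException as exc:
        print(f"Pinning failed, possibly at the 50-pin limit: {exc}")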
async def unpin(self, *, reason: Optional[str]=None) -> None: '|coro|\n\n Unpins the message.\n\n You must have the :attr:`~Permissions.manage_messages` permission to do\n this in a non-private channel context.\n\n Parameters\n -----------\n reason: Optional[:class:`str`]\n The reason for unpinning the message. Shows up on the audit log.\n\n .. versionadded:: 1.4\n\n Raises\n -------\n Forbidden\n You do not have permissions to unpin the message.\n NotFound\n The message or channel was not found or deleted.\n HTTPException\n Unpinning the message failed.\n ' (await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)) self.pinned = False
1,803,090,453,734,832,400
|coro| Unpins the message. You must have the :attr:`~Permissions.manage_messages` permission to do this in a non-private channel context. Parameters ----------- reason: Optional[:class:`str`] The reason for unpinning the message. Shows up on the audit log. .. versionadded:: 1.4 Raises ------- Forbidden You do not have permissions to unpin the message. NotFound The message or channel was not found or deleted. HTTPException Unpinning the message failed.
discord/message.py
unpin
NQN-Discord/discord.py
python
async def unpin(self, *, reason: Optional[str]=None) -> None: '|coro|\n\n Unpins the message.\n\n You must have the :attr:`~Permissions.manage_messages` permission to do\n this in a non-private channel context.\n\n Parameters\n -----------\n reason: Optional[:class:`str`]\n The reason for unpinning the message. Shows up on the audit log.\n\n .. versionadded:: 1.4\n\n Raises\n -------\n Forbidden\n You do not have permissions to unpin the message.\n NotFound\n The message or channel was not found or deleted.\n HTTPException\n Unpinning the message failed.\n ' (await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)) self.pinned = False
async def add_reaction(self, emoji: EmojiInputType, /) -> None: '|coro|\n\n Adds a reaction to the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n You must have the :attr:`~Permissions.read_message_history` permission\n to use this. If nobody else has reacted to the message using this\n emoji, the :attr:`~Permissions.add_reactions` permission is required.\n\n .. versionchanged:: 2.0\n\n ``emoji`` parameter is now positional-only.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n ------------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to react with.\n\n Raises\n --------\n HTTPException\n Adding the reaction failed.\n Forbidden\n You do not have the proper permissions to react to the message.\n NotFound\n The emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) (await self._state.http.add_reaction(self.channel.id, self.id, emoji))
-5,923,241,129,052,162,000
|coro| Adds a reaction to the message. The emoji may be a unicode emoji or a custom guild :class:`Emoji`. You must have the :attr:`~Permissions.read_message_history` permission to use this. If nobody else has reacted to the message using this emoji, the :attr:`~Permissions.add_reactions` permission is required. .. versionchanged:: 2.0 ``emoji`` parameter is now positional-only. .. versionchanged:: 2.0 This function will now raise :exc:`TypeError` instead of ``InvalidArgument``. Parameters ------------ emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`] The emoji to react with. Raises -------- HTTPException Adding the reaction failed. Forbidden You do not have the proper permissions to react to the message. NotFound The emoji you specified was not found. TypeError The emoji parameter is invalid.
discord/message.py
add_reaction
NQN-Discord/discord.py
python
async def add_reaction(self, emoji: EmojiInputType, /) -> None: '|coro|\n\n Adds a reaction to the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n You must have the :attr:`~Permissions.read_message_history` permission\n to use this. If nobody else has reacted to the message using this\n emoji, the :attr:`~Permissions.add_reactions` permission is required.\n\n .. versionchanged:: 2.0\n\n ``emoji`` parameter is now positional-only.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n ------------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to react with.\n\n Raises\n --------\n HTTPException\n Adding the reaction failed.\n Forbidden\n You do not have the proper permissions to react to the message.\n NotFound\n The emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) (await self._state.http.add_reaction(self.channel.id, self.id, emoji))
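A short reaction sketch (assuming the message comes from a guild and that a custom emoji named "nqn" exists there; the emoji name and helper name are hypothetical):

import discord

async def react(message: discord.Message) -> None:
    # Unicode emoji are passed as plain strings; the emoji parameter is positional-only.
    await message.add_reaction("\N{THUMBS UP SIGN}")
    # Custom guild emoji must resolve to an Emoji/PartialEmoji object.
    custom = discord.utils.get(message.guild.emojis, name="nqn")
    if custom is not None:
        await message.add_reaction(custom)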
async def remove_reaction(self, emoji: Union[(EmojiInputType, Reaction)], member: Snowflake) -> None: '|coro|\n\n Remove a reaction by the member from the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n If the reaction is not your own (i.e. ``member`` parameter is not you) then\n the :attr:`~Permissions.manage_messages` permission is needed.\n\n The ``member`` parameter must represent a member and meet\n the :class:`abc.Snowflake` abc.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n ------------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to remove.\n member: :class:`abc.Snowflake`\n The member for which to remove the reaction.\n\n Raises\n --------\n HTTPException\n Removing the reaction failed.\n Forbidden\n You do not have the proper permissions to remove the reaction.\n NotFound\n The member or emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) if (member.id == self._state.self_id): (await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)) else: (await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id))
3,879,932,355,644,813,000
|coro| Remove a reaction by the member from the message. The emoji may be a unicode emoji or a custom guild :class:`Emoji`. If the reaction is not your own (i.e. ``member`` parameter is not you) then the :attr:`~Permissions.manage_messages` permission is needed. The ``member`` parameter must represent a member and meet the :class:`abc.Snowflake` abc. .. versionchanged:: 2.0 This function will now raise :exc:`TypeError` instead of ``InvalidArgument``. Parameters ------------ emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`] The emoji to remove. member: :class:`abc.Snowflake` The member for which to remove the reaction. Raises -------- HTTPException Removing the reaction failed. Forbidden You do not have the proper permissions to remove the reaction. NotFound The member or emoji you specified was not found. TypeError The emoji parameter is invalid.
discord/message.py
remove_reaction
NQN-Discord/discord.py
python
async def remove_reaction(self, emoji: Union[(EmojiInputType, Reaction)], member: Snowflake) -> None: '|coro|\n\n Remove a reaction by the member from the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n If the reaction is not your own (i.e. ``member`` parameter is not you) then\n the :attr:`~Permissions.manage_messages` permission is needed.\n\n The ``member`` parameter must represent a member and meet\n the :class:`abc.Snowflake` abc.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n ------------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to remove.\n member: :class:`abc.Snowflake`\n The member for which to remove the reaction.\n\n Raises\n --------\n HTTPException\n Removing the reaction failed.\n Forbidden\n You do not have the proper permissions to remove the reaction.\n NotFound\n The member or emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) if (member.id == self._state.self_id): (await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)) else: (await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id))
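As the body above shows, removal goes through two different HTTP routes depending on whose reaction it is; a sketch (helper name illustrative, assumes the thumbs-up reaction exists on the message):

import discord

async def retract_thumbs_up(message: discord.Message, member: discord.abc.Snowflake) -> None:
    # Removing another member's reaction needs manage_messages; removing your own does not.
    try:
        await message.remove_reaction("\N{THUMBS UP SIGN}", member)
    except discord.NotFound:
        print("That member had not reacted with this emoji.")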
async def clear_reaction(self, emoji: Union[(EmojiInputType, Reaction)]) -> None: '|coro|\n\n Clears a specific reaction from the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n You need the :attr:`~Permissions.manage_messages` permission to use this.\n\n .. versionadded:: 1.3\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to clear.\n\n Raises\n --------\n HTTPException\n Clearing the reaction failed.\n Forbidden\n You do not have the proper permissions to clear the reaction.\n NotFound\n The emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) (await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji))
3,887,587,292,013,516,000
|coro| Clears a specific reaction from the message. The emoji may be a unicode emoji or a custom guild :class:`Emoji`. You need the :attr:`~Permissions.manage_messages` permission to use this. .. versionadded:: 1.3 .. versionchanged:: 2.0 This function will now raise :exc:`TypeError` instead of ``InvalidArgument``. Parameters ----------- emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`] The emoji to clear. Raises -------- HTTPException Clearing the reaction failed. Forbidden You do not have the proper permissions to clear the reaction. NotFound The emoji you specified was not found. TypeError The emoji parameter is invalid.
discord/message.py
clear_reaction
NQN-Discord/discord.py
python
async def clear_reaction(self, emoji: Union[(EmojiInputType, Reaction)]) -> None: '|coro|\n\n Clears a specific reaction from the message.\n\n The emoji may be a unicode emoji or a custom guild :class:`Emoji`.\n\n You need the :attr:`~Permissions.manage_messages` permission to use this.\n\n .. versionadded:: 1.3\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]\n The emoji to clear.\n\n Raises\n --------\n HTTPException\n Clearing the reaction failed.\n Forbidden\n You do not have the proper permissions to clear the reaction.\n NotFound\n The emoji you specified was not found.\n TypeError\n The emoji parameter is invalid.\n ' emoji = convert_emoji_reaction(emoji) (await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji))
async def clear_reactions(self) -> None: '|coro|\n\n Removes all the reactions from the message.\n\n You need the :attr:`~Permissions.manage_messages` permission to use this.\n\n Raises\n --------\n HTTPException\n Removing the reactions failed.\n Forbidden\n You do not have the proper permissions to remove all the reactions.\n ' (await self._state.http.clear_reactions(self.channel.id, self.id))
2,889,683,392,026,840,000
|coro| Removes all the reactions from the message. You need the :attr:`~Permissions.manage_messages` permission to use this. Raises -------- HTTPException Removing the reactions failed. Forbidden You do not have the proper permissions to remove all the reactions.
discord/message.py
clear_reactions
NQN-Discord/discord.py
python
async def clear_reactions(self) -> None: '|coro|\n\n Removes all the reactions from the message.\n\n You need the :attr:`~Permissions.manage_messages` permission to use this.\n\n Raises\n --------\n HTTPException\n Removing the reactions failed.\n Forbidden\n You do not have the proper permissions to remove all the reactions.\n ' (await self._state.http.clear_reactions(self.channel.id, self.id))
async def create_thread(self, *, name: str, auto_archive_duration: ThreadArchiveDuration=MISSING, slowmode_delay: Optional[int]=None, reason: Optional[str]=None) -> Thread: "|coro|\n\n Creates a public thread from this message.\n\n You must have :attr:`~discord.Permissions.create_public_threads` in order to\n create a public thread from a message.\n\n The channel this message belongs in must be a :class:`TextChannel`.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n name: :class:`str`\n The name of the thread.\n auto_archive_duration: :class:`int`\n The duration in minutes before a thread is automatically archived for inactivity.\n If not provided, the channel's default auto archive duration is used.\n slowmode_delay: Optional[:class:`int`]\n Specifies the slowmode rate limit for user in this channel, in seconds.\n The maximum value possible is `21600`. By default no slowmode rate limit\n if this is ``None``.\n reason: Optional[:class:`str`]\n The reason for creating a new thread. Shows up on the audit log.\n\n Raises\n -------\n Forbidden\n You do not have permissions to create a thread.\n HTTPException\n Creating the thread failed.\n ValueError\n This message does not have guild info attached.\n\n Returns\n --------\n :class:`.Thread`\n The created thread.\n " if (self.guild is None): raise ValueError('This message does not have guild info attached.') default_auto_archive_duration: ThreadArchiveDuration = getattr(self.channel, 'default_auto_archive_duration', 1440) data = (await self._state.http.start_thread_with_message(self.channel.id, self.id, name=name, auto_archive_duration=(auto_archive_duration or default_auto_archive_duration), rate_limit_per_user=slowmode_delay, reason=reason)) return Thread(guild=self.guild, state=self._state, data=data)
-1,888,505,170,059,735,300
|coro| Creates a public thread from this message. You must have :attr:`~discord.Permissions.create_public_threads` in order to create a public thread from a message. The channel this message belongs in must be a :class:`TextChannel`. .. versionadded:: 2.0 Parameters ----------- name: :class:`str` The name of the thread. auto_archive_duration: :class:`int` The duration in minutes before a thread is automatically archived for inactivity. If not provided, the channel's default auto archive duration is used. slowmode_delay: Optional[:class:`int`] Specifies the slowmode rate limit for user in this channel, in seconds. The maximum value possible is `21600`. By default no slowmode rate limit if this is ``None``. reason: Optional[:class:`str`] The reason for creating a new thread. Shows up on the audit log. Raises ------- Forbidden You do not have permissions to create a thread. HTTPException Creating the thread failed. ValueError This message does not have guild info attached. Returns -------- :class:`.Thread` The created thread.
discord/message.py
create_thread
NQN-Discord/discord.py
python
async def create_thread(self, *, name: str, auto_archive_duration: ThreadArchiveDuration=MISSING, slowmode_delay: Optional[int]=None, reason: Optional[str]=None) -> Thread: "|coro|\n\n Creates a public thread from this message.\n\n You must have :attr:`~discord.Permissions.create_public_threads` in order to\n create a public thread from a message.\n\n The channel this message belongs in must be a :class:`TextChannel`.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n name: :class:`str`\n The name of the thread.\n auto_archive_duration: :class:`int`\n The duration in minutes before a thread is automatically archived for inactivity.\n If not provided, the channel's default auto archive duration is used.\n slowmode_delay: Optional[:class:`int`]\n Specifies the slowmode rate limit for user in this channel, in seconds.\n The maximum value possible is `21600`. By default no slowmode rate limit\n if this is ``None``.\n reason: Optional[:class:`str`]\n The reason for creating a new thread. Shows up on the audit log.\n\n Raises\n -------\n Forbidden\n You do not have permissions to create a thread.\n HTTPException\n Creating the thread failed.\n ValueError\n This message does not have guild info attached.\n\n Returns\n --------\n :class:`.Thread`\n The created thread.\n " if (self.guild is None): raise ValueError('This message does not have guild info attached.') default_auto_archive_duration: ThreadArchiveDuration = getattr(self.channel, 'default_auto_archive_duration', 1440) data = (await self._state.http.start_thread_with_message(self.channel.id, self.id, name=name, auto_archive_duration=(auto_archive_duration or default_auto_archive_duration), rate_limit_per_user=slowmode_delay, reason=reason)) return Thread(guild=self.guild, state=self._state, data=data)
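A sketch of spawning a public thread from a guild message (assuming the channel is a TextChannel and the bot has create_public_threads; the thread name, delays, and reason are placeholders):

import discord

async def start_discussion(message: discord.Message) -> discord.Thread:
    return await message.create_thread(
        name="Follow-up discussion",
        auto_archive_duration=1440,  # minutes; one day of inactivity
        slowmode_delay=10,           # seconds per user between posts
        reason="Spawning a thread from a report",
    )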
async def reply(self, content: Optional[str]=None, **kwargs: Any) -> Message: '|coro|\n\n A shortcut method to :meth:`.abc.Messageable.send` to reply to the\n :class:`.Message`.\n\n .. versionadded:: 1.6\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` or\n :exc:`ValueError` instead of ``InvalidArgument``.\n\n Raises\n --------\n ~discord.HTTPException\n Sending the message failed.\n ~discord.Forbidden\n You do not have the proper permissions to send the message.\n ValueError\n The ``files`` list is not of the appropriate size\n TypeError\n You specified both ``file`` and ``files``.\n\n Returns\n ---------\n :class:`.Message`\n The message that was sent.\n ' return (await self.channel.send(content, reference=self, **kwargs))
-6,370,809,901,766,997,000
|coro| A shortcut method to :meth:`.abc.Messageable.send` to reply to the :class:`.Message`. .. versionadded:: 1.6 .. versionchanged:: 2.0 This function will now raise :exc:`TypeError` or :exc:`ValueError` instead of ``InvalidArgument``. Raises -------- ~discord.HTTPException Sending the message failed. ~discord.Forbidden You do not have the proper permissions to send the message. ValueError The ``files`` list is not of the appropriate size TypeError You specified both ``file`` and ``files``. Returns --------- :class:`.Message` The message that was sent.
discord/message.py
reply
NQN-Discord/discord.py
python
async def reply(self, content: Optional[str]=None, **kwargs: Any) -> Message: '|coro|\n\n A shortcut method to :meth:`.abc.Messageable.send` to reply to the\n :class:`.Message`.\n\n .. versionadded:: 1.6\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` or\n :exc:`ValueError` instead of ``InvalidArgument``.\n\n Raises\n --------\n ~discord.HTTPException\n Sending the message failed.\n ~discord.Forbidden\n You do not have the proper permissions to send the message.\n ValueError\n The ``files`` list is not of the appropriate size\n TypeError\n You specified both ``file`` and ``files``.\n\n Returns\n ---------\n :class:`.Message`\n The message that was sent.\n ' return (await self.channel.send(content, reference=self, **kwargs))
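Because reply() forwards its keyword arguments to abc.Messageable.send(), anything send() accepts can ride along; a minimal sketch (helper name illustrative):

import discord

async def acknowledge(message: discord.Message) -> discord.Message:
    # mention_author=False is passed straight through to send() and suppresses the reply ping.
    return await message.reply("Got it, thanks!", mention_author=False)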
def to_reference(self, *, fail_if_not_exists: bool=True) -> MessageReference: 'Creates a :class:`~discord.MessageReference` from the current message.\n\n .. versionadded:: 1.6\n\n Parameters\n ----------\n fail_if_not_exists: :class:`bool`\n Whether replying using the message reference should raise :class:`HTTPException`\n if the message no longer exists or Discord could not fetch the message.\n\n .. versionadded:: 1.7\n\n Returns\n ---------\n :class:`~discord.MessageReference`\n The reference to this message.\n ' return MessageReference.from_message(self, fail_if_not_exists=fail_if_not_exists)
-2,354,094,330,619,156,500
Creates a :class:`~discord.MessageReference` from the current message. .. versionadded:: 1.6 Parameters ---------- fail_if_not_exists: :class:`bool` Whether replying using the message reference should raise :class:`HTTPException` if the message no longer exists or Discord could not fetch the message. .. versionadded:: 1.7 Returns --------- :class:`~discord.MessageReference` The reference to this message.
discord/message.py
to_reference
NQN-Discord/discord.py
python
def to_reference(self, *, fail_if_not_exists: bool=True) -> MessageReference: 'Creates a :class:`~discord.MessageReference` from the current message.\n\n .. versionadded:: 1.6\n\n Parameters\n ----------\n fail_if_not_exists: :class:`bool`\n Whether replying using the message reference should raise :class:`HTTPException`\n if the message no longer exists or Discord could not fetch the message.\n\n .. versionadded:: 1.7\n\n Returns\n ---------\n :class:`~discord.MessageReference`\n The reference to this message.\n ' return MessageReference.from_message(self, fail_if_not_exists=fail_if_not_exists)
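A sketch of building the reference by hand and replying through channel.send() (assumes a sendable channel; with fail_if_not_exists=False the reply still goes through if the original message is deleted first):

import discord

async def soft_reply(message: discord.Message) -> discord.Message:
    ref = message.to_reference(fail_if_not_exists=False)
    return await message.channel.send("Replying via an explicit reference.", reference=ref)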
@utils.cached_slot_property('_cs_raw_mentions') def raw_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of user IDs matched with\n the syntax of ``<@user_id>`` in the message content.\n\n This allows you to receive the user IDs of mentioned users\n even in a private message context.\n ' return [int(x) for x in re.findall('<@!?([0-9]{15,20})>', self.content)]
-2,450,987,508,572,274,700
List[:class:`int`]: A property that returns an array of user IDs matched with the syntax of ``<@user_id>`` in the message content. This allows you to receive the user IDs of mentioned users even in a private message context.
discord/message.py
raw_mentions
NQN-Discord/discord.py
python
@utils.cached_slot_property('_cs_raw_mentions') def raw_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of user IDs matched with\n the syntax of ``<@user_id>`` in the message content.\n\n This allows you to receive the user IDs of mentioned users\n even in a private message context.\n ' return [int(x) for x in re.findall('<@!?([0-9]{15,20})>', self.content)]
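The property above is a pure regular-expression scan of Message.content, so the same pattern can be exercised on a plain string (the IDs below are arbitrary examples):

import re

content = "ping <@80088516616269824> and <@!80088516616269824>, but not <@&123456789012345678>"
# Same pattern as raw_mentions: user mentions, optional nickname bang, 15-20 digits.
user_ids = [int(x) for x in re.findall(r"<@!?([0-9]{15,20})>", content)]
print(user_ids)  # [80088516616269824, 80088516616269824] -- the role mention is not matched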
@utils.cached_slot_property('_cs_raw_channel_mentions') def raw_channel_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of channel IDs matched with\n the syntax of ``<#channel_id>`` in the message content.\n ' return [int(x) for x in re.findall('<#([0-9]{15,20})>', self.content)]
2,727,383,027,906,410,500
List[:class:`int`]: A property that returns an array of channel IDs matched with the syntax of ``<#channel_id>`` in the message content.
discord/message.py
raw_channel_mentions
NQN-Discord/discord.py
python
@utils.cached_slot_property('_cs_raw_channel_mentions') def raw_channel_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of channel IDs matched with\n the syntax of ``<#channel_id>`` in the message content.\n ' return [int(x) for x in re.findall('<#([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_raw_role_mentions') def raw_role_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of role IDs matched with\n the syntax of ``<@&role_id>`` in the message content.\n ' return [int(x) for x in re.findall('<@&([0-9]{15,20})>', self.content)]
4,036,653,611,598,352,400
List[:class:`int`]: A property that returns an array of role IDs matched with the syntax of ``<@&role_id>`` in the message content.
discord/message.py
raw_role_mentions
NQN-Discord/discord.py
python
@utils.cached_slot_property('_cs_raw_role_mentions') def raw_role_mentions(self) -> List[int]: 'List[:class:`int`]: A property that returns an array of role IDs matched with\n the syntax of ``<@&role_id>`` in the message content.\n ' return [int(x) for x in re.findall('<@&([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_clean_content') def clean_content(self) -> str: ':class:`str`: A property that returns the content in a "cleaned up"\n manner. This basically means that mentions are transformed\n into the way the client shows it. e.g. ``<#id>`` will transform\n into ``#name``.\n\n This will also transform @everyone and @here mentions into\n non-mentions.\n\n .. note::\n\n This *does not* affect markdown. If you want to escape\n or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown`\n respectively, along with this function.\n ' if self.guild: def resolve_member(id: int) -> str: m = (self.guild.get_member(id) or utils.get(self.mentions, id=id)) return (f'@{m.display_name}' if m else '@deleted-user') def resolve_role(id: int) -> str: r = (self.guild.get_role(id) or utils.get(self.role_mentions, id=id)) return (f'@{r.name}' if r else '@deleted-role') def resolve_channel(id: int) -> str: c = self.guild._resolve_channel(id) return (f'#{c.name}' if c else '#deleted-channel') else: def resolve_member(id: int) -> str: m = utils.get(self.mentions, id=id) return (f'@{m.display_name}' if m else '@deleted-user') def resolve_role(id: int) -> str: return '@deleted-role' def resolve_channel(id: int) -> str: return f'#deleted-channel' transforms = {'@': resolve_member, '@!': resolve_member, '#': resolve_channel, '@&': resolve_role} def repl(match: re.Match) -> str: type = match[1] id = int(match[2]) transformed = transforms[type](id) return transformed result = re.sub('<(@[!&]?|#)([0-9]{15,20})>', repl, self.content) return escape_mentions(result)
-2,081,618,440,603,724,500
:class:`str`: A property that returns the content in a "cleaned up" manner. This basically means that mentions are transformed into the way the client shows it. e.g. ``<#id>`` will transform into ``#name``. This will also transform @everyone and @here mentions into non-mentions. .. note:: This *does not* affect markdown. If you want to escape or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown` respectively, along with this function.
discord/message.py
clean_content
NQN-Discord/discord.py
python
@utils.cached_slot_property('_cs_clean_content') def clean_content(self) -> str: ':class:`str`: A property that returns the content in a "cleaned up"\n manner. This basically means that mentions are transformed\n into the way the client shows it. e.g. ``<#id>`` will transform\n into ``#name``.\n\n This will also transform @everyone and @here mentions into\n non-mentions.\n\n .. note::\n\n This *does not* affect markdown. If you want to escape\n or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown`\n respectively, along with this function.\n ' if self.guild: def resolve_member(id: int) -> str: m = (self.guild.get_member(id) or utils.get(self.mentions, id=id)) return (f'@{m.display_name}' if m else '@deleted-user') def resolve_role(id: int) -> str: r = (self.guild.get_role(id) or utils.get(self.role_mentions, id=id)) return (f'@{r.name}' if r else '@deleted-role') def resolve_channel(id: int) -> str: c = self.guild._resolve_channel(id) return (f'#{c.name}' if c else '#deleted-channel') else: def resolve_member(id: int) -> str: m = utils.get(self.mentions, id=id) return (f'@{m.display_name}' if m else '@deleted-user') def resolve_role(id: int) -> str: return '@deleted-role' def resolve_channel(id: int) -> str: return f'#deleted-channel' transforms = {'@': resolve_member, '@!': resolve_member, '#': resolve_channel, '@&': resolve_role} def repl(match: re.Match) -> str: type = match[1] id = int(match[2]) transformed = transforms[type](id) return transformed result = re.sub('<(@[!&]?|#)([0-9]{15,20})>', repl, self.content) return escape_mentions(result)
@property def created_at(self) -> datetime.datetime: ":class:`datetime.datetime`: The message's creation time in UTC." return utils.snowflake_time(self.id)
2,226,906,350,220,452,000
:class:`datetime.datetime`: The message's creation time in UTC.
discord/message.py
created_at
NQN-Discord/discord.py
python
@property def created_at(self) -> datetime.datetime: return utils.snowflake_time(self.id)
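created_at leans on utils.snowflake_time(), which decodes the timestamp packed into the upper bits of the ID; a standalone sketch of that arithmetic (assuming the standard Discord epoch of 2015-01-01 UTC and the documented example snowflake):

import datetime

DISCORD_EPOCH_MS = 1420070400000  # 2015-01-01T00:00:00 UTC in milliseconds

def snowflake_to_datetime(snowflake: int) -> datetime.datetime:
    # The top bits of a snowflake are milliseconds since the Discord epoch.
    ms = (snowflake >> 22) + DISCORD_EPOCH_MS
    return datetime.datetime.fromtimestamp(ms / 1000, tz=datetime.timezone.utc)

print(snowflake_to_datetime(175928847299117063))  # 2016-04-30 11:18:25.796000+00:00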
@property def edited_at(self) -> Optional[datetime.datetime]: 'Optional[:class:`datetime.datetime`]: An aware UTC datetime object containing the edited time of the message.' return self._edited_timestamp
-2,929,397,423,306,802,000
Optional[:class:`datetime.datetime`]: An aware UTC datetime object containing the edited time of the message.
discord/message.py
edited_at
NQN-Discord/discord.py
python
@property def edited_at(self) -> Optional[datetime.datetime]: return self._edited_timestamp
def is_system(self) -> bool: ':class:`bool`: Whether the message is a system message.\n\n A system message is a message that is constructed entirely by the Discord API\n in response to something.\n\n .. versionadded:: 1.3\n ' return (self.type not in (MessageType.default, MessageType.reply, MessageType.chat_input_command, MessageType.context_menu_command, MessageType.thread_starter_message))
-3,696,030,808,504,782,300
:class:`bool`: Whether the message is a system message. A system message is a message that is constructed entirely by the Discord API in response to something. .. versionadded:: 1.3
discord/message.py
is_system
NQN-Discord/discord.py
python
def is_system(self) -> bool: ':class:`bool`: Whether the message is a system message.\n\n A system message is a message that is constructed entirely by the Discord API\n in response to something.\n\n .. versionadded:: 1.3\n ' return (self.type not in (MessageType.default, MessageType.reply, MessageType.chat_input_command, MessageType.context_menu_command, MessageType.thread_starter_message))
@utils.cached_slot_property('_cs_system_content') def system_content(self) -> Optional[str]: ':class:`str`: A property that returns the content that is rendered\n regardless of the :attr:`Message.type`.\n\n In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\\,\n this just returns the regular :attr:`Message.content`. Otherwise this\n returns an English message denoting the contents of the system message.\n ' if (self.type is MessageType.default): return self.content if (self.type is MessageType.recipient_add): if (self.channel.type is ChannelType.group): return f'{self.author.name} added {self.mentions[0].name} to the group.' else: return f'{self.author.name} added {self.mentions[0].name} to the thread.' if (self.type is MessageType.recipient_remove): if (self.channel.type is ChannelType.group): return f'{self.author.name} removed {self.mentions[0].name} from the group.' else: return f'{self.author.name} removed {self.mentions[0].name} from the thread.' if (self.type is MessageType.channel_name_change): return f'{self.author.name} changed the channel name: **{self.content}**' if (self.type is MessageType.channel_icon_change): return f'{self.author.name} changed the channel icon.' if (self.type is MessageType.pins_add): return f'{self.author.name} pinned a message to this channel.' if (self.type is MessageType.new_member): formats = ['{0} joined the party.', '{0} is here.', 'Welcome, {0}. We hope you brought pizza.', 'A wild {0} appeared.', '{0} just landed.', '{0} just slid into the server.', '{0} just showed up!', 'Welcome {0}. Say hi!', '{0} hopped into the server.', 'Everyone welcome {0}!', "Glad you're here, {0}.", 'Good to see you, {0}.', 'Yay you made it, {0}!'] created_at_ms = int((self.created_at.timestamp() * 1000)) return formats[(created_at_ms % len(formats))].format(self.author.name) if (self.type is MessageType.premium_guild_subscription): if (not self.content): return f'{self.author.name} just boosted the server!' else: return f'{self.author.name} just boosted the server **{self.content}** times!' if (self.type is MessageType.premium_guild_tier_1): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 1!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 1!**' if (self.type is MessageType.premium_guild_tier_2): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 2!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 2!**' if (self.type is MessageType.premium_guild_tier_3): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 3!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 3!**' if (self.type is MessageType.channel_follow_add): return f'{self.author.name} has added {self.content} to this channel. Its most important updates will show up here.' if (self.type is MessageType.guild_stream): return f'{self.author.name} is live! Now streaming {self.author.activity.name}' if (self.type is MessageType.guild_discovery_disqualified): return 'This server has been removed from Server Discovery because it no longer passes all the requirements. Check Server Settings for more details.' 
if (self.type is MessageType.guild_discovery_requalified): return 'This server is eligible for Server Discovery again and has been automatically relisted!' if (self.type is MessageType.guild_discovery_grace_period_initial_warning): return 'This server has failed Discovery activity requirements for 1 week. If this server fails for 4 weeks in a row, it will be automatically removed from Discovery.' if (self.type is MessageType.guild_discovery_grace_period_final_warning): return 'This server has failed Discovery activity requirements for 3 weeks in a row. If this server fails for 1 more week, it will be removed from Discovery.' if (self.type is MessageType.thread_created): return f'{self.author.name} started a thread: **{self.content}**. See all **threads**.' if (self.type is MessageType.reply): return self.content if (self.type is MessageType.thread_starter_message): if ((self.reference is None) or (self.reference.resolved is None)): return "Sorry, we couldn't load the first message in this thread" return self.reference.resolved.content if (self.type is MessageType.guild_invite_reminder): return 'Wondering who to invite?\nStart by inviting anyone who can help you build the server!'
-4,320,280,416,202,102,300
:class:`str`: A property that returns the content that is rendered regardless of the :attr:`Message.type`. In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\, this just returns the regular :attr:`Message.content`. Otherwise this returns an English message denoting the contents of the system message.
discord/message.py
system_content
NQN-Discord/discord.py
python
@utils.cached_slot_property('_cs_system_content') def system_content(self) -> Optional[str]: ':class:`str`: A property that returns the content that is rendered\n regardless of the :attr:`Message.type`.\n\n In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\\,\n this just returns the regular :attr:`Message.content`. Otherwise this\n returns an English message denoting the contents of the system message.\n ' if (self.type is MessageType.default): return self.content if (self.type is MessageType.recipient_add): if (self.channel.type is ChannelType.group): return f'{self.author.name} added {self.mentions[0].name} to the group.' else: return f'{self.author.name} added {self.mentions[0].name} to the thread.' if (self.type is MessageType.recipient_remove): if (self.channel.type is ChannelType.group): return f'{self.author.name} removed {self.mentions[0].name} from the group.' else: return f'{self.author.name} removed {self.mentions[0].name} from the thread.' if (self.type is MessageType.channel_name_change): return f'{self.author.name} changed the channel name: **{self.content}**' if (self.type is MessageType.channel_icon_change): return f'{self.author.name} changed the channel icon.' if (self.type is MessageType.pins_add): return f'{self.author.name} pinned a message to this channel.' if (self.type is MessageType.new_member): formats = ['{0} joined the party.', '{0} is here.', 'Welcome, {0}. We hope you brought pizza.', 'A wild {0} appeared.', '{0} just landed.', '{0} just slid into the server.', '{0} just showed up!', 'Welcome {0}. Say hi!', '{0} hopped into the server.', 'Everyone welcome {0}!', "Glad you're here, {0}.", 'Good to see you, {0}.', 'Yay you made it, {0}!'] created_at_ms = int((self.created_at.timestamp() * 1000)) return formats[(created_at_ms % len(formats))].format(self.author.name) if (self.type is MessageType.premium_guild_subscription): if (not self.content): return f'{self.author.name} just boosted the server!' else: return f'{self.author.name} just boosted the server **{self.content}** times!' if (self.type is MessageType.premium_guild_tier_1): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 1!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 1!**' if (self.type is MessageType.premium_guild_tier_2): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 2!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 2!**' if (self.type is MessageType.premium_guild_tier_3): if (not self.content): return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 3!**' else: return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 3!**' if (self.type is MessageType.channel_follow_add): return f'{self.author.name} has added {self.content} to this channel. Its most important updates will show up here.' if (self.type is MessageType.guild_stream): return f'{self.author.name} is live! Now streaming {self.author.activity.name}' if (self.type is MessageType.guild_discovery_disqualified): return 'This server has been removed from Server Discovery because it no longer passes all the requirements. Check Server Settings for more details.' 
if (self.type is MessageType.guild_discovery_requalified): return 'This server is eligible for Server Discovery again and has been automatically relisted!' if (self.type is MessageType.guild_discovery_grace_period_initial_warning): return 'This server has failed Discovery activity requirements for 1 week. If this server fails for 4 weeks in a row, it will be automatically removed from Discovery.' if (self.type is MessageType.guild_discovery_grace_period_final_warning): return 'This server has failed Discovery activity requirements for 3 weeks in a row. If this server fails for 1 more week, it will be removed from Discovery.' if (self.type is MessageType.thread_created): return f'{self.author.name} started a thread: **{self.content}**. See all **threads**.' if (self.type is MessageType.reply): return self.content if (self.type is MessageType.thread_starter_message): if ((self.reference is None) or (self.reference.resolved is None)): return "Sorry, we couldn't load the first message in this thread" return self.reference.resolved.content if (self.type is MessageType.guild_invite_reminder): return 'Wondering who to invite?\nStart by inviting anyone who can help you build the server!'
async def edit(self, content: Optional[str]=MISSING, embed: Optional[Embed]=MISSING, embeds: Sequence[Embed]=MISSING, attachments: Sequence[Union[(Attachment, File)]]=MISSING, suppress: bool=False, delete_after: Optional[float]=None, allowed_mentions: Optional[AllowedMentions]=MISSING, view: Optional[View]=MISSING) -> Message: "|coro|\n\n Edits the message.\n\n The content must be able to be transformed into a string via ``str(content)``.\n\n .. versionchanged:: 1.3\n The ``suppress`` keyword-only parameter was added.\n\n .. versionchanged:: 2.0\n Edits are no longer in-place, the newly edited message is returned instead.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n content: Optional[:class:`str`]\n The new content to replace the message with.\n Could be ``None`` to remove the content.\n embed: Optional[:class:`Embed`]\n The new embed to replace the original with.\n Could be ``None`` to remove the embed.\n embeds: List[:class:`Embed`]\n The new embeds to replace the original with. Must be a maximum of 10.\n To remove all embeds ``[]`` should be passed.\n\n .. versionadded:: 2.0\n attachments: List[Union[:class:`Attachment`, :class:`File`]]\n A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed\n then all attachments are removed.\n\n .. note::\n\n New files will always appear after current attachments.\n\n .. versionadded:: 2.0\n suppress: :class:`bool`\n Whether to suppress embeds for the message. This removes\n all the embeds if set to ``True``. If set to ``False``\n this brings the embeds back if they were suppressed.\n Using this parameter requires :attr:`~.Permissions.manage_messages`.\n delete_after: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message we just edited. If the deletion fails,\n then it is silently ignored.\n allowed_mentions: Optional[:class:`~discord.AllowedMentions`]\n Controls the mentions being processed in this message. If this is\n passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.\n The merging behaviour only overrides attributes that have been explicitly passed\n to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.\n If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`\n are used instead.\n\n .. versionadded:: 1.4\n view: Optional[:class:`~discord.ui.View`]\n The updated view to update this message with. 
If ``None`` is passed then\n the view is removed.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to suppress a message without permissions or\n edited a message's content or embed that isn't yours.\n TypeError\n You specified both ``embed`` and ``embeds``\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " if (content is not MISSING): previous_allowed_mentions = self._state.allowed_mentions else: previous_allowed_mentions = None if (suppress is not MISSING): flags = MessageFlags._from_value(self.flags.value) flags.suppress_embeds = suppress else: flags = MISSING if (view is not MISSING): self._state.prevent_view_updates_for(self.id) params = handle_message_parameters(content=content, flags=flags, embed=embed, embeds=embeds, attachments=attachments, view=view, allowed_mentions=allowed_mentions, previous_allowed_mentions=previous_allowed_mentions) data = (await self._state.http.edit_message(self.channel.id, self.id, params=params)) message = Message(state=self._state, channel=self.channel, data=data) if (view and (not view.is_finished())): self._state.store_view(view, self.id) if (delete_after is not None): (await self.delete(delay=delete_after)) return message
712,034,988,315,412,700
|coro| Edits the message. The content must be able to be transformed into a string via ``str(content)``. .. versionchanged:: 1.3 The ``suppress`` keyword-only parameter was added. .. versionchanged:: 2.0 Edits are no longer in-place, the newly edited message is returned instead. .. versionchanged:: 2.0 This function will now raise :exc:`TypeError` instead of ``InvalidArgument``. Parameters ----------- content: Optional[:class:`str`] The new content to replace the message with. Could be ``None`` to remove the content. embed: Optional[:class:`Embed`] The new embed to replace the original with. Could be ``None`` to remove the embed. embeds: List[:class:`Embed`] The new embeds to replace the original with. Must be a maximum of 10. To remove all embeds ``[]`` should be passed. .. versionadded:: 2.0 attachments: List[Union[:class:`Attachment`, :class:`File`]] A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed then all attachments are removed. .. note:: New files will always appear after current attachments. .. versionadded:: 2.0 suppress: :class:`bool` Whether to suppress embeds for the message. This removes all the embeds if set to ``True``. If set to ``False`` this brings the embeds back if they were suppressed. Using this parameter requires :attr:`~.Permissions.manage_messages`. delete_after: Optional[:class:`float`] If provided, the number of seconds to wait in the background before deleting the message we just edited. If the deletion fails, then it is silently ignored. allowed_mentions: Optional[:class:`~discord.AllowedMentions`] Controls the mentions being processed in this message. If this is passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`. The merging behaviour only overrides attributes that have been explicitly passed to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`. If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions` are used instead. .. versionadded:: 1.4 view: Optional[:class:`~discord.ui.View`] The updated view to update this message with. If ``None`` is passed then the view is removed. Raises ------- HTTPException Editing the message failed. Forbidden Tried to suppress a message without permissions or edited a message's content or embed that isn't yours. TypeError You specified both ``embed`` and ``embeds`` Returns -------- :class:`Message` The newly edited message.
discord/message.py
edit
NQN-Discord/discord.py
python
async def edit(self, content: Optional[str]=MISSING, embed: Optional[Embed]=MISSING, embeds: Sequence[Embed]=MISSING, attachments: Sequence[Union[(Attachment, File)]]=MISSING, suppress: bool=False, delete_after: Optional[float]=None, allowed_mentions: Optional[AllowedMentions]=MISSING, view: Optional[View]=MISSING) -> Message: "|coro|\n\n Edits the message.\n\n The content must be able to be transformed into a string via ``str(content)``.\n\n .. versionchanged:: 1.3\n The ``suppress`` keyword-only parameter was added.\n\n .. versionchanged:: 2.0\n Edits are no longer in-place, the newly edited message is returned instead.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n content: Optional[:class:`str`]\n The new content to replace the message with.\n Could be ``None`` to remove the content.\n embed: Optional[:class:`Embed`]\n The new embed to replace the original with.\n Could be ``None`` to remove the embed.\n embeds: List[:class:`Embed`]\n The new embeds to replace the original with. Must be a maximum of 10.\n To remove all embeds ``[]`` should be passed.\n\n .. versionadded:: 2.0\n attachments: List[Union[:class:`Attachment`, :class:`File`]]\n A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed\n then all attachments are removed.\n\n .. note::\n\n New files will always appear after current attachments.\n\n .. versionadded:: 2.0\n suppress: :class:`bool`\n Whether to suppress embeds for the message. This removes\n all the embeds if set to ``True``. If set to ``False``\n this brings the embeds back if they were suppressed.\n Using this parameter requires :attr:`~.Permissions.manage_messages`.\n delete_after: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message we just edited. If the deletion fails,\n then it is silently ignored.\n allowed_mentions: Optional[:class:`~discord.AllowedMentions`]\n Controls the mentions being processed in this message. If this is\n passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.\n The merging behaviour only overrides attributes that have been explicitly passed\n to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.\n If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`\n are used instead.\n\n .. versionadded:: 1.4\n view: Optional[:class:`~discord.ui.View`]\n The updated view to update this message with. 
If ``None`` is passed then\n the view is removed.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to suppress a message without permissions or\n edited a message's content or embed that isn't yours.\n TypeError\n You specified both ``embed`` and ``embeds``\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " if (content is not MISSING): previous_allowed_mentions = self._state.allowed_mentions else: previous_allowed_mentions = None if (suppress is not MISSING): flags = MessageFlags._from_value(self.flags.value) flags.suppress_embeds = suppress else: flags = MISSING if (view is not MISSING): self._state.prevent_view_updates_for(self.id) params = handle_message_parameters(content=content, flags=flags, embed=embed, embeds=embeds, attachments=attachments, view=view, allowed_mentions=allowed_mentions, previous_allowed_mentions=previous_allowed_mentions) data = (await self._state.http.edit_message(self.channel.id, self.id, params=params)) message = Message(state=self._state, channel=self.channel, data=data) if (view and (not view.is_finished())): self._state.store_view(view, self.id) if (delete_after is not None): (await self.delete(delay=delete_after)) return message
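A sketch of an edit that swaps the content, drops every embed, and schedules clean-up (assuming the bot authored the message; per the 2.0 note above, the edited message is returned rather than mutated in place):

import discord

async def redact(message: discord.Message) -> discord.Message:
    return await message.edit(
        content="This message was redacted.",
        embeds=[],          # an empty list removes all embeds
        delete_after=30.0,  # background-delete half a minute later; failures are ignored
    )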
async def add_files(self, *files: File) -> Message: "|coro|\n\n Adds new files to the end of the message attachments.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n \\*files: :class:`File`\n New files to add to the message.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to edit a message that isn't yours.\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " return (await self.edit(attachments=[*self.attachments, *files]))
-3,431,482,641,138,352,600
|coro| Adds new files to the end of the message attachments. .. versionadded:: 2.0 Parameters ----------- \*files: :class:`File` New files to add to the message. Raises ------- HTTPException Editing the message failed. Forbidden Tried to edit a message that isn't yours. Returns -------- :class:`Message` The newly edited message.
discord/message.py
add_files
NQN-Discord/discord.py
python
async def add_files(self, *files: File) -> Message: "|coro|\n\n Adds new files to the end of the message attachments.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n \\*files: :class:`File`\n New files to add to the message.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to edit a message that isn't yours.\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " return (await self.edit(attachments=[*self.attachments, *files]))
async def remove_attachments(self, *attachments: Attachment) -> Message: "|coro|\n\n Removes attachments from the message.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n \\*attachments: :class:`Attachment`\n Attachments to remove from the message.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to edit a message that isn't yours.\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " return (await self.edit(attachments=[a for a in self.attachments if (a not in attachments)]))
1,765,899,621,065,355,500
|coro| Removes attachments from the message. .. versionadded:: 2.0 Parameters ----------- \*attachments: :class:`Attachment` Attachments to remove from the message. Raises ------- HTTPException Editing the message failed. Forbidden Tried to edit a message that isn't yours. Returns -------- :class:`Message` The newly edited message.
discord/message.py
remove_attachments
NQN-Discord/discord.py
python
async def remove_attachments(self, *attachments: Attachment) -> Message: "|coro|\n\n Removes attachments from the message.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n \\*attachments: :class:`Attachment`\n Attachments to remove from the message.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to edit a message that isn't yours.\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n " return (await self.edit(attachments=[a for a in self.attachments if (a not in attachments)]))
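Both add_files() and remove_attachments() above are thin wrappers around edit(attachments=...); a combined sketch that keeps only the first attachment and appends a replacement file (the path is a placeholder and the message must be the bot's own):

import discord

async def swap_attachments(message: discord.Message, replacement_path: str) -> discord.Message:
    # Drop everything after the first attachment, then append one new file.
    trimmed = await message.remove_attachments(*message.attachments[1:])
    # New files always land after the attachments that were kept.
    return await trimmed.add_files(discord.File(replacement_path))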
def get_default_options(max_num_machines: int=1, max_wallclock_seconds: int=1800, with_mpi: bool=False) -> dict: 'Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.\n\n :param max_num_machines: set the number of nodes, default=1\n :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800\n :param with_mpi: whether to run the calculation with MPI enabled\n ' return {'resources': {'num_machines': int(max_num_machines)}, 'max_wallclock_seconds': int(max_wallclock_seconds), 'withmpi': with_mpi}
231,383,380,334,743,800
Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`. :param max_num_machines: set the number of nodes, default=1 :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800 :param with_mpi: whether to run the calculation with MPI enabled
aiida_abinit/utils/resources.py
get_default_options
azadoks/aiida-abinit
python
def get_default_options(max_num_machines: int=1, max_wallclock_seconds: int=1800, with_mpi: bool=False) -> dict: 'Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.\n\n :param max_num_machines: set the number of nodes, default=1\n :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800\n :param with_mpi: whether to run the calculation with MPI enabled\n ' return {'resources': {'num_machines': int(max_num_machines)}, 'max_wallclock_seconds': int(max_wallclock_seconds), 'withmpi': with_mpi}
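A worked example of the dictionary the helper above returns (assuming the function is in scope; in AiiDA workflows this dict is typically assigned to a CalcJob builder's metadata.options, but that wiring is outside this snippet):

options = get_default_options(max_num_machines=2, max_wallclock_seconds=3600, with_mpi=True)
assert options == {
    'resources': {'num_machines': 2},
    'max_wallclock_seconds': 3600,
    'withmpi': True,
}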
def seconds_to_timelimit(seconds: int) -> str: 'Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`.\n\n    :param seconds: time limit in seconds\n    :returns: Slurm-notation time limit (hours:minutes:seconds)\n    ' days = (seconds // 86400) seconds -= (days * 86400) hours = (seconds // 3600) seconds -= (hours * 3600) minutes = (seconds // 60) seconds -= (minutes * 60) timelimit = '' if (days > 0): timelimit += f'{days}-' if (hours > 0): timelimit += f'{hours:02d}:' timelimit += f'{minutes:02d}:{seconds:02d}' return timelimit
-2,278,181,695,880,247,000
Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`. :param seconds: time limit in seconds :returns: Slurm-notation time limit (hours:minutes:seconds)
aiida_abinit/utils/resources.py
seconds_to_timelimit
azadoks/aiida-abinit
python
def seconds_to_timelimit(seconds: int) -> str: 'Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`.\n\n :param seconds: time limit in seconds\n :returns: Slurm-notation time limit (hours:minutes:seconds)\n ' days = (seconds // 86400) seconds -= (days * 86400) hours = (seconds // 3600) seconds -= (hours * 3600) minutes = (seconds // 60) seconds -= (minutes * 60) timelimit = '' if (days > 0): timelimit += f'{days}-' if (hours > 0): timelimit += f'{hours:02d}:' timelimit += f'{minutes:02d}:{seconds:02d}' return timelimit
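A quick worked example of the conversion performed by seconds_to_timelimit, with arbitrarily chosen values:

print(seconds_to_timelimit(45))     # -> '00:45'
print(seconds_to_timelimit(3725))   # -> '01:02:05'
print(seconds_to_timelimit(90061))  # -> '1-01:01:01'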
def get_data_type(self): '\n Get the type of the field.\n ' return Field.DATA_TYPE_STRING
-6,874,559,618,132,447,000
Get the type of the field.
splunk_eventgen/splunk_app/lib/mod_input/fields.py
get_data_type
kamaljitsingh76/eventgen
python
def get_data_type(self): '\n \n ' return Field.DATA_TYPE_STRING
def __init__(self, name, title, description, required_on_create=True, required_on_edit=False): '\n Create the field.\n\n Arguments:\n name -- Set the name of the field (e.g. "database_server")\n title -- Set the human readable title (e.g. "Database server")\n description -- Set the human-readable description of the field\n (e.g. "The IP or domain name of the database server")\n required_on_create -- If "true", the parameter is required on input stanza creation.\n required_on_edit -- If "true", the parameter is required on input stanza modification.\n\n Default values for required_on_create and required_on_edit match the\n documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.\n ' if ((name is None) or (len(name.strip()) == 0)): raise ValueError('The name parameter cannot be empty.') if ((title is None) or (len(title.strip()) == 0)): raise ValueError('The title parameter cannot be empty.') if ((description is None) or (len(description.strip()) == 0)): raise ValueError('The description parameter cannot be empty.') self.name = name self.title = title self.description = description self.required_on_create = required_on_create self.required_on_edit = required_on_edit
-2,107,707,271,251,261,400
Create the field. Arguments: name -- Set the name of the field (e.g. "database_server") title -- Set the human readable title (e.g. "Database server") description -- Set the human-readable description of the field (e.g. "The IP or domain name of the database server") required_on_create -- If "true", the parameter is required on input stanza creation. required_on_edit -- If "true", the parameter is required on input stanza modification. Default values for required_on_create and required_on_edit match the documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
splunk_eventgen/splunk_app/lib/mod_input/fields.py
__init__
kamaljitsingh76/eventgen
python
def __init__(self, name, title, description, required_on_create=True, required_on_edit=False): '\n Create the field.\n\n Arguments:\n name -- Set the name of the field (e.g. "database_server")\n title -- Set the human readable title (e.g. "Database server")\n description -- Set the human-readable description of the field\n (e.g. "The IP or domain name of the database server")\n required_on_create -- If "true", the parameter is required on input stanza creation.\n required_on_edit -- If "true", the parameter is required on input stanza modification.\n\n Default values for required_on_create and required_on_edit match the\n documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.\n ' if ((name is None) or (len(name.strip()) == 0)): raise ValueError('The name parameter cannot be empty.') if ((title is None) or (len(title.strip()) == 0)): raise ValueError('The title parameter cannot be empty.') if ((description is None) or (len(description.strip()) == 0)): raise ValueError('The description parameter cannot be empty.') self.name = name self.title = title self.description = description self.required_on_create = required_on_create self.required_on_edit = required_on_edit
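A minimal sketch of how this constructor behaves; the argument values mirror the examples given in its own docstring.

field = Field(name='database_server', title='Database server',
              description='The IP or domain name of the database server')
print(field.required_on_create, field.required_on_edit)  # True False

# An empty (or whitespace-only) name is rejected:
try:
    Field(name='   ', title='Database server', description='The IP or domain name of the database server')
except ValueError as exc:
    print(exc)  # The name parameter cannot be empty.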
def to_python(self, value): '\n Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.\n\n Arguments:\n value -- The value to convert\n ' return value
-3,484,464,218,273,584,600
Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid. Arguments: value -- The value to convert
splunk_eventgen/splunk_app/lib/mod_input/fields.py
to_python
kamaljitsingh76/eventgen
python
def to_python(self, value): '\n Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.\n\n Arguments:\n value -- The value to convert\n ' return value
def to_string(self, value): '\n Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is\n invalid.\n\n Arguments:\n value -- The value to convert\n ' return str(value)
6,424,125,471,210,500,000
Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is invalid. Arguments: value -- The value to convert
splunk_eventgen/splunk_app/lib/mod_input/fields.py
to_string
kamaljitsingh76/eventgen
python
def to_string(self, value): '\n Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is\n invalid.\n\n Arguments:\n value -- The value to convert\n ' return str(value)
def parse_cron(self, value): 'Check for valid cron string.' fields = value.split() if ((len(fields) == 5) and all([self.cron_rx.match(i) for i in fields])): return True return False
1,671,326,357,499,886,000
Check for valid cron string.
splunk_eventgen/splunk_app/lib/mod_input/fields.py
parse_cron
kamaljitsingh76/eventgen
python
def parse_cron(self, value): fields = value.split() if ((len(fields) == 5) and all([self.cron_rx.match(i) for i in fields])): return True return False
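parse_cron relies on a cron_rx regex defined elsewhere in the class and not included in this record; the standalone sketch below reproduces the same five-field check with an assumed, permissive pattern.

import re

class CronSketch:
    # Assumed pattern -- the real cron_rx is defined elsewhere in the Field subclass.
    cron_rx = re.compile(r'^[\d*,/\-]+$')

    def parse_cron(self, value):
        fields = value.split()
        if len(fields) == 5 and all(self.cron_rx.match(field) for field in fields):
            return True
        return False

print(CronSketch().parse_cron('*/5 * * * *'))   # True
print(CronSketch().parse_cron('every minute'))  # False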
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True): '\n\n Parameters\n ----------\n documents : iterable of iterable of str\n Iterable of documents, if given - use them to initialization.\n id_range : int, optional\n Number of hash-values in table, used as `id = myhash(key) % id_range`.\n myhash : function\n Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.\n debug : bool\n If True - store raw tokens mapping (as str <-> id).\n If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.\n\n ' self.myhash = myhash self.id_range = id_range self.debug = debug self.token2id = {} self.id2token = {} self.dfs = {} self.dfs_debug = {} self.num_docs = 0 self.num_pos = 0 self.num_nnz = 0 self.allow_update = True if (documents is not None): self.add_documents(documents)
-2,451,617,401,247,219,700
Parameters ---------- documents : iterable of iterable of str Iterable of documents, if given - use them to initialization. id_range : int, optional Number of hash-values in table, used as `id = myhash(key) % id_range`. myhash : function Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default. debug : bool If True - store raw tokens mapping (as str <-> id). If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
gensim/gensim/corpora/hashdictionary.py
__init__
Abas-Khan/thesis
python
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True): '\n\n Parameters\n ----------\n documents : iterable of iterable of str\n Iterable of documents, if given - use them to initialization.\n id_range : int, optional\n Number of hash-values in table, used as `id = myhash(key) % id_range`.\n myhash : function\n Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.\n debug : bool\n If True - store raw tokens mapping (as str <-> id).\n If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.\n\n ' self.myhash = myhash self.id_range = id_range self.debug = debug self.token2id = {} self.id2token = {} self.dfs = {} self.dfs_debug = {} self.num_docs = 0 self.num_pos = 0 self.num_nnz = 0 self.allow_update = True if (documents is not None): self.add_documents(documents)
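A short construction sketch in the same spirit as the doctests further down in this file; it assumes gensim is installed. Note that the dictionary's length is fixed by id_range, not by how many tokens have been seen.

from gensim.corpora import HashDictionary

dct = HashDictionary([['human', 'interface', 'computer']], id_range=1000)
print(len(dct))      # 1000 -- the size of the hash table, independent of the vocabulary
print(dct.num_docs)  # 1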
def __getitem__(self, tokenid): 'Get all words that have mapped to the given id so far, as a set.\n\n Warnings\n --------\n Works only if `debug=True`.\n\n Parameters\n ----------\n tokenid : int\n Token identifier (result of hashing).\n\n Return\n ------\n set of str\n Set of all corresponding words.\n\n ' return self.id2token.get(tokenid, set())
7,491,187,019,800,647,000
Get all words that have mapped to the given id so far, as a set. Warnings -------- Works only if `debug=True`. Parameters ---------- tokenid : int Token identifier (result of hashing). Return ------ set of str Set of all corresponding words.
gensim/gensim/corpora/hashdictionary.py
__getitem__
Abas-Khan/thesis
python
def __getitem__(self, tokenid): 'Get all words that have mapped to the given id so far, as a set.\n\n Warnings\n --------\n Works only if `debug=True`.\n\n Parameters\n ----------\n tokenid : int\n Token identifier (result of hashing).\n\n Return\n ------\n set of str\n Set of all corresponding words.\n\n ' return self.id2token.get(tokenid, set())
def restricted_hash(self, token): 'Calculate id of the given token.\n Also keep track of what words were mapped to what ids, for debugging reasons.\n\n Parameters\n ----------\n token : str\n Input token.\n\n Return\n ------\n int\n Hash value of `token`.\n\n ' h = (self.myhash(utils.to_utf8(token)) % self.id_range) if self.debug: self.token2id[token] = h self.id2token.setdefault(h, set()).add(token) return h
-3,578,973,074,447,717,400
Calculate id of the given token. Also keep track of what words were mapped to what ids, for debugging reasons. Parameters ---------- token : str Input token. Return ------ int Hash value of `token`.
gensim/gensim/corpora/hashdictionary.py
restricted_hash
Abas-Khan/thesis
python
def restricted_hash(self, token): 'Calculate id of the given token.\n Also keep track of what words were mapped to what ids, for debugging reasons.\n\n Parameters\n ----------\n token : str\n Input token.\n\n Return\n ------\n int\n Hash value of `token`.\n\n ' h = (self.myhash(utils.to_utf8(token)) % self.id_range) if self.debug: self.token2id[token] = h self.id2token.setdefault(h, set()).add(token) return h
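The id assignment is plain modular hashing; the lines below redo the same arithmetic outside the class (utils.to_utf8 is approximated with str.encode, and the default id_range=32000 is assumed).

import zlib

token = 'máma'
bucket = zlib.adler32(token.encode('utf8')) % 32000
print(0 <= bucket < 32000)  # True -- every token lands inside the fixed id range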
def __len__(self): 'Get the number of distinct ids = the entire dictionary size.' return self.id_range
3,453,205,310,461,073,400
Get the number of distinct ids = the entire dictionary size.
gensim/gensim/corpora/hashdictionary.py
__len__
Abas-Khan/thesis
python
def __len__(self): return self.id_range
def keys(self): 'Get a list of all token ids.' return range(len(self))
2,063,100,109,681,629,000
Get a list of all token ids.
gensim/gensim/corpora/hashdictionary.py
keys
Abas-Khan/thesis
python
def keys(self): return range(len(self))
def add_documents(self, documents): 'Build dictionary from a collection of documents.\n\n Notes\n -----\n This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.\n\n Parameters\n ----------\n documents : iterable of list of str\n Collection of documents.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> "sparta" in dct.token2id\n False\n >>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary\n >>> "sparta" in dct.token2id\n True\n\n ' for (docno, document) in enumerate(documents): if ((docno % 10000) == 0): logger.info('adding document #%i to %s', docno, self) self.doc2bow(document, allow_update=True) logger.info('built %s from %i documents (total %i corpus positions)', self, self.num_docs, self.num_pos)
4,811,325,091,924,992,000
Build dictionary from a collection of documents. Notes ----- This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`. Parameters ---------- documents : iterable of list of str Collection of documents. Examples -------- >>> from gensim.corpora import HashDictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = HashDictionary(corpus) >>> "sparta" in dct.token2id False >>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary >>> "sparta" in dct.token2id True
gensim/gensim/corpora/hashdictionary.py
add_documents
Abas-Khan/thesis
python
def add_documents(self, documents): 'Build dictionary from a collection of documents.\n\n Notes\n -----\n This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.\n\n Parameters\n ----------\n documents : iterable of list of str\n Collection of documents.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> "sparta" in dct.token2id\n False\n >>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary\n >>> "sparta" in dct.token2id\n True\n\n ' for (docno, document) in enumerate(documents): if ((docno % 10000) == 0): logger.info('adding document #%i to %s', docno, self) self.doc2bow(document, allow_update=True) logger.info('built %s from %i documents (total %i corpus positions)', self, self.num_docs, self.num_pos)
def doc2bow(self, document, allow_update=False, return_missing=False): 'Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].\n\n Notes\n -----\n Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing\n is done on the words in `document` (apply tokenization, stemming etc) before calling this method.\n\n If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall\n corpus statistics and document frequencies. For each id appearing in this document, increase its document\n frequency (`self.dfs`) by one.\n\n Parameters\n ----------\n document : list of str\n Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).\n allow_update : bool, optional\n If True - update dictionary in the process.\n return_missing : bool, optional\n Show token_count for missing words. HAVE NO SENSE FOR THIS CLASS, BECAUSE WE USING HASHING-TRICK.\n\n Return\n ------\n list of (int, int)\n Document in Bag-of-words (BoW) format.\n list of (int, int), dict\n If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> dct.doc2bow(["this","is","máma"])\n [(1721, 1), (5280, 1), (22493, 1)]\n >>> dct.doc2bow(["this","is","máma"], return_missing=True)\n ([(1721, 1), (5280, 1), (22493, 1)], {})\n\n ' result = {} missing = {} document = sorted(document) for (word_norm, group) in itertools.groupby(document): frequency = len(list(group)) tokenid = self.restricted_hash(word_norm) result[tokenid] = (result.get(tokenid, 0) + frequency) if self.debug: self.dfs_debug[word_norm] = (self.dfs_debug.get(word_norm, 0) + 1) if (allow_update or self.allow_update): self.num_docs += 1 self.num_pos += len(document) self.num_nnz += len(result) if self.debug: for tokenid in iterkeys(result): self.dfs[tokenid] = (self.dfs.get(tokenid, 0) + 1) result = sorted(iteritems(result)) if return_missing: return (result, missing) else: return result
-1,872,236,048,489,406,500
Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)]. Notes ----- Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing is done on the words in `document` (apply tokenization, stemming etc) before calling this method. If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall corpus statistics and document frequencies. For each id appearing in this document, increase its document frequency (`self.dfs`) by one. Parameters ---------- document : list of str Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode). allow_update : bool, optional If True - update dictionary in the process. return_missing : bool, optional Show token_count for missing words. HAVE NO SENSE FOR THIS CLASS, BECAUSE WE USING HASHING-TRICK. Return ------ list of (int, int) Document in Bag-of-words (BoW) format. list of (int, int), dict If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary. Examples -------- >>> from gensim.corpora import HashDictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = HashDictionary(corpus) >>> dct.doc2bow(["this","is","máma"]) [(1721, 1), (5280, 1), (22493, 1)] >>> dct.doc2bow(["this","is","máma"], return_missing=True) ([(1721, 1), (5280, 1), (22493, 1)], {})
gensim/gensim/corpora/hashdictionary.py
doc2bow
Abas-Khan/thesis
python
def doc2bow(self, document, allow_update=False, return_missing=False): 'Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].\n\n Notes\n -----\n Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing\n is done on the words in `document` (apply tokenization, stemming etc) before calling this method.\n\n If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall\n corpus statistics and document frequencies. For each id appearing in this document, increase its document\n frequency (`self.dfs`) by one.\n\n Parameters\n ----------\n document : list of str\n Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).\n allow_update : bool, optional\n If True - update dictionary in the process.\n return_missing : bool, optional\n Show token_count for missing words. HAVE NO SENSE FOR THIS CLASS, BECAUSE WE USING HASHING-TRICK.\n\n Return\n ------\n list of (int, int)\n Document in Bag-of-words (BoW) format.\n list of (int, int), dict\n If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> dct.doc2bow(["this","is","máma"])\n [(1721, 1), (5280, 1), (22493, 1)]\n >>> dct.doc2bow(["this","is","máma"], return_missing=True)\n ([(1721, 1), (5280, 1), (22493, 1)], {})\n\n ' result = {} missing = {} document = sorted(document) for (word_norm, group) in itertools.groupby(document): frequency = len(list(group)) tokenid = self.restricted_hash(word_norm) result[tokenid] = (result.get(tokenid, 0) + frequency) if self.debug: self.dfs_debug[word_norm] = (self.dfs_debug.get(word_norm, 0) + 1) if (allow_update or self.allow_update): self.num_docs += 1 self.num_pos += len(document) self.num_nnz += len(result) if self.debug: for tokenid in iterkeys(result): self.dfs[tokenid] = (self.dfs.get(tokenid, 0) + 1) result = sorted(iteritems(result)) if return_missing: return (result, missing) else: return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000): 'Filter tokens in dictionary by frequency.\n\n Parameters\n ----------\n no_below : int, optional\n Keep tokens which are contained in at least `no_below` documents.\n no_above : float, optional\n Keep tokens which are contained in no more than `no_above` documents\n (fraction of total corpus size, not an absolute number).\n keep_n : int, optional\n Keep only the first `keep_n` most frequent tokens.\n\n Notes\n -----\n For tokens that appear in:\n\n #. Less than `no_below` documents (absolute number) or \n\n #. More than `no_above` documents (fraction of total corpus size, **not absolute number**).\n #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).\n\n Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn\'t depend on the number\n of tokens seen, this doesn\'t really "remove" anything.\n It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)\n >>> print dct.token2id\n {\'maso\': 15025}\n\n ' no_above_abs = int((no_above * self.num_docs)) ok = [item for item in iteritems(self.dfs_debug) if (no_below <= item[1] <= no_above_abs)] ok = frozenset((word for (word, freq) in sorted(ok, key=(lambda x: (- x[1])))[:keep_n])) self.dfs_debug = {word: freq for (word, freq) in iteritems(self.dfs_debug) if (word in ok)} self.token2id = {token: tokenid for (token, tokenid) in iteritems(self.token2id) if (token in self.dfs_debug)} self.id2token = {tokenid: {token for token in tokens if (token in self.dfs_debug)} for (tokenid, tokens) in iteritems(self.id2token)} self.dfs = {tokenid: freq for (tokenid, freq) in iteritems(self.dfs) if self.id2token.get(tokenid, set())} logger.info('kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents', no_below, no_above_abs, (100.0 * no_above))
-678,942,940,960,847,700
Filter tokens in dictionary by frequency. Parameters ---------- no_below : int, optional Keep tokens which are contained in at least `no_below` documents. no_above : float, optional Keep tokens which are contained in no more than `no_above` documents (fraction of total corpus size, not an absolute number). keep_n : int, optional Keep only the first `keep_n` most frequent tokens. Notes ----- For tokens that appear in: #. Less than `no_below` documents (absolute number) or #. More than `no_above` documents (fraction of total corpus size, **not absolute number**). #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`). Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number of tokens seen, this doesn't really "remove" anything. It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint. Examples -------- >>> from gensim.corpora import HashDictionary >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> dct = HashDictionary(corpus) >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1) >>> print dct.token2id {'maso': 15025}
gensim/gensim/corpora/hashdictionary.py
filter_extremes
Abas-Khan/thesis
python
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000): 'Filter tokens in dictionary by frequency.\n\n Parameters\n ----------\n no_below : int, optional\n Keep tokens which are contained in at least `no_below` documents.\n no_above : float, optional\n Keep tokens which are contained in no more than `no_above` documents\n (fraction of total corpus size, not an absolute number).\n keep_n : int, optional\n Keep only the first `keep_n` most frequent tokens.\n\n Notes\n -----\n For tokens that appear in:\n\n #. Less than `no_below` documents (absolute number) or \n\n #. More than `no_above` documents (fraction of total corpus size, **not absolute number**).\n #. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).\n\n Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn\'t depend on the number\n of tokens seen, this doesn\'t really "remove" anything.\n It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> dct = HashDictionary(corpus)\n >>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)\n >>> print dct.token2id\n {\'maso\': 15025}\n\n ' no_above_abs = int((no_above * self.num_docs)) ok = [item for item in iteritems(self.dfs_debug) if (no_below <= item[1] <= no_above_abs)] ok = frozenset((word for (word, freq) in sorted(ok, key=(lambda x: (- x[1])))[:keep_n])) self.dfs_debug = {word: freq for (word, freq) in iteritems(self.dfs_debug) if (word in ok)} self.token2id = {token: tokenid for (token, tokenid) in iteritems(self.token2id) if (token in self.dfs_debug)} self.id2token = {tokenid: {token for token in tokens if (token in self.dfs_debug)} for (tokenid, tokens) in iteritems(self.id2token)} self.dfs = {tokenid: freq for (tokenid, freq) in iteritems(self.dfs) if self.id2token.get(tokenid, set())} logger.info('kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents', no_below, no_above_abs, (100.0 * no_above))
def save_as_text(self, fname): 'Save this HashDictionary to a text file.\n\n Parameters\n ----------\n fname : str\n Path to output file.\n\n Notes\n -----\n The format is:\n `id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.\n\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>> from gensim.test.utils import get_tmpfile\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> data = HashDictionary(corpus)\n >>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))\n\n ' logger.info(('saving HashDictionary mapping to %s' % fname)) with utils.smart_open(fname, 'wb') as fout: for tokenid in self.keys(): words = sorted(self[tokenid]) if words: words_df = [(word, self.dfs_debug.get(word, 0)) for word in words] words_df = [('%s(%i)' % item) for item in sorted(words_df, key=(lambda x: (- x[1])))] words_df = '\t'.join(words_df) fout.write(utils.to_utf8(('%i\t%i\t%s\n' % (tokenid, self.dfs.get(tokenid, 0), words_df))))
1,265,885,426,637,448,000
Save this HashDictionary to a text file. Parameters ---------- fname : str Path to output file. Notes ----- The format is: `id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`. Examples -------- >>> from gensim.corpora import HashDictionary >>> from gensim.test.utils import get_tmpfile >>> >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]] >>> data = HashDictionary(corpus) >>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
gensim/gensim/corpora/hashdictionary.py
save_as_text
Abas-Khan/thesis
python
def save_as_text(self, fname): 'Save this HashDictionary to a text file.\n\n Parameters\n ----------\n fname : str\n Path to output file.\n\n Notes\n -----\n The format is:\n `id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.\n\n\n Examples\n --------\n >>> from gensim.corpora import HashDictionary\n >>> from gensim.test.utils import get_tmpfile\n >>>\n >>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]\n >>> data = HashDictionary(corpus)\n >>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))\n\n ' logger.info(('saving HashDictionary mapping to %s' % fname)) with utils.smart_open(fname, 'wb') as fout: for tokenid in self.keys(): words = sorted(self[tokenid]) if words: words_df = [(word, self.dfs_debug.get(word, 0)) for word in words] words_df = [('%s(%i)' % item) for item in sorted(words_df, key=(lambda x: (- x[1])))] words_df = '\t'.join(words_df) fout.write(utils.to_utf8(('%i\t%i\t%s\n' % (tokenid, self.dfs.get(tokenid, 0), words_df))))
def dict_url(conf): 'Add all urls from file url.ini with\n key = name of the parking and value is\n the url.\n\n :returns: dictionary with all parkings and urls\n :rtype: dict\n ' url = configparser.ConfigParser() logging.debug('initializing the variable url') url.read(conf) logging.debug('read the file') logging.debug('all url in file %s', list(url['url'])) res = {} for simple_url in list(url['url']): parking = url['name'][simple_url] link = url['url'][simple_url] adress = url['adress'][simple_url] res[parking] = (link, adress) logging.info('this is the dict with keys and urls %s', res) return res
57,583,237,643,510,530
Add all urls from file url.ini with
key = name of the parking and value is
the url.

:returns: dictionary with all parkings and urls
:rtype: dict
backend/function_park/dict_url.py
dict_url
Mancid/data_parking_montpellier
python
def dict_url(conf): 'Add all urls from file url.ini with\n key = name of the parking and value is\n the url.\n\n :returns: dictionary with all parkings and urls\n :rtype: dict\n ' url = configparser.ConfigParser() logging.debug('initializing the variable url') url.read(conf) logging.debug('read the file') logging.debug('all url in file %s', list(url['url'])) res = {} for simple_url in list(url['url']): parking = url['name'][simple_url] link = url['url'][simple_url] adress = url['adress'][simple_url] res[parking] = (link, adress) logging.info('this is the dict with keys and urls %s', res) return res
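A usage sketch with a made-up url.ini; the section names ('url', 'name', 'adress') are taken from what the function reads, while the keys and values are hypothetical.

import tempfile

sample = """
[url]
p1 = https://example.org/parking1.xml

[name]
p1 = Comedie

[adress]
p1 = Place de la Comedie
"""
with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as fh:
    fh.write(sample)

print(dict_url(fh.name))
# {'Comedie': ('https://example.org/parking1.xml', 'Place de la Comedie')}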
def pad_batch_equal(batch, padding_values: AxesParams=0, ratio: AxesParams=0.5): '\n Pad each element of ``batch`` to obtain a correctly shaped array.\n\n References\n ----------\n `pad_to_shape`\n ' max_shapes = np.max(lmap(np.shape, batch), axis=0) if (max_shapes.size != 0): batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch] return np.array(batch)
-3,535,126,758,089,575,000
Pad each element of ``batch`` to obtain a correctly shaped array. References ---------- `pad_to_shape`
dpipe/batch_iter/utils.py
pad_batch_equal
neuro-ml/deep_pipe
python
def pad_batch_equal(batch, padding_values: AxesParams=0, ratio: AxesParams=0.5): '\n Pad each element of ``batch`` to obtain a correctly shaped array.\n\n References\n ----------\n `pad_to_shape`\n ' max_shapes = np.max(lmap(np.shape, batch), axis=0) if (max_shapes.size != 0): batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch] return np.array(batch)
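A small usage sketch, assuming dpipe is installed so that pad_to_shape and lmap (used inside the function) resolve; two differently shaped arrays are padded up to their elementwise-maximum shape.

import numpy as np

batch = [np.ones((2, 3)), np.ones((3, 2))]
padded = pad_batch_equal(batch)  # both elements are padded to shape (3, 3), with zeros by default
print(padded.shape)              # (2, 3, 3)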
def unpack_args(func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and unpacks it while calling ``func``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> def add(x, y):\n >>> return x + y\n >>>\n >>> add_ = unpack_args(add)\n >>> add(1, 2) == add_([1, 2])\n >>> True\n ' def wrapper(xs, *args_, **kwargs_): return func(*xs, *args_, *args, **kwargs_, **kwargs) return wrapper
2,034,436,781,968,058,400
Returns a function that takes an iterable and unpacks it while calling ``func``. ``args`` and ``kwargs`` are passed to ``func`` as additional arguments. Examples -------- >>> def add(x, y): >>> return x + y >>> >>> add_ = unpack_args(add) >>> add(1, 2) == add_([1, 2]) >>> True
dpipe/batch_iter/utils.py
unpack_args
neuro-ml/deep_pipe
python
def unpack_args(func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and unpacks it while calling ``func``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> def add(x, y):\n >>> return x + y\n >>>\n >>> add_ = unpack_args(add)\n >>> add(1, 2) == add_([1, 2])\n >>> True\n ' def wrapper(xs, *args_, **kwargs_): return func(*xs, *args_, *args, **kwargs_, **kwargs) return wrapper
def multiply(func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and maps ``func`` over it.\n Useful when multiple batches require the same function.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n ' def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple: return tuple((func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)) return wrapped
-8,247,229,217,920,018,000
Returns a function that takes an iterable and maps ``func`` over it. Useful when multiple batches require the same function. ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
dpipe/batch_iter/utils.py
multiply
neuro-ml/deep_pipe
python
def multiply(func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and maps ``func`` over it.\n Useful when multiple batches require the same function.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n ' def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple: return tuple((func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)) return wrapped
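multiply has no usage example of its own; a minimal sketch in the spirit of the neighbouring docstrings:

import numpy as np

square_all = multiply(np.square)
print(square_all([1, 2, 3]))  # (1, 4, 9)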
def apply_at(index: AxesLike, func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> first_sqr = apply_at(0, np.square)\n >>> first_sqr([3, 2, 1])\n >>> (9, 2, 1)\n ' index = set(np.atleast_1d(index).tolist()) def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple: index_ = {((i + len(xs)) if (i < 0) else i) for i in index} for idx in index_: if ((idx < 0) or (idx >= len(xs))): raise IndexError(f'Index {idx} out of bounds.') return tuple(((func(x, *args_, *args, **kwargs_, **kwargs) if (i in index_) else x) for (i, x) in enumerate(xs))) return wrapped
-7,314,694,831,249,120,000
Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``. ``args`` and ``kwargs`` are passed to ``func`` as additional arguments. Examples -------- >>> first_sqr = apply_at(0, np.square) >>> first_sqr([3, 2, 1]) >>> (9, 2, 1)
dpipe/batch_iter/utils.py
apply_at
neuro-ml/deep_pipe
python
def apply_at(index: AxesLike, func: Callable, *args, **kwargs): '\n Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> first_sqr = apply_at(0, np.square)\n >>> first_sqr([3, 2, 1])\n >>> (9, 2, 1)\n ' index = set(np.atleast_1d(index).tolist()) def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple: index_ = {((i + len(xs)) if (i < 0) else i) for i in index} for idx in index_: if ((idx < 0) or (idx >= len(xs))): raise IndexError(f'Index {idx} out of bounds.') return tuple(((func(x, *args_, *args, **kwargs_, **kwargs) if (i in index_) else x) for (i, x) in enumerate(xs))) return wrapped
def zip_apply(*functions: Callable, **kwargs): '\n Returns a function that takes an iterable and zips ``functions`` over it.\n\n ``kwargs`` are passed to each function as additional arguments.\n\n Examples\n --------\n >>> zipper = zip_apply(np.square, np.sqrt)\n >>> zipper([4, 9])\n >>> (16, 3)\n ' def wrapped(xs: Sequence, *args, **kwargs_) -> tuple: return tuple((func(x, *args, **kwargs_, **kwargs) for (func, x) in zip(functions, xs))) return wrapped
3,684,211,283,208,276,500
Returns a function that takes an iterable and zips ``functions`` over it. ``kwargs`` are passed to each function as additional arguments. Examples -------- >>> zipper = zip_apply(np.square, np.sqrt) >>> zipper([4, 9]) >>> (16, 3)
dpipe/batch_iter/utils.py
zip_apply
neuro-ml/deep_pipe
python
def zip_apply(*functions: Callable, **kwargs): '\n Returns a function that takes an iterable and zips ``functions`` over it.\n\n ``kwargs`` are passed to each function as additional arguments.\n\n Examples\n --------\n >>> zipper = zip_apply(np.square, np.sqrt)\n >>> zipper([4, 9])\n >>> (16, 3)\n ' def wrapped(xs: Sequence, *args, **kwargs_) -> tuple: return tuple((func(x, *args, **kwargs_, **kwargs) for (func, x) in zip(functions, xs))) return wrapped
def random_apply(p: float, func: Callable, *args, **kwargs): '\n Returns a function that applies ``func`` with a given probability ``p``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n ' def wrapped(*args_, **kwargs_): if np.random.binomial(1, p): return func(*args_, *args, **kwargs_, **kwargs) return squeeze_first(args_) return wrapped
3,067,490,294,753,660,400
Returns a function that applies ``func`` with a given probability ``p``. ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
dpipe/batch_iter/utils.py
random_apply
neuro-ml/deep_pipe
python
def random_apply(p: float, func: Callable, *args, **kwargs): '\n Returns a function that applies ``func`` with a given probability ``p``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n ' def wrapped(*args_, **kwargs_): if np.random.binomial(1, p): return func(*args_, *args, **kwargs_, **kwargs) return squeeze_first(args_) return wrapped
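A usage sketch; squeeze_first is the module's own helper that unwraps a single-element argument tuple, so when the coin flip fails the input comes back unchanged.

import numpy as np

maybe_flip = random_apply(0.5, np.flip)
result = maybe_flip(np.array([1, 2, 3]))
print(result)  # [3 2 1] about half the time, [1 2 3] otherwise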
def sample_args(func: Callable, *args: Callable, **kwargs: Callable): '\n Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.\n\n Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.\n\n Examples\n --------\n >>> from scipy.ndimage import rotate\n >>>\n >>> random_rotate = sample_args(rotate, angle=np.random.normal)\n >>> random_rotate(x)\n >>> # same as\n >>> rotate(x, angle=np.random.normal())\n ' def wrapped(*args_, **kwargs_): return func(*args_, *[arg() for arg in args], **kwargs_, **{name: arg() for (name, arg) in kwargs.items()}) return wrapped
7,986,351,789,278,345,000
Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``. Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value. Examples -------- >>> from scipy.ndimage import rotate >>> >>> random_rotate = sample_args(rotate, angle=np.random.normal) >>> random_rotate(x) >>> # same as >>> rotate(x, angle=np.random.normal())
dpipe/batch_iter/utils.py
sample_args
neuro-ml/deep_pipe
python
def sample_args(func: Callable, *args: Callable, **kwargs: Callable): '\n Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.\n\n Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.\n\n Examples\n --------\n >>> from scipy.ndimage import rotate\n >>>\n >>> random_rotate = sample_args(rotate, angle=np.random.normal)\n >>> random_rotate(x)\n >>> # same as\n >>> rotate(x, angle=np.random.normal())\n ' def wrapped(*args_, **kwargs_): return func(*args_, *[arg() for arg in args], **kwargs_, **{name: arg() for (name, arg) in kwargs.items()}) return wrapped
def __init__(self, group=None, load=False, *args, **kwargs): '\n Validate that group is either a string or a tuple of valid entry point groups, or if it\n is not specified use the tuple of all recognized entry point groups.\n ' valid_entry_point_groups = get_entry_point_groups() if (group is None): self._groups = tuple(valid_entry_point_groups) else: if isinstance(group, six.string_types): invalidated_groups = tuple([group]) elif isinstance(group, tuple): invalidated_groups = group else: raise ValueError('invalid type for group') groups = [] for grp in invalidated_groups: if (not grp.startswith(ENTRY_POINT_GROUP_PREFIX)): grp = (ENTRY_POINT_GROUP_PREFIX + grp) if (grp not in valid_entry_point_groups): raise ValueError('entry point group {} is not recognized'.format(grp)) groups.append(grp) self._groups = tuple(groups) self._init_entry_points() self.load = load super(PluginParamType, self).__init__(*args, **kwargs)
4,148,809,400,762,458,000
Validate that group is either a string or a tuple of valid entry point groups, or if it is not specified use the tuple of all recognized entry point groups.
aiida/cmdline/params/types/plugin.py
__init__
DanielMarchand/aiida_core
python
def __init__(self, group=None, load=False, *args, **kwargs): '\n Validate that group is either a string or a tuple of valid entry point groups, or if it\n is not specified use the tuple of all recognized entry point groups.\n ' valid_entry_point_groups = get_entry_point_groups() if (group is None): self._groups = tuple(valid_entry_point_groups) else: if isinstance(group, six.string_types): invalidated_groups = tuple([group]) elif isinstance(group, tuple): invalidated_groups = group else: raise ValueError('invalid type for group') groups = [] for grp in invalidated_groups: if (not grp.startswith(ENTRY_POINT_GROUP_PREFIX)): grp = (ENTRY_POINT_GROUP_PREFIX + grp) if (grp not in valid_entry_point_groups): raise ValueError('entry point group {} is not recognized'.format(grp)) groups.append(grp) self._groups = tuple(groups) self._init_entry_points() self.load = load super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self): '\n Populate entry point information that will be used later on. This should only be called\n once in the constructor after setting self.groups because the groups should not be changed\n after instantiation\n ' self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)] self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
7,916,297,276,349,707,000
Populate entry point information that will be used later on. This should only be called once in the constructor after setting self.groups because the groups should not be changed after instantiation
aiida/cmdline/params/types/plugin.py
_init_entry_points
DanielMarchand/aiida_core
python
def _init_entry_points(self): '\n Populate entry point information that will be used later on. This should only be called\n once in the constructor after setting self.groups because the groups should not be changed\n after instantiation\n ' self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)] self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property def has_potential_ambiguity(self): '\n Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name\n is specified. This will happen if one or more groups share an entry point with a common name\n ' return (len(self._entry_point_names) != len(set(self._entry_point_names)))
8,702,056,656,915,079,000
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
is specified. This will happen if one or more groups share an entry point with a common name
aiida/cmdline/params/types/plugin.py
has_potential_ambiguity
DanielMarchand/aiida_core
python
@property def has_potential_ambiguity(self): '\n Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name\n is specified. This will happen if one or more groups share an entry point with a common name\n ' return (len(self._entry_point_names) != len(set(self._entry_point_names)))
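The check itself is just a duplicate test over the collected entry point names; a standalone illustration with hypothetical names:

entry_point_names = ['arithmetic.add', 'templatereplacer', 'arithmetic.add']
print(len(entry_point_names) != len(set(entry_point_names)))  # True -- the same name occurs in more than one group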
def get_valid_arguments(self): '\n Return a list of all available plugins for the groups configured for this PluginParamType instance.\n If the entry point names are not unique, because there are multiple groups that contain an entry\n point that has an identical name, we need to prefix the names with the full group name\n\n :returns: list of valid entry point strings\n ' if self.has_potential_ambiguity: fmt = EntryPointFormat.FULL return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for (group, ep) in self._entry_points]) return sorted(self._entry_point_names)
-4,401,533,748,211,632,000
Return a list of all available plugins for the groups configured for this PluginParamType instance. If the entry point names are not unique, because there are multiple groups that contain an entry point that has an identical name, we need to prefix the names with the full group name :returns: list of valid entry point strings
aiida/cmdline/params/types/plugin.py
get_valid_arguments
DanielMarchand/aiida_core
python
def get_valid_arguments(self): '\n Return a list of all available plugins for the groups configured for this PluginParamType instance.\n If the entry point names are not unique, because there are multiple groups that contain an entry\n point that has an identical name, we need to prefix the names with the full group name\n\n :returns: list of valid entry point strings\n ' if self.has_potential_ambiguity: fmt = EntryPointFormat.FULL return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for (group, ep) in self._entry_points]) return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''): '\n Return a list of plugins starting with incomplete\n ' if (incomplete == ''): return self.get_valid_arguments() if self.has_potential_ambiguity: possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)] else: possibilites = [] fmt = get_entry_point_string_format(incomplete) for (group, entry_point) in self._entry_points: entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt) if entry_point_string.startswith(incomplete): possibilites.append(entry_point_string) return possibilites
2,020,066,774,342,964,200
Return a list of plugins starting with incomplete
aiida/cmdline/params/types/plugin.py
get_possibilities
DanielMarchand/aiida_core
python
def get_possibilities(self, incomplete=''): '\n \n ' if (incomplete == ''): return self.get_valid_arguments() if self.has_potential_ambiguity: possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)] else: possibilites = [] fmt = get_entry_point_string_format(incomplete) for (group, entry_point) in self._entry_points: entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt) if entry_point_string.startswith(incomplete): possibilites.append(entry_point_string) return possibilites
def complete(self, ctx, incomplete): '\n Return possible completions based on an incomplete value\n\n :returns: list of tuples of valid entry points (matching incomplete) and a description\n ' return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
-8,842,338,152,452,906,000
Return possible completions based on an incomplete value :returns: list of tuples of valid entry points (matching incomplete) and a description
aiida/cmdline/params/types/plugin.py
complete
DanielMarchand/aiida_core
python
def complete(self, ctx, incomplete): '\n Return possible completions based on an incomplete value\n\n :returns: list of tuples of valid entry points (matching incomplete) and a description\n ' return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_entry_point_from_string(self, entry_point_string): '\n Validate a given entry point string, which means that it should have a valid entry point string format\n and that the entry point unambiguously corresponds to an entry point in the groups configured for this\n instance of PluginParameterType.\n\n :returns: the entry point if valid\n :raises: ValueError if the entry point string is invalid\n ' group = None name = None entry_point_format = get_entry_point_string_format(entry_point_string) if (entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL)): (group, name) = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) if (entry_point_format == EntryPointFormat.PARTIAL): group = (ENTRY_POINT_GROUP_PREFIX + group) if (group not in self.groups): raise ValueError('entry point group {} is not supported by this parameter') elif (entry_point_format == EntryPointFormat.MINIMAL): name = entry_point_string matching_groups = [group for (group, entry_point) in self._entry_points if (entry_point.name == name)] if (len(matching_groups) > 1): raise ValueError("entry point '{}' matches more than one valid entry point group [{}], please specify an explicit group prefix".format(name, ' '.join(matching_groups))) elif (not matching_groups): raise ValueError("entry point '{}' is not valid for any of the allowed entry point groups: {}".format(name, ' '.join(self.groups))) else: group = matching_groups[0] else: ValueError('invalid entry point string format: {}'.format(entry_point_string)) try: entry_point = get_entry_point(group, name) except exceptions.EntryPointError as exception: raise ValueError(exception) return entry_point
2,415,760,058,202,702,000
Validate a given entry point string, which means that it should have a valid entry point string format and that the entry point unambiguously corresponds to an entry point in the groups configured for this instance of PluginParameterType. :returns: the entry point if valid :raises: ValueError if the entry point string is invalid
aiida/cmdline/params/types/plugin.py
get_entry_point_from_string
DanielMarchand/aiida_core
python
def get_entry_point_from_string(self, entry_point_string): '\n Validate a given entry point string, which means that it should have a valid entry point string format\n and that the entry point unambiguously corresponds to an entry point in the groups configured for this\n instance of PluginParameterType.\n\n :returns: the entry point if valid\n :raises: ValueError if the entry point string is invalid\n ' group = None name = None entry_point_format = get_entry_point_string_format(entry_point_string) if (entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL)): (group, name) = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) if (entry_point_format == EntryPointFormat.PARTIAL): group = (ENTRY_POINT_GROUP_PREFIX + group) if (group not in self.groups): raise ValueError('entry point group {} is not supported by this parameter') elif (entry_point_format == EntryPointFormat.MINIMAL): name = entry_point_string matching_groups = [group for (group, entry_point) in self._entry_points if (entry_point.name == name)] if (len(matching_groups) > 1): raise ValueError("entry point '{}' matches more than one valid entry point group [{}], please specify an explicit group prefix".format(name, ' '.join(matching_groups))) elif (not matching_groups): raise ValueError("entry point '{}' is not valid for any of the allowed entry point groups: {}".format(name, ' '.join(self.groups))) else: group = matching_groups[0] else: ValueError('invalid entry point string format: {}'.format(entry_point_string)) try: entry_point = get_entry_point(group, name) except exceptions.EntryPointError as exception: raise ValueError(exception) return entry_point
@decorators.with_dbenv() def convert(self, value, param, ctx): '\n Convert the string value to an entry point instance, if the value can be successfully parsed\n into an actual entry point. Will raise click.BadParameter if validation fails.\n ' if (not value): raise click.BadParameter('plugin name cannot be empty') try: entry_point = self.get_entry_point_from_string(value) except ValueError as exception: raise click.BadParameter(str(exception)) if self.load: try: return entry_point.load() except exceptions.LoadingEntryPointError as exception: raise click.BadParameter(str(exception)) else: return entry_point
-4,141,031,112,145,093,600
Convert the string value to an entry point instance, if the value can be successfully parsed into an actual entry point. Will raise click.BadParameter if validation fails.
aiida/cmdline/params/types/plugin.py
convert
DanielMarchand/aiida_core
python
@decorators.with_dbenv() def convert(self, value, param, ctx): '\n Convert the string value to an entry point instance, if the value can be successfully parsed\n into an actual entry point. Will raise click.BadParameter if validation fails.\n ' if (not value): raise click.BadParameter('plugin name cannot be empty') try: entry_point = self.get_entry_point_from_string(value) except ValueError as exception: raise click.BadParameter(str(exception)) if self.load: try: return entry_point.load() except exceptions.LoadingEntryPointError as exception: raise click.BadParameter(str(exception)) else: return entry_point
async def execute(self, query: str) -> None: "Execute an EdgeQL command (or commands).\n\n Example:\n\n .. code-block:: pycon\n\n >>> await con.execute('''\n ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };\n ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };\n ... ''')\n " (await self._ensure_transaction()) (await self._connection_impl._protocol.simple_query(query, enums.Capability.EXECUTE))
702,758,126,970,604,200
Execute an EdgeQL command (or commands). Example: .. code-block:: pycon >>> await con.execute(''' ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 }; ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x }; ... ''')
edgedb/transaction.py
execute
edgedb/edgedb-python
python
async def execute(self, query: str) -> None: "Execute an EdgeQL command (or commands).\n\n Example:\n\n .. code-block:: pycon\n\n >>> await con.execute('''\n ... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };\n ... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };\n ... ''')\n " (await self._ensure_transaction()) (await self._connection_impl._protocol.simple_query(query, enums.Capability.EXECUTE))
async def start(self) -> None: 'Enter the transaction or savepoint block.' (await self._start()) self._borrow()
8,014,743,481,410,684,000
Enter the transaction or savepoint block.
edgedb/transaction.py
start
edgedb/edgedb-python
python
async def start(self) -> None: (await self._start()) self._borrow()
async def commit(self) -> None: 'Exit the transaction or savepoint block and commit changes.' if self._managed: raise errors.InterfaceError('cannot manually commit from within an `async with` block') (await self._commit())
8,287,463,235,824,175,000
Exit the transaction or savepoint block and commit changes.
edgedb/transaction.py
commit
edgedb/edgedb-python
python
async def commit(self) -> None: if self._managed: raise errors.InterfaceError('cannot manually commit from within an `async with` block') (await self._commit())
async def rollback(self) -> None: 'Exit the transaction or savepoint block and rollback changes.' if self._managed: raise errors.InterfaceError('cannot manually rollback from within an `async with` block') (await self._rollback())
-7,523,355,772,203,383,000
Exit the transaction or savepoint block and rollback changes.
edgedb/transaction.py
rollback
edgedb/edgedb-python
python
async def rollback(self) -> None: if self._managed: raise errors.InterfaceError('cannot manually rollback from within an `async with` block') (await self._rollback())
def start(self) -> None: 'Enter the transaction or savepoint block.' self._start() self._borrow()
-6,337,271,078,978,315,000
Enter the transaction or savepoint block.
edgedb/transaction.py
start
edgedb/edgedb-python
python
def start(self) -> None: self._start() self._borrow()
def commit(self) -> None: 'Exit the transaction or savepoint block and commit changes.' if self._managed: raise errors.InterfaceError('cannot manually commit from within a `with` block') self._commit()
-7,869,346,230,001,466,000
Exit the transaction or savepoint block and commit changes.
edgedb/transaction.py
commit
edgedb/edgedb-python
python
def commit(self) -> None: if self._managed: raise errors.InterfaceError('cannot manually commit from within a `with` block') self._commit()
def rollback(self) -> None: 'Exit the transaction or savepoint block and rollback changes.' if self._managed: raise errors.InterfaceError('cannot manually rollback from within a `with` block') self._rollback()
-1,726,692,286,727,184,100
Exit the transaction or savepoint block and rollback changes.
edgedb/transaction.py
rollback
edgedb/edgedb-python
python
def rollback(self) -> None: if self._managed: raise errors.InterfaceError('cannot manually rollback from within a `with` block') self._rollback()
def get_character_url(name): "Gets a character's tibia.com URL" return (url_character + urllib.parse.quote(name.encode('iso-8859-1')))
-297,893,429,502,317,060
Gets a character's tibia.com URL
NabBot-master/utils/tibia.py
get_character_url
LadyKeladry/Guardian-Bot
python
def get_character_url(name): return (url_character + urllib.parse.quote(name.encode('iso-8859-1')))
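A usage sketch; url_character is a module-level constant not included in this record, so the value below is an assumption based on the tibia.com URLs appearing elsewhere in this file, and the character name is made up.

import urllib.parse

url_character = 'https://secure.tibia.com/community/?subtopic=characters&name='  # assumed value
name = 'Sir Example'
print(url_character + urllib.parse.quote(name.encode('iso-8859-1')))
# https://secure.tibia.com/community/?subtopic=characters&name=Sir%20Example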
@asyncio.coroutine def get_highscores(server, category, pagenum, profession=0, tries=5): 'Gets a specific page of the highscores\n Each list element is a dictionary with the following keys: rank, name, value.\n May return ERROR_NETWORK' url = url_highscores.format(server, category, profession, pagenum) try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category, pagenum)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_highscores(server, category, pagenum, profession, tries)) return ret try: start_index = content.index('<td style="width: 20%;" >Vocation</td>') end_index = content.index('<div style="float: left;"><b>&raquo; Pages:') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category, pagenum)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_highscores(server, category, pagenum, profession, tries)) return ret if (category == 'loyalty'): regex_deaths = '<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) scoreList = [] for m in matches: scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')}) else: regex_deaths = '<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) scoreList = [] for m in matches: scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')}) return scoreList
-5,330,932,196,894,501,000
Gets a specific page of the highscores Each list element is a dictionary with the following keys: rank, name, value. May return ERROR_NETWORK
NabBot-master/utils/tibia.py
get_highscores
LadyKeladry/Guardian-Bot
python
@asyncio.coroutine def get_highscores(server, category, pagenum, profession=0, tries=5): 'Gets a specific page of the highscores\n    Each list element is a dictionary with the following keys: rank, name, value.\n    May return ERROR_NETWORK' url = url_highscores.format(server, category, profession, pagenum) try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category, pagenum)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_highscores(server, category, pagenum, profession, tries)) return ret try: start_index = content.index('<td style="width: 20%;" >Vocation</td>') end_index = content.index('<div style="float: left;"><b>&raquo; Pages:') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category, pagenum)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_highscores(server, category, pagenum, profession, tries)) return ret if (category == 'loyalty'): regex_deaths = '<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) scoreList = [] for m in matches: scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')}) else: regex_deaths = '<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) scoreList = [] for m in matches: scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')}) return scoreList
@asyncio.coroutine def get_server_online(server, tries=5): 'Returns a list of all the online players in current server.\n\n Each list element is a dictionary with the following keys: name, level' server = server.capitalize() url = ('https://secure.tibia.com/community/?subtopic=worlds&world=' + server) onlineList = [] try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server)) return onlineList else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_server_online(server, tries)) return ret while ((not content) and (tries > 0)): try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: tries -= 1 try: start_index = content.index('<div class="BoxContent"') end_index = content.index('<div id="ThemeboxesColumn" >') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server)) return onlineList else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_server_online(server, tries)) return ret regex_members = '<a href="https://secure.tibia.com/community/\\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>' pattern = re.compile(regex_members, (re.MULTILINE + re.S)) m = re.findall(pattern, content) if m: for (name, level) in m: name = urllib.parse.unquote_plus(name) onlineList.append({'name': name, 'level': int(level)}) return onlineList
-5,595,284,433,105,008,000
Returns a list of all the online players in current server. Each list element is a dictionary with the following keys: name, level
NabBot-master/utils/tibia.py
get_server_online
LadyKeladry/Guardian-Bot
python
@asyncio.coroutine def get_server_online(server, tries=5): 'Returns a list of all the online players in current server.\n\n Each list element is a dictionary with the following keys: name, level' server = server.capitalize() url = ('https://secure.tibia.com/community/?subtopic=worlds&world=' + server) onlineList = [] try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server)) return onlineList else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_server_online(server, tries)) return ret while ((not content) and (tries > 0)): try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: tries -= 1 try: start_index = content.index('<div class="BoxContent"') end_index = content.index('<div id="ThemeboxesColumn" >') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server)) return onlineList else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_server_online(server, tries)) return ret regex_members = '<a href="https://secure.tibia.com/community/\\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>' pattern = re.compile(regex_members, (re.MULTILINE + re.S)) m = re.findall(pattern, content) if m: for (name, level) in m: name = urllib.parse.unquote_plus(name) onlineList.append({'name': name, 'level': int(level)}) return onlineList
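A self-contained sketch of how the regex in get_server_online above extracts name/level pairs, run against a single hand-written sample row; the sample HTML is an assumption shaped to match the pattern, not real tibia.com output:

import re
import urllib.parse

regex_members = '<a href="https://secure.tibia.com/community/\\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>'
sample_row = ('<a href="https://secure.tibia.com/community/?subtopic=characters&name=Galarzaa+Fidera" >'
              'Galarzaa Fidera</a></td><td style="width:10%;" >285</td>')
for name, level in re.findall(regex_members, sample_row):
    # unquote_plus turns 'Galarzaa+Fidera' back into 'Galarzaa Fidera'.
    print(urllib.parse.unquote_plus(name), int(level))  # Galarzaa Fidera 285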
@asyncio.coroutine def get_guild_online(guildname, titlecase=True, tries=5): "Returns a guild's world and online member list in a dictionary.\n\n The dictionary contains the following keys: name, logo_url, world and members.\n The key members contains a list where each element is a dictionary with the following keys:\n rank, name, title, vocation, level, joined.\n Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.\n May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly." gstats_url = ('http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)) guild = {} if (not titlecase): try: page = (yield from aiohttp.get(gstats_url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret try: content.index('<div class="footer">') except ValueError: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret if ('<div>Sorry!' in content): return ERROR_DOESNTEXIST try: content.index('General info') content.index('Recruitment') except Exception: log.error('getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.') return ERROR_NETWORK startIndex = content.index('General info') endIndex = content.index('Recruitment') content = content[startIndex:endIndex] m = re.search('<a href="set=(.+?)"', content) if m: guildname = urllib.parse.unquote_plus(m.group(1)) else: guildname = guildname.title() tibia_url = (('https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(guildname)) + '&onlyshowonline=1') try: page = (yield from aiohttp.get(tibia_url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret try: startIndex = content.index('<div class="BoxContent"') endIndex = content.index('<div id="ThemeboxesColumn" >') content = content[startIndex:endIndex] except ValueError: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret if ('<div class="Text" >Error</div>' in content): if titlecase: ret = (yield from get_guild_online(guildname, False)) return ret else: return ERROR_DOESNTEXIST m = re.search('founded on (\\w+) on ([^.]+)', content) if m: guild['world'] = m.group(1) m = re.search('Their home on \\w+ is ([^\\.]+)', content) if m: guild['guildhall'] = m.group(1) m = re.search('<IMG SRC=\\"([^\\"]+)\\" W', content) if m: guild['logo_url'] = m.group(1) regex_members = '<TR BGCOLOR=#[\\dABCDEF]+><TD>(.+?)</TD>\\s</td><TD><A HREF="https://secure.tibia.com/community/\\?subtopic=characters&name=(.+?)">.+?</A> *\\(*(.*?)\\)*</TD>\\s<TD>(.+?)</TD>\\s<TD>(.+?)</TD>\\s<TD>(.+?)</TD>' pattern = re.compile(regex_members, 
(re.MULTILINE + re.S)) m = re.findall(pattern, content) guild['members'] = [] if m: for (rank, name, title, vocation, level, joined) in m: rank = ('' if (rank == '&#160;') else rank) name = urllib.parse.unquote_plus(name) joined = joined.replace('&#160;', '-') guild['members'].append({'rank': rank, 'name': name, 'title': title, 'vocation': vocation, 'level': level, 'joined': joined}) guild['name'] = guildname return guild
-7,008,559,775,826,219,000
Returns a guild's world and online member list in a dictionary. The dictionary contains the following keys: name, logo_url, world and members. The key members contains a list where each element is a dictionary with the following keys: rank, name, title, vocation, level, joined. Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case. May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly.
NabBot-master/utils/tibia.py
get_guild_online
LadyKeladry/Guardian-Bot
python
@asyncio.coroutine def get_guild_online(guildname, titlecase=True, tries=5): "Returns a guild's world and online member list in a dictionary.\n\n The dictionary contains the following keys: name, logo_url, world and members.\n The key members contains a list where each element is a dictionary with the following keys:\n rank, name, title, vocation, level, joined.\n Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.\n May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly." gstats_url = ('http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)) guild = {} if (not titlecase): try: page = (yield from aiohttp.get(gstats_url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret try: content.index('<div class="footer">') except ValueError: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret if ('<div>Sorry!' in content): return ERROR_DOESNTEXIST try: content.index('General info') content.index('Recruitment') except Exception: log.error('getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.') return ERROR_NETWORK startIndex = content.index('General info') endIndex = content.index('Recruitment') content = content[startIndex:endIndex] m = re.search('<a href="set=(.+?)"', content) if m: guildname = urllib.parse.unquote_plus(m.group(1)) else: guildname = guildname.title() tibia_url = (('https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(guildname)) + '&onlyshowonline=1') try: page = (yield from aiohttp.get(tibia_url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret try: startIndex = content.index('<div class="BoxContent"') endIndex = content.index('<div id="ThemeboxesColumn" >') content = content[startIndex:endIndex] except ValueError: if (tries == 0): log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_guild_online(guildname, titlecase, tries)) return ret if ('<div class="Text" >Error</div>' in content): if titlecase: ret = (yield from get_guild_online(guildname, False)) return ret else: return ERROR_DOESNTEXIST m = re.search('founded on (\\w+) on ([^.]+)', content) if m: guild['world'] = m.group(1) m = re.search('Their home on \\w+ is ([^\\.]+)', content) if m: guild['guildhall'] = m.group(1) m = re.search('<IMG SRC=\\"([^\\"]+)\\" W', content) if m: guild['logo_url'] = m.group(1) regex_members = '<TR BGCOLOR=#[\\dABCDEF]+><TD>(.+?)</TD>\\s</td><TD><A HREF="https://secure.tibia.com/community/\\?subtopic=characters&name=(.+?)">.+?</A> *\\(*(.*?)\\)*</TD>\\s<TD>(.+?)</TD>\\s<TD>(.+?)</TD>\\s<TD>(.+?)</TD>' pattern = re.compile(regex_members, 
(re.MULTILINE + re.S)) m = re.findall(pattern, content) guild['members'] = [] if m: for (rank, name, title, vocation, level, joined) in m: rank = ('' if (rank == '&#160;') else rank) name = urllib.parse.unquote_plus(name) joined = joined.replace('&#160;', '-') guild['members'].append({'rank': rank, 'name': name, 'title': title, 'vocation': vocation, 'level': level, 'joined': joined}) guild['name'] = guildname return guild
@asyncio.coroutine def get_character(name, tries=5): "Returns a dictionary with a player's info\n\n The dictionary contains the following keys: name, deleted, level, vocation, world, residence,\n married, gender, guild, last,login, chars*.\n *chars is list that contains other characters in the same account (if not hidden).\n Each list element is dictionary with the keys: name, world.\n May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly." try: url = (url_character + urllib.parse.quote(name.encode('iso-8859-1'))) except UnicodeEncodeError: return ERROR_DOESNTEXIST char = dict() try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getPlayer: Couldn't fetch {0}, network error.".format(name)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_character(name, tries)) return ret try: startIndex = content.index('<div class="BoxContent"') endIndex = content.index('<B>Search Character</B>') content = content[startIndex:endIndex] except ValueError: if (tries == 0): log.error("getPlayer: Couldn't fetch {0}, network error.".format(name)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_character(name, tries)) return ret if ('Name:</td><td>' not in content): return ERROR_DOESNTEXIST m = re.search('Name:</td><td>([^<,]+)', content) if m: char['name'] = m.group(1).strip() m = re.search(', will be deleted at ([^<]+)', content) if m: char['deleted'] = True m = re.search('Vocation:</td><td>([^<]+)', content) if m: char['vocation'] = m.group(1) m = re.search('Level:</td><td>(\\d+)', content) if m: char['level'] = int(m.group(1)) for onchar in global_online_list: if (onchar.split('_', 1)[1] == char['name']): c = userDatabase.cursor() c.execute('SELECT last_level FROM chars WHERE name LIKE ?', (char['name'],)) result = c.fetchone() if result: char['level'] = abs(result['last_level']) c.close() break m = re.search('World:</td><td>([^<]+)', content) if m: char['world'] = m.group(1) m = re.search('Residence:</td><td>([^<]+)', content) if m: char['residence'] = m.group(1) m = re.search('Married To:</td><td>?.+name=([^"]+)', content) if m: char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1') m = re.search('Sex:</td><td>([^<]+)', content) if m: if (m.group(1) == 'male'): char['gender'] = 'male' else: char['gender'] = 'female' m = re.search('Membership:</td><td>([^<]+)\\sof the', content) if m: char['rank'] = m.group(1) m = re.search('GuildName=.*?([^&]+).+', content) if m: char['guild'] = urllib.parse.unquote_plus(m.group(1)) m = re.search('House:</td><td> <a href=\\"https://secure\\.tibia\\.com/community/\\?subtopic=houses.+houseid=(\\d+)&amp;character=(?:[^&]+)&amp;action=characters\\" >([^<]+)</a> \\(([^(]+)\\) is paid until ([A-z]+).*?;(\\d+).*?;(\\d+)', content) if m: char['house_id'] = m.group(1) char['house'] = m.group(2) char['house_town'] = m.group(3) m = re.search('Last Login:</td><td>([^<]+)', content) if m: lastLogin = m.group(1).replace('&#160;', ' ').replace(',', '') if ('never' in lastLogin): char['last_login'] = None else: char['last_login'] = lastLogin c = userDatabase.cursor() c.execute('SELECT user_id FROM chars WHERE name LIKE ?', (char['name'],)) result = c.fetchone() char['owner_id'] = (None if (result is None) else result['user_id']) c = userDatabase.cursor() c.execute('SELECT vocation, name, id, world FROM chars WHERE name LIKE ?', (name,)) result = 
c.fetchone() if result: if (result['vocation'] != char['vocation']): c.execute('UPDATE chars SET vocation = ? WHERE id = ?', (char['vocation'], result['id'])) log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'], char['vocation'], result['vocation'])) if (result['name'] != char['name']): c.execute('UPDATE chars SET name = ? WHERE id = ?', (char['name'], result['id'])) log.info('{0} was renamed to {1} during get_character()'.format(result['name'], char['name'])) if (result['world'] != char['world']): c.execute('UPDATE chars SET world = ? WHERE id = ?', (char['world'], result['id'])) log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'], char['world'], result['world'])) c = userDatabase.cursor() for category in highscores_categories: c.execute((((('SELECT ' + category) + ',') + category) + '_rank FROM chars WHERE name LIKE ?'), (name,)) result = c.fetchone() if result: if ((result[category] is not None) and (result[(category + '_rank')] is not None)): char[category] = result[category] char[(category + '_rank')] = result[(category + '_rank')] char['deaths'] = [] regex_deaths = 'valign="top" >([^<]+)</td><td>(.+?)</td></tr>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) for m in matches: death_time = m[0].replace('&#160;', ' ').replace(',', '') death_level = '' death_killer = '' death_by_player = False if (m[1].find('Died') != (- 1)): regex_deathinfo_monster = 'Level (\\d+) by ([^.]+)' pattern = re.compile(regex_deathinfo_monster, (re.MULTILINE + re.S)) m_deathinfo_monster = re.search(pattern, m[1]) if m_deathinfo_monster: death_level = m_deathinfo_monster.group(1) death_killer = m_deathinfo_monster.group(2) else: regex_deathinfo_player = 'Level (\\d+) by .+?name=([^"]+)' pattern = re.compile(regex_deathinfo_player, (re.MULTILINE + re.S)) m_deathinfo_player = re.search(pattern, m[1]) if m_deathinfo_player: death_level = m_deathinfo_player.group(1) death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2)) death_by_player = True try: char['deaths'].append({'time': death_time, 'level': int(death_level), 'killer': death_killer, 'byPlayer': death_by_player}) except ValueError: continue char['chars'] = [] try: startIndex = content.index('<B>Characters</B>') content = content[startIndex:] regex_chars = '<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\\"([^\\"]+)' pattern = re.compile(regex_chars, (re.MULTILINE + re.S)) m = re.findall(pattern, content) if m: for (world, name) in m: name = urllib.parse.unquote_plus(name) char['chars'].append({'name': name, 'world': world}) except Exception: pass return char
538,496,630,067,673,900
Returns a dictionary with a player's info The dictionary contains the following keys: name, deleted, level, vocation, world, residence, married, gender, guild, last,login, chars*. *chars is list that contains other characters in the same account (if not hidden). Each list element is dictionary with the keys: name, world. May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly.
NabBot-master/utils/tibia.py
get_character
LadyKeladry/Guardian-Bot
python
@asyncio.coroutine def get_character(name, tries=5): "Returns a dictionary with a player's info\n\n    The dictionary contains the following keys: name, deleted, level, vocation, world, residence,\n    married, gender, guild, last,login, chars*.\n    *chars is list that contains other characters in the same account (if not hidden).\n    Each list element is dictionary with the keys: name, world.\n    May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly." try: url = (url_character + urllib.parse.quote(name.encode('iso-8859-1'))) except UnicodeEncodeError: return ERROR_DOESNTEXIST char = dict() try: page = (yield from aiohttp.get(url)) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("getPlayer: Couldn't fetch {0}, network error.".format(name)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_character(name, tries)) return ret try: startIndex = content.index('<div class="BoxContent"') endIndex = content.index('<B>Search Character</B>') content = content[startIndex:endIndex] except ValueError: if (tries == 0): log.error("getPlayer: Couldn't fetch {0}, network error.".format(name)) return ERROR_NETWORK else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) ret = (yield from get_character(name, tries)) return ret if ('Name:</td><td>' not in content): return ERROR_DOESNTEXIST m = re.search('Name:</td><td>([^<,]+)', content) if m: char['name'] = m.group(1).strip() m = re.search(', will be deleted at ([^<]+)', content) if m: char['deleted'] = True m = re.search('Vocation:</td><td>([^<]+)', content) if m: char['vocation'] = m.group(1) m = re.search('Level:</td><td>(\\d+)', content) if m: char['level'] = int(m.group(1)) for onchar in global_online_list: if (onchar.split('_', 1)[1] == char['name']): c = userDatabase.cursor() c.execute('SELECT last_level FROM chars WHERE name LIKE ?', (char['name'],)) result = c.fetchone() if result: char['level'] = abs(result['last_level']) c.close() break m = re.search('World:</td><td>([^<]+)', content) if m: char['world'] = m.group(1) m = re.search('Residence:</td><td>([^<]+)', content) if m: char['residence'] = m.group(1) m = re.search('Married To:</td><td>?.+name=([^"]+)', content) if m: char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1') m = re.search('Sex:</td><td>([^<]+)', content) if m: if (m.group(1) == 'male'): char['gender'] = 'male' else: char['gender'] = 'female' m = re.search('Membership:</td><td>([^<]+)\\sof the', content) if m: char['rank'] = m.group(1) m = re.search('GuildName=.*?([^&]+).+', content) if m: char['guild'] = urllib.parse.unquote_plus(m.group(1)) m = re.search('House:</td><td> <a href=\\"https://secure\\.tibia\\.com/community/\\?subtopic=houses.+houseid=(\\d+)&amp;character=(?:[^&]+)&amp;action=characters\\" >([^<]+)</a> \\(([^(]+)\\) is paid until ([A-z]+).*?;(\\d+).*?;(\\d+)', content) if m: char['house_id'] = m.group(1) char['house'] = m.group(2) char['house_town'] = m.group(3) m = re.search('Last Login:</td><td>([^<]+)', content) if m: lastLogin = m.group(1).replace('&#160;', ' ').replace(',', '') if ('never' in lastLogin): char['last_login'] = None else: char['last_login'] = lastLogin c = userDatabase.cursor() c.execute('SELECT user_id FROM chars WHERE name LIKE ?', (char['name'],)) result = c.fetchone() char['owner_id'] = (None if (result is None) else result['user_id']) c = userDatabase.cursor() c.execute('SELECT vocation, name, id, world FROM chars WHERE name LIKE ?', (name,)) result = 
c.fetchone() if result: if (result['vocation'] != char['vocation']): c.execute('UPDATE chars SET vocation = ? WHERE id = ?', (char['vocation'], result['id'])) log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'], char['vocation'], result['vocation'])) if (result['name'] != char['name']): c.execute('UPDATE chars SET name = ? WHERE id = ?', (char['name'], result['id'])) log.info('{0} was renamed to {1} during get_character()'.format(result['name'], char['name'])) if (result['world'] != char['world']): c.execute('UPDATE chars SET world = ? WHERE id = ?', (char['world'], result['id'])) log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'], char['world'], result['world'])) c = userDatabase.cursor() for category in highscores_categories: c.execute((((('SELECT ' + category) + ',') + category) + '_rank FROM chars WHERE name LIKE ?'), (name,)) result = c.fetchone() if result: if ((result[category] is not None) and (result[(category + '_rank')] is not None)): char[category] = result[category] char[(category + '_rank')] = result[(category + '_rank')] char['deaths'] = [] regex_deaths = 'valign="top" >([^<]+)</td><td>(.+?)</td></tr>' pattern = re.compile(regex_deaths, (re.MULTILINE + re.S)) matches = re.findall(pattern, content) for m in matches: death_time = m[0].replace('&#160;', ' ').replace(',', '') death_level = '' death_killer = '' death_by_player = False if (m[1].find('Died') != (- 1)): regex_deathinfo_monster = 'Level (\\d+) by ([^.]+)' pattern = re.compile(regex_deathinfo_monster, (re.MULTILINE + re.S)) m_deathinfo_monster = re.search(pattern, m[1]) if m_deathinfo_monster: death_level = m_deathinfo_monster.group(1) death_killer = m_deathinfo_monster.group(2) else: regex_deathinfo_player = 'Level (\\d+) by .+?name=([^"]+)' pattern = re.compile(regex_deathinfo_player, (re.MULTILINE + re.S)) m_deathinfo_player = re.search(pattern, m[1]) if m_deathinfo_player: death_level = m_deathinfo_player.group(1) death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2)) death_by_player = True try: char['deaths'].append({'time': death_time, 'level': int(death_level), 'killer': death_killer, 'byPlayer': death_by_player}) except ValueError: continue char['chars'] = [] try: startIndex = content.index('<B>Characters</B>') content = content[startIndex:] regex_chars = '<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\\"([^\\"]+)' pattern = re.compile(regex_chars, (re.MULTILINE + re.S)) m = re.findall(pattern, content) if m: for (world, name) in m: name = urllib.parse.unquote_plus(name) char['chars'].append({'name': name, 'world': world}) except Exception: pass return char
def get_rashid_city() -> str: 'Returns the city Rashid is currently in.' offset = (get_tibia_time_zone() - get_local_timezone()) tibia_time = (datetime.now() + timedelta(hours=(offset - 10))) return ['Svargrond', 'Liberty Bay', 'Port Hope', 'Ankrahmun', 'Darashia', 'Edron', 'Carlin'][tibia_time.weekday()]
-2,082,319,499,515,995,000
Returns the city Rashid is currently in.
NabBot-master/utils/tibia.py
get_rashid_city
LadyKeladry/Guardian-Bot
python
def get_rashid_city() -> str: offset = (get_tibia_time_zone() - get_local_timezone()) tibia_time = (datetime.now() + timedelta(hours=(offset - 10))) return ['Svargrond', 'Liberty Bay', 'Port Hope', 'Ankrahmun', 'Darashia', 'Edron', 'Carlin'][tibia_time.weekday()]
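A self-contained sketch of the weekday-to-city lookup used by get_rashid_city above; the timezone helpers are replaced here by an explicit UTC offset parameter, so the offset handling is a simplifying assumption:

from datetime import datetime, timedelta

RASHID_CITIES = ['Svargrond', 'Liberty Bay', 'Port Hope', 'Ankrahmun', 'Darashia', 'Edron', 'Carlin']

def rashid_city_for(now_utc, tibia_utc_offset=1):
    # Shift to server time, then back 10 hours so the weekday only advances
    # after the 10:00 server save, mirroring the record above.
    tibia_time = now_utc + timedelta(hours=tibia_utc_offset - 10)
    return RASHID_CITIES[tibia_time.weekday()]

print(rashid_city_for(datetime(2017, 7, 10, 12, 0)))  # 2017-07-10 is a Monday -> Svargrond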
def get_monster(name): "Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.\n\n The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,\n elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,\n arm, image." c = tibiaDatabase.cursor() c.execute('SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower()) or (len(result) == 1)): monster = result[0] else: return [x['title'] for x in result] try: if ((monster['health'] is None) or (monster['health'] < 1)): monster['health'] = None c.execute('SELECT Items.title as name, percentage, min, max FROM CreatureDrops, Items WHERE Items.id = CreatureDrops.itemid AND creatureid = ? ORDER BY percentage DESC', (monster['id'],)) monster['loot'] = c.fetchall() return monster finally: c.close()
-901,352,362,940,470,500
Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions. The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy, elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis, arm, image.
NabBot-master/utils/tibia.py
get_monster
LadyKeladry/Guardian-Bot
python
def get_monster(name): "Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.\n\n The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,\n elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,\n arm, image." c = tibiaDatabase.cursor() c.execute('SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower()) or (len(result) == 1)): monster = result[0] else: return [x['title'] for x in result] try: if ((monster['health'] is None) or (monster['health'] < 1)): monster['health'] = None c.execute('SELECT Items.title as name, percentage, min, max FROM CreatureDrops, Items WHERE Items.id = CreatureDrops.itemid AND creatureid = ? ORDER BY percentage DESC', (monster['id'],)) monster['loot'] = c.fetchall() return monster finally: c.close()
def get_item(name): "Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.\n\n The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.\n *npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city." c = tibiaDatabase.cursor() c.execute('SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower()) or (len(result) == 1)): item = result[0] else: return [x['title'] for x in result] try: if (item is not None): c.execute('SELECT NPCs.title, city, value FROM Items, SellItems, NPCs WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid ORDER BY value DESC', (name,)) npcs = [] value_sell = None for npc in c: name = npc['title'] city = npc['city'].title() if (value_sell is None): value_sell = npc['value'] elif (npc['value'] != value_sell): break if ((name == 'Alesar') or (name == 'Yaman')): city = "Green Djinn's Fortress" item['color'] = Colour.green() elif ((name == "Nah'Bob") or (name == 'Haroun')): city = "Blue Djinn's Fortress" item['color'] = Colour.blue() elif (name == 'Rashid'): city = get_rashid_city() item['color'] = Colour(15788310) elif (name == 'Yasir'): city = 'his boat' elif (name == 'Briasol'): item['color'] = Colour(11098308) npcs.append({'name': name, 'city': city}) item['npcs_sold'] = npcs item['value_sell'] = value_sell c.execute('SELECT NPCs.title, city, value FROM Items, BuyItems, NPCs WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid ORDER BY value ASC', (name,)) npcs = [] value_buy = None for npc in c: name = npc['title'] city = npc['city'].title() if (value_buy is None): value_buy = npc['value'] elif (npc['value'] != value_buy): break if ((name == 'Alesar') or (name == 'Yaman')): city = "Green Djinn's Fortress" elif ((name == "Nah'Bob") or (name == 'Haroun')): city = "Blue Djinn's Fortress" elif (name == 'Rashid'): offset = (get_tibia_time_zone() - get_local_timezone()) tibia_time = (datetime.now() + timedelta(hours=(offset - 10))) city = ['Svargrond', 'Liberty Bay', 'Port Hope', 'Ankrahmun', 'Darashia', 'Edron', 'Carlin'][tibia_time.weekday()] elif (name == 'Yasir'): city = 'his boat' npcs.append({'name': name, 'city': city}) item['npcs_bought'] = npcs item['value_buy'] = value_buy c.execute('SELECT Creatures.title as name, CreatureDrops.percentage FROM CreatureDrops, Creatures WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? ORDER BY percentage DESC', (item['id'],)) item['dropped_by'] = c.fetchall() c.execute('SELECT Quests.title FROM Quests, QuestRewards WHERE Quests.id = QuestRewards.questid and itemid = ?', (item['id'],)) quests = c.fetchall() item['quests'] = list() for quest in quests: item['quests'].append(quest['title']) return item finally: c.close() return
-3,596,232,935,869,390,000
Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions. The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy. *npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city.
NabBot-master/utils/tibia.py
get_item
LadyKeladry/Guardian-Bot
python
def get_item(name): "Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.\n\n The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.\n *npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city." c = tibiaDatabase.cursor() c.execute('SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower()) or (len(result) == 1)): item = result[0] else: return [x['title'] for x in result] try: if (item is not None): c.execute('SELECT NPCs.title, city, value FROM Items, SellItems, NPCs WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid ORDER BY value DESC', (name,)) npcs = [] value_sell = None for npc in c: name = npc['title'] city = npc['city'].title() if (value_sell is None): value_sell = npc['value'] elif (npc['value'] != value_sell): break if ((name == 'Alesar') or (name == 'Yaman')): city = "Green Djinn's Fortress" item['color'] = Colour.green() elif ((name == "Nah'Bob") or (name == 'Haroun')): city = "Blue Djinn's Fortress" item['color'] = Colour.blue() elif (name == 'Rashid'): city = get_rashid_city() item['color'] = Colour(15788310) elif (name == 'Yasir'): city = 'his boat' elif (name == 'Briasol'): item['color'] = Colour(11098308) npcs.append({'name': name, 'city': city}) item['npcs_sold'] = npcs item['value_sell'] = value_sell c.execute('SELECT NPCs.title, city, value FROM Items, BuyItems, NPCs WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid ORDER BY value ASC', (name,)) npcs = [] value_buy = None for npc in c: name = npc['title'] city = npc['city'].title() if (value_buy is None): value_buy = npc['value'] elif (npc['value'] != value_buy): break if ((name == 'Alesar') or (name == 'Yaman')): city = "Green Djinn's Fortress" elif ((name == "Nah'Bob") or (name == 'Haroun')): city = "Blue Djinn's Fortress" elif (name == 'Rashid'): offset = (get_tibia_time_zone() - get_local_timezone()) tibia_time = (datetime.now() + timedelta(hours=(offset - 10))) city = ['Svargrond', 'Liberty Bay', 'Port Hope', 'Ankrahmun', 'Darashia', 'Edron', 'Carlin'][tibia_time.weekday()] elif (name == 'Yasir'): city = 'his boat' npcs.append({'name': name, 'city': city}) item['npcs_bought'] = npcs item['value_buy'] = value_buy c.execute('SELECT Creatures.title as name, CreatureDrops.percentage FROM CreatureDrops, Creatures WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? ORDER BY percentage DESC', (item['id'],)) item['dropped_by'] = c.fetchall() c.execute('SELECT Quests.title FROM Quests, QuestRewards WHERE Quests.id = QuestRewards.questid and itemid = ?', (item['id'],)) quests = c.fetchall() item['quests'] = list() for quest in quests: item['quests'].append(quest['title']) return item finally: c.close() return
def parse_tibia_time(tibia_time: str) -> datetime: 'Gets a time object from a time string from tibia.com' tibia_time = tibia_time.replace(',', '').replace('&#160;', ' ') t = time.localtime() u = time.gmtime(time.mktime(t)) local_utc_offset = (((timegm(t) - timegm(u)) / 60) / 60) tz = tibia_time[(- 4):].strip() try: t = datetime.strptime(tibia_time[:(- 4)].strip(), '%b %d %Y %H:%M:%S') except ValueError: log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time)) return None if (tz == 'CET'): utc_offset = 1 elif (tz == 'CEST'): utc_offset = 2 else: log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time)) return None return (t + timedelta(hours=(local_utc_offset - utc_offset)))
1,220,089,986,400,324,900
Gets a time object from a time string from tibia.com
NabBot-master/utils/tibia.py
parse_tibia_time
LadyKeladry/Guardian-Bot
python
def parse_tibia_time(tibia_time: str) -> datetime: tibia_time = tibia_time.replace(',', '').replace('&#160;', ' ') t = time.localtime() u = time.gmtime(time.mktime(t)) local_utc_offset = (((timegm(t) - timegm(u)) / 60) / 60) tz = tibia_time[(- 4):].strip() try: t = datetime.strptime(tibia_time[:(- 4)].strip(), '%b %d %Y %H:%M:%S') except ValueError: log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time)) return None if (tz == 'CET'): utc_offset = 1 elif (tz == 'CEST'): utc_offset = 2 else: log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time)) return None return (t + timedelta(hours=(local_utc_offset - utc_offset)))
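A simplified, self-contained variant of the parsing idea above that returns the timestamp in UTC instead of the machine's local time (an intentional deviation so the sketch runs the same everywhere):

from datetime import datetime, timedelta

def parse_tibia_time_utc(tibia_time: str):
    # tibia.com timestamps look like 'Jul 10 2017, 09:13:15 CEST'.
    cleaned = tibia_time.replace(',', '').replace('&#160;', ' ')
    offset = {'CET': 1, 'CEST': 2}.get(cleaned[-4:].strip())
    if offset is None:
        return None
    try:
        parsed = datetime.strptime(cleaned[:-4].strip(), '%b %d %Y %H:%M:%S')
    except ValueError:
        return None
    return parsed - timedelta(hours=offset)

print(parse_tibia_time_utc('Jul 10 2017, 09:13:15 CEST'))  # 2017-07-10 07:13:15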
def get_stats(level: int, vocation: str): 'Returns a dictionary with the stats for a character of a certain vocation and level.\n\n The dictionary has the following keys: vocation, hp, mp, cap.' try: level = int(level) except ValueError: return 'bad level' if (level <= 0): return 'low level' elif (level > 2000): return 'high level' vocation = vocation.lower().strip() if (vocation in KNIGHT): hp = (((level - 8) * 15) + 185) mp = (((level - 0) * 5) + 50) cap = (((level - 8) * 25) + 470) vocation = 'knight' elif (vocation in PALADIN): hp = (((level - 8) * 10) + 185) mp = (((level - 8) * 15) + 90) cap = (((level - 8) * 20) + 470) vocation = 'paladin' elif (vocation in MAGE): hp = (((level - 0) * 5) + 145) mp = (((level - 8) * 30) + 90) cap = (((level - 0) * 10) + 390) vocation = 'mage' elif (vocation in NO_VOCATION): vocation = 'no vocation' else: return 'bad vocation' if ((level < 8) or (vocation == 'no vocation')): hp = (((level - 0) * 5) + 145) mp = (((level - 0) * 5) + 50) cap = (((level - 0) * 10) + 390) exp = (((((50 * pow(level, 3)) / 3) - (100 * pow(level, 2))) + ((850 * level) / 3)) - 200) exp_tnl = ((((50 * level) * level) - (150 * level)) + 200) return {'vocation': vocation, 'hp': hp, 'mp': mp, 'cap': cap, 'exp': int(exp), 'exp_tnl': exp_tnl}
-2,773,699,681,780,761,000
Returns a dictionary with the stats for a character of a certain vocation and level. The dictionary has the following keys: vocation, hp, mp, cap.
NabBot-master/utils/tibia.py
get_stats
LadyKeladry/Guardian-Bot
python
def get_stats(level: int, vocation: str): 'Returns a dictionary with the stats for a character of a certain vocation and level.\n\n The dictionary has the following keys: vocation, hp, mp, cap.' try: level = int(level) except ValueError: return 'bad level' if (level <= 0): return 'low level' elif (level > 2000): return 'high level' vocation = vocation.lower().strip() if (vocation in KNIGHT): hp = (((level - 8) * 15) + 185) mp = (((level - 0) * 5) + 50) cap = (((level - 8) * 25) + 470) vocation = 'knight' elif (vocation in PALADIN): hp = (((level - 8) * 10) + 185) mp = (((level - 8) * 15) + 90) cap = (((level - 8) * 20) + 470) vocation = 'paladin' elif (vocation in MAGE): hp = (((level - 0) * 5) + 145) mp = (((level - 8) * 30) + 90) cap = (((level - 0) * 10) + 390) vocation = 'mage' elif (vocation in NO_VOCATION): vocation = 'no vocation' else: return 'bad vocation' if ((level < 8) or (vocation == 'no vocation')): hp = (((level - 0) * 5) + 145) mp = (((level - 0) * 5) + 50) cap = (((level - 0) * 10) + 390) exp = (((((50 * pow(level, 3)) / 3) - (100 * pow(level, 2))) + ((850 * level) / 3)) - 200) exp_tnl = ((((50 * level) * level) - (150 * level)) + 200) return {'vocation': vocation, 'hp': hp, 'mp': mp, 'cap': cap, 'exp': int(exp), 'exp_tnl': exp_tnl}
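A quick worked check of the level/vocation formulas in the get_stats record above, for a level 100 knight; every number below is derived only from those formulas:

level = 100
hp = ((level - 8) * 15) + 185       # 1565 (knight branch)
mp = (level * 5) + 50               # 550
cap = ((level - 8) * 25) + 470      # 2770
exp_tnl = ((50 * level * level) - (150 * level)) + 200   # 485200 experience to reach level 101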
def get_share_range(level: int): 'Returns the share range for a specific level\n\n The returned value is a list with the lower limit and the upper limit in that order.' return (int(round(((level * 2) / 3), 0)), int(round(((level * 3) / 2), 0)))
-468,969,052,756,241,660
Returns the share range for a specific level The returned value is a list with the lower limit and the upper limit in that order.
NabBot-master/utils/tibia.py
get_share_range
LadyKeladry/Guardian-Bot
python
def get_share_range(level: int): 'Returns the share range for a specific level\n\n The returned value is a list with the lower limit and the upper limit in that order.' return (int(round(((level * 2) / 3), 0)), int(round(((level * 3) / 2), 0)))
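A tiny check of the share-range arithmetic above; the function is repeated verbatim so the check runs standalone. For example, a level 100 character can share experience with levels 67 through 150:

def get_share_range(level: int):
    return (int(round(((level * 2) / 3), 0)), int(round(((level * 3) / 2), 0)))

print(get_share_range(100))  # (67, 150)
print(get_share_range(250))  # (167, 375)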
def get_spell(name): "Returns a dictionary containing a spell's info, a list of possible matches or None" c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15', ((('%' + name) + '%'), (('%' + name) + '%'))) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (result[0]['words'].lower() == name.lower()) or (len(result) == 1)): spell = result[0] else: return ['{name} ({words})'.format(**x) for x in result] spell['npcs'] = [] c.execute('SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,\n SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs\n WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id', (spell['id'],)) result = c.fetchall() for npc in result: npc['city'] = npc['city'].title() spell['npcs'].append(npc) return spell finally: c.close()
-1,031,592,425,114,744,700
Returns a dictionary containing a spell's info, a list of possible matches or None
NabBot-master/utils/tibia.py
get_spell
LadyKeladry/Guardian-Bot
python
def get_spell(name): c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15', ((('%' + name) + '%'), (('%' + name) + '%'))) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (result[0]['words'].lower() == name.lower()) or (len(result) == 1)): spell = result[0] else: return ['{name} ({words})'.format(**x) for x in result] spell['npcs'] = [] c.execute('SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,\n SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs\n WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id', (spell['id'],)) result = c.fetchall() for npc in result: npc['city'] = npc['city'].title() spell['npcs'].append(npc) return spell finally: c.close()
def get_npc(name): "Returns a dictionary containing a NPC's info, a list of possible matches or None" c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower) or (len(result) == 1)): npc = result[0] else: return [x['title'] for x in result] npc['image'] = 0 c.execute('SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?', (npc['id'],)) npc['sell_items'] = c.fetchall() c.execute('SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?', (npc['id'],)) npc['buy_items'] = c.fetchall() return npc finally: c.close()
5,319,743,790,913,926,000
Returns a dictionary containing a NPC's info, a list of possible matches or None
NabBot-master/utils/tibia.py
get_npc
LadyKeladry/Guardian-Bot
python
def get_npc(name): c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['title'].lower() == name.lower) or (len(result) == 1)): npc = result[0] else: return [x['title'] for x in result] npc['image'] = 0 c.execute('SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?', (npc['id'],)) npc['sell_items'] = c.fetchall() c.execute('SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?', (npc['id'],)) npc['buy_items'] = c.fetchall() return npc finally: c.close()
@asyncio.coroutine def get_house(name, world=None): "Returns a dictionary containing a house's info, a list of possible matches or None.\n\n If world is specified, it will also find the current status of the house in that world." c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (len(result) == 1)): house = result[0] else: return [x['name'] for x in result] if ((world is None) or (world not in tibia_worlds)): house['fetch'] = False return house house['world'] = world house['url'] = url_house.format(id=house['id'], world=world) tries = 5 while True: try: page = (yield from aiohttp.get(house['url'])) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house['name'], house['id'], world)) house['fetch'] = False break else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) continue try: start_index = content.index('"BoxContent"') end_index = content.index('</TD></TR></TABLE>') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house['name'], house['id'], world)) house['fetch'] = False break else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) continue house['fetch'] = True m = re.search('monthly rent is <B>(\\d+)', content) if m: house['rent'] = int(m.group(1)) if ('rented' in content): house['status'] = 'rented' m = re.search('rented by <A?.+name=([^\\"]+).+e has paid the rent until <B>([^<]+)</B>', content) if m: house['owner'] = urllib.parse.unquote_plus(m.group(1)) house['until'] = m.group(2).replace('&#160;', ' ') if ('move out' in content): house['status'] = 'transferred' m = re.search('will move out on <B>([^<]+)</B> \\(time of daily server save\\) and will pass the house to <A.+name=([^\\"]+).+ for <B>(\\d+) gold', content) if m: house['transfer_date'] = house['until'] = m.group(1).replace('&#160;', ' ') house['transferee'] = urllib.parse.unquote_plus(m.group(2)) house['transfer_price'] = int(m.group(3)) elif ('auctioned' in content): house['status'] = 'auctioned' if ('. No bid has' in content): house['status'] = 'empty' break m = re.search('The auction will end at <B>([^\\<]+)</B>\\. The highest bid so far is <B>(\\d+).+ by .+name=([^\\"]+)\\"', content) if m: house['auction_end'] = m.group(1).replace('&#160;', ' ') house['top_bid'] = int(m.group(2)) house['top_bidder'] = urllib.parse.unquote_plus(m.group(3)) break return house finally: c.close()
6,161,030,971,851,621,000
Returns a dictionary containing a house's info, a list of possible matches or None. If world is specified, it will also find the current status of the house in that world.
NabBot-master/utils/tibia.py
get_house
LadyKeladry/Guardian-Bot
python
@asyncio.coroutine def get_house(name, world=None): "Returns a dictionary containing a house's info, a list of possible matches or None.\n\n If world is specified, it will also find the current status of the house in that world." c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (len(result) == 1)): house = result[0] else: return [x['name'] for x in result] if ((world is None) or (world not in tibia_worlds)): house['fetch'] = False return house house['world'] = world house['url'] = url_house.format(id=house['id'], world=world) tries = 5 while True: try: page = (yield from aiohttp.get(house['url'])) content = (yield from page.text(encoding='ISO-8859-1')) except Exception: if (tries == 0): log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house['name'], house['id'], world)) house['fetch'] = False break else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) continue try: start_index = content.index('"BoxContent"') end_index = content.index('</TD></TR></TABLE>') content = content[start_index:end_index] except ValueError: if (tries == 0): log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house['name'], house['id'], world)) house['fetch'] = False break else: tries -= 1 (yield from asyncio.sleep(network_retry_delay)) continue house['fetch'] = True m = re.search('monthly rent is <B>(\\d+)', content) if m: house['rent'] = int(m.group(1)) if ('rented' in content): house['status'] = 'rented' m = re.search('rented by <A?.+name=([^\\"]+).+e has paid the rent until <B>([^<]+)</B>', content) if m: house['owner'] = urllib.parse.unquote_plus(m.group(1)) house['until'] = m.group(2).replace('&#160;', ' ') if ('move out' in content): house['status'] = 'transferred' m = re.search('will move out on <B>([^<]+)</B> \\(time of daily server save\\) and will pass the house to <A.+name=([^\\"]+).+ for <B>(\\d+) gold', content) if m: house['transfer_date'] = house['until'] = m.group(1).replace('&#160;', ' ') house['transferee'] = urllib.parse.unquote_plus(m.group(2)) house['transfer_price'] = int(m.group(3)) elif ('auctioned' in content): house['status'] = 'auctioned' if ('. No bid has' in content): house['status'] = 'empty' break m = re.search('The auction will end at <B>([^\\<]+)</B>\\. The highest bid so far is <B>(\\d+).+ by .+name=([^\\"]+)\\"', content) if m: house['auction_end'] = m.group(1).replace('&#160;', ' ') house['top_bid'] = int(m.group(2)) house['top_bidder'] = urllib.parse.unquote_plus(m.group(3)) break return house finally: c.close()
def get_achievement(name): 'Returns an achievement (dictionary), a list of possible matches or none' c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (len(result) == 1)): return result[0] else: return [x['name'] for x in result] finally: c.close()
-7,458,789,280,913,675,000
Returns an achievement (dictionary), a list of possible matches or none
NabBot-master/utils/tibia.py
get_achievement
LadyKeladry/Guardian-Bot
python
def get_achievement(name): c = tibiaDatabase.cursor() try: c.execute('SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15', ((('%' + name) + '%'),)) result = c.fetchall() if (len(result) == 0): return None elif ((result[0]['name'].lower() == name.lower()) or (len(result) == 1)): return result[0] else: return [x['name'] for x in result] finally: c.close()
def get_tibia_time_zone() -> int: "Returns Germany's timezone, considering their daylight saving time dates" gt = (datetime.utcnow() + timedelta(hours=1)) germany_date = date(gt.year, gt.month, gt.day) dst_start = date(gt.year, 3, (31 - (int((((5 * gt.year) / 4) + 4)) % int(7)))) dst_end = date(gt.year, 10, (31 - (int((((5 * gt.year) / 4) + 1)) % int(7)))) if (dst_start < germany_date < dst_end): return 2 return 1
4,995,092,581,345,460,000
Returns Germany's timezone, considering their daylight saving time dates
NabBot-master/utils/tibia.py
get_tibia_time_zone
LadyKeladry/Guardian-Bot
python
def get_tibia_time_zone() -> int: gt = (datetime.utcnow() + timedelta(hours=1)) germany_date = date(gt.year, gt.month, gt.day) dst_start = date(gt.year, 3, (31 - (int((((5 * gt.year) / 4) + 4)) % int(7)))) dst_end = date(gt.year, 10, (31 - (int((((5 * gt.year) / 4) + 1)) % int(7)))) if (dst_start < germany_date < dst_end): return 2 return 1
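A standalone check of the daylight-saving window arithmetic in get_tibia_time_zone above; for 2017 the two day formulas land on the last Sundays of March and October:

from datetime import date

year = 2017
dst_start = date(year, 3, (31 - (int(((5 * year) / 4) + 4) % 7)))   # 2017-03-26
dst_end = date(year, 10, (31 - (int(((5 * year) / 4) + 1) % 7)))    # 2017-10-29
print(dst_start.weekday(), dst_end.weekday())  # 6 6 -> both Sundays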
def get_voc_abb(vocation: str) -> str: 'Given a vocation name, it returns an abbreviated string' abbrev = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED', 'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'} try: return abbrev[vocation.lower()] except KeyError: return 'N'
5,079,631,086,416,907,000
Given a vocation name, it returns an abbreviated string
NabBot-master/utils/tibia.py
get_voc_abb
LadyKeladry/Guardian-Bot
python
def get_voc_abb(vocation: str) -> str: abbrev = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED', 'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'} try: return abbrev[vocation.lower()] except KeyError: return 'N'
def get_voc_emoji(vocation: str) -> str: 'Given a vocation name, returns a emoji representing it' emoji = {'none': EMOJI[':hatching_chick:'], 'druid': EMOJI[':snowflake:'], 'sorcerer': EMOJI[':flame:'], 'paladin': EMOJI[':archery:'], 'knight': EMOJI[':shield:'], 'elder druid': EMOJI[':snowflake:'], 'master sorcerer': EMOJI[':flame:'], 'royal paladin': EMOJI[':archery:'], 'elite knight': EMOJI[':shield:']} try: return emoji[vocation.lower()] except KeyError: return EMOJI[':question:']
-3,352,309,903,081,343,000
Given a vocation name, returns a emoji representing it
NabBot-master/utils/tibia.py
get_voc_emoji
LadyKeladry/Guardian-Bot
python
def get_voc_emoji(vocation: str) -> str: emoji = {'none': EMOJI[':hatching_chick:'], 'druid': EMOJI[':snowflake:'], 'sorcerer': EMOJI[':flame:'], 'paladin': EMOJI[':archery:'], 'knight': EMOJI[':shield:'], 'elder druid': EMOJI[':snowflake:'], 'master sorcerer': EMOJI[':flame:'], 'royal paladin': EMOJI[':archery:'], 'elite knight': EMOJI[':shield:']} try: return emoji[vocation.lower()] except KeyError: return EMOJI[':question:']
def get_pronouns(gender: str): 'Gets a list of pronouns based on the gender given. Only binary genders supported, sorry.' gender = gender.lower() if (gender == 'female'): pronoun = ['she', 'her', 'her'] elif (gender == 'male'): pronoun = ['he', 'his', 'him'] else: pronoun = ['it', 'its', 'it'] return pronoun
5,217,491,315,904,803,000
Gets a list of pronouns based on the gender given. Only binary genders supported, sorry.
NabBot-master/utils/tibia.py
get_pronouns
LadyKeladry/Guardian-Bot
python
def get_pronouns(gender: str): gender = gender.lower() if (gender == 'female'): pronoun = ['she', 'her', 'her'] elif (gender == 'male'): pronoun = ['he', 'his', 'him'] else: pronoun = ['it', 'its', 'it'] return pronoun
def get_map_area(x, y, z, size=15, scale=8, crosshair=True): 'Gets a minimap picture of a map area\n\n size refers to the radius of the image in actual tibia sqm\n scale is how much the image will be streched (1 = 1 sqm = 1 pixel)' c = tibiaDatabase.cursor() c.execute('SELECT * FROM WorldMap WHERE z LIKE ?', (z,)) result = c.fetchone() im = Image.open(io.BytesIO(bytearray(result['image']))) im = im.crop(((x - size), (y - size), (x + size), (y + size))) im = im.resize(((size * scale), (size * scale))) if crosshair: draw = ImageDraw.Draw(im) (width, height) = im.size draw.line((0, (height / 2), width, (height / 2)), fill=128) draw.line(((width / 2), 0, (width / 2), height), fill=128) img_byte_arr = io.BytesIO() im.save(img_byte_arr, format='png') img_byte_arr = img_byte_arr.getvalue() return img_byte_arr
-2,065,824,990,881,618,000
Gets a minimap picture of a map area size refers to the radius of the image in actual tibia sqm scale is how much the image will be streched (1 = 1 sqm = 1 pixel)
NabBot-master/utils/tibia.py
get_map_area
LadyKeladry/Guardian-Bot
python
def get_map_area(x, y, z, size=15, scale=8, crosshair=True): c = tibiaDatabase.cursor() c.execute('SELECT * FROM WorldMap WHERE z LIKE ?', (z,)) result = c.fetchone() im = Image.open(io.BytesIO(bytearray(result['image']))) im = im.crop(((x - size), (y - size), (x + size), (y + size))) im = im.resize(((size * scale), (size * scale))) if crosshair: draw = ImageDraw.Draw(im) (width, height) = im.size draw.line((0, (height / 2), width, (height / 2)), fill=128) draw.line(((width / 2), 0, (width / 2), height), fill=128) img_byte_arr = io.BytesIO() im.save(img_byte_arr, format='png') img_byte_arr = img_byte_arr.getvalue() return img_byte_arr
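A minimal sketch of how get_map_area could be called (illustrative only: the coordinates and output filename are made-up values, and tibiaDatabase must already be the sqlite3 connection the module opens elsewhere):
    # hypothetical coordinates on the Tibia world map
    png_bytes = get_map_area(32369, 32241, 7, size=20, scale=4, crosshair=True)
    with open('minimap.png', 'wb') as fp:
        fp.write(png_bytes)   # get_map_area returns raw PNG bytes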
async def http_request_handler(request: HttpRequest) -> HttpResponse: 'Handle the request' return (await Jinja2TemplateProvider.apply(request, 'example1.html', {'name': 'rob'}))
559,925,934,568,345,540
Handle the request
examples/example1.py
http_request_handler
rob-blackbourn/bareASGI-jinja2
python
async def http_request_handler(request: HttpRequest) -> HttpResponse: return (await Jinja2TemplateProvider.apply(request, 'example1.html', {'name': 'rob'}))
async def handle_no_template(request: HttpRequest) -> HttpResponse: 'This is what happens if there is no template' return (await Jinja2TemplateProvider.apply(request, 'notemplate.html', {'name': 'rob'}))
3,969,731,022,605,455,000
This is what happens if there is no template
examples/example1.py
handle_no_template
rob-blackbourn/bareASGI-jinja2
python
async def handle_no_template(request: HttpRequest) -> HttpResponse: return (await Jinja2TemplateProvider.apply(request, 'notemplate.html', {'name': 'rob'}))
@staticmethod def wait_key(message=''): ' Wait for a key press on the console and return it. ' if (message != ''): print(message) result = None if (os.name == 'nt'): import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = ((newattr[3] & (~ termios.ICANON)) & (~ termios.ECHO)) termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return result
-3,410,446,983,536,551,400
Wait for a key press on the console and return it.
run_demo.py
wait_key
ybettan/AirSimTensorFlow
python
@staticmethod def wait_key(message=''): ' ' if (message != ''): print(message) result = None if (os.name == 'nt'): import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = ((newattr[3] & (~ termios.ICANON)) & (~ termios.ECHO)) termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return result
@staticmethod def read_pfm(file): ' Read a pfm file ' file = open(file, 'rb') color = None width = None height = None scale = None endian = None header = file.readline().rstrip() header = str(bytes.decode(header, encoding='utf-8')) if (header == 'PF'): color = True elif (header == 'Pf'): color = False else: raise Exception('Not a PFM file.') temp_str = str(bytes.decode(file.readline(), encoding='utf-8')) dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', temp_str) if dim_match: (width, height) = map(int, dim_match.groups()) else: raise Exception('Malformed PFM header.') scale = float(file.readline().rstrip()) if (scale < 0): endian = '<' scale = (- scale) else: endian = '>' data = np.fromfile(file, (endian + 'f')) shape = ((height, width, 3) if color else (height, width)) data = np.reshape(data, shape) file.close() return (data, scale)
-9,088,422,383,959,874,000
Read a pfm file
run_demo.py
read_pfm
ybettan/AirSimTensorFlow
python
@staticmethod def read_pfm(file): ' ' file = open(file, 'rb') color = None width = None height = None scale = None endian = None header = file.readline().rstrip() header = str(bytes.decode(header, encoding='utf-8')) if (header == 'PF'): color = True elif (header == 'Pf'): color = False else: raise Exception('Not a PFM file.') temp_str = str(bytes.decode(file.readline(), encoding='utf-8')) dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', temp_str) if dim_match: (width, height) = map(int, dim_match.groups()) else: raise Exception('Malformed PFM header.') scale = float(file.readline().rstrip()) if (scale < 0): endian = '<' scale = (- scale) else: endian = '>' data = np.fromfile(file, (endian + 'f')) shape = ((height, width, 3) if color else (height, width)) data = np.reshape(data, shape) file.close() return (data, scale)
@staticmethod def write_pfm(file, image, scale=1): ' Write a pfm file ' file = open(file, 'wb') color = None if (image.dtype.name != 'float32'): raise Exception('Image dtype must be float32.') image = np.flipud(image) if ((len(image.shape) == 3) and (image.shape[2] == 3)): color = True elif ((len(image.shape) == 2) or ((len(image.shape) == 3) and (image.shape[2] == 1))): color = False else: raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.') file.write(('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))) temp_str = ('%d %d\n' % (image.shape[1], image.shape[0])) file.write(temp_str.encode('utf-8')) endian = image.dtype.byteorder if ((endian == '<') or ((endian == '=') and (sys.byteorder == 'little'))): scale = (- scale) temp_str = ('%f\n' % scale) file.write(temp_str.encode('utf-8')) image.tofile(file)
-2,485,777,137,442,465,000
Write a pfm file
run_demo.py
write_pfm
ybettan/AirSimTensorFlow
python
@staticmethod def write_pfm(file, image, scale=1): ' ' file = open(file, 'wb') color = None if (image.dtype.name != 'float32'): raise Exception('Image dtype must be float32.') image = np.flipud(image) if ((len(image.shape) == 3) and (image.shape[2] == 3)): color = True elif ((len(image.shape) == 2) or ((len(image.shape) == 3) and (image.shape[2] == 1))): color = False else: raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.') file.write(('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))) temp_str = ('%d %d\n' % (image.shape[1], image.shape[0])) file.write(temp_str.encode('utf-8')) endian = image.dtype.byteorder if ((endian == '<') or ((endian == '=') and (sys.byteorder == 'little'))): scale = (- scale) temp_str = ('%f\n' % scale) file.write(temp_str.encode('utf-8')) image.tofile(file)
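A round-trip sketch for the two PFM helpers above (hedged: the enclosing class is not visible in these rows, so the methods are written as plain calls, and depth.pfm is a hypothetical input file):
    import numpy as np

    data, scale = read_pfm('depth.pfm')          # float32 array plus the PFM scale factor
    print(data.shape, data.dtype, scale)
    # note: write_pfm flips the array vertically (np.flipud) before writing
    write_pfm('depth_copy.pfm', data.astype(np.float32), scale=abs(scale))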
@staticmethod def write_png(filename, image): ' image must be a numpy array H X W X channels\n ' import zlib, struct buf = image.flatten().tobytes() width = image.shape[1] height = image.shape[0] width_byte_4 = (width * 4) raw_data = b''.join(((b'\x00' + buf[span:(span + width_byte_4)]) for span in range(((height - 1) * width_byte_4), (- 1), (- width_byte_4)))) def png_pack(png_tag, data): chunk_head = (png_tag + data) return ((struct.pack('!I', len(data)) + chunk_head) + struct.pack('!I', (4294967295 & zlib.crc32(chunk_head)))) png_bytes = b''.join([b'\x89PNG\r\n\x1a\n', png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) AirSimClientBase.write_file(filename, png_bytes)
-8,640,846,976,574,449,000
image must be a numpy array H X W X channels
run_demo.py
write_png
ybettan/AirSimTensorFlow
python
@staticmethod def write_png(filename, image): ' \n ' import zlib, struct buf = image.flatten().tobytes() width = image.shape[1] height = image.shape[0] width_byte_4 = (width * 4) raw_data = b''.join(((b'\x00' + buf[span:(span + width_byte_4)]) for span in range(((height - 1) * width_byte_4), (- 1), (- width_byte_4)))) def png_pack(png_tag, data): chunk_head = (png_tag + data) return ((struct.pack('!I', len(data)) + chunk_head) + struct.pack('!I', (4294967295 & zlib.crc32(chunk_head)))) png_bytes = b''.join([b'\x89PNG\r\n\x1a\n', png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) AirSimClientBase.write_file(filename, png_bytes)
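A short driver sketch for write_png above (an assumption-laden example: the IHDR chunk declares colour type 6, i.e. RGBA, and each row is packed as width * 4 bytes, so the input is assumed to be a 4-channel uint8 array; the call is written as a plain function because the enclosing class is not shown here):
    import numpy as np

    # hypothetical 64x64 RGBA image: opaque red
    rgba = np.zeros((64, 64, 4), dtype=np.uint8)
    rgba[..., 0] = 255   # red channel
    rgba[..., 3] = 255   # alpha channel
    write_png('red_square.png', rgba)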
def __init__(self, chainlen=2): '\n Building the dictionary\n ' if ((chainlen > 10) or (chainlen < 1)): print('Chain length must be between 1 and 10, inclusive') sys.exit(0) self.mcd = Mdict() oldnames = [] self.chainlen = chainlen for l in PLACES: l = l.strip() oldnames.append(l) s = ((' ' * chainlen) + l) for n in range(0, len(l)): self.mcd.add_key(s[n:(n + chainlen)], s[(n + chainlen)]) self.mcd.add_key(s[len(l):(len(l) + chainlen)], '\n')
-8,257,344,179,054,929,000
Building the dictionary
lib/markov_usernames.py
__init__
doc22940/Bash-Utils
python
def __init__(self, chainlen=2): '\n \n ' if ((chainlen > 10) or (chainlen < 1)): print('Chain length must be between 1 and 10, inclusive') sys.exit(0) self.mcd = Mdict() oldnames = [] self.chainlen = chainlen for l in PLACES: l = l.strip() oldnames.append(l) s = ((' ' * chainlen) + l) for n in range(0, len(l)): self.mcd.add_key(s[n:(n + chainlen)], s[(n + chainlen)]) self.mcd.add_key(s[len(l):(len(l) + chainlen)], '\n')
def New(self): '\n New name from the Markov chain\n ' prefix = (' ' * self.chainlen) name = '' suffix = '' while True: suffix = self.mcd.get_suffix(prefix) if ((suffix == '\n') or (len(name) > 9)): break else: name = (name + suffix) prefix = (prefix[1:] + suffix) return name.capitalize()
-3,993,756,492,327,136,000
New name from the Markov chain
lib/markov_usernames.py
New
doc22940/Bash-Utils
python
def New(self): '\n \n ' prefix = (' ' * self.chainlen) name = '' suffix = '' while True: suffix = self.mcd.get_suffix(prefix) if ((suffix == '\n') or (len(name) > 9)): break else: name = (name + suffix) prefix = (prefix[1:] + suffix) return name.capitalize()
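A usage sketch for the Markov-chain pair above (the class name is not visible in these rows, so MarkovNamer below is a hypothetical stand-in for whatever lib/markov_usernames.py actually calls it):
    namer = MarkovNamer(chainlen=3)   # __init__ builds the chain from PLACES immediately
    for _ in range(5):
        # New() walks the chain until it emits '\n' or the name reaches 10 characters
        print(namer.New())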
def __eq__(self, *args, **kwargs): ' Return self==value. ' pass
6,816,163,063,501,262,000
Return self==value.
_pycharm_skeletons/renderdoc/BlendStats.py
__eq__
Lex-DRL/renderdoc-py-stubs
python
def __eq__(self, *args, **kwargs): ' ' pass
def __ge__(self, *args, **kwargs): ' Return self>=value. ' pass
-6,967,964,760,159,473,000
Return self>=value.
_pycharm_skeletons/renderdoc/BlendStats.py
__ge__
Lex-DRL/renderdoc-py-stubs
python
def __ge__(self, *args, **kwargs): ' ' pass
def __gt__(self, *args, **kwargs): ' Return self>value. ' pass
-2,387,978,140,579,432,400
Return self>value.
_pycharm_skeletons/renderdoc/BlendStats.py
__gt__
Lex-DRL/renderdoc-py-stubs
python
def __gt__(self, *args, **kwargs): ' ' pass
def __hash__(self, *args, **kwargs): ' Return hash(self). ' pass
7,522,954,561,806,290,000
Return hash(self).
_pycharm_skeletons/renderdoc/BlendStats.py
__hash__
Lex-DRL/renderdoc-py-stubs
python
def __hash__(self, *args, **kwargs): ' ' pass
def __le__(self, *args, **kwargs): ' Return self<=value. ' pass
555,903,605,434,510,850
Return self<=value.
_pycharm_skeletons/renderdoc/BlendStats.py
__le__
Lex-DRL/renderdoc-py-stubs
python
def __le__(self, *args, **kwargs): ' ' pass
def __lt__(self, *args, **kwargs): ' Return self<value. ' pass
-1,514,029,532,418,184,000
Return self<value.
_pycharm_skeletons/renderdoc/BlendStats.py
__lt__
Lex-DRL/renderdoc-py-stubs
python
def __lt__(self, *args, **kwargs): ' ' pass
@staticmethod def __new__(*args, **kwargs): ' Create and return a new object. See help(type) for accurate signature. ' pass
-659,758,195,336,737,200
Create and return a new object. See help(type) for accurate signature.
_pycharm_skeletons/renderdoc/BlendStats.py
__new__
Lex-DRL/renderdoc-py-stubs
python
@staticmethod def __new__(*args, **kwargs): ' ' pass
def __ne__(self, *args, **kwargs): ' Return self!=value. ' pass
-3,179,579,680,757,598,000
Return self!=value.
_pycharm_skeletons/renderdoc/BlendStats.py
__ne__
Lex-DRL/renderdoc-py-stubs
python
def __ne__(self, *args, **kwargs): ' ' pass
def test_displacy_parse_ents(en_vocab): "Test that named entities on a Doc are converted into displaCy's format." doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': '', 'kb_url': '#'}]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'], kb_id='Q95')] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': 'Q95', 'kb_url': '#'}])
-7,496,353,557,940,837,000
Test that named entities on a Doc are converted into displaCy's format.
spacy/tests/test_displacy.py
test_displacy_parse_ents
xettrisomeman/spaCy
python
def test_displacy_parse_ents(en_vocab): doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': '', 'kb_url': '#'}]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'], kb_id='Q95')] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': 'Q95', 'kb_url': '#'}])
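Outside the test suite, the same data feeds displacy.render directly; a minimal sketch (it assumes the en_core_web_sm model is installed, which the test rows themselves do not require):
    import spacy
    from spacy import displacy

    nlp = spacy.load('en_core_web_sm')        # assumed to be available locally
    doc = nlp('But Google is starting from behind')
    html = displacy.render(doc, style='ent')  # markup built from the same parse_ents output
    with open('ents.html', 'w', encoding='utf-8') as fp:
        fp.write(html)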
def test_displacy_parse_ents_with_kb_id_options(en_vocab): "Test that named entities with kb_id on a Doc are converted into displaCy's format." doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'], kb_id='Q95')] ents = displacy.parse_ents(doc, {'kb_url_template': 'https://www.wikidata.org/wiki/{}'}) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': 'Q95', 'kb_url': 'https://www.wikidata.org/wiki/Q95'}])
-5,475,753,648,586,574,000
Test that named entities with kb_id on a Doc are converted into displaCy's format.
spacy/tests/test_displacy.py
test_displacy_parse_ents_with_kb_id_options
xettrisomeman/spaCy
python
def test_displacy_parse_ents_with_kb_id_options(en_vocab): doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'], kb_id='Q95')] ents = displacy.parse_ents(doc, {'kb_url_template': 'https://www.wikidata.org/wiki/{}'}) assert isinstance(ents, dict) assert (ents['text'] == 'But Google is starting from behind ') assert (ents['ents'] == [{'start': 4, 'end': 10, 'label': 'ORG', 'kb_id': 'Q95', 'kb_url': 'https://www.wikidata.org/wiki/Q95'}])
def test_displacy_parse_deps(en_vocab): "Test that deps and tags on a Doc are converted into displaCy's format." words = ['This', 'is', 'a', 'sentence'] heads = [1, 1, 3, 1] pos = ['DET', 'VERB', 'DET', 'NOUN'] tags = ['DT', 'VBZ', 'DT', 'NN'] deps = ['nsubj', 'ROOT', 'det', 'attr'] doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps) deps = displacy.parse_deps(doc) assert isinstance(deps, dict) assert (deps['words'] == [{'lemma': None, 'text': words[0], 'tag': pos[0]}, {'lemma': None, 'text': words[1], 'tag': pos[1]}, {'lemma': None, 'text': words[2], 'tag': pos[2]}, {'lemma': None, 'text': words[3], 'tag': pos[3]}]) assert (deps['arcs'] == [{'start': 0, 'end': 1, 'label': 'nsubj', 'dir': 'left'}, {'start': 2, 'end': 3, 'label': 'det', 'dir': 'left'}, {'start': 1, 'end': 3, 'label': 'attr', 'dir': 'right'}])
5,578,797,609,663,022,000
Test that deps and tags on a Doc are converted into displaCy's format.
spacy/tests/test_displacy.py
test_displacy_parse_deps
xettrisomeman/spaCy
python
def test_displacy_parse_deps(en_vocab): words = ['This', 'is', 'a', 'sentence'] heads = [1, 1, 3, 1] pos = ['DET', 'VERB', 'DET', 'NOUN'] tags = ['DT', 'VBZ', 'DT', 'NN'] deps = ['nsubj', 'ROOT', 'det', 'attr'] doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps) deps = displacy.parse_deps(doc) assert isinstance(deps, dict) assert (deps['words'] == [{'lemma': None, 'text': words[0], 'tag': pos[0]}, {'lemma': None, 'text': words[1], 'tag': pos[1]}, {'lemma': None, 'text': words[2], 'tag': pos[2]}, {'lemma': None, 'text': words[3], 'tag': pos[3]}]) assert (deps['arcs'] == [{'start': 0, 'end': 1, 'label': 'nsubj', 'dir': 'left'}, {'start': 2, 'end': 3, 'label': 'det', 'dir': 'left'}, {'start': 1, 'end': 3, 'label': 'attr', 'dir': 'right'}])
def test_displacy_spans(en_vocab): 'Test that displaCy can render Spans.' doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] html = displacy.render(doc[1:4], style='ent') assert html.startswith('<div')
3,441,236,095,723,474,000
Test that displaCy can render Spans.
spacy/tests/test_displacy.py
test_displacy_spans
xettrisomeman/spaCy
python
def test_displacy_spans(en_vocab): doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] html = displacy.render(doc[1:4], style='ent') assert html.startswith('<div')
def test_displacy_render_wrapper(en_vocab): 'Test that displaCy accepts custom rendering wrapper.' def wrapper(html): return (('TEST' + html) + 'TEST') displacy.set_render_wrapper(wrapper) doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] html = displacy.render(doc, style='ent') assert html.startswith('TEST<div') assert html.endswith('/div>TEST') displacy.set_render_wrapper((lambda html: html))
-3,702,502,177,086,248,400
Test that displaCy accepts custom rendering wrapper.
spacy/tests/test_displacy.py
test_displacy_render_wrapper
xettrisomeman/spaCy
python
def test_displacy_render_wrapper(en_vocab): def wrapper(html): return (('TEST' + html) + 'TEST') displacy.set_render_wrapper(wrapper) doc = Doc(en_vocab, words=['But', 'Google', 'is', 'starting', 'from', 'behind']) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings['ORG'])] html = displacy.render(doc, style='ent') assert html.startswith('TEST<div') assert html.endswith('/div>TEST') displacy.set_render_wrapper((lambda html: html))