import json
from pathlib import Path

from anbot.core import Config


class DataConverter:
    """
    Class for moving v2 data to v3
    """

    def __init__(self, config_instance: Config):
        self.config = config_instance

    @staticmethod
    def json_load(file_path: Path):
        """Utility function for quickly grabbing data from a JSON file

        Parameters
        ----------
        file_path: `pathlib.Path`
            The path to the file to grab data from

        Raises
        ------
        FileNotFoundError
            The file doesn't exist
        json.JSONDecodeError
            The file isn't valid JSON

        """
        try:
            with file_path.open(mode="r", encoding="utf-8") as f:
                data = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            raise
        else:
            return data

    async def convert(self, file_path: Path, conversion_spec: object):
        """Converts v2 data to v3 format. If your v2 data uses multiple files
        you will need to call this for each file.

        Parameters
        ----------
        file_path : `pathlib.Path`
            This should be the path to a JSON settings file from v2
        conversion_spec : `object`
            This should be a function which takes a single argument
            (the loaded JSON) and from it either returns or yields one
            or more `dict` whose items are in the form::

                {(SCOPE, *IDENTIFIERS): {(key_tuple): value}}

            An example of a possible entry of that dict::

                {(Config.MEMBER, '133049272517001216', '78631113035100160'): {('balance',): 9001}}

            This allows for any amount of entries at each level in each
            of the nested dictionaries returned by conversion_spec, but
            the nesting cannot differ from this and still get the
            expected results. See the documentation for Config for more
            details on scopes and the identifiers they need.

        Returns
        -------
        None

        Raises
        ------
        FileNotFoundError
            No such file at the specified path
        json.JSONDecodeError
            File is not valid JSON
        AttributeError
            Something went wrong with the conversion and it provided
            data in the wrong format

        """
        v2data = self.json_load(file_path)

        for entryset in conversion_spec(v2data):
            for scope_id, values in entryset.items():
                base = self.config._get_base_group(*scope_id)
                for inner_k, inner_v in values.items():
                    await base.set_raw(*inner_k, value=inner_v)

    async def dict_import(self, entrydict: dict):
        """This imports a dictionary in the correct format into Config

        Parameters
        ----------
        entrydict : `dict`
            This should be a dictionary of values to set. This is
            provided as an alternative to providing a file and
            conversion specification. The dictionary should be in the
            following format::

                {(SCOPE, *IDENTIFIERS): {(key_tuple): value}}

            An example of a possible entry of that dict::

                {(Config.MEMBER, '133049272517001216', '78631113035100160'): {('balance',): 9001}}

            This allows for any amount of entries at each level in each
            of the nested dictionaries, but the nesting cannot differ
            from this and still get the expected results.

        Returns
        -------
        None

        Raises
        ------
        AttributeError
            Data not in the correct format.

        """
        for scope_id, values in entrydict.items():
            base = self.config._get_base_group(*scope_id)
            for inner_k, inner_v in values.items():
                await base.set_raw(*inner_k, value=inner_v)
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/data_converter.py
data_converter.py
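For context, a minimal sketch of what a conversion_spec callable for DataConverter.convert might look like. The v2 file layout used here (a "bank" mapping of server IDs to member balances) is hypothetical and exists only to illustrate the {(SCOPE, *IDENTIFIERS): {(key_tuple): value}} shape the converter expects.

from pathlib import Path

from anbot.core import Config


def economy_spec(v2data: dict):
    # Hypothetical v2 layout: {"bank": {server_id: {user_id: {"balance": int}}}}.
    # Yield one entry-dict per server, keyed the way convert() expects.
    for server_id, members in v2data.get("bank", {}).items():
        yield {
            (Config.MEMBER, server_id, user_id): {("balance",): member["balance"]}
            for user_id, member in members.items()
        }


# Inside a coroutine, with an existing Config instance:
#     converter = DataConverter(config)
#     await converter.convert(Path("data/economy/bank.json"), economy_spec)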
import asyncio
import logging
import os
import shutil
from asyncio import AbstractEventLoop, as_completed, Semaphore
from asyncio.futures import isfuture
from itertools import chain
from pathlib import Path
from typing import (
    Any,
    AsyncIterator,
    AsyncIterable,
    Awaitable,
    Callable,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
    TypeVar,
    Union,
)

import discord
from fuzzywuzzy import fuzz, process

from anbot.core import commands
from .chat_formatting import box

__all__ = [
    "bounded_gather",
    "safe_delete",
    "fuzzy_command_search",
    "format_fuzzy_results",
    "deduplicate_iterables",
]

_T = TypeVar("_T")


# Benchmarked to be the fastest method.
def deduplicate_iterables(*iterables):
    """
    Returns a list of all unique items in ``iterables``, in the order they
    were first encountered.
    """
    # dict insertion order is guaranteed to be preserved in 3.6+
    return list(dict.fromkeys(chain.from_iterable(iterables)))


def _fuzzy_log_filter(record):
    return record.funcName != "extractWithoutOrder"


logging.getLogger().addFilter(_fuzzy_log_filter)


def safe_delete(pth: Path):
    if pth.exists():
        for root, dirs, files in os.walk(str(pth)):
            os.chmod(root, 0o755)
            for d in dirs:
                os.chmod(os.path.join(root, d), 0o755)
            for f in files:
                os.chmod(os.path.join(root, f), 0o755)
        shutil.rmtree(str(pth), ignore_errors=True)


class AsyncFilter(AsyncIterator[_T], Awaitable[List[_T]]):
    """Class returned by `async_filter`. See that function for details.

    We don't recommend instantiating this class directly.
    """

    def __init__(
        self,
        func: Callable[[_T], Union[bool, Awaitable[bool]]],
        iterable: Union[AsyncIterable[_T], Iterable[_T]],
    ) -> None:
        self.__func: Callable[[_T], Union[bool, Awaitable[bool]]] = func
        self.__iterable: Union[AsyncIterable[_T], Iterable[_T]] = iterable

        # We assign the generator strategy based on the arguments' types
        if isinstance(iterable, AsyncIterable):
            if asyncio.iscoroutinefunction(func):
                self.__generator_instance = self.__async_generator_async_pred()
            else:
                self.__generator_instance = self.__async_generator_sync_pred()
        elif asyncio.iscoroutinefunction(func):
            self.__generator_instance = self.__sync_generator_async_pred()
        else:
            raise TypeError("Must be either an async predicate, an async iterable, or both.")

    async def __sync_generator_async_pred(self) -> AsyncIterator[_T]:
        for item in self.__iterable:
            if await self.__func(item):
                yield item

    async def __async_generator_sync_pred(self) -> AsyncIterator[_T]:
        async for item in self.__iterable:
            if self.__func(item):
                yield item

    async def __async_generator_async_pred(self) -> AsyncIterator[_T]:
        async for item in self.__iterable:
            if await self.__func(item):
                yield item

    async def __flatten(self) -> List[_T]:
        return [item async for item in self]

    def __await__(self):
        # Simply return the generator filled into a list
        return self.__flatten().__await__()

    def __anext__(self) -> Awaitable[_T]:
        # This will use the generator strategy set in __init__
        return self.__generator_instance.__anext__()


def async_filter(
    func: Callable[[_T], Union[bool, Awaitable[bool]]],
    iterable: Union[AsyncIterable[_T], Iterable[_T]],
) -> AsyncFilter[_T]:
    """Filter an (optionally async) iterable with an (optionally async) predicate.

    At least one of the arguments must be async.

    Parameters
    ----------
    func : Callable[[T], Union[bool, Awaitable[bool]]]
        A function or coroutine function which takes one item of ``iterable``
        as an argument, and returns ``True`` or ``False``.
    iterable : Union[AsyncIterable[_T], Iterable[_T]]
        An iterable or async iterable which is to be filtered.

    Raises
    ------
    TypeError
        If neither of the arguments are async.

    Returns
    -------
    AsyncFilter[T]
        An object which can either be awaited to yield a list of the filtered
        items, or can also act as an async iterator to yield items one by one.

    """
    return AsyncFilter(func, iterable)


async def async_enumerate(
    async_iterable: AsyncIterable[_T], start: int = 0
) -> AsyncIterator[Tuple[int, _T]]:
    """Async iterable version of `enumerate`.

    Parameters
    ----------
    async_iterable : AsyncIterable[T]
        The iterable to enumerate.
    start : int
        The index to start from. Defaults to 0.

    Returns
    -------
    AsyncIterator[Tuple[int, T]]
        An async iterator of tuples in the form of ``(index, item)``.

    """
    async for item in async_iterable:
        yield start, item
        start += 1


async def fuzzy_command_search(
    ctx: commands.Context, term: Optional[str] = None, *, min_score: int = 80
) -> Optional[List[commands.Command]]:
    """Search for commands which are similar in name to the one invoked.

    Returns a maximum of 5 commands which must all be at least matched
    greater than ``min_score``.

    Parameters
    ----------
    ctx : `commands.Context <anbot.core.commands.Context>`
        The command invocation context.
    term : Optional[str]
        The name of the invoked command. If ``None``,
        `Context.invoked_with` will be used instead.
    min_score : int
        The minimum score for matched commands to reach. Defaults to 80.

    Returns
    -------
    Optional[List[`commands.Command <anbot.core.commands.Command>`]]
        A list of commands which were fuzzily matched with the invoked
        command.

    """
    if ctx.guild is not None:
        enabled = await ctx.bot.db.guild(ctx.guild).fuzzy()
    else:
        enabled = await ctx.bot.db.fuzzy()

    if not enabled:
        return

    if term is None:
        term = ctx.invoked_with

    # If the term is an alias or CC, we don't want to send a supplementary fuzzy search.
    alias_cog = ctx.bot.get_cog("Alias")
    if alias_cog is not None:
        is_alias, alias = await alias_cog.is_alias(ctx.guild, term)

        if is_alias:
            return
    customcom_cog = ctx.bot.get_cog("CustomCommands")
    if customcom_cog is not None:
        cmd_obj = customcom_cog.commandobj

        try:
            await cmd_obj.get(ctx.message, term)
        except Exception:
            pass
        else:
            return

    # Do the scoring. `extracted` is a list of tuples in the form `(command, score)`
    extracted = process.extract(term, ctx.bot.walk_commands(), limit=5, scorer=fuzz.QRatio)
    if not extracted:
        return

    # Filter through the fuzzy-matched commands.
    matched_commands = []
    for command, score in extracted:
        if score < min_score:
            # Since the list is in decreasing order of score, we can exit early.
            break
        if await command.can_see(ctx):
            matched_commands.append(command)

    return matched_commands


async def format_fuzzy_results(
    ctx: commands.Context,
    matched_commands: List[commands.Command],
    *,
    embed: Optional[bool] = None,
) -> Union[str, discord.Embed]:
    """Format the result of a fuzzy command search.

    Parameters
    ----------
    ctx : `commands.Context <anbot.core.commands.Context>`
        The context in which this result is being displayed.
    matched_commands : List[`commands.Command <anbot.core.commands.Command>`]
        A list of commands which have been matched by the fuzzy search, sorted
        in order of decreasing similarity.
    embed : bool
        Whether or not the result should be an embed. If set to ``None``, this
        will default to the result of `ctx.embed_requested`.

    Returns
    -------
    Union[str, discord.Embed]
        The formatted results.

    """
    if embed is not False and (embed is True or await ctx.embed_requested()):
        lines = []
        for cmd in matched_commands:
            lines.append(f"**{ctx.clean_prefix}{cmd.qualified_name}** {cmd.short_doc}")
        return discord.Embed(
            title="Perhaps you wanted one of these?",
            colour=await ctx.embed_colour(),
            description="\n".join(lines),
        )
    else:
        lines = []
        for cmd in matched_commands:
            lines.append(f"{ctx.clean_prefix}{cmd.qualified_name} -- {cmd.short_doc}")
        return "Perhaps you wanted one of these? " + box("\n".join(lines), lang="vhdl")


async def _sem_wrapper(sem, task):
    async with sem:
        return await task


def bounded_gather_iter(
    *coros_or_futures,
    loop: Optional[AbstractEventLoop] = None,
    limit: int = 4,
    semaphore: Optional[Semaphore] = None,
) -> Iterator[Awaitable[Any]]:
    """
    An iterator that returns tasks as they are ready, but limits the
    number of tasks running at a time.

    Parameters
    ----------
    *coros_or_futures
        The awaitables to run in a bounded concurrent fashion.
    loop : asyncio.AbstractEventLoop
        The event loop to use for the semaphore and :meth:`asyncio.gather`.
    limit : Optional[`int`]
        The maximum number of concurrent tasks. Used when no ``semaphore``
        is passed.
    semaphore : Optional[:class:`asyncio.Semaphore`]
        The semaphore to use for bounding tasks. If `None`, create one
        using ``loop`` and ``limit``.

    Raises
    ------
    TypeError
        When invalid parameters are passed
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    if semaphore is None:
        if not isinstance(limit, int) or limit <= 0:
            raise TypeError("limit must be an int > 0")

        semaphore = Semaphore(limit, loop=loop)

    pending = []

    for cof in coros_or_futures:
        if isfuture(cof) and cof._loop is not loop:
            raise ValueError("futures are tied to different event loops")

        cof = _sem_wrapper(semaphore, cof)
        pending.append(cof)

    return as_completed(pending, loop=loop)


def bounded_gather(
    *coros_or_futures,
    loop: Optional[AbstractEventLoop] = None,
    return_exceptions: bool = False,
    limit: int = 4,
    semaphore: Optional[Semaphore] = None,
) -> Awaitable[List[Any]]:
    """
    A semaphore-bounded wrapper to :meth:`asyncio.gather`.

    Parameters
    ----------
    *coros_or_futures
        The awaitables to run in a bounded concurrent fashion.
    loop : asyncio.AbstractEventLoop
        The event loop to use for the semaphore and :meth:`asyncio.gather`.
    return_exceptions : bool
        If true, gather exceptions in the result list instead of raising.
    limit : Optional[`int`]
        The maximum number of concurrent tasks. Used when no ``semaphore``
        is passed.
    semaphore : Optional[:class:`asyncio.Semaphore`]
        The semaphore to use for bounding tasks. If `None`, create one
        using ``loop`` and ``limit``.

    Raises
    ------
    TypeError
        When invalid parameters are passed
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    if semaphore is None:
        if not isinstance(limit, int) or limit <= 0:
            raise TypeError("limit must be an int > 0")

        semaphore = Semaphore(limit, loop=loop)

    tasks = (_sem_wrapper(semaphore, task) for task in coros_or_futures)

    return asyncio.gather(*tasks, loop=loop, return_exceptions=return_exceptions)
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/__init__.py
__init__.py
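A short usage sketch for the helpers above, assuming async_filter and bounded_gather are imported from anbot.core.utils (async_filter is not in __all__ but is importable); the predicate and worker coroutines are made up for illustration.

import asyncio

from anbot.core.utils import async_filter, bounded_gather


async def demo():
    async def is_even(n: int) -> bool:  # an async predicate over a sync iterable
        return n % 2 == 0

    # Awaiting an AsyncFilter collects the surviving items into a list.
    print(await async_filter(is_even, range(10)))  # [0, 2, 4, 6, 8]

    async def work(n: int) -> int:
        await asyncio.sleep(0.1)
        return n * n

    # At most 4 of the 10 coroutines run concurrently.
    print(await bounded_gather(*(work(n) for n in range(10)), limit=4))


asyncio.get_event_loop().run_until_complete(demo())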
import itertools
from typing import Sequence, Iterator, List

import discord


def error(text: str) -> str:
    """Get text prefixed with an error emoji.

    Returns
    -------
    str
        The new message.

    """
    return "\N{NO ENTRY SIGN} {}".format(text)


def warning(text: str) -> str:
    """Get text prefixed with a warning emoji.

    Returns
    -------
    str
        The new message.

    """
    return "\N{WARNING SIGN} {}".format(text)


def info(text: str) -> str:
    """Get text prefixed with an info emoji.

    Returns
    -------
    str
        The new message.

    """
    return "\N{INFORMATION SOURCE} {}".format(text)


def question(text: str) -> str:
    """Get text prefixed with a question emoji.

    Returns
    -------
    str
        The new message.

    """
    return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)


def bold(text: str) -> str:
    """Get the given text in bold.

    Parameters
    ----------
    text : str
        The text to be marked up.

    Returns
    -------
    str
        The marked up text.

    """
    text = escape(text, formatting=True)
    return "**{}**".format(text)


def box(text: str, lang: str = "") -> str:
    """Get the given text in a code block.

    Parameters
    ----------
    text : str
        The text to be marked up.
    lang : `str`, optional
        The syntax highlighting language for the codeblock.

    Returns
    -------
    str
        The marked up text.

    """
    ret = "```{}\n{}\n```".format(lang, text)
    return ret


def inline(text: str) -> str:
    """Get the given text as inline code.

    Parameters
    ----------
    text : str
        The text to be marked up.

    Returns
    -------
    str
        The marked up text.

    """
    if "`" in text:
        return "``{}``".format(text)
    else:
        return "`{}`".format(text)


def italics(text: str) -> str:
    """Get the given text in italics.

    Parameters
    ----------
    text : str
        The text to be marked up.

    Returns
    -------
    str
        The marked up text.

    """
    text = escape(text, formatting=True)
    return "*{}*".format(text)


def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
    """Get blocks of text inside borders.

    Note
    ----
    This will only work with a monospaced font.

    Parameters
    ----------
    *columns : `sequence` of `str`
        The columns of text, each being a list of lines in that column.
    ascii_border : bool
        Whether or not the border should be pure ASCII.

    Returns
    -------
    str
        The bordered text.

    """
    borders = {
        "TL": "-" if ascii_border else "┌",  # Top-left
        "TR": "-" if ascii_border else "┐",  # Top-right
        "BL": "-" if ascii_border else "└",  # Bottom-left
        "BR": "-" if ascii_border else "┘",  # Bottom-right
        "HZ": "-" if ascii_border else "─",  # Horizontal
        "VT": "|" if ascii_border else "│",  # Vertical
    }

    sep = " " * 4  # Separator between boxes
    widths = tuple(max(len(row) for row in column) + 9 for column in columns)  # width of each col
    colsdone = [False] * len(columns)  # whether or not each column is done
    lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]

    for line in itertools.zip_longest(*columns):
        row = []
        for colidx, column in enumerate(line):
            width = widths[colidx]
            done = colsdone[colidx]
            if column is None:
                if not done:
                    # bottom border of column
                    column = "{HZ}" * width
                    row.append("{BL}" + column + "{BR}")
                    colsdone[colidx] = True  # mark column as done
                else:
                    # leave empty
                    row.append(" " * (width + 2))
            else:
                column += " " * (width - len(column))  # append padded spaces
                row.append("{VT}" + column + "{VT}")

        lines.append(sep.join(row))

    final_row = []
    for width, done in zip(widths, colsdone):
        if not done:
            final_row.append("{BL}" + "{HZ}" * width + "{BR}")
        else:
            final_row.append(" " * (width + 2))
    lines.append(sep.join(final_row))

    return "\n".join(lines).format(**borders)


def pagify(
    text: str,
    delims: Sequence[str] = ["\n"],
    *,
    priority: bool = False,
    escape_mass_mentions: bool = True,
    shorten_by: int = 8,
    page_length: int = 2000,
) -> Iterator[str]:
    """Generate multiple pages from the given text.

    Note
    ----
    This does not respect code blocks or inline code.

    Parameters
    ----------
    text : str
        The content to pagify and send.
    delims : `sequence` of `str`, optional
        Characters where page breaks will occur. If no delimiters are found
        in a page, the page will break after ``page_length`` characters.
        By default this only contains the newline.

    Other Parameters
    ----------------
    priority : `bool`
        Set to :code:`True` to choose the page break delimiter based on the
        order of ``delims``. Otherwise, the page will always break at the
        last possible delimiter.
    escape_mass_mentions : `bool`
        If :code:`True`, any mass mentions (here or everyone) will be
        silenced.
    shorten_by : `int`
        How much to shorten each page by. Defaults to 8.
    page_length : `int`
        The maximum length of each page. Defaults to 2000.

    Yields
    ------
    `str`
        Pages of the given text.

    """
    in_text = text
    page_length -= shorten_by
    while len(in_text) > page_length:
        this_page_len = page_length
        if escape_mass_mentions:
            this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
                "@everyone", 0, page_length
            )
        closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
        if priority:
            closest_delim = next((x for x in closest_delim if x > 0), -1)
        else:
            closest_delim = max(closest_delim)
        closest_delim = closest_delim if closest_delim != -1 else this_page_len
        if escape_mass_mentions:
            to_send = escape(in_text[:closest_delim], mass_mentions=True)
        else:
            to_send = in_text[:closest_delim]
        if len(to_send.strip()) > 0:
            yield to_send
        in_text = in_text[closest_delim:]

    if len(in_text.strip()) > 0:
        if escape_mass_mentions:
            yield escape(in_text, mass_mentions=True)
        else:
            yield in_text


def strikethrough(text: str) -> str:
    """Get the given text with a strikethrough.

    Parameters
    ----------
    text : str
        The text to be marked up.

    Returns
    -------
    str
        The marked up text.

    """
    text = escape(text, formatting=True)
    return "~~{}~~".format(text)


def underline(text: str) -> str:
    """Get the given text with an underline.

    Parameters
    ----------
    text : str
        The text to be marked up.

    Returns
    -------
    str
        The marked up text.

    """
    text = escape(text, formatting=True)
    return "__{}__".format(text)


def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
    """Get text with all mass mentions or markdown escaped.

    Parameters
    ----------
    text : str
        The text to be escaped.
    mass_mentions : `bool`, optional
        Set to :code:`True` to escape mass mentions in the text.
    formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.

    Returns
    -------
    str
        The escaped text.

    """
    if mass_mentions:
        text = text.replace("@everyone", "@\u200beveryone")
        text = text.replace("@here", "@\u200bhere")
    if formatting:
        text = text.replace("`", "\\`").replace("*", "\\*").replace("_", "\\_").replace("~", "\\~")
    return text


def humanize_list(items: Sequence[str]) -> str:
    """Get comma-separated list, with the last element joined with *and*.

    This uses an Oxford comma, because without one, items containing
    the word *and* would make the output difficult to interpret.

    Parameters
    ----------
    items : Sequence[str]
        The items of the list to join together.

    Examples
    --------
    .. testsetup::

        from anbot.core.utils.chat_formatting import humanize_list

    .. doctest::

        >>> humanize_list(['One', 'Two', 'Three'])
        'One, Two, and Three'
        >>> humanize_list(['One'])
        'One'

    """
    if len(items) == 1:
        return items[0]
    return ", ".join(items[:-1]) + ", and " + items[-1]


def format_perms_list(perms: discord.Permissions) -> str:
    """Format a list of permission names.

    This will return a humanized list of the names of all enabled
    permissions in the provided `discord.Permissions` object.

    Parameters
    ----------
    perms : discord.Permissions
        The permissions object with the requested permissions to list
        enabled.

    Returns
    -------
    str
        The humanized list.

    """
    perm_names: List[str] = []
    for perm, value in perms:
        if value is True:
            perm_name = '"' + perm.replace("_", " ").title() + '"'
            perm_names.append(perm_name)
    return humanize_list(perm_names).replace("Guild", "Server")
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/chat_formatting.py
chat_formatting.py
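A quick sketch of how pagify and box compose when sending long output. Note that pagify's default shorten_by=8 exactly covers the eight characters box adds when no language is given ("```\n" plus "\n```"), so each boxed page still fits in one 2000-character Discord message.

long_report = "\n".join("row {}: ok".format(i) for i in range(500))

for page in pagify(long_report, delims=["\n"], page_length=2000):
    message = box(page)  # wrap each page in a code block
    # await ctx.send(message)  # inside a command coroutine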
import asyncio
import contextlib
from typing import Iterable, List

import discord
from discord.ext import commands

from .requires import PermState
from ..utils.chat_formatting import box
from ..utils.predicates import MessagePredicate
from ..utils import common_filters

TICK = "\N{WHITE HEAVY CHECK MARK}"

__all__ = ["Context"]


class Context(commands.Context):
    """Command invocation context for AN.

    All context passed into commands will be of this type.

    This class inherits from `discord.ext.commands.Context`.
    """

    def __init__(self, **attrs):
        super().__init__(**attrs)
        self.permission_state: PermState = PermState.NORMAL

    async def send(self, content=None, **kwargs):
        """Sends a message to the destination with the content given.

        This acts the same as `discord.ext.commands.Context.send`, with
        one added keyword argument as detailed below in *Other Parameters*.

        Parameters
        ----------
        content : str
            The content of the message to send.

        Other Parameters
        ----------------
        filter : Callable[`str`] -> `str`
            A function which is used to sanitize the ``content`` before
            it is sent. Defaults to
            :func:`~anbot.core.utils.common_filters.filter_mass_mentions`.
            This must take a single `str` as an argument, and return
            the sanitized `str`.
        \\*\\*kwargs
            See `discord.ext.commands.Context.send`.

        Returns
        -------
        discord.Message
            The message that was sent.

        """
        _filter = kwargs.pop("filter", common_filters.filter_mass_mentions)

        if _filter and content:
            content = _filter(str(content))

        return await super().send(content=content, **kwargs)

    async def send_help(self) -> List[discord.Message]:
        """Send the command help message.

        Returns
        -------
        `list` of `discord.Message`
            A list of help messages which were sent to the user.

        """
        command = self.invoked_subcommand or self.command
        embed_wanted = await self.bot.embed_requested(
            self.channel, self.author, command=self.bot.get_command("help")
        )
        if self.guild and not self.channel.permissions_for(self.guild.me).embed_links:
            embed_wanted = False

        ret = []
        destination = self
        if embed_wanted:
            embeds = await self.bot.formatter.format_help_for(self, command)
            for embed in embeds:
                try:
                    m = await destination.send(embed=embed)
                except discord.HTTPException:
                    destination = self.author
                    m = await destination.send(embed=embed)
                ret.append(m)
        else:
            f = commands.HelpFormatter()
            msgs = await f.format_help_for(self, command)
            for msg in msgs:
                try:
                    m = await destination.send(msg)
                except discord.HTTPException:
                    destination = self.author
                    m = await destination.send(msg)
                ret.append(m)

        return ret

    async def tick(self) -> bool:
        """Add a tick reaction to the command message.

        Returns
        -------
        bool
            :code:`True` if adding the reaction succeeded.

        """
        try:
            await self.message.add_reaction(TICK)
        except discord.HTTPException:
            return False
        else:
            return True

    async def send_interactive(
        self, messages: Iterable[str], box_lang: str = None, timeout: int = 15
    ) -> List[discord.Message]:
        """Send multiple messages interactively.

        The user will be prompted for whether or not they would like to view
        the next message, one at a time. They will also be notified of how
        many messages are remaining on each prompt.

        Parameters
        ----------
        messages : `iterable` of `str`
            The messages to send.
        box_lang : str
            If specified, each message will be contained within a codeblock
            of this language.
        timeout : int
            How long the user has to respond to the prompt before it times
            out. After timing out, the bot deletes its prompt message.

        """
        messages = tuple(messages)
        ret = []

        for idx, page in enumerate(messages, 1):
            if box_lang is None:
                msg = await self.send(page)
            else:
                msg = await self.send(box(page, lang=box_lang))
            ret.append(msg)
            n_remaining = len(messages) - idx
            if n_remaining > 0:
                if n_remaining == 1:
                    plural = ""
                    is_are = "is"
                else:
                    plural = "s"
                    is_are = "are"
                query = await self.send(
                    "There {} still {} message{} remaining. "
                    "Type `more` to continue."
                    "".format(is_are, n_remaining, plural)
                )
                try:
                    resp = await self.bot.wait_for(
                        "message",
                        check=MessagePredicate.lower_equal_to("more", self),
                        timeout=timeout,
                    )
                except asyncio.TimeoutError:
                    with contextlib.suppress(discord.HTTPException):
                        await query.delete()
                    break
                else:
                    try:
                        await self.channel.delete_messages((query, resp))
                    except (discord.HTTPException, AttributeError):
                        # In case the bot can't delete other users' messages,
                        # or is not a bot account
                        # or channel is a DM
                        with contextlib.suppress(discord.HTTPException):
                            await query.delete()
        return ret

    async def embed_colour(self):
        """
        Helper function to get the colour for an embed.

        Returns
        -------
        discord.Colour:
            The colour to be used
        """
        if self.guild and await self.bot.db.guild(self.guild).use_bot_color():
            return self.guild.me.color
        else:
            return self.bot.color

    @property
    def embed_color(self):
        # Rather than double awaiting.
        return self.embed_colour

    async def embed_requested(self):
        """
        Simple helper to call bot.embed_requested
        with logic around if embed permissions are available

        Returns
        -------
        bool:
            :code:`True` if an embed is requested
        """
        if self.guild and not self.channel.permissions_for(self.guild.me).embed_links:
            return False
        return await self.bot.embed_requested(self.channel, self.author, command=self.command)

    async def maybe_send_embed(self, message: str) -> discord.Message:
        """
        Simple helper to send a simple message to context
        without manually checking ctx.embed_requested

        This should only be used for simple messages.

        Parameters
        ----------
        message: `str`
            The string to send

        Returns
        -------
        discord.Message:
            the message which was sent

        Raises
        ------
        discord.Forbidden
            see `discord.abc.Messageable.send`
        discord.HTTPException
            see `discord.abc.Messageable.send`
        """
        if await self.embed_requested():
            return await self.send(
                embed=discord.Embed(description=message, color=(await self.embed_colour()))
            )
        else:
            return await self.send(message)

    @property
    def clean_prefix(self) -> str:
        """str: The command prefix, but a mention prefix is displayed nicer."""
        me = self.me
        return self.prefix.replace(me.mention, f"@{me.display_name}")

    @property
    def me(self) -> discord.abc.User:
        """discord.abc.User: The bot member or user object.

        If the context is DM, this will be a `discord.User` object.
        """
        if self.guild is not None:
            return self.guild.me
        else:
            return self.bot.user
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/commands/context.py
context.py
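A hedged sketch of the Context helpers in use; the command itself is hypothetical, and it assumes anbot.core.commands re-exports command and Context (as the annotations in anbot/core/utils/__init__.py above suggest).

from anbot.core import commands


@commands.command()
async def report(ctx: commands.Context):
    # Prompt the user to type `more` between pages, each boxed as Python.
    await ctx.send_interactive(["first page", "second page"], box_lang="py")
    # Send an embed if permitted and requested, otherwise plain text.
    await ctx.maybe_send_embed("Done!")
    await ctx.tick()  # react with a white check mark on success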
import inspect
import weakref
from typing import Awaitable, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING

import discord
from discord.ext import commands

from . import converter as converters
from .errors import ConversionFailure
from .requires import PermState, PrivilegeLevel, Requires
from ..i18n import Translator

if TYPE_CHECKING:
    from .context import Context

__all__ = [
    "Cog",
    "CogCommandMixin",
    "CogGroupMixin",
    "Command",
    "Group",
    "GroupMixin",
    "command",
    "group",
]

_ = Translator("commands.commands", __file__)


class CogCommandMixin:
    """A mixin for cogs and commands."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(self, Command):
            decorated = self.callback
        else:
            decorated = self
        self.requires: Requires = Requires(
            privilege_level=getattr(
                decorated, "__requires_privilege_level__", PrivilegeLevel.NONE
            ),
            user_perms=getattr(decorated, "__requires_user_perms__", {}),
            bot_perms=getattr(decorated, "__requires_bot_perms__", {}),
            checks=getattr(decorated, "__requires_checks__", []),
        )

    def allow_for(self, model_id: Union[int, str], guild_id: int) -> None:
        """Actively allow this command for the given model.

        Parameters
        ----------
        model_id : Union[int, str]
            Must be an `int` if supplying an ID. `str` is only valid
            for "default".
        guild_id : int
            The guild ID to allow this cog or command in. For global
            rules, use ``0``.

        """
        self.requires.set_rule(model_id, PermState.ACTIVE_ALLOW, guild_id=guild_id)

    def deny_to(self, model_id: Union[int, str], guild_id: int) -> None:
        """Actively deny this command to the given model.

        Parameters
        ----------
        model_id : Union[int, str]
            Must be an `int` if supplying an ID. `str` is only valid
            for "default".
        guild_id : int
            The guild ID to deny this cog or command in. For global
            rules, use ``0``.

        """
        cur_rule = self.requires.get_rule(model_id, guild_id=guild_id)
        if cur_rule is PermState.PASSIVE_ALLOW:
            self.requires.set_rule(model_id, PermState.CAUTIOUS_ALLOW, guild_id=guild_id)
        else:
            self.requires.set_rule(model_id, PermState.ACTIVE_DENY, guild_id=guild_id)

    def clear_rule_for(
        self, model_id: Union[int, str], guild_id: int
    ) -> Tuple[PermState, PermState]:
        """Clear the rule which is currently set for this model.

        Parameters
        ----------
        model_id : Union[int, str]
            Must be an `int` if supplying an ID. `str` is only valid
            for "default".
        guild_id : int
            The guild ID. For global rules, use ``0``.

        """
        cur_rule = self.requires.get_rule(model_id, guild_id=guild_id)
        if cur_rule is PermState.ACTIVE_ALLOW:
            new_rule = PermState.NORMAL
        elif cur_rule is PermState.ACTIVE_DENY:
            new_rule = PermState.NORMAL
        elif cur_rule is PermState.CAUTIOUS_ALLOW:
            new_rule = PermState.PASSIVE_ALLOW
        else:
            return cur_rule, cur_rule
        self.requires.set_rule(model_id, new_rule, guild_id=guild_id)
        return cur_rule, new_rule

    def set_default_rule(self, rule: Optional[bool], guild_id: int) -> None:
        """Set the default rule for this cog or command.

        Parameters
        ----------
        rule : Optional[bool]
            The rule to set as default. If ``True`` for allow, ``False``
            for deny and ``None`` for normal.
        guild_id : int
            The guild to set the default rule in. When ``0``, this will
            set the global default rule.

        """
        if rule is None:
            self.clear_rule_for(Requires.DEFAULT, guild_id=guild_id)
        elif rule is True:
            self.allow_for(Requires.DEFAULT, guild_id=guild_id)
        elif rule is False:
            self.deny_to(Requires.DEFAULT, guild_id=guild_id)


class Command(CogCommandMixin, commands.Command):
    """Command class for AN.

    This should not be created directly, and instead via the decorator.

    This class inherits from `discord.ext.commands.Command`. The
    attributes listed below are simply additions to the ones listed
    with that class.

    Attributes
    ----------
    checks : List[`coroutine function`]
        A list of check predicates which cannot be overridden, unlike
        `Requires.checks`.
    translator : Translator
        A translator for this command's help docstring.

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._help_override = kwargs.pop("help_override", None)
        self.translator = kwargs.pop("i18n", None)

    @property
    def help(self):
        """Help string for this command.

        If the :code:`help` kwarg was passed into the decorator, it will
        default to that. If not, it will attempt to translate the docstring
        of the command's callback function.
        """
        if self._help_override is not None:
            return self._help_override
        if self.translator is None:
            translator = lambda s: s
        else:
            translator = self.translator
        command_doc = self.callback.__doc__
        if command_doc is None:
            return ""
        return inspect.cleandoc(translator(command_doc))

    @help.setter
    def help(self, value):
        # We don't want our help property to be overwritten, namely by super()
        pass

    @property
    def parents(self) -> List["Group"]:
        """List[commands.Group] : Returns all parent commands of this command.

        This is sorted by the length of :attr:`.qualified_name` from highest
        to lowest. If the command has no parents, this will be an empty list.
        """
        cmd = self.parent
        entries = []
        while cmd is not None:
            entries.append(cmd)
            cmd = cmd.parent
        return sorted(entries, key=lambda x: len(x.qualified_name), reverse=True)

    # noinspection PyMethodOverriding
    async def can_run(
        self,
        ctx: "Context",
        *,
        check_all_parents: bool = False,
        change_permission_state: bool = False,
    ) -> bool:
        """Check if this command can be run in the given context.

        This function first checks if the command can be run using
        discord.py's method `discord.ext.commands.Command.can_run`, then
        will return the result of `Requires.verify`.

        Keyword Arguments
        -----------------
        check_all_parents : bool
            If ``True``, this will check permissions for all of this
            command's parents and its cog as well as the command itself.
            Defaults to ``False``.
        change_permission_state : bool
            Whether or not the permission state should be changed as a
            result of this call. For most cases this should be ``False``.
            Defaults to ``False``.

        """
        ret = await super().can_run(ctx)
        if ret is False:
            return False

        # This is so contexts invoking other commands can be checked with
        # this command as well
        original_command = ctx.command
        original_state = ctx.permission_state
        ctx.command = self

        if check_all_parents is True:
            # Since we're starting from the beginning, we should reset the state to normal
            ctx.permission_state = PermState.NORMAL
            for parent in reversed(self.parents):
                try:
                    result = await parent.can_run(ctx, change_permission_state=True)
                except commands.CommandError:
                    result = False

                if result is False:
                    return False

        if self.parent is None and self.instance is not None:
            # For top-level commands, we need to check the cog's requires too
            ret = await self.instance.requires.verify(ctx)
            if ret is False:
                return False

        try:
            return await self.requires.verify(ctx)
        finally:
            ctx.command = original_command
            if not change_permission_state:
                ctx.permission_state = original_state

    async def _verify_checks(self, ctx):
        if not self.enabled:
            raise commands.DisabledCommand(f"{self.name} command is disabled")

        if not (await self.can_run(ctx, change_permission_state=True)):
            raise commands.CheckFailure(
                f"The check functions for command {self.qualified_name} failed."
            )

    async def do_conversion(
        self, ctx: "Context", converter, argument: str, param: inspect.Parameter
    ):
        """Convert an argument according to its type annotation.

        Raises
        ------
        ConversionFailure
            If doing the conversion failed.

        Returns
        -------
        Any
            The converted argument.

        """
        # Let's not worry about all of this junk if it's just a str converter
        if converter is str:
            return argument

        try:
            return await super().do_conversion(ctx, converter, argument, param)
        except commands.BadArgument as exc:
            raise ConversionFailure(converter, argument, param, *exc.args) from exc
        except ValueError as exc:
            # Some common converters need special treatment...
            if converter in (int, float):
                message = _('"{argument}" is not a number.').format(argument=argument)
                raise ConversionFailure(converter, argument, param, message) from exc

            # We should expose anything which might be a bug in the converter
            raise exc

    async def can_see(self, ctx: "Context"):
        """Check if this command is visible in the given context.

        In short, this will verify whether the user can run the command,
        and also whether the command is hidden or not.

        Parameters
        ----------
        ctx : `Context`
            The invocation context to check with.

        Returns
        -------
        bool
            ``True`` if this command is visible in the given context.

        """
        for cmd in (self, *self.parents):
            if cmd.hidden:
                return False
            try:
                can_run = await self.can_run(
                    ctx, check_all_parents=True, change_permission_state=False
                )
            except commands.CheckFailure:
                return False
            else:
                if can_run is False:
                    return False

        return True

    def disable_in(self, guild: discord.Guild) -> bool:
        """Disable this command in the given guild.

        Parameters
        ----------
        guild : discord.Guild
            The guild to disable the command in.

        Returns
        -------
        bool
            ``True`` if the command wasn't already disabled.

        """
        disabler = get_command_disabler(guild)
        if disabler in self.checks:
            return False
        else:
            self.checks.append(disabler)
            return True

    def enable_in(self, guild: discord.Guild) -> bool:
        """Enable this command in the given guild.

        Parameters
        ----------
        guild : discord.Guild
            The guild to enable the command in.

        Returns
        -------
        bool
            ``True`` if the command wasn't already enabled.

        """
        disabler = get_command_disabler(guild)
        try:
            self.checks.remove(disabler)
        except ValueError:
            return False
        else:
            return True

    def allow_for(self, model_id: Union[int, str], guild_id: int) -> None:
        super().allow_for(model_id, guild_id=guild_id)
        parents = self.parents
        if self.instance is not None:
            parents.append(self.instance)
        for parent in parents:
            cur_rule = parent.requires.get_rule(model_id, guild_id=guild_id)
            if cur_rule is PermState.NORMAL:
                parent.requires.set_rule(model_id, PermState.PASSIVE_ALLOW, guild_id=guild_id)
            elif cur_rule is PermState.ACTIVE_DENY:
                parent.requires.set_rule(model_id, PermState.CAUTIOUS_ALLOW, guild_id=guild_id)

    def clear_rule_for(
        self, model_id: Union[int, str], guild_id: int
    ) -> Tuple[PermState, PermState]:
        old_rule, new_rule = super().clear_rule_for(model_id, guild_id=guild_id)
        if old_rule is PermState.ACTIVE_ALLOW:
            parents = self.parents
            if self.instance is not None:
                parents.append(self.instance)
            for parent in parents:
                should_continue = parent.reevaluate_rules_for(model_id, guild_id=guild_id)[1]
                if not should_continue:
                    break
        return old_rule, new_rule


class GroupMixin(discord.ext.commands.GroupMixin):
    """Mixin for `Group` and `AN` classes.

    This class inherits from :class:`discord.ext.commands.GroupMixin`.
    """

    def command(self, *args, **kwargs):
        """A shortcut decorator that invokes :func:`.command` and adds it to
        the internal command list.
        """

        def decorator(func):
            result = command(*args, **kwargs)(func)
            self.add_command(result)
            return result

        return decorator

    def group(self, *args, **kwargs):
        """A shortcut decorator that invokes :func:`.group` and adds it to
        the internal command list.
        """

        def decorator(func):
            result = group(*args, **kwargs)(func)
            self.add_command(result)
            return result

        return decorator


class CogGroupMixin:
    requires: Requires
    all_commands: Dict[str, Command]

    def reevaluate_rules_for(
        self, model_id: Union[str, int], guild_id: Optional[int]
    ) -> Tuple[PermState, bool]:
        """Re-evaluate a rule by checking subcommand rules.

        This is called when a subcommand is no longer actively allowed.

        Parameters
        ----------
        model_id : Union[int, str]
            Must be an `int` if supplying an ID. `str` is only valid
            for "default".
        guild_id : int
            The guild ID. For global rules, use ``0``.

        Returns
        -------
        Tuple[PermState, bool]
            A 2-tuple containing the new rule and a bool indicating
            whether or not the rule was changed as a result of this
            call.

        """
        cur_rule = self.requires.get_rule(model_id, guild_id=guild_id)
        if cur_rule in (PermState.NORMAL, PermState.ACTIVE_ALLOW, PermState.ACTIVE_DENY):
            # These three states are unaffected by subcommand rules
            return cur_rule, False
        else:
            # Remaining states can be changed if there exists no actively-allowed
            # subcommand (this includes subcommands multiple levels below)
            if any(
                cmd.requires.get_rule(model_id, guild_id=guild_id) in PermState.ALLOWED_STATES
                for cmd in self.all_commands.values()
            ):
                return cur_rule, False
            elif cur_rule is PermState.PASSIVE_ALLOW:
                self.requires.set_rule(model_id, PermState.NORMAL, guild_id=guild_id)
                return PermState.NORMAL, True
            elif cur_rule is PermState.CAUTIOUS_ALLOW:
                self.requires.set_rule(model_id, PermState.ACTIVE_DENY, guild_id=guild_id)
                return PermState.ACTIVE_DENY, True


class Group(GroupMixin, Command, CogGroupMixin, commands.Group):
    """Group command class for AN.

    This class inherits from `Command`, with :class:`GroupMixin` and
    `discord.ext.commands.Group` mixed in.
    """

    def __init__(self, *args, **kwargs):
        self.autohelp = kwargs.pop("autohelp", True)
        super().__init__(*args, **kwargs)

    async def invoke(self, ctx: "Context"):
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        view.index = previous
        view.previous = previous

        if ctx.invoked_subcommand is None or self == ctx.invoked_subcommand:
            if self.autohelp and not self.invoke_without_command:
                await self._verify_checks(ctx)
                await ctx.send_help()
        elif self.invoke_without_command:
            # So invoke_without_command when a subcommand of this group is
            # invoked will skip the invocation of *this* command. However,
            # because of how our permissions system works, we don't want it
            # to skip the checks as well.
            await self._verify_checks(ctx)

        await super().invoke(ctx)


class Cog(CogCommandMixin, CogGroupMixin):
    """Base class for a cog."""

    @property
    def all_commands(self) -> Dict[str, Command]:
        return {cmd.name: cmd for cmd in self.__dict__.values() if isinstance(cmd, Command)}


def command(name=None, cls=Command, **attrs):
    """A decorator which transforms an async function into a `Command`.

    Same interface as `discord.ext.commands.command`.
    """
    attrs["help_override"] = attrs.pop("help", None)
    return commands.command(name, cls, **attrs)


def group(name=None, **attrs):
    """A decorator which transforms an async function into a `Group`.

    Same interface as `discord.ext.commands.group`.
    """
    return command(name, cls=Group, **attrs)


__command_disablers = weakref.WeakValueDictionary()


def get_command_disabler(guild: discord.Guild) -> Callable[["Context"], Awaitable[bool]]:
    """Get the command disabler for a guild.

    A command disabler is a simple check predicate which returns
    ``False`` if the context is within the given guild.
    """
    try:
        return __command_disablers[guild]
    except KeyError:

        async def disabler(ctx: "Context") -> bool:
            if ctx.guild == guild:
                raise commands.DisabledCommand()
            return True

        __command_disablers[guild] = disabler

        return disabler
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/commands/commands.py
commands.py
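An illustrative sketch of the group/command decorators and rule helpers defined above; the widget commands, role_id, and guild names are hypothetical, and the import again assumes anbot.core.commands re-exports these decorators.

from anbot.core import commands


@commands.group(autohelp=True)
async def widget(ctx: commands.Context):
    """Manage widgets."""


@widget.command(name="add")
async def widget_add(ctx: commands.Context, name: str):
    """Add a widget."""
    await ctx.send(f"Added {name}.")


# Rules and per-guild toggles can then be applied at runtime, e.g.:
#     widget_add.allow_for(role_id, guild_id=guild.id)  # also passively allows its parents
#     widget_add.disable_in(guild)  # attaches the guild's command-disabler check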
import asyncio
import enum
from typing import (
    Union,
    Optional,
    List,
    Callable,
    Awaitable,
    Dict,
    Any,
    TYPE_CHECKING,
    TypeVar,
    Tuple,
    ClassVar,
)

import discord

from .converter import GuildConverter
from .errors import BotMissingPermissions

if TYPE_CHECKING:
    from .commands import Command
    from .context import Context

    _CommandOrCoro = TypeVar("_CommandOrCoro", Callable[..., Awaitable[Any]], Command)

__all__ = [
    "CheckPredicate",
    "DM_PERMS",
    "GlobalPermissionModel",
    "GuildPermissionModel",
    "PermissionModel",
    "PrivilegeLevel",
    "PermState",
    "Requires",
    "permissions_check",
    "bot_has_permissions",
    "has_permissions",
    "is_owner",
    "guildowner",
    "guildowner_or_permissions",
    "admin",
    "admin_or_permissions",
    "mod",
    "mod_or_permissions",
]

_T = TypeVar("_T")
GlobalPermissionModel = Union[
    discord.User,
    discord.VoiceChannel,
    discord.TextChannel,
    discord.CategoryChannel,
    discord.Role,
    GuildConverter,  # Unfortunately this will have to do for now
]
GuildPermissionModel = Union[
    discord.Member,
    discord.VoiceChannel,
    discord.TextChannel,
    discord.CategoryChannel,
    discord.Role,
    GuildConverter,
]
PermissionModel = Union[GlobalPermissionModel, GuildPermissionModel]
CheckPredicate = Callable[["Context"], Union[Optional[bool], Awaitable[Optional[bool]]]]

# Here we are trying to model DM permissions as closely as possible. The only
# discrepancy I've found is that users can pin messages, but they cannot delete them.
# This means manage_messages is only half True, so it's left as False.
# This is also the same as the permissions returned when `permissions_for` is used in DM.
DM_PERMS = discord.Permissions.none()
DM_PERMS.update(
    add_reactions=True,
    attach_files=True,
    embed_links=True,
    external_emojis=True,
    mention_everyone=True,
    read_message_history=True,
    read_messages=True,
    send_messages=True,
)


class PrivilegeLevel(enum.IntEnum):
    """Enumeration for special privileges."""

    # Maintainer Note: do NOT re-order these.
    # Each privilege level also implies access to the ones before it.
    # Inserting new privilege levels at a later point is fine if that is considered.

    NONE = enum.auto()
    """No special privilege level."""

    MOD = enum.auto()
    """User has the mod role."""

    ADMIN = enum.auto()
    """User has the admin role."""

    GUILD_OWNER = enum.auto()
    """User is the guild owner."""

    BOT_OWNER = enum.auto()
    """User is a bot owner."""

    @classmethod
    async def from_ctx(cls, ctx: "Context") -> "PrivilegeLevel":
        """Get a command author's PrivilegeLevel based on context."""
        if await ctx.bot.is_owner(ctx.author):
            return cls.BOT_OWNER
        elif ctx.guild is None:
            return cls.NONE
        elif ctx.author == ctx.guild.owner:
            return cls.GUILD_OWNER

        # The following is simply an optimised way to check if the user has the
        # admin or mod role.
        guild_settings = ctx.bot.db.guild(ctx.guild)
        admin_role_id = await guild_settings.admin_role()
        mod_role_id = await guild_settings.mod_role()
        is_mod = False
        for role in ctx.author.roles:
            if role.id == admin_role_id:
                return cls.ADMIN
            elif role.id == mod_role_id:
                is_mod = True
        if is_mod:
            return cls.MOD

        return cls.NONE

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}.{self.name}>"


class PermState(enum.Enum):
    """Enumeration for permission states used by rules."""

    ACTIVE_ALLOW = enum.auto()
    """This command has been actively allowed, default user checks
    should be ignored.
    """

    NORMAL = enum.auto()
    """No overrides have been set for this command, make determination
    from default user checks.
    """

    PASSIVE_ALLOW = enum.auto()
    """There exists a subcommand in the `ACTIVE_ALLOW` state, continue
    down the subcommand tree until we either find it or realise we're
    on the wrong branch.
    """

    CAUTIOUS_ALLOW = enum.auto()
    """This command has been actively denied, but there exists a
    subcommand in the `ACTIVE_ALLOW` state. This occurs when
    `PASSIVE_ALLOW` and `ACTIVE_DENY` are combined.
    """

    ACTIVE_DENY = enum.auto()
    """This command has been actively denied, terminate the command
    chain.
    """

    # The below are valid states, but should not be transitioned to.
    # They should be set if they apply.

    ALLOWED_BY_HOOK = enum.auto()
    """This command has been actively allowed by a permission hook.
    Check validation doesn't need this, but it is useful to developers."""

    DENIED_BY_HOOK = enum.auto()
    """This command has been actively denied by a permission hook.
    Check validation doesn't need this, but it is useful to developers."""

    def transition_to(
        self, next_state: "PermState"
    ) -> Tuple[Optional[bool], Union["PermState", Dict[bool, "PermState"]]]:
        return self.TRANSITIONS[self][next_state]

    @classmethod
    def from_bool(cls, value: Optional[bool]) -> "PermState":
        """Get a PermState from a bool or ``NoneType``."""
        if value is True:
            return cls.ACTIVE_ALLOW
        elif value is False:
            return cls.ACTIVE_DENY
        else:
            return cls.NORMAL

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}.{self.name}>"


# Here we're defining how we transition between states.
# The dict is in the form:
#   previous state -> this state -> Tuple[override, next state]
# "override" is a bool describing whether or not the command should be
# invoked. It can be None, in which case the default permission checks
# will be used instead.
# There is also one case where the "next state" is dependent on the
# result of the default permission checks - the transition from NORMAL
# to PASSIVE_ALLOW. In this case "next state" is a dict mapping the
# permission check results to the actual next state.
PermState.TRANSITIONS = {
    PermState.ACTIVE_ALLOW: {
        PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),
        PermState.NORMAL: (True, PermState.ACTIVE_ALLOW),
        PermState.PASSIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),
        PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW),
        PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY),
    },
    PermState.NORMAL: {
        PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),
        PermState.NORMAL: (None, PermState.NORMAL),
        PermState.PASSIVE_ALLOW: (True, {True: PermState.NORMAL, False: PermState.PASSIVE_ALLOW}),
        PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW),
        PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY),
    },
    PermState.PASSIVE_ALLOW: {
        PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),
        PermState.NORMAL: (False, PermState.NORMAL),
        PermState.PASSIVE_ALLOW: (True, PermState.PASSIVE_ALLOW),
        PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW),
        PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY),
    },
    PermState.CAUTIOUS_ALLOW: {
        PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),
        PermState.NORMAL: (False, PermState.ACTIVE_DENY),
        PermState.PASSIVE_ALLOW: (True, PermState.CAUTIOUS_ALLOW),
        PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW),
        PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY),
    },
    PermState.ACTIVE_DENY: {
        # We can only start from ACTIVE_DENY if it is set on a cog.
        PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW),  # Should never happen
        PermState.NORMAL: (False, PermState.ACTIVE_DENY),
        PermState.PASSIVE_ALLOW: (False, PermState.ACTIVE_DENY),  # Should never happen
        PermState.CAUTIOUS_ALLOW: (False, PermState.ACTIVE_DENY),  # Should never happen
        PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY),
    },
}
PermState.ALLOWED_STATES = (
    PermState.ACTIVE_ALLOW,
    PermState.PASSIVE_ALLOW,
    PermState.CAUTIOUS_ALLOW,
)


class Requires:
    """This class describes the requirements for executing a specific command.

    The permissions described include both bot permissions and user
    permissions.

    Attributes
    ----------
    checks : List[Callable[[Context], Union[bool, Awaitable[bool]]]]
        A list of checks which can be overridden by rules. Use
        `Command.checks` if you would like them to never be overridden.
    privilege_level : PrivilegeLevel
        The required privilege level (bot owner, admin, etc.) for users
        to execute the command. Can be ``None``, in which case the
        `user_perms` will be used exclusively, otherwise, for levels
        other than bot owner, the user can still run the command if
        they have the required `user_perms`.
    user_perms : Optional[discord.Permissions]
        The required permissions for users to execute the command. Can
        be ``None``, in which case the `privilege_level` will be used
        exclusively, otherwise, it will pass whether the user has the
        required `privilege_level` _or_ `user_perms`.
    bot_perms : discord.Permissions
        The required bot permissions for a command to be executed. This
        is not overrideable by other conditions.

    """

    DEFAULT: ClassVar[str] = "default"
    """The key for the default rule in a rules dict."""

    GLOBAL: ClassVar[int] = 0
    """Should be used in place of a guild ID when setting/getting
    global rules.
    """

    def __init__(
        self,
        privilege_level: Optional[PrivilegeLevel],
        user_perms: Union[Dict[str, bool], discord.Permissions, None],
        bot_perms: Union[Dict[str, bool], discord.Permissions],
        checks: List[CheckPredicate],
    ):
        self.checks: List[CheckPredicate] = checks
        self.privilege_level: Optional[PrivilegeLevel] = privilege_level

        if isinstance(user_perms, dict):
            self.user_perms: Optional[discord.Permissions] = discord.Permissions.none()
            _validate_perms_dict(user_perms)
            self.user_perms.update(**user_perms)
        else:
            self.user_perms = user_perms

        if isinstance(bot_perms, dict):
            self.bot_perms: discord.Permissions = discord.Permissions.none()
            _validate_perms_dict(bot_perms)
            self.bot_perms.update(**bot_perms)
        else:
            self.bot_perms = bot_perms
        self._global_rules: _RulesDict = _RulesDict()
        self._guild_rules: _IntKeyDict[_RulesDict] = _IntKeyDict[_RulesDict]()

    @staticmethod
    def get_decorator(
        privilege_level: Optional[PrivilegeLevel], user_perms: Dict[str, bool]
    ) -> Callable[["_CommandOrCoro"], "_CommandOrCoro"]:
        if not user_perms:
            user_perms = None

        def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro":
            if asyncio.iscoroutinefunction(func):
                func.__requires_privilege_level__ = privilege_level
                func.__requires_user_perms__ = user_perms
            else:
                func.requires.privilege_level = privilege_level
                if user_perms is None:
                    func.requires.user_perms = None
                else:
                    _validate_perms_dict(user_perms)
                    func.requires.user_perms.update(**user_perms)
            return func

        return decorator

    def get_rule(self, model: Union[int, str, PermissionModel], guild_id: int) -> PermState:
        """Get the rule for a particular model.

        Parameters
        ----------
        model : Union[int, str, PermissionModel]
            The model to get the rule for. `str` is only valid for
            `Requires.DEFAULT`.
        guild_id : int
            The ID of the guild for the rule's scope. Set to
            `Requires.GLOBAL` for a global rule.

        Returns
        -------
        PermState
            The state for this rule. See the `PermState` class for an
            explanation.

        """
        if not isinstance(model, (str, int)):
            model = model.id
        if guild_id:
            rules = self._guild_rules.get(guild_id, _RulesDict())
        else:
            rules = self._global_rules
        return rules.get(model, PermState.NORMAL)

    def set_rule(self, model_id: Union[str, int], rule: PermState, guild_id: int) -> None:
        """Set the rule for a particular model.

        Parameters
        ----------
        model_id : Union[str, int]
            The model to add a rule for. `str` is only valid for
            `Requires.DEFAULT`.
        rule : PermState
            Which state this rule should be set as. See the `PermState`
            class for an explanation.
        guild_id : int
            The ID of the guild for the rule's scope. Set to
            `Requires.GLOBAL` for a global rule.

        """
        if guild_id:
            rules = self._guild_rules.setdefault(guild_id, _RulesDict())
        else:
            rules = self._global_rules
        if rule is PermState.NORMAL:
            rules.pop(model_id, None)
        else:
            rules[model_id] = rule

    def clear_all_rules(self, guild_id: int) -> None:
        """Clear all rules of a particular scope.

        This will preserve the default rule, if set.

        Parameters
        ----------
        guild_id : int
            The guild ID to clear rules for. If set to
            `Requires.GLOBAL`, this will clear all global rules and
            leave all guild rules untouched.

        """
        if guild_id:
            rules = self._guild_rules.setdefault(guild_id, _RulesDict())
        else:
            rules = self._global_rules
        default = rules.get(self.DEFAULT, None)
        rules.clear()
        if default is not None:
            rules[self.DEFAULT] = default

    async def verify(self, ctx: "Context") -> bool:
        """Check if the given context passes the requirements.

        This will check the bot permissions, overrides, user permissions
        and privilege level.

        Parameters
        ----------
        ctx : "Context"
            The invocation context to check with.

        Returns
        -------
        bool
            ``True`` if the context passes the requirements.

        Raises
        ------
        BotMissingPermissions
            If the bot is missing required permissions to run the
            command.
        CommandError
            Propagated from any permissions checks.

        """
        await self._verify_bot(ctx)

        # Owner should never be locked out of commands for user permissions.
        if await ctx.bot.is_owner(ctx.author):
            return True
        # Owner-only commands are non-overrideable, and we already checked for owner.
        if self.privilege_level is PrivilegeLevel.BOT_OWNER:
            return False

        hook_result = await ctx.bot.verify_permissions_hooks(ctx)
        if hook_result is not None:
            return hook_result

        return await self._transition_state(ctx)

    async def _verify_bot(self, ctx: "Context") -> None:
        if ctx.guild is None:
            bot_user = ctx.bot.user
        else:
            bot_user = ctx.guild.me
        bot_perms = ctx.channel.permissions_for(bot_user)
        if not (bot_perms.administrator or bot_perms >= self.bot_perms):
            raise BotMissingPermissions(missing=self._missing_perms(self.bot_perms, bot_perms))

    async def _transition_state(self, ctx: "Context") -> bool:
        prev_state = ctx.permission_state
        cur_state = self._get_rule_from_ctx(ctx)
        should_invoke, next_state = prev_state.transition_to(cur_state)
        if should_invoke is None:
            # NORMAL invocation, we simply follow standard procedure
            should_invoke = await self._verify_user(ctx)
        elif isinstance(next_state, dict):
            # NORMAL to PASSIVE_ALLOW; should we proceed as normal or transition?
            # We must check what would happen normally, if no explicit rules were set.
            default_rule = PermState.NORMAL
            if ctx.guild is not None:
                default_rule = self.get_rule(self.DEFAULT, guild_id=ctx.guild.id)
            if default_rule is PermState.NORMAL:
                default_rule = self.get_rule(self.DEFAULT, self.GLOBAL)

            if default_rule == PermState.ACTIVE_DENY:
                would_invoke = False
            elif default_rule == PermState.ACTIVE_ALLOW:
                would_invoke = True
            else:
                would_invoke = await self._verify_user(ctx)
            next_state = next_state[would_invoke]

        ctx.permission_state = next_state
        return should_invoke

    async def _verify_user(self, ctx: "Context") -> bool:
        checks_pass = await self._verify_checks(ctx)
        if checks_pass is False:
            return False

        if self.user_perms is not None:
            user_perms = ctx.channel.permissions_for(ctx.author)
            if user_perms.administrator or user_perms >= self.user_perms:
                return True

        if self.privilege_level is not None:
            privilege_level = await PrivilegeLevel.from_ctx(ctx)
            if privilege_level >= self.privilege_level:
                return True

        return False

    def _get_rule_from_ctx(self, ctx: "Context") -> PermState:
        author = ctx.author
        guild = ctx.guild
        if ctx.guild is None:
            # We only check the user for DM channels
            rule = self._global_rules.get(author.id)
            if rule is not None:
                return rule
            return self.get_rule(self.DEFAULT, self.GLOBAL)

        rules_chain = [self._global_rules]
        guild_rules = self._guild_rules.get(ctx.guild.id)
        if guild_rules:
            rules_chain.append(guild_rules)

        channels = []
        if author.voice is not None:
            channels.append(author.voice.channel)
        channels.append(ctx.channel)
        category = ctx.channel.category
        if category is not None:
            channels.append(category)

        model_chain = [author, *channels, *author.roles, guild]

        for rules in rules_chain:
            for model in model_chain:
                rule = rules.get(model.id)
                if rule is not None:
                    return rule
            del model_chain[-1]  # We don't check for the guild in guild rules

        default_rule = self.get_rule(self.DEFAULT, guild.id)
        if default_rule is PermState.NORMAL:
            default_rule = self.get_rule(self.DEFAULT, self.GLOBAL)
        return default_rule

    async def _verify_checks(self, ctx: "Context") -> bool:
        if not self.checks:
            return True
        return await discord.utils.async_all(check(ctx) for check in self.checks)

    @staticmethod
    def _get_perms_for(ctx: "Context", user: discord.abc.User) -> discord.Permissions:
        if ctx.guild is None:
            return DM_PERMS
        else:
            return ctx.channel.permissions_for(user)

    @classmethod
    def _get_bot_perms(cls, ctx: "Context") -> discord.Permissions:
        return cls._get_perms_for(ctx, ctx.guild.me if ctx.guild else ctx.bot.user)

    @staticmethod
    def _missing_perms(
        required: discord.Permissions, actual: discord.Permissions
    ) -> discord.Permissions:
        # Explained in set theory terms:
        #   Assuming R is the set of required permissions, and A is
        #   the set of the user's permissions, the set of missing
        #   permissions will be equal to R \ A, i.e. the relative
        #   complement/difference of A with respect to R.
        relative_complement = required.value & ~actual.value
        return discord.Permissions(relative_complement)

    @staticmethod
    def _member_as_user(member: discord.abc.User) -> discord.User:
        if isinstance(member, discord.Member):
            # noinspection PyProtectedMember
            return member._user
        return member

    def __repr__(self) -> str:
        return (
            f"<Requires privilege_level={self.privilege_level!r} "
            f"user_perms={self.user_perms!r} bot_perms={self.bot_perms!r}>"
        )


# check decorators


def permissions_check(predicate: CheckPredicate):
    """An overwriteable version of `discord.ext.commands.check`.

    This has the same behaviour as `discord.ext.commands.check`,
    however this check can be ignored if the command is allowed
    through a permissions cog.
    """

    def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro":
        if hasattr(func, "requires"):
            func.requires.checks.append(predicate)
        else:
            if not hasattr(func, "__requires_checks__"):
                func.__requires_checks__ = []
            # noinspection PyUnresolvedReferences
            func.__requires_checks__.append(predicate)
        return func

    return decorator


def bot_has_permissions(**perms: bool):
    """Complain if the bot is missing permissions.

    If the user tries to run the command, but the bot is missing the
    permissions, it will send a message describing which permissions
    are missing.

    This check cannot be overridden by rules.
    """

    def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro":
        if asyncio.iscoroutinefunction(func):
            func.__requires_bot_perms__ = perms
        else:
            _validate_perms_dict(perms)
            func.requires.bot_perms.update(**perms)
        return func

    return decorator


def has_permissions(**perms: bool):
    """Restrict the command to users with these permissions.

    This check can be overridden by rules.
    """
    if perms is None:
        raise TypeError("Must provide at least one keyword argument to has_permissions")
    return Requires.get_decorator(None, perms)


def is_owner():
    """Restrict the command to bot owners.

    This check cannot be overridden by rules.
    """
    return Requires.get_decorator(PrivilegeLevel.BOT_OWNER, {})


def guildowner_or_permissions(**perms: bool):
    """Restrict the command to the guild owner or users with these permissions.

    This check can be overridden by rules.
    """
    return Requires.get_decorator(PrivilegeLevel.GUILD_OWNER, perms)


def guildowner():
    """Restrict the command to the guild owner.

    This check can be overridden by rules.
    """
    return guildowner_or_permissions()


def admin_or_permissions(**perms: bool):
    """Restrict the command to users with the admin role or these permissions.

    This check can be overridden by rules.
    """
    return Requires.get_decorator(PrivilegeLevel.ADMIN, perms)


def admin():
    """Restrict the command to users with the admin role.

    This check can be overridden by rules.
    """
    return admin_or_permissions()


def mod_or_permissions(**perms: bool):
    """Restrict the command to users with the mod role or these permissions.

    This check can be overridden by rules.
    """
    return Requires.get_decorator(PrivilegeLevel.MOD, perms)


def mod():
    """Restrict the command to users with the mod role.

    This check can be overridden by rules.
    """
    return mod_or_permissions()


class _IntKeyDict(Dict[int, _T]):
    """Dict subclass which throws TypeError when a non-int key is used."""

    def __getitem__(self, key: Any) -> _T:
        if not isinstance(key, int):
            raise TypeError("Keys must be of type `int`")
        return super().__getitem__(key)

    def __setitem__(self, key: Any, value: _T) -> None:
        if not isinstance(key, int):
            raise TypeError("Keys must be of type `int`")
        return super().__setitem__(key, value)


class _RulesDict(Dict[Union[int, str], PermState]):
    """Dict subclass which throws TypeError when an invalid key is used."""

    def __getitem__(self, key: Any) -> PermState:
        if key != Requires.DEFAULT and not isinstance(key, int):
            raise TypeError(f'Expected "{Requires.DEFAULT}" or int key, not "{key}"')
        return super().__getitem__(key)

    def __setitem__(self, key: Any, value: PermState) -> None:
        if key != Requires.DEFAULT and not isinstance(key, int):
            raise TypeError(f'Expected "{Requires.DEFAULT}" or int key, not "{key}"')
        return super().__setitem__(key, value)


def _validate_perms_dict(perms: Dict[str, bool]) -> None:
    for perm, value in perms.items():
        try:
            attr = getattr(discord.Permissions, perm)
        except AttributeError:
            attr = None

        if attr is None or not isinstance(attr, property):
            # We reject invalid permissions
            raise TypeError(f"Unknown permission name '{perm}'")

        if value is not True:
            # We reject any permission not specified as 'True', since this is
            # the only value which makes practical sense.
            raise TypeError(f"Permission {perm} may only be specified as 'True', not {value}")
AN-DiscordBot
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/commands/requires.py
requires.py
<h1 align="center"> <br> <a href="https://github.com/aditya-nugraha-bot/AN-DiscordBot"><img src="https://imgur.com/pY1WUFX.png" alt="AN - Discord Bot"></a> <br> AN Discord Bot <br> </h1> <h4 align="center">Music, Moderation, Trivia, Stream Alerts and Fully Modular.</h4> <p align="center"> <a href="https://discord.gg/WEMJKXY"> <img src="https://discordapp.com/api/guilds/133049272517001216/widget.png?style=shield" alt="Discord Server"> </a> <a href="https://www.patreon.com/Red_Devs"> <img src="https://img.shields.io/badge/Support-Red!-yellow.svg" alt="Support Red on Patreon!"> </a> <a href="https://www.python.org/downloads/"> <img src="https://img.shields.io/badge/Made%20With-Python%203.7-blue.svg?style=for-the-badge" alt="Made with Python 3.7"> </a> <a href="https://crowdin.com/project/red-discordbot"> <img src="https://d322cqt584bo4o.cloudfront.net/red-discordbot/localized.svg" alt="Localized with Crowdin"> </a> </p> <p align="center"> <a href="https://github.com/ambv/black"> <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code Style: Black"> </a> <a href="http://makeapullrequest.com"> <img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg"> </a> </p> <p align="center"> <a href="#overview">Overview</a> • <a href="#installation">Installation</a> • <a href="http://red-discordbot.readthedocs.io/en/v3-develop/index.html">Documentation</a> • <a href="#plugins">Plugins</a> • <a href="#join-the-community">Community</a> • <a href="#license">License</a> </p> # Overview AN is a fully modular bot – meaning all features and commands can be enabled/disabled to your liking, making it completely customizable. This is also a *self-hosted bot* – meaning you will need to host and maintain your own instance. You can turn AN into an admin bot, music bot, trivia bot, new best friend or all of these together! [Installation](#installation) is easy, and you do **NOT** need to know anything about coding! Aside from installation and updating, every part of the bot can be controlled from within Discord. **The default set of modules includes and is not limited to:** - Customisable command permissions **Additionally, other [plugins](#plugins) (cogs) can be easily found and added from our growing community of cog repositories.** # Installation **The following platforms are officially supported:** - Later # Plugins AN is fully modular, allowing you to load and unload plugins of your choice, and install 3rd party plugins directly from Discord! A few examples are: - Cleverbot integration (talk to Red and she talks back) - Ban sync - Welcome messages - Casino - Reaction roles - Slow Mode - Anilist - And much, much more! Feel free to take a [peek](https://cogboard.red/t/approved-repositories/210) at a list of available 3rd party cogs! # Join the community! **AN** is in continuous development, and it’s supported by an active community which produces new content (cogs/plugins) for everyone to enjoy. New features are constantly added. Join us on our [Official Discord Server](https://discord.gg/WEMJKXY)! # License Released under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html) license. Red is named after the main character of "Transistor", a video game by [Super Giant Games](https://www.supergiantgames.com/games/transistor/). Artwork created by [Sinlaire](https://sinlaire.deviantart.com/) on Deviant Art for the Red Discord Bot Project.
ANBOT
/ANBOT-3.1.0.tar.gz/ANBOT-3.1.0/README.md
README.md
# ANBUtils

ANBUtils is a Python package that provides various utility functions for common tasks in data analysis and database operations. It includes functions for working with MongoDB, sending messages via DingTalk, and handling date and time operations.

## Stable Version
- Version: 1.6.0
- Release Date: June 13, 2023

## Installation
You can install ANBUtils using pip:

```
pip install ANBUtils==1.6.0
```

## Functions

### MongoDB Operations
- `DBWorker`: A class that provides convenient methods for working with MongoDB databases and collections. It allows you to perform operations such as querying data, inserting data, updating data, and more.

### Environment Checker
- `environment_checker`: A function that checks the environment variables required by the ANBUtils package. It verifies the presence of the following environment variables:
  - `MONGODB_URL`: URL of the MongoDB database.
  - `MONGODB_PUB_URI`: URL of the publicly accessible MongoDB database.
  - `DINGTALK_WEBHOOK`: Webhook address of the DingTalk bot.
  - `QYWECHAT_WEBHOOK`: Webhook address of the QiYe Wechat Work bot.

### Message Senders
- `dingtalk_text_message`: A function that sends text messages to DingTalk. It requires the `DINGTALK_WEBHOOK` environment variable to be set. You can use this function to send notifications or alerts to a DingTalk group or chat.
- `qywechat_text_message`: A function that sends text messages to QiYe Wechat Work. It requires the `QYWECHAT_WEBHOOK` environment variable to be set.

### Date and Time Operations
- `future`: A function that returns the date in the future by adding a specified number of days to the current date.
- `utc2tz`: A function that converts a UTC datetime to a specified time zone.
- `today`: A function that returns the current date.
- `tomorrow`: A function that returns tomorrow's date.
- `yesterday`: A function that returns yesterday's date.
- `now`: A function that returns the current date and time.
- `future_base`: A function that returns a date in the future based on a given date.
- `ts2str`: A function that converts a timestamp to a formatted date string.
- `date_format`: A function that formats a date string according to a specified format.

## Usage

### MongoDB Operations
To use the MongoDB operations provided by ANBUtils, you need to instantiate a `DBWorker` object with the name of the database you want to work with. Here's an example:

```python
from ANBUtils import DBWorker

# Instantiate a DBWorker object for the "mydb_key" database
db = DBWorker("mydb_key")

# Query data from a collection
data = db.to_df("mycollection")

# Query data from a list of collections
data = db.to_df_many(["mycollection1", "mycollection2", "mycollection3"])

# Insert data into a collection
df = ...
db.insert_df(df, "mycollection")

# Update data in a collection
df = ...
db.update_df(df, "mycollection", key="id")
```

### Message Sender
To send text messages to DingTalk or QiYe Wechat using the `dingtalk_text_message` or `qywechat_text_message` function, you need to set the `DINGTALK_WEBHOOK` (or, for QiYe Wechat, the `QYWECHAT_WEBHOOK`) environment variable to the webhook URL provided by the corresponding service. Here's an example:

```python
from ANBUtils import dingtalk_text_message, qywechat_text_message

# Send a text message to DingTalk
dingtalk_text_message("Hello from ANBUtils!")

# Send a text message to QiYe Wechat
qywechat_text_message("Hello from ANBUtils!")
```

### Date and Time Operations
ANBUtils provides various functions for working with dates and times.
Here are a few examples: ```python from ANBUtils import future, utc2tz, today, tomorrow, yesterday, now, future_base, ts2str, date_format # Get the date in the future future_date = future(5) # Convert UTC datetime to a specific time zone utc_datetime = ... tz_datetime = utc2tz(utc_datetime, tz="E8") # Get the current date current_date = today() # Get tomorrow's date next_date = tomorrow() # Get yesterday's date prev_date = yesterday() # Get the current date and time current_datetime = now() # Get a date in the future based on a given date base_date = "2023-01-01" future_date = future_base(base_date, 10) # Convert a timestamp to a formatted date string timestamp = ... date_string = ts2str(timestamp) # Format a date string according to a specified format date = "2023-06-06" formatted_date = date_format(date, date_format="YYYY_MM_DD") ``` Please make sure to refer to the ANBUtils documentation for detailed information on each function and its parameters. ## Contributions and Support ANBUtils is an open-source project, and contributions are welcome. If you encounter any issues or have suggestions for improvements, please feel free to open an issue on the [GitHub repository](https://github.com/example-user/ANBUtils). For support or general questions, you can reach out to the project maintainers or the community through the GitHub repository. ## License ANBUtils is released under the [MIT License](https://opensource.org/licenses/MIT). Please refer to the LICENSE file for more details.
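
## Appendix: Environment Setup Sketch

Before calling the helpers above, it can be worth verifying your configuration with `environment_checker`. The sketch below is an illustration only: it assumes `environment_checker` takes no arguments, and the URL and webhook values are placeholders rather than real endpoints.

```python
import os
from ANBUtils import environment_checker

# Placeholder values for illustration only -- replace with your real endpoints.
os.environ["MONGODB_URL"] = "mongodb://localhost:27017"
os.environ["MONGODB_PUB_URI"] = "mongodb://public.example.com:27017"
os.environ["DINGTALK_WEBHOOK"] = "https://oapi.dingtalk.com/robot/send?access_token=YOUR_TOKEN"
os.environ["QYWECHAT_WEBHOOK"] = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=YOUR_KEY"

# Verifies that the required environment variables are present
# (see "Environment Checker" above).
environment_checker()
```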
ANBUtils
/ANBUtils-1.6.4.tar.gz/ANBUtils-1.6.4/README.md
README.md
# Extract spatial features using a Graphical User Interface

This tool can extract spatial features using attribute parameters selected by the user.

## Step 1
Click the folder icons to select a base polygon shapefile and a point shapefile. The point shapefile will be your extracted feature.

![Image](Step1.png)

## Step 2
Select which field you want to categorize your first shapefile by. Then select which parameter you need to finish the expression.

![Image](Step2.png)

## Step 3
Finally, name your new shapefile and select which folder to save it in by clicking on the three dots. Click the _Create new shapefile_ button. The status bar at the bottom will tell you whether your extraction was successful.

![Image](Step3.png)

Have fun!
ANC08JUNE2020GEOG489L2
/ANC08JUNE2020GEOG489L2-2.0.tar.gz/ANC08JUNE2020GEOG489L2-1.0/README.md
README.md
# ANCIENT_INVASION

"Ancient Invasion" is an offline turn-based strategy RPG inspired by "Summoners War: Sky Arena" (https://play.google.com/store/apps/details?id=com.com2us.smon.normal.freefull.google.kr.android.common&hl=en&gl=US) and "Heir of Light" (https://play.google.com/store/apps/details?id=com.gamevil.heiroflight.android.google.global.normal&hl=en&gl=US). This game involves battles between teams of legendary creatures. Each legendary creature takes turns making moves during battles.

# Executable File

The executable file is downloadable at https://github.com/NativeApkDev/ANCIENT_INVASION/blob/master/ANCIENT_INVASION/dist/ancient_invasion/ancient_invasion.

# Source Code

The Python code used to create the game is available at https://github.com/NativeApkDev/ANCIENT_INVASION/blob/master/ANCIENT_INVASION/ancient_invasion.py.

# Installation

Enter the command "pip install ANCIENT_INVASION".

# Unit Tests

Python unit tests created using Python's "unittest" module, testing basic functionalities of the game, are available at https://github.com/NativeApkDev/ANCIENT_INVASION/blob/master/ANCIENT_INVASION/ancient_invasion_tests.py. The tests are all automated and related to user inputs in the game.

# How to Use the Executable File?

First, open the game by double-clicking the file "ancient_invasion". What the executable file looks like is shown in the image below (the file is enclosed in a red rectangle).

### Image 1

![Executable File](images/Executable%20File.png)

# Getting Started

After you run the game, you will be asked to enter your name. If saved game data with your name exists, that saved game data will be loaded. Otherwise, you will be asked to create new saved game data under your name.

### Image 2

![Getting Started](images/Getting%20Started.png)

# Main Menu

Once you have loaded saved game data or created new game data, you will be asked whether you want to continue playing "Ancient Invasion" or not. If you enter "Y", you will be able to do various activities (e.g., battle in map areas and dungeons, build on your player base, buy and sell items, etc.) in the game. The activity you want to do can be chosen by entering an input as instructed in the command line interface (see "Image 4").

### Image 3

![Main Menu 1](images/Main%20Menu%201.png)

### Image 4

![Main Menu 2](images/Main%20Menu%202.png)

# The Game

In the game, you will be able to do any of the actions shown in "Image 4". The actions are described below.

* PLAY ADVENTURE MODE -> battle in levels inside either map areas or dungeons against enemy legendary creatures. Each level has multiple stages, where each stage has a number of enemies you will need to defeat in order to proceed and eventually clear the level and gain rewards.
* MANAGE PLAYER BASE -> build, level up, and remove buildings on your player base. Trees can be built for decoration; obstacles can be removed; and buildings to strengthen legendary creatures (e.g., magic altar), produce resources (e.g., gold mine and gem mine), increase legendary creatures' EXP (i.e., training area), and so forth can be built.
* MANAGE BATTLE TEAM -> add and remove legendary creatures from your team. By default, the first legendary creature in the order the legendary creatures were added is the team leader.
* MANAGE LEGENDARY CREATURE INVENTORY -> remove legendary creatures which you do not use.
* MANAGE ITEM INVENTORY -> sell items and/or level up runes in the item inventory.
* MAKE A WISH -> gain random rewards (i.e., items or resources such as gold and gems) by making a wish using the temple of wishes.
* FUSE LEGENDARY CREATURES -> fuse multiple legendary creatures to gain a stronger one.
* SUMMON LEGENDARY CREATURE -> use a scroll to summon a legendary creature, which will be added to your legendary creature inventory.
* GIVE ITEM -> give an item to a legendary creature to strengthen it.
* POWER UP LEGENDARY CREATURE -> strengthen a legendary creature by sacrificing other legendary creatures as power-up materials. This requires a power-up circle.
* EVOLVE LEGENDARY CREATURE -> increase the rating of a legendary creature so that it can reach higher levels. This also requires a power-up circle.
* MANAGE TRAINING AREA -> add and remove legendary creatures from a training area in your player base.
* PLACE RUNE -> place a rune on a legendary creature you own.
* REMOVE RUNE -> remove a rune from a legendary creature you own.
* BUY ITEM -> buy an item from the item shop.
* VIEW STATS -> view your stats in the game (e.g., your level, EXP, the amount of EXP you need to reach the next level, and so forth).

Once you have entered one of the actions above at the main menu and pressed "ENTER" or "RETURN" on your machine, further instructions on what you need to do will be shown on the command line interface.
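
# Running from Source

If you prefer running the game from its Python source rather than the executable, the commands below are a sketch, not taken from the original instructions: they assume Python 3 is installed, that the repository files are present locally, and that the game's dependencies (e.g., "mpmath" and "tabulate") are available.

```
pip install ANCIENT_INVASION
python3 ancient_invasion.py
python3 -m unittest ancient_invasion_tests
```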
ANCIENT-INVASION
/ANCIENT_INVASION-1.tar.gz/ANCIENT_INVASION-1/README.md
README.md
# Importing necessary libraries import sys import uuid import pickle import copy import random from datetime import datetime import os from functools import reduce from mpmath import mp, mpf from tabulate import tabulate mp.pretty = True # Creating static functions to be used in this game. def is_number(string: str) -> bool: try: mpf(string) return True except ValueError: return False def triangular(n: int) -> int: return int(n * (n - 1) / 2) def mpf_sum_of_list(a_list: list) -> mpf: return mpf(str(sum(mpf(str(elem)) for elem in a_list if is_number(str(elem))))) def mpf_product_of_list(a_list: list) -> mpf: return mpf(reduce(lambda x, y: mpf(x) * mpf(y) if is_number(x) and is_number(y) else mpf(x) if is_number(x) and not is_number( y) else mpf(y) if is_number(y) and not is_number(x) else 1, a_list, 1)) def load_game_data(file_name): # type: (str) -> Game return pickle.load(open(file_name, "rb")) def save_game_data(game_data, file_name): # type: (Game, str) -> None pickle.dump(game_data, open(file_name, "wb")) def clear(): # type: () -> None if sys.platform.startswith('win'): os.system('cls') # For Windows System else: os.system('clear') # For Linux System def resistance_accuracy_rule(accuracy: mpf, resistance: mpf) -> mpf: if resistance - accuracy <= mpf("0.15"): return mpf("0.15") else: return resistance - accuracy def glancing_hit_chance_by_elements(element1: str, element2: str) -> mpf: if element1 == "FIRE" and element2 == "WATER": return mpf("0.3") elif element1 == "WATER" and element2 == "WIND": return mpf("0.3") elif element1 == "WIND" and element2 == "FIRE": return mpf("0.3") else: return mpf("0") def crushing_hit_chance_by_elements(legendary_creature1, legendary_creature2): # type: (LegendaryCreature, LegendaryCreature) -> mpf if legendary_creature1.element == "WATER" and legendary_creature2.element == "FIRE": return mpf("1") - (legendary_creature1.crit_rate + legendary_creature1.crit_rate_up - legendary_creature2.crit_resist - legendary_creature2.crit_resist_up) elif legendary_creature1.element == "WIND" and legendary_creature2.element == "WATER": return mpf("1") - (legendary_creature1.crit_rate + legendary_creature1.crit_rate_up - legendary_creature2.crit_resist - legendary_creature2.crit_resist_up) elif legendary_creature1.element == "FIRE" and legendary_creature2.element == "WIND": return mpf("1") - (legendary_creature1.crit_rate + legendary_creature1.crit_rate_up - legendary_creature2.crit_resist - legendary_creature2.crit_resist_up) else: return mpf("0") # Creating necessary classes to be used throughout the game. class Action: """ This class contains attributes of an action that can be carried out in this game. 
""" POSSIBLE_NAMES: list = ["NORMAL ATTACK", "NORMAL HEAL", "USE SKILL"] def __init__(self, name): # type: (str) -> None self.name: str = name if name in self.POSSIBLE_NAMES else self.POSSIBLE_NAMES[0] def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def execute(self, user, target, skill_to_use=None): # type: (LegendaryCreature, LegendaryCreature, Skill or None) -> bool if self.name == "NORMAL ATTACK": if user == target: return False raw_damage: mpf = user.attack_power * (1 + user.attack_power_percentage_up / 100 - user.attack_power_percentage_down / 100) * \ (1 + target.defense_percentage_up / 100 - target.defense_percentage_down / 100) damage_reduction_factor: mpf = mpf("1e8") / (mpf("1e8") + 3.5 * target.defense) damage: mpf = raw_damage * damage_reduction_factor target.curr_hp -= damage print(str(user.name) + " dealt " + str(damage) + " damage on " + str(target.name) + "!") return True elif self.name == "NORMAL HEAL": if user != target: return False heal_amount: mpf = 0.05 * user.max_hp user.curr_hp += heal_amount return True elif self.name == "USE SKILL": if isinstance(skill_to_use, ActiveSkill): if not skill_to_use.is_active: return False if skill_to_use.active_skill_type == "ATTACK": if user == target or user.corresponding_team == target.corresponding_team: return False # Attack the enem(ies) if skill_to_use.is_aoe: for enemy in target.corresponding_team.get_legendary_creatures(): damage: mpf = skill_to_use.damage_multiplier.calculate_raw_damage(user, enemy, skill_to_use.does_ignore_enemies_defense, skill_to_use.does_ignore_shield, skill_to_use.does_ignore_invincibility) enemy.curr_hp -= damage print(str(user.name) + " dealt " + str(damage) + " damage on " + str(enemy.name) + "!") # Considering life drain life_drain: mpf = damage * (user.life_drain_percentage + user.life_drain_percentage_up) \ / 100 user.curr_hp += life_drain print(str(user.name) + " drained " + str(life_drain) + " HP!") if user.curr_hp >= user.max_hp: user.curr_hp = user.max_hp # Taking into account "ENDURE" effect if not enemy.can_die: enemy.curr_hp = mpf("1") if enemy.can_receive_harmful_effect: # Add negative effects to the enemy resist_chance: mpf = resistance_accuracy_rule(user.accuracy + user.accuracy_up, enemy.resistance + enemy.resistance_up) for harmful_effect in skill_to_use.get_harmful_effects_to_enemies(): if random.random() >= resist_chance: if not (harmful_effect.name == "OBLIVION" and enemy.legendary_creature_type == "BOSS"): enemy.add_harmful_effect(harmful_effect) if random.random() >= resist_chance: enemy.attack_gauge -= skill_to_use.enemies_attack_gauge_down if enemy.attack_gauge <= enemy.MIN_ATTACK_GAUGE: enemy.attack_gauge = enemy.MIN_ATTACK_GAUGE # Resetting user's attack gauge to zero at first user.attack_gauge = user.MIN_ATTACK_GAUGE # Consider effect of passive skills of the user # 1. Beneficial effects to allies for legendary_creature in user.corresponding_team.get_legendary_creatures(): if legendary_creature.can_receive_beneficial_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): for beneficial_effect in \ skill.passive_skill_effect.get_beneficial_effects_to_allies(): legendary_creature.add_beneficial_effect(beneficial_effect) # 2. 
Harmful effects to enemies if enemy.can_receive_harmful_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): resist_chance = resistance_accuracy_rule( user.accuracy + user.accuracy_up, enemy.resistance + enemy.resistance_up) for harmful_effect in \ skill.passive_skill_effect.get_harmful_effects_to_enemies(): # Add negative effects to the enemy if random.random() >= resist_chance: if not (harmful_effect.name == "OBLIVION" and enemy.legendary_creature_type == "BOSS"): enemy.add_harmful_effect(harmful_effect) # 3. Increase allies' attack gauge for legendary_creature in user.corresponding_team.get_legendary_creatures(): for skill in user.get_skills(): if isinstance(skill, PassiveSkill): legendary_creature.attack_gauge += skill.passive_skill_effect.allies_attack_gauge_up # 4. Decrease enemies' attack gauge if enemy.can_receive_harmful_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): resist_chance = resistance_accuracy_rule( user.accuracy + user.accuracy_up, enemy.resistance + enemy.resistance_up) if random.random() >= resist_chance: enemy.attack_gauge -= skill.passive_skill_effect.enemies_attack_gauge_down # 5. Heal allies for legendary_creature in user.corresponding_team.get_legendary_creatures(): if legendary_creature.can_be_healed: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): legendary_creature.curr_hp += skill.passive_skill_effect.heal_amount_to_allies if legendary_creature.curr_hp >= legendary_creature.max_hp: legendary_creature.curr_hp = legendary_creature.max_hp else: damage: mpf = skill_to_use.damage_multiplier.calculate_raw_damage(user, target, skill_to_use.does_ignore_enemies_defense, skill_to_use.does_ignore_shield, skill_to_use.does_ignore_invincibility) target.curr_hp -= damage print(str(user.name) + " dealt " + str(damage) + " damage on " + str(target.name) + "!") # Considering life drain life_drain: mpf = damage * (user.life_drain_percentage + user.life_drain_percentage_up) \ / 100 user.curr_hp += life_drain print(str(user.name) + " drained " + str(life_drain) + " HP!") if user.curr_hp >= user.max_hp: user.curr_hp = user.max_hp # Taking into account "ENDURE" effect if not target.can_die: target.curr_hp = mpf("1") if target.can_receive_harmful_effect: # Add negative effects to the enemy resist_chance: mpf = resistance_accuracy_rule(user.accuracy + user.accuracy_up, target.resistance + target.resistance_up) for harmful_effect in skill_to_use.get_harmful_effects_to_enemies(): if random.random() >= resist_chance: if not (harmful_effect.name == "OBLIVION" and target.legendary_creature_type == "BOSS"): target.add_harmful_effect(harmful_effect) if random.random() >= resist_chance: target.attack_gauge -= skill_to_use.enemies_attack_gauge_down if target.attack_gauge <= target.MIN_ATTACK_GAUGE: target.attack_gauge = target.MIN_ATTACK_GAUGE # Resetting user's attack gauge to zero at first user.attack_gauge = user.MIN_ATTACK_GAUGE # Consider effect of passive skills of the user # 1. Beneficial effects to allies for legendary_creature in user.corresponding_team.get_legendary_creatures(): if legendary_creature.can_receive_beneficial_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): for beneficial_effect in \ skill.passive_skill_effect.get_beneficial_effects_to_allies(): legendary_creature.add_beneficial_effect(beneficial_effect) # 2. 
Harmful effects to enemies if target.can_receive_harmful_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): resist_chance = resistance_accuracy_rule(user.accuracy + user.accuracy_up, target.resistance + target.resistance_up) for harmful_effect in \ skill.passive_skill_effect.get_harmful_effects_to_enemies(): # Add negative effects to the enemy if random.random() >= resist_chance: if not (harmful_effect.name == "OBLIVION" and target.legendary_creature_type == "BOSS"): target.add_harmful_effect(harmful_effect) # 3. Increase allies' attack gauge for legendary_creature in user.corresponding_team.get_legendary_creatures(): for skill in user.get_skills(): if isinstance(skill, PassiveSkill): legendary_creature.attack_gauge += skill.passive_skill_effect.allies_attack_gauge_up # 4. Decrease enemies' attack gauge if target.can_receive_harmful_effect: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): resist_chance = resistance_accuracy_rule(user.accuracy + user.accuracy_up, target.resistance + target.resistance_up) if random.random() >= resist_chance: target.attack_gauge -= skill.passive_skill_effect.enemies_attack_gauge_down # 5. Heal allies for legendary_creature in user.corresponding_team.get_legendary_creatures(): if legendary_creature.can_be_healed: for skill in user.get_skills(): if isinstance(skill, PassiveSkill): legendary_creature.curr_hp += skill.passive_skill_effect.heal_amount_to_allies if legendary_creature.curr_hp >= legendary_creature.max_hp: legendary_creature.curr_hp = legendary_creature.max_hp elif skill_to_use.active_skill_type == "HEAL": if user.corresponding_team != target.corresponding_team: return False # Heal the all(ies) if skill_to_use.is_aoe: for ally in target.corresponding_team.get_legendary_creatures(): if ally.can_be_healed: ally.curr_hp += skill_to_use.heal_amount_to_allies if ally.curr_hp >= ally.max_hp: ally.curr_hp = ally.max_hp else: if target.can_be_healed: target.curr_hp += skill_to_use.heal_amount_to_allies if target.curr_hp >= target.max_hp: target.curr_hp = target.max_hp elif skill_to_use.active_skill_type == "ALLIES EFFECT": if user.corresponding_team != target.corresponding_team: return False # Give beneficial effects to all(ies) if skill_to_use.is_aoe: for ally in target.corresponding_team.get_legendary_creatures(): if ally.can_receive_beneficial_effect: for beneficial_effect in skill_to_use.get_beneficial_effects_to_allies(): ally.add_beneficial_effect(beneficial_effect) ally.attack_gauge += skill_to_use.allies_attack_gauge_up else: if target.can_receive_beneficial_effect: for beneficial_effect in skill_to_use.get_beneficial_effects_to_allies(): target.add_beneficial_effect(beneficial_effect) target.attack_gauge += skill_to_use.allies_attack_gauge_up elif skill_to_use.active_skill_type == "ENEMIES EFFECT": if user == target or user.corresponding_team == target.corresponding_team: return False # Give harmful effects to enem(ies) if skill_to_use.is_aoe: for enemy in target.corresponding_team.get_legendary_creatures(): resist_chance: mpf = resistance_accuracy_rule(user.accuracy, enemy.resistance) for harmful_effect in skill_to_use.get_harmful_effects_to_enemies(): if random.random() >= resist_chance: enemy.add_harmful_effect(harmful_effect) if random.random() >= resist_chance: enemy.attack_gauge -= skill_to_use.enemies_attack_gauge_down if enemy.attack_gauge <= enemy.MIN_ATTACK_GAUGE: enemy.attack_gauge = enemy.MIN_ATTACK_GAUGE else: resist_chance: mpf = resistance_accuracy_rule(user.accuracy, target.resistance) for 
harmful_effect in skill_to_use.get_harmful_effects_to_enemies(): if random.random() >= resist_chance: target.add_harmful_effect(harmful_effect) if random.random() >= resist_chance: target.attack_gauge -= skill_to_use.enemies_attack_gauge_down if target.attack_gauge <= target.MIN_ATTACK_GAUGE: target.attack_gauge = target.MIN_ATTACK_GAUGE skill_to_use.cooltime = skill_to_use.max_cooltime return True else: return False return False def clone(self): # type: () -> Action return copy.deepcopy(self) class Arena: """ This class contains attributes of the battle arena. """ def __init__(self, potential_opponents=None): # type: (list) -> None if potential_opponents is None: potential_opponents = [] self.__potential_opponents: list = potential_opponents def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def add_opponent(self, opponent): # type: (CPU) -> bool if opponent not in self.__potential_opponents: self.__potential_opponents.append(opponent) return True return False def remove_opponent(self, opponent): # type: (CPU) -> bool if opponent in self.__potential_opponents: self.__potential_opponents.remove(opponent) return True return False def get_potential_opponents(self): # type: () -> list return self.__potential_opponents def clone(self): # type: () -> Arena return copy.deepcopy(self) class AwakenBonus: """ This class contains attributes of the bonus gained for awakening a legendary creature. """ def __init__(self, max_hp_percentage_up, max_magic_points_percentage_up, attack_power_percentage_up, defense_percentage_up, attack_speed_up, crit_rate_up, crit_damage_up, resistance_up, accuracy_up, new_skill_gained): # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, Skill) -> None self.max_hp_percentage_up: mpf = max_hp_percentage_up self.max_magic_points_percentage_up: mpf = max_magic_points_percentage_up self.attack_power_percentage_up: mpf = attack_power_percentage_up self.defense_percentage_up: mpf = defense_percentage_up self.attack_speed_up: mpf = attack_speed_up self.crit_rate_up: mpf = crit_rate_up self.crit_damage_up: mpf = crit_damage_up self.resistance_up: mpf = resistance_up self.accuracy_up: mpf = accuracy_up self.new_skill_gained: Skill = new_skill_gained def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> AwakenBonus return copy.deepcopy(self) class Battle: """ This class contains attributes of a battle which takes place in this game. """ def __init__(self, team1, team2): # type: (Team, Team) -> None self.team1: Team = team1 self.team2: Team = team2 self.reward: Reward = Reward(mpf("10") ** sum(legendary_creature.rating for legendary_creature in self.team2.get_legendary_creatures()), mpf("10") ** (sum(legendary_creature.rating for legendary_creature in self.team2.get_legendary_creatures()) - 2), mpf("10") ** (sum(legendary_creature.rating for legendary_creature in self.team2.get_legendary_creatures()) - 5), mpf("10") ** sum(legendary_creature.rating for legendary_creature in self.team2.get_legendary_creatures())) self.whose_turn: LegendaryCreature or None = None self.winner: Team or None = None def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_someone_to_move(self): # type: () -> None """ Getting a legendary creature to move and have its turn. 
:return: None """ # Finding out which legendary creature moves full_attack_gauge_list: list = [] # initial value while len(full_attack_gauge_list) == 0: for legendary_creature in self.team1.get_legendary_creatures(): if legendary_creature.attack_gauge >= legendary_creature.FULL_ATTACK_GAUGE and legendary_creature not \ in full_attack_gauge_list: full_attack_gauge_list.append(legendary_creature) for legendary_creature in self.team2.get_legendary_creatures(): if legendary_creature.attack_gauge >= legendary_creature.FULL_ATTACK_GAUGE and legendary_creature not \ in full_attack_gauge_list: full_attack_gauge_list.append(legendary_creature) self.tick() max_attack_gauge: mpf = max(legendary_creature.attack_gauge for legendary_creature in full_attack_gauge_list) for legendary_creature in full_attack_gauge_list: if legendary_creature.attack_gauge == max_attack_gauge: self.whose_turn = legendary_creature def tick(self): # type: () -> None """ The clock ticks when battles are carried out. :return: None """ for legendary_creature in self.team1.get_legendary_creatures(): legendary_creature.attack_gauge += legendary_creature.attack_speed * 0.07 for legendary_creature in self.team2.get_legendary_creatures(): legendary_creature.attack_gauge += legendary_creature.attack_speed * 0.07 def clone(self): # type: () -> Battle return copy.deepcopy(self) class BattleArea: """ This class contains attributes of areas used for single player battles. """ def __init__(self, name, levels, clear_reward): # type: (str, list, Reward) -> None self.name: str = name self.__levels: list = levels self.clear_reward: Reward = clear_reward self.has_been_cleared: bool = False def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_levels(self): # type: () -> list return self.__levels def clone(self): # type: () -> BattleArea return copy.deepcopy(self) class MapArea(BattleArea): """ This class contains attributes of a map area in this game. """ POSSIBLE_MODES: list = ["EASY", "NORMAL", "HARD", "HELL"] def __init__(self, name, levels, clear_reward, mode): # type: (str, list, Reward, str) -> None BattleArea.__init__(self, name, levels, clear_reward) self.mode: str = mode if mode in self.POSSIBLE_MODES else self.POSSIBLE_MODES[0] def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class Dungeon(BattleArea): """ This class contains attributes of a dungeon in this game. """ POSSIBLE_TYPES: list = ["RESOURCE", "ITEM"] def __init__(self, name, levels, clear_reward, dungeon_type): # type: (str, list, Reward, str) -> None BattleArea.__init__(self, name, levels, clear_reward) self.dungeon_type: str = dungeon_type if dungeon_type in self.POSSIBLE_TYPES else self.POSSIBLE_TYPES[0] def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class Level: """ This class contains attributes of a level where battles take place. 
""" def __init__(self, name, stages, clear_reward): # type: (str, list, Reward) -> None self.name: str = name self.__stages: list = stages self.is_cleared: bool = False self.clear_reward: Reward = clear_reward self.times_beaten: int = 0 # initial value def curr_stage(self, stage_number): # type: (int) -> Stage or None if stage_number < 0 or stage_number >= len(self.__stages): return None return self.__stages[stage_number] def next_stage(self, stage_number): # type: (int) -> Stage or None if stage_number < -1 or stage_number >= len(self.__stages) - 1: return None return self.__stages[stage_number + 1] def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_stages(self): # type: () -> list return self.__stages def strengthen_enemies(self): # type: () -> None for stage in self.__stages: for enemy in stage.get_enemies_list(): level_ups: int = 2 ** self.times_beaten for i in range(level_ups): enemy.exp = enemy.required_exp enemy.level_up() def clone(self): # type: () -> Level return copy.deepcopy(self) class Stage: """ This class contains attributes of a stage in a level. """ def __init__(self, enemies_list): # type: (list) -> None self.__enemies_list: list = enemies_list self.is_cleared: bool = False def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_enemies_list(self): # type: () -> list return self.__enemies_list def clone(self): # type: () -> Stage return copy.deepcopy(self) class Player: """ This class contains attributes of the player in this game. """ def __init__(self, name): # type: (str) -> None self.player_id: str = str(uuid.uuid1()) # generating random player ID self.name: str = name self.level: int = 1 self.exp: mpf = mpf("0") self.required_exp: mpf = mpf("1e6") self.exp_per_second: mpf = mpf("0") self.gold: mpf = mpf("5e6") self.gold_per_second: mpf = mpf("0") self.gems: mpf = mpf("100") self.gems_per_second: mpf = mpf("0") self.arena_points: int = 1000 self.arena_wins: int = 0 self.arena_losses: int = 0 self.battle_team: Team = Team() self.item_inventory: ItemInventory = ItemInventory() self.legendary_creature_inventory: LegendaryCreatureInventory = LegendaryCreatureInventory() self.player_base: PlayerBase = PlayerBase() def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def claim_reward(self, reward): # type: (Reward) -> None self.exp += reward.player_reward_exp self.level_up() self.gold += reward.player_reward_gold self.gems += reward.player_reward_gems for legendary_creature in self.battle_team.get_legendary_creatures(): legendary_creature.exp += reward.legendary_creature_reward_exp legendary_creature.level_up() self.battle_team.recover_all() for item in reward.get_player_reward_items(): self.add_item_to_inventory(item) def make_a_wish(self, temple_of_wishes): # type: (TempleOfWishes) -> bool temple_of_wishes_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == temple_of_wishes: temple_of_wishes_exists = True break if not temple_of_wishes_exists: return False if temple_of_wishes.wishes_left <= 0: return False potential_objects: list = temple_of_wishes.get_obtainable_objects() object_obtained: Item or Reward or LegendaryCreature = \ potential_objects[random.randint(0, len(potential_objects) - 1)] if 
isinstance(object_obtained, Item): self.add_item_to_inventory(object_obtained) elif isinstance(object_obtained, Reward): self.exp += object_obtained.player_reward_exp self.level_up() self.gold += object_obtained.player_reward_gold self.gems += object_obtained.player_reward_gems for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): legendary_creature.exp += object_obtained.legendary_creature_reward_exp legendary_creature.level_up() for item in object_obtained.get_player_reward_items(): self.add_item_to_inventory(item) elif isinstance(object_obtained, LegendaryCreature): self.add_legendary_creature(object_obtained) else: pass return True def fuse_legendary_creatures(self, material_legendary_creatures, chosen_fusion_legendary_creature, fusion_center): # type: (list, FusionLegendaryCreature, FusionCenter) -> bool for material_legendary_creature in material_legendary_creatures: if material_legendary_creature not in self.legendary_creature_inventory.get_legendary_creatures(): return False fusion_center_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == fusion_center: fusion_center_exists = True break if not fusion_center_exists: return False # Checking whether the materials match the materials for the chosen fusion legendary creature or not for index in range(len(material_legendary_creatures)): curr_material: LegendaryCreature = material_legendary_creatures[index] list_to_compare_with: list = chosen_fusion_legendary_creature.get_material_legendary_creatures() material_for_comparison: LegendaryCreature = list_to_compare_with[index] if not (curr_material.name == material_for_comparison.name or curr_material.name == "AWAKENED " + str(material_for_comparison.name) or material_for_comparison.name == "AWAKENED " + str( curr_material.name)): # Material mismatch return False # Add the fusion legendary creature to player's legendary creature inventory and remove the fusion materials self.add_legendary_creature(chosen_fusion_legendary_creature) for material_legendary_creature in material_legendary_creatures: self.remove_legendary_creature(material_legendary_creature) return True def summon_legendary_creature(self, scroll, summonhenge): # type: (Scroll, Summonhenge) -> bool if scroll not in self.item_inventory.get_items(): return False summonhenge_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == summonhenge: summonhenge_exists = True break if not summonhenge_exists: return False summoned_legendary_creature_index: int = random.randint(0, len(scroll.get_potential_legendary_creatures()) - 1) summoned_legendary_creature: LegendaryCreature = \ scroll.get_potential_legendary_creatures()[summoned_legendary_creature_index] print("You have summoned " + str(summoned_legendary_creature.name) + "!!!") self.add_legendary_creature(summoned_legendary_creature) self.remove_item_from_inventory(scroll) return True def give_item_to_legendary_creature(self, item, legendary_creature): # type: (Item, LegendaryCreature) -> bool if item not in self.item_inventory.get_items(): return False if legendary_creature not in self.legendary_creature_inventory.get_legendary_creatures(): return False if isinstance(item, EXPShard): legendary_creature.exp += item.exp_granted 
legendary_creature.level_up() self.remove_item_from_inventory(item) return True elif isinstance(item, LevelUpShard): legendary_creature.exp = legendary_creature.required_exp legendary_creature.level_up() self.remove_item_from_inventory(item) return True elif isinstance(item, SkillLevelUpShard): skill_index: int = random.randint(0, len(legendary_creature.get_skills()) - 1) curr_skill: Skill = legendary_creature.get_skills()[skill_index] curr_skill.level_up() self.remove_item_from_inventory(item) return True elif isinstance(item, AwakenShard): if item.legendary_creature_name == legendary_creature.name: legendary_creature.awaken() self.remove_item_from_inventory(item) return True return False return False def power_up_legendary_creature(self, legendary_creature_to_power_up, material_legendary_creatures, power_up_circle): # type: (LegendaryCreature, list, PowerUpCircle) -> bool if len(material_legendary_creatures) < 0 or len(material_legendary_creatures) > \ power_up_circle.MAX_MATERIAL_LEGENDARY_CREATURES: return False if legendary_creature_to_power_up not in self.legendary_creature_inventory.get_legendary_creatures(): return False power_up_circle_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == power_up_circle: power_up_circle_exists = True break if not power_up_circle_exists: return False power_up_circle.deselect_legendary_creature_to_power_up() power_up_circle.select_legendary_creature_to_power_up(legendary_creature_to_power_up) power_up_circle.set_material_legendary_creatures(material_legendary_creatures) legendary_creature_to_power_up = power_up_circle.execute_power_up() assert isinstance(legendary_creature_to_power_up, LegendaryCreature), "Legendary creature power-up failed!" for legendary_creature in material_legendary_creatures: self.remove_legendary_creature(legendary_creature) return True def evolve_legendary_creature(self, legendary_creature_to_evolve, material_legendary_creatures, power_up_circle): # type: (LegendaryCreature, list, PowerUpCircle) -> bool if len(material_legendary_creatures) < 0 or len(material_legendary_creatures) > \ power_up_circle.MAX_MATERIAL_LEGENDARY_CREATURES: return False if legendary_creature_to_evolve not in self.legendary_creature_inventory.get_legendary_creatures(): return False power_up_circle_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == power_up_circle: power_up_circle_exists = True break if not power_up_circle_exists: return False power_up_circle.deselect_legendary_creature_to_power_up() power_up_circle.select_legendary_creature_to_power_up(legendary_creature_to_evolve) power_up_circle.set_material_legendary_creatures(material_legendary_creatures) legendary_creature_to_evolve = power_up_circle.execute_evolution() assert isinstance(legendary_creature_to_evolve, LegendaryCreature), "Legendary creature evolution failed!" 
for legendary_creature in material_legendary_creatures: self.remove_legendary_creature(legendary_creature) return True def add_legendary_creature_to_training_area(self, legendary_creature, training_area): # type: (LegendaryCreature, TrainingArea) -> bool if legendary_creature not in self.legendary_creature_inventory.get_legendary_creatures() or \ legendary_creature in self.battle_team.get_legendary_creatures(): return False training_area_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == training_area: training_area_exists = True break if not training_area_exists: return False if training_area.add_legendary_creature(legendary_creature): legendary_creature.exp_per_second += training_area.legendary_creature_exp_per_second legendary_creature.placed_in_training_area = True return True return False def remove_legendary_creature_from_training_area(self, legendary_creature, training_area): # type: (LegendaryCreature, TrainingArea) -> bool if legendary_creature not in self.legendary_creature_inventory.get_legendary_creatures() or \ legendary_creature in self.battle_team.get_legendary_creatures(): return False training_area_exists: bool = False for island in self.player_base.get_islands(): for y in range(island.ISLAND_HEIGHT): for x in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if curr_tile.building == training_area: training_area_exists = True break if not training_area_exists: return False if training_area.remove_legendary_creature(legendary_creature): legendary_creature.exp_per_second -= training_area.legendary_creature_exp_per_second legendary_creature.placed_in_training_area = False return True return False def add_island_to_player_base(self): # type: () -> bool if self.gold >= self.player_base.island_build_gold_cost: self.gold -= self.player_base.island_build_gold_cost self.player_base.add_island() return True return False def level_up_building_at_island_tile(self, island_index, tile_x, tile_y): # type: (int, int, int) -> bool if island_index < 0 or island_index >= len(self.player_base.get_islands()): return False corresponding_island: Island = self.player_base.get_islands()[island_index] if isinstance(corresponding_island.get_tile_at(tile_x, tile_y), IslandTile): curr_tile: IslandTile = corresponding_island.get_tile_at(tile_x, tile_y) if isinstance(curr_tile.building, Building): curr_building: Building = curr_tile.building if self.gold < curr_building.upgrade_gold_cost or self.gems < curr_building.upgrade_gem_cost: return False self.gold -= curr_building.upgrade_gold_cost self.gems -= curr_building.upgrade_gem_cost if isinstance(curr_building, Guardstone): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." 
initial_legendary_creature_defense_percentage_up: mpf = \ curr_building.legendary_creature_defense_percentage_up curr_building.level_up() legendary_creature.DEFAULT_DEFENSE_PERCENTAGE_UP += \ (curr_building.legendary_creature_defense_percentage_up - initial_legendary_creature_defense_percentage_up) legendary_creature.defense_percentage_up += \ (curr_building.legendary_creature_defense_percentage_up - initial_legendary_creature_defense_percentage_up) elif isinstance(curr_building, LegendaryCreatureSanctuary): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." initial_legendary_creature_attack_power_percentage_up: mpf = \ curr_building.legendary_creature_attack_power_percentage_up curr_building.level_up() legendary_creature.DEFAULT_ATTACK_POWER_PERCENTAGE_UP += \ (curr_building.legendary_creature_attack_power_percentage_up - initial_legendary_creature_attack_power_percentage_up) legendary_creature.attack_power_percentage_up += \ (curr_building.legendary_creature_attack_power_percentage_up - initial_legendary_creature_attack_power_percentage_up) elif isinstance(curr_building, SurvivalAltar): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." initial_legendary_creature_max_hp_percentage_up: mpf = \ curr_building.legendary_creature_max_hp_percentage_up curr_building.level_up() legendary_creature.DEFAULT_MAX_HP_PERCENTAGE_UP += \ (curr_building.legendary_creature_max_hp_percentage_up - initial_legendary_creature_max_hp_percentage_up) legendary_creature.max_hp_percentage_up += \ (curr_building.legendary_creature_max_hp_percentage_up - initial_legendary_creature_max_hp_percentage_up) elif isinstance(curr_building, MagicAltar): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." initial_legendary_creature_max_magic_points_percentage_up: mpf = \ curr_building.legendary_creature_max_magic_points_percentage_up curr_building.level_up() legendary_creature.DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP += \ (curr_building.legendary_creature_max_magic_points_percentage_up - initial_legendary_creature_max_magic_points_percentage_up) legendary_creature.max_magic_points_percentage_up += \ (curr_building.legendary_creature_max_magic_points_percentage_up - initial_legendary_creature_max_magic_points_percentage_up) elif isinstance(curr_building, BoosterTower): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." 
initial_legendary_creature_attack_speed_percentage_up: mpf = \ curr_building.legendary_creature_attack_speed_percentage_up curr_building.level_up() legendary_creature.DEFAULT_ATTACK_SPEED_PERCENTAGE_UP += \ (curr_building.legendary_creature_attack_speed_percentage_up - initial_legendary_creature_attack_speed_percentage_up) legendary_creature.attack_speed_percentage_up += \ (curr_building.legendary_creature_attack_speed_percentage_up - initial_legendary_creature_attack_speed_percentage_up) elif isinstance(curr_building, PlayerEXPTower): initial_exp_per_second: mpf = curr_building.exp_per_second curr_building.level_up() self.exp_per_second += (curr_building.exp_per_second - initial_exp_per_second) elif isinstance(curr_building, GoldMine): initial_gold_per_second: mpf = curr_building.gold_per_second curr_building.level_up() self.gold_per_second += (curr_building.gold_per_second - initial_gold_per_second) elif isinstance(curr_building, GemMine): initial_gems_per_second: mpf = curr_building.gem_per_second curr_building.level_up() self.gems_per_second += (curr_building.gem_per_second - initial_gems_per_second) else: curr_building.level_up() return True return False return False def build_at_island_tile(self, island_index, tile_x, tile_y, building): # type: (int, int, int, Building) -> bool if island_index < 0 or island_index >= len(self.player_base.get_islands()): return False corresponding_island: Island = self.player_base.get_islands()[island_index] if isinstance(corresponding_island.get_tile_at(tile_x, tile_y), IslandTile): curr_tile: IslandTile = corresponding_island.get_tile_at(tile_x, tile_y) if curr_tile.building is not None: return False if self.gold < building.gold_cost or self.gems < building.gem_cost: return False self.gold -= building.gold_cost self.gems -= building.gem_cost if isinstance(building, Guardstone): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." legendary_creature.DEFAULT_DEFENSE_PERCENTAGE_UP += \ building.legendary_creature_defense_percentage_up legendary_creature.defense_percentage_up += building.legendary_creature_defense_percentage_up elif isinstance(building, LegendaryCreatureSanctuary): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." legendary_creature.DEFAULT_ATTACK_POWER_PERCENTAGE_UP += \ building.legendary_creature_attack_power_percentage_up legendary_creature.attack_power_percentage_up += \ building.legendary_creature_attack_power_percentage_up elif isinstance(building, SurvivalAltar): for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures(): assert isinstance(legendary_creature, LegendaryCreature), "Invalid argument in the list of " \ "legendary creatures in player's " \ "legendary creature inventory." 
                    legendary_creature.DEFAULT_MAX_HP_PERCENTAGE_UP += \
                        building.legendary_creature_max_hp_percentage_up
                    legendary_creature.max_hp_percentage_up += \
                        building.legendary_creature_max_hp_percentage_up
            elif isinstance(building, MagicAltar):
                for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                    assert isinstance(legendary_creature, LegendaryCreature), \
                        "Invalid argument in the list of legendary creatures in player's " \
                        "legendary creature inventory."
                    legendary_creature.DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP += \
                        building.legendary_creature_max_magic_points_percentage_up
                    legendary_creature.max_magic_points_percentage_up += \
                        building.legendary_creature_max_magic_points_percentage_up
            elif isinstance(building, BoosterTower):
                for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                    assert isinstance(legendary_creature, LegendaryCreature), \
                        "Invalid argument in the list of legendary creatures in player's " \
                        "legendary creature inventory."
                    legendary_creature.DEFAULT_ATTACK_SPEED_PERCENTAGE_UP += \
                        building.legendary_creature_attack_speed_percentage_up
                    legendary_creature.attack_speed_percentage_up += \
                        building.legendary_creature_attack_speed_percentage_up
            elif isinstance(building, PlayerEXPTower):
                self.exp_per_second += building.exp_per_second
            elif isinstance(building, GoldMine):
                self.gold_per_second += building.gold_per_second
            elif isinstance(building, GemMine):
                self.gems_per_second += building.gem_per_second
            elif isinstance(building, Obstacle):
                # Obstacles cannot be built by the player; refund the cost charged above.
                self.gold += building.gold_cost
                self.gems += building.gem_cost
                return False
            curr_tile.building = building
            return True
        return False

    def remove_building_from_island_tile(self, island_index, tile_x, tile_y):
        # type: (int, int, int) -> bool
        if island_index < 0 or island_index >= len(self.player_base.get_islands()):
            return False
        corresponding_island: Island = self.player_base.get_islands()[island_index]
        if isinstance(corresponding_island.get_tile_at(tile_x, tile_y), IslandTile):
            curr_tile: IslandTile = corresponding_island.get_tile_at(tile_x, tile_y)
            if isinstance(curr_tile.building, Building):
                curr_building: Building = curr_tile.building
                self.gold += curr_building.sell_gold_gain
                self.gems += curr_building.sell_gem_gain
                if isinstance(curr_building, Guardstone):
                    for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                        assert isinstance(legendary_creature, LegendaryCreature), \
                            "Invalid argument in the list of legendary creatures in player's " \
                            "legendary creature inventory."
                        legendary_creature.DEFAULT_DEFENSE_PERCENTAGE_UP -= \
                            curr_building.legendary_creature_defense_percentage_up
                        legendary_creature.defense_percentage_up -= \
                            curr_building.legendary_creature_defense_percentage_up
                elif isinstance(curr_building, LegendaryCreatureSanctuary):
                    for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                        assert isinstance(legendary_creature, LegendaryCreature), \
                            "Invalid argument in the list of legendary creatures in player's " \
                            "legendary creature inventory."
                        legendary_creature.DEFAULT_ATTACK_POWER_PERCENTAGE_UP -= \
                            curr_building.legendary_creature_attack_power_percentage_up
                        legendary_creature.attack_power_percentage_up -= \
                            curr_building.legendary_creature_attack_power_percentage_up
                elif isinstance(curr_building, SurvivalAltar):
                    for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                        assert isinstance(legendary_creature, LegendaryCreature), \
                            "Invalid argument in the list of legendary creatures in player's " \
                            "legendary creature inventory."
                        legendary_creature.DEFAULT_MAX_HP_PERCENTAGE_UP -= \
                            curr_building.legendary_creature_max_hp_percentage_up
                        legendary_creature.max_hp_percentage_up -= \
                            curr_building.legendary_creature_max_hp_percentage_up
                elif isinstance(curr_building, MagicAltar):
                    for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                        assert isinstance(legendary_creature, LegendaryCreature), \
                            "Invalid argument in the list of legendary creatures in player's " \
                            "legendary creature inventory."
                        legendary_creature.DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP -= \
                            curr_building.legendary_creature_max_magic_points_percentage_up
                        legendary_creature.max_magic_points_percentage_up -= \
                            curr_building.legendary_creature_max_magic_points_percentage_up
                elif isinstance(curr_building, BoosterTower):
                    for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                        assert isinstance(legendary_creature, LegendaryCreature), \
                            "Invalid argument in the list of legendary creatures in player's " \
                            "legendary creature inventory."
                        legendary_creature.DEFAULT_ATTACK_SPEED_PERCENTAGE_UP -= \
                            curr_building.legendary_creature_attack_speed_percentage_up
                        legendary_creature.attack_speed_percentage_up -= \
                            curr_building.legendary_creature_attack_speed_percentage_up
                elif isinstance(curr_building, PlayerEXPTower):
                    self.exp_per_second -= curr_building.exp_per_second
                elif isinstance(curr_building, GoldMine):
                    self.gold_per_second -= curr_building.gold_per_second
                elif isinstance(curr_building, GemMine):
                    self.gems_per_second -= curr_building.gem_per_second
                elif isinstance(curr_building, Obstacle):
                    self.gold += curr_building.remove_gold_gain
                    self.gems += curr_building.remove_gem_gain
                curr_tile.building = None
                return True
            return False
        return False

    def place_rune_on_legendary_creature(self, legendary_creature, rune):
        # type: (LegendaryCreature, Rune) -> bool
        if legendary_creature in self.legendary_creature_inventory.get_legendary_creatures() and \
                rune in self.item_inventory.get_items():
            legendary_creature.place_rune(rune)
            return True
        return False

    def remove_rune_from_legendary_creature(self, legendary_creature, slot_number):
        # type: (LegendaryCreature, int) -> bool
        if legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
            if slot_number in legendary_creature.get_runes().keys():
                legendary_creature.remove_rune(slot_number)
                return True
            return False
        return False

    def level_up(self):
        # type: () -> None
        while self.exp >= self.required_exp:
            self.level += 1
            self.required_exp *= mpf("10") ** self.level

    def purchase_item(self, item):
        # type: (Item) -> bool
        if self.gold >= item.gold_cost and self.gems >= item.gem_cost:
            self.gold -= item.gold_cost
            self.gems -= item.gem_cost
            self.add_item_to_inventory(item)
            return True
        return False

    def sell_item(self, item):
        # type: (Item) -> bool
        if item in self.item_inventory.get_items():
            if isinstance(item, Rune):
                if item.already_placed:
                    return False
            self.remove_item_from_inventory(item)
            self.gold += item.sell_gold_gain
            self.gems += item.sell_gem_gain
            return True
        return False

    def add_new_island_to_player_base(self):
        # type: () -> bool
        if self.gold >= self.player_base.island_build_gold_cost:
            self.gold -= self.player_base.island_build_gold_cost
            self.player_base.add_island()
            return True
        return False

    def level_up_rune(self, rune):
        # type: (Rune) -> bool
        if rune in self.item_inventory.get_items():
            if self.gold >= rune.level_up_gold_cost:
                self.gold -= rune.level_up_gold_cost
                return rune.level_up()
            return False
        else:
            # Check whether a legendary creature has the rune 'rune' or not.
            for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                if rune in legendary_creature.get_runes().values():
                    if self.gold >= rune.level_up_gold_cost:
                        self.gold -= rune.level_up_gold_cost
                        return legendary_creature.level_up_rune(rune.slot_number)
                    return False
            return False

    def add_item_to_inventory(self, item):
        # type: (Item) -> None
        self.item_inventory.add_item(item)

    def remove_item_from_inventory(self, item):
        # type: (Item) -> bool
        if isinstance(item, Rune):
            for legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
                if item in legendary_creature.get_runes().values():
                    return False
        return self.item_inventory.remove_item(item)

    def add_legendary_creature(self, legendary_creature):
        # type: (LegendaryCreature) -> None
        self.legendary_creature_inventory.add_legendary_creature(legendary_creature)

    def remove_legendary_creature(self, legendary_creature):
        # type: (LegendaryCreature) -> bool
        if legendary_creature in self.battle_team.get_legendary_creatures():
            return False
        return self.legendary_creature_inventory.remove_legendary_creature(legendary_creature)

    def add_legendary_creature_to_team(self, legendary_creature):
        # type: (LegendaryCreature) -> bool
        if legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
            if self.battle_team.add_legendary_creature(legendary_creature):
                legendary_creature.corresponding_team = self.battle_team
                return True
            return False
        return False

    def remove_legendary_creature_from_team(self, legendary_creature):
        # type: (LegendaryCreature) -> bool
        if legendary_creature in self.legendary_creature_inventory.get_legendary_creatures():
            legendary_creature.corresponding_team = Team()
            return self.battle_team.remove_legendary_creature(legendary_creature)
        return False

    def clone(self):
        # type: () -> Player
        return copy.deepcopy(self)


class CPU(Player):
    """
    This class contains attributes of a CPU controlled player.
    """

    def __init__(self, name):
        # type: (str) -> None
        Player.__init__(self, name)
        self.currently_available: bool = False
        self.next_available_time: datetime or None = None
        self.times_beaten: int = 0  # initial value

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )


class LegendaryCreatureInventory:
    """
    This class contains attributes of an inventory containing legendary creatures.
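
    A minimal usage sketch (``creature`` is assumed to be a LegendaryCreature
    built elsewhere; this is illustrative, not canonical game data):

        inventory = LegendaryCreatureInventory()
        inventory.add_legendary_creature(creature)
        creature in inventory.get_legendary_creatures()   # -> True
        inventory.remove_legendary_creature(creature)     # -> True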
""" def __init__(self): # type: () -> None self.__legendary_creatures: list = [] # initial value def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def add_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> None self.__legendary_creatures.append(legendary_creature) def remove_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if legendary_creature in self.__legendary_creatures: self.__legendary_creatures.remove(legendary_creature) return True return False def get_legendary_creatures(self): # type: () -> list return self.__legendary_creatures def clone(self): # type: () -> LegendaryCreatureInventory return copy.deepcopy(self) class ItemInventory: """ This class contains attributes of an inventory containing items. """ def __init__(self): # type: () -> None self.__items: list = [] # initial value def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def add_item(self, item): # type: (Item) -> None self.__items.append(item) def remove_item(self, item): # type: (Item) -> bool if item in self.__items: self.__items.remove(item) return True return False def get_items(self): # type: () -> list return self.__items def clone(self): # type: () -> ItemInventory return copy.deepcopy(self) class Item: """ This class contains attributes of an item in this game. """ def __init__(self, name, description, gold_cost, gem_cost): # type: (str, str, mpf, mpf) -> None self.name: str = name self.description: str = description self.gold_cost: mpf = gold_cost self.gem_cost: mpf = gem_cost self.sell_gold_gain: mpf = gold_cost / 5 self.sell_gem_gain: mpf = gem_cost / 5 def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> Item return copy.deepcopy(self) class Rune(Item): """ This class contains attributes of a rune used to strengthen legendary creatures. 
""" MIN_SLOT_NUMBER: int = 1 MAX_SLOT_NUMBER: int = 6 MIN_RATING: int = 1 MAX_RATING: int = 6 POTENTIAL_SET_NAMES: list = ["ENERGY", "MAGIC", "FATAL", "BLADE", "SWIFT", "FOCUS", "GUARD", "ENDURE", "REVENGE", "VAMPIRE", "RAGE", "VIOLENT", "REFLECT", "RESIST", "DESPAIR"] POTENTIAL_MAIN_STATS: list = ["HP", "HP%", "MP", "MP%", "ATK", "ATK%", "DEF", "DEF%", "SPD", "CR", "CD", "RES", "ACC"] MAX_SUB_STATS: int = 4 def __init__(self, name, description, gold_cost, gem_cost, rating, slot_number, set_name, main_stat): # type: (str, str, mpf, mpf, int, int, str, str) -> None Item.__init__(self, name, description, gold_cost, gem_cost) self.rating: int = rating if self.MIN_RATING <= rating <= self.MAX_RATING else self.MIN_RATING self.slot_number: int = slot_number if self.MIN_SLOT_NUMBER <= slot_number <= self.MAX_SLOT_NUMBER else \ self.MIN_SLOT_NUMBER self.set_name: str = set_name if set_name in self.POTENTIAL_SET_NAMES else self.POTENTIAL_SET_NAMES[0] self.set_size: int = 4 if self.set_name in ["FATAL", "SWIFT", "VAMPIRE", "RAGE", "VIOLENT", "REFLECT", "DESPAIR"] else 2 self.main_stat: str = main_stat if main_stat in self.POTENTIAL_MAIN_STATS else self.POTENTIAL_MAIN_STATS[0] self.__sub_stats: list = [] # initial value self.set_effect_is_active: bool = False self.stat_increase: StatIncrease = self.__get_stat_increase() self.set_effect: SetEffect = self.__get_set_effect() self.level: int = 1 self.level_up_gold_cost: mpf = gold_cost self.level_up_success_rate: mpf = mpf("1") self.already_placed: bool = False # initial value def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_sub_stats(self): # type: () -> list return self.__sub_stats def __get_stat_increase(self): # type: () -> StatIncrease if self.main_stat == "HP": return StatIncrease(max_hp_up=mpf("10") ** (6 * self.rating)) elif self.main_stat == "HP%": return StatIncrease(max_hp_percentage_up=mpf(2 * self.rating)) elif self.main_stat == "MP": return StatIncrease(max_magic_points_up=mpf("10") ** (6 * self.rating)) elif self.main_stat == "MP%": return StatIncrease(max_magic_points_percentage_up=mpf(2 * self.rating)) elif self.main_stat == "ATK": return StatIncrease(attack_up=mpf("10") ** (5 * self.rating)) elif self.main_stat == "ATK%": return StatIncrease(attack_percentage_up=mpf(2 * self.rating)) elif self.main_stat == "DEF": return StatIncrease(defense_up=mpf("10") ** (5 * self.rating)) elif self.main_stat == "DEF%": return StatIncrease(defense_percentage_up=mpf(2 * self.rating)) elif self.main_stat == "SPD": return StatIncrease(attack_speed_up=mpf(2 * self.rating)) elif self.main_stat == "CR": return StatIncrease(crit_rate_up=mpf(0.01 * self.rating)) elif self.main_stat == "CD": return StatIncrease(crit_damage_up=mpf(0.05 * self.rating)) elif self.main_stat == "RES": return StatIncrease(resistance_up=mpf(0.01 * self.rating)) elif self.main_stat == "ACC": return StatIncrease(accuracy_up=mpf(0.01 * self.rating)) return StatIncrease() def __get_set_effect(self): # type: () -> SetEffect if self.set_name == "ENERGY": return SetEffect(max_hp_percentage_up=mpf("15")) elif self.set_name == "MAGIC": return SetEffect(max_magic_points_percentage_up=mpf("15")) elif self.set_name == "FATAL": return SetEffect(attack_percentage_up=mpf("35")) elif self.set_name == "BLADE": return SetEffect(crit_rate_up=mpf("0.12")) elif self.set_name == "SWIFT": return SetEffect(attack_speed_percentage_up=mpf("25")) elif self.set_name == "FOCUS": return SetEffect(accuracy_up=mpf("0.2")) elif 
self.set_name == "GUARD": return SetEffect(defense_percentage_up=mpf("20")) elif self.set_name == "ENDURE": return SetEffect(resistance_up=mpf("0.2")) elif self.set_name == "REVENGE": return SetEffect(counterattack_chance_up=mpf("0.15")) elif self.set_name == "VAMPIRE": return SetEffect(life_drain_percentage_up=mpf("35")) elif self.set_name == "RAGE": return SetEffect(crit_damage_up=mpf("0.4")) elif self.set_name == "VIOLENT": return SetEffect(extra_turn_chance_up=mpf("0.22")) elif self.set_name == "REFLECT": return SetEffect(reflected_damage_percentage_up=mpf("35")) elif self.set_name == "RESIST": return SetEffect(crit_resist_up=mpf("0.15")) elif self.set_name == "DESPAIR": return SetEffect(stun_rate_up=mpf("0.25")) return SetEffect() def level_up(self): # type: () -> bool # Check whether levelling up is successful or not if random.random() > self.level_up_success_rate: return False # Increase the level of the rune self.level += 1 # Update the cost and success rate of levelling up the rune self.level_up_gold_cost *= mpf("10") ** (self.level + self.rating) self.level_up_success_rate *= mpf("0.95") # Increase main stat attribute if self.main_stat == "HP": self.stat_increase.max_hp_up += mpf("10") ** (6 * self.rating + self.level) elif self.main_stat == "HP%": self.stat_increase.max_hp_percentage_up += self.rating elif self.main_stat == "MP": self.stat_increase.max_magic_points_up += mpf("10") ** (6 * self.rating + self.level) elif self.main_stat == "MP%": self.stat_increase.max_magic_points_percentage_up += self.rating elif self.main_stat == "ATK": self.stat_increase.attack_up += mpf("10") ** (5 * self.rating + 1) elif self.main_stat == "ATK%": self.stat_increase.attack_percentage_up += self.rating elif self.main_stat == "DEF": self.stat_increase.defense_up += mpf("10") ** (5 * self.rating + 1) elif self.main_stat == "DEF%": self.stat_increase.defense_percentage_up += self.rating elif self.main_stat == "SPD": self.stat_increase.attack_speed_up += 2 * self.rating elif self.main_stat == "CR": self.stat_increase.crit_rate_up += 0.01 * self.rating elif self.main_stat == "CD": self.stat_increase.crit_damage_up += 0.05 * self.rating elif self.main_stat == "RES": self.stat_increase.resistance_up += 0.01 * self.rating elif self.main_stat == "ACC": self.stat_increase.accuracy_up += 0.01 * self.rating else: print("Cannot increase rune main stat: " + str(self.main_stat) + "\n") # Add new sub-stat if possible. 
        new_sub_stat: str = self.POTENTIAL_MAIN_STATS[random.randint(0, len(self.POTENTIAL_MAIN_STATS) - 1)]
        if new_sub_stat not in self.__sub_stats and len(self.__sub_stats) < self.MAX_SUB_STATS and \
                new_sub_stat != self.main_stat:
            self.__sub_stats.append(new_sub_stat)

            # Increase the value of the sub-stat attribute.
            self.increase_substat_attribute(new_sub_stat)
        return True

    def increase_substat_attribute(self, substat_name):
        # type: (str) -> None
        if substat_name == "HP":
            self.stat_increase.max_hp_up += mpf("10") ** (6 * self.rating + self.level)
        elif substat_name == "HP%":
            self.stat_increase.max_hp_percentage_up += self.rating
        elif substat_name == "MP":
            self.stat_increase.max_magic_points_up += mpf("10") ** (6 * self.rating + self.level)
        elif substat_name == "MP%":
            self.stat_increase.max_magic_points_percentage_up += self.rating
        elif substat_name == "ATK":
            self.stat_increase.attack_up += mpf("10") ** (5 * self.rating + 1)
        elif substat_name == "ATK%":
            self.stat_increase.attack_percentage_up += self.rating
        elif substat_name == "DEF":
            self.stat_increase.defense_up += mpf("10") ** (5 * self.rating + 1)
        elif substat_name == "DEF%":
            self.stat_increase.defense_percentage_up += self.rating
        elif substat_name == "SPD":
            self.stat_increase.attack_speed_up += 2 * self.rating
        elif substat_name == "CR":
            self.stat_increase.crit_rate_up += 0.01 * self.rating
        elif substat_name == "CD":
            self.stat_increase.crit_damage_up += 0.05 * self.rating
        elif substat_name == "RES":
            self.stat_increase.resistance_up += 0.01 * self.rating
        elif substat_name == "ACC":
            self.stat_increase.accuracy_up += 0.01 * self.rating
        else:
            print("No such sub-stat: " + str(substat_name) + "\n")


class SetEffect:
    """
    This class contains attributes of the set effect of a rune.
    """

    def __init__(self, max_hp_percentage_up=mpf("0"), max_magic_points_percentage_up=mpf("0"),
                 attack_percentage_up=mpf("0"), defense_percentage_up=mpf("0"),
                 attack_speed_percentage_up=mpf("0"), crit_rate_up=mpf("0"), crit_damage_up=mpf("0"),
                 resistance_up=mpf("0"), accuracy_up=mpf("0"), extra_turn_chance_up=mpf("0"),
                 counterattack_chance_up=mpf("0"), reflected_damage_percentage_up=mpf("0"),
                 life_drain_percentage_up=mpf("0"), crit_resist_up=mpf("0"), stun_rate_up=mpf("0")):
        # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf) -> None
        self.max_hp_percentage_up: mpf = max_hp_percentage_up
        self.max_magic_points_percentage_up: mpf = max_magic_points_percentage_up
        self.attack_percentage_up: mpf = attack_percentage_up
        self.defense_percentage_up: mpf = defense_percentage_up
        self.attack_speed_percentage_up: mpf = attack_speed_percentage_up
        self.crit_rate_up: mpf = crit_rate_up
        self.crit_damage_up: mpf = crit_damage_up
        self.resistance_up: mpf = resistance_up
        self.accuracy_up: mpf = accuracy_up
        self.extra_turn_chance_up: mpf = extra_turn_chance_up
        self.counterattack_chance_up: mpf = counterattack_chance_up
        self.reflected_damage_percentage_up: mpf = reflected_damage_percentage_up
        self.life_drain_percentage_up: mpf = life_drain_percentage_up
        self.crit_resist_up: mpf = crit_resist_up
        self.stun_rate_up: mpf = stun_rate_up

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )

    def clone(self):
        # type: () -> SetEffect
        return copy.deepcopy(self)


class StatIncrease:
    """
    This class contains attributes of the increase in stats of a rune.
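
    Every field defaults to mpf("0"), so a neutral increase is simply
    StatIncrease(). A hedged sketch with illustrative values:

        flat_hp = StatIncrease(max_hp_up=mpf("1e6"))
        percent_atk = StatIncrease(attack_percentage_up=mpf("10"))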
""" def __init__(self, max_hp_up=mpf("0"), max_hp_percentage_up=mpf("0"), max_magic_points_up=mpf("0"), max_magic_points_percentage_up=mpf("0"), attack_up=mpf("0"), attack_percentage_up=mpf("0"), defense_up=mpf("0"), defense_percentage_up=mpf("0"), attack_speed_up=mpf("0"), crit_rate_up=mpf("0"), crit_damage_up=mpf("0"), resistance_up=mpf("0"), accuracy_up=mpf("0")): # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf) -> None self.max_hp_up: mpf = max_hp_up self.max_hp_percentage_up: mpf = max_hp_percentage_up self.max_magic_points_up: mpf = max_magic_points_up self.max_magic_points_percentage_up: mpf = max_magic_points_percentage_up self.attack_up: mpf = attack_up self.attack_percentage_up: mpf = attack_percentage_up self.defense_up: mpf = defense_up self.defense_percentage_up: mpf = defense_percentage_up self.attack_speed_up: mpf = attack_speed_up self.crit_rate_up: mpf = crit_rate_up self.crit_damage_up: mpf = crit_damage_up self.resistance_up: mpf = resistance_up self.accuracy_up: mpf = accuracy_up def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> StatIncrease return copy.deepcopy(self) class AwakenShard(Item): """ This class contains attributes of a shard used to awaken a legendary creature. """ def __init__(self, gold_cost, gem_cost, legendary_creature_name): # type: (mpf, mpf, str) -> None Item.__init__(self, "AWAKEN SHARD", "A shard used to immediately awaken a legendary creature.", gold_cost, gem_cost) self.legendary_creature_name: str = legendary_creature_name def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class EXPShard(Item): """ This class contains attributes of a shard used to increase the EXP of legendary creatures. """ def __init__(self, gold_cost, gem_cost, exp_granted): # type: (mpf, mpf, mpf) -> None Item.__init__(self, "EXP SHARD", "A shard used to immediately increase the EXP of a legendary creature.", gold_cost, gem_cost) self.exp_granted: mpf = exp_granted def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class LevelUpShard(Item): """ This class contains attributes of a level up shard used to immediately level up a legendary creature. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Item.__init__(self, "LEVEL UP SHARD", "A shard used to immediately increase the level of a legendary creature.", gold_cost, gem_cost) def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class SkillLevelUpShard(Item): """ This class contains attributes of a skill level up shard to level up skills owned by legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Item.__init__(self, "SKILL LEVEL UP SHARD", "A shard used to immediately increase the level of a " "legendary creature' s skill.", gold_cost, gem_cost) def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class Scroll(Item): """ This class contains attributes of a scroll used to summon legendary creatures. 
""" POTENTIAL_NAMES: list = ["UNKNOWN", "MYSTICAL", "FIRE", "WATER", "WIND", "LIGHT & DARK", "LEGENDARY"] def __init__(self, name, description, gold_cost, gem_cost, potential_legendary_creatures): # type: (str, str, mpf, mpf, list) -> None scroll_name: str = str(name) + " SCROLL" if name in self.POTENTIAL_NAMES else str(self.POTENTIAL_NAMES[0]) + \ " SCROLL" Item.__init__(self, scroll_name, description, gold_cost, gem_cost) self.__potential_legendary_creatures: list = potential_legendary_creatures def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_potential_legendary_creatures(self): # type: () -> list return self.__potential_legendary_creatures class Team: """ This class contains attributes of a team brought to battles. """ MAX_LEGENDARY_CREATURES: int = 5 def __init__(self, legendary_creatures=None): # type: (list) -> None if legendary_creatures is None: legendary_creatures = [] self.__legendary_creatures: list = legendary_creatures if len(legendary_creatures) <= \ self.MAX_LEGENDARY_CREATURES else [] self.leader: LegendaryCreature or None = None if len(self.__legendary_creatures) == 0 else \ self.__legendary_creatures[0] def set_leader(self): # type: () -> None self.leader = None if len(self.__legendary_creatures) == 0 else \ self.__legendary_creatures[0] def __str__(self): res: str = "Team(leader=" + str(self.leader.name) + ", legendary_creatures=[" if self.leader is not None else \ "Team(leader=None, legendary_creatures=[" for i in range(len(self.__legendary_creatures)): curr_legendary_creature: LegendaryCreature = self.__legendary_creatures[i] if i < len(self.__legendary_creatures) - 1: res += str(curr_legendary_creature) + ", " else: res += str(curr_legendary_creature) + "])" return res def recover_all(self): # type: () -> None for legendary_creature in self.__legendary_creatures: legendary_creature.restore() def all_died(self): # type: () -> bool for legendary_creature in self.__legendary_creatures: if legendary_creature.get_is_alive(): return False return True def add_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if len(self.__legendary_creatures) < self.MAX_LEGENDARY_CREATURES: self.__legendary_creatures.append(legendary_creature) self.set_leader() return True return False def remove_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if legendary_creature in self.__legendary_creatures: self.__legendary_creatures.remove(legendary_creature) self.set_leader() return True return False def get_legendary_creatures(self): # type: () -> list return self.__legendary_creatures def clone(self): # type: () -> Team return copy.deepcopy(self) class LegendaryCreature: """ This class contains attributes of a legendary creature in this game. 
""" MIN_RATING: int = 1 MAX_RATING: int = 6 MIN_CRIT_RATE: mpf = mpf("0.15") MIN_CRIT_DAMAGE: mpf = mpf("1.5") MIN_RESISTANCE: mpf = mpf("0.15") MAX_RESISTANCE: mpf = mpf("1") MIN_ACCURACY: mpf = mpf("0") MAX_ACCURACY: mpf = mpf("1") MIN_ATTACK_GAUGE: mpf = mpf("0") FULL_ATTACK_GAUGE: mpf = mpf("1") MIN_EXTRA_TURN_CHANCE: mpf = mpf("0") MAX_EXTRA_TURN_CHANCE: mpf = mpf("0.5") MIN_COUNTERATTACK_CHANCE: mpf = mpf("0") MAX_COUNTERATTACK_CHANCE: mpf = mpf("1") MIN_REFLECTED_DAMAGE_PERCENTAGE: mpf = mpf("0") MIN_LIFE_DRAIN_PERCENTAGE: mpf = mpf("0") MIN_CRIT_RESIST: mpf = mpf("0") MAX_CRIT_RESIST: mpf = mpf("1") MIN_GLANCING_HIT_CHANCE: mpf = mpf("0") MIN_BENEFICIAL_EFFECTS: int = 0 MAX_BENEFICIAL_EFFECTS: int = 10 MIN_HARMFUL_EFFECTS: int = 0 MAX_HARMFUL_EFFECTS: int = 10 POTENTIAL_ELEMENTS: list = ["FIRE", "WATER", "WIND", "LIGHT", "DARK", "NEUTRAL"] POTENTIAL_TYPES: list = ["NORMAL", "MINIBOSS", "BOSS"] DEFAULT_MAX_HP_PERCENTAGE_UP: mpf = mpf("0") DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP: mpf = mpf("0") DEFAULT_ATTACK_POWER_PERCENTAGE_UP: mpf = mpf("0") DEFAULT_ATTACK_SPEED_PERCENTAGE_UP: mpf = mpf("0") DEFAULT_DEFENSE_PERCENTAGE_UP: mpf = mpf("0") DEFAULT_CRIT_DAMAGE_UP: mpf = mpf("0") def __init__(self, name, element, rating, legendary_creature_type, max_hp, max_magic_points, attack_power, defense, attack_speed, skills, awaken_bonus): # type: (str, str, int, str, mpf, mpf, mpf, mpf, mpf, list, AwakenBonus) -> None self.name: str = name self.element: str = element if element in self.POTENTIAL_ELEMENTS else self.POTENTIAL_ELEMENTS[0] self.legendary_creature_type: str = legendary_creature_type if legendary_creature_type in \ self.POTENTIAL_TYPES else self.POTENTIAL_TYPES[0] self.rating: int = rating if self.MIN_RATING <= rating <= self.MAX_RATING else self.MIN_RATING self.level: int = 1 self.max_level: int = 10 * triangular(self.rating) if self.rating < self.MAX_RATING else float('inf') self.exp: mpf = mpf("0") self.required_exp: mpf = mpf("1e6") self.exp_per_second: mpf = mpf("0") self.curr_hp: mpf = max_hp self.max_hp: mpf = max_hp self.curr_magic_points: mpf = max_magic_points self.max_magic_points: mpf = max_magic_points self.attack_power: mpf = attack_power self.defense: mpf = defense self.attack_speed: mpf = attack_speed self.crit_rate: mpf = self.MIN_CRIT_RATE self.crit_damage: mpf = self.MIN_CRIT_DAMAGE self.resistance: mpf = self.MIN_RESISTANCE self.accuracy: mpf = self.MIN_ACCURACY self.extra_turn_chance: mpf = self.MIN_EXTRA_TURN_CHANCE self.counterattack_chance: mpf = self.MIN_COUNTERATTACK_CHANCE self.reflected_damage_percentage: mpf = self.MIN_REFLECTED_DAMAGE_PERCENTAGE self.life_drain_percentage: mpf = self.MIN_LIFE_DRAIN_PERCENTAGE self.crit_resist: mpf = self.MIN_CRIT_RESIST self.stun_rate: mpf = mpf("0") self.glancing_hit_chance: mpf = self.MIN_GLANCING_HIT_CHANCE self.__beneficial_effects: list = [] self.__harmful_effects: list = [] self.__skills: list = skills self.awaken_bonus: AwakenBonus = awaken_bonus self.__runes: dict = {} # initial value self.max_hp_percentage_up: mpf = self.DEFAULT_MAX_HP_PERCENTAGE_UP self.max_magic_points_percentage_up: mpf = self.DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP self.attack_power_percentage_up: mpf = self.DEFAULT_ATTACK_POWER_PERCENTAGE_UP self.attack_power_percentage_down: mpf = mpf("0") self.attack_speed_percentage_up: mpf = self.DEFAULT_ATTACK_SPEED_PERCENTAGE_UP self.attack_speed_percentage_down: mpf = mpf("0") self.defense_percentage_up: mpf = self.DEFAULT_DEFENSE_PERCENTAGE_UP self.defense_percentage_down: mpf = mpf("0") 
        self.crit_rate_up: mpf = mpf("0")
        self.crit_damage_up: mpf = self.DEFAULT_CRIT_DAMAGE_UP
        self.resistance_up: mpf = mpf("0")
        self.accuracy_up: mpf = mpf("0")
        self.extra_turn_chance_up: mpf = mpf("0")
        self.counterattack_chance_up: mpf = mpf("0")
        self.reflected_damage_percentage_up: mpf = mpf("0")
        self.life_drain_percentage_up: mpf = mpf("0")
        self.crit_resist_up: mpf = mpf("0")
        self.shield_percentage: mpf = mpf("0")
        self.damage_percentage_per_turn: mpf = mpf("0")
        self.heal_percentage_per_turn: mpf = mpf("0")
        self.has_awakened: bool = False
        self.can_move: bool = True
        self.can_be_healed: bool = True
        self.can_receive_beneficial_effect: bool = True
        self.can_receive_damage: bool = True
        self.can_receive_harmful_effect: bool = True
        self.can_die: bool = True
        self.damage_received_percentage_up: mpf = mpf("0")
        self.attack_gauge: mpf = self.MIN_ATTACK_GAUGE
        self.can_use_skills_with_cooltime: bool = True
        self.can_use_passive_skills: bool = True
        self.passive_skills_activated: bool = False
        self.leader_skills_activated: bool = False
        self.placed_in_training_area: bool = False
        self.corresponding_team: Team = Team()

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )

    def awaken(self):
        # type: () -> bool
        if not self.has_awakened:
            self.name = "AWAKENED " + str(self.name)
            self.max_hp *= 1 + self.awaken_bonus.max_hp_percentage_up / 100
            self.max_magic_points *= 1 + self.awaken_bonus.max_magic_points_percentage_up / 100
            self.attack_power *= 1 + self.awaken_bonus.attack_power_percentage_up / 100
            self.defense *= 1 + self.awaken_bonus.defense_percentage_up / 100
            self.attack_speed += self.awaken_bonus.attack_speed_up
            self.crit_rate += self.awaken_bonus.crit_rate_up
            self.crit_damage += self.awaken_bonus.crit_damage_up
            self.resistance += self.awaken_bonus.resistance_up
            if self.resistance > self.MAX_RESISTANCE:
                self.resistance = self.MAX_RESISTANCE
            self.accuracy += self.awaken_bonus.accuracy_up
            if self.accuracy > self.MAX_ACCURACY:
                self.accuracy = self.MAX_ACCURACY
            self.__skills.append(self.awaken_bonus.new_skill_gained)
            self.restore()
            self.has_awakened = True
            return True
        return False

    def evolve(self):
        # type: () -> bool
        if self.level == self.max_level and self.rating < self.MAX_RATING and self.exp >= self.required_exp:
            self.rating += 1
            self.level = 1
            self.max_level = 10 * triangular(self.rating) if self.rating < self.MAX_RATING else float('inf')
            self.exp = mpf("0")
            self.required_exp = mpf("1e6")
            # Copy the runes first; remove_rune() mutates self.__runes, so iterating
            # over a snapshot of the keys avoids a RuntimeError and keeps the copy intact.
            temp_runes: dict = dict(self.__runes)
            for slot_number in list(self.__runes.keys()):
                self.remove_rune(slot_number)
            self.attack_power *= triangular(self.level) + 1
            self.max_hp *= triangular(self.level) + 1
            self.max_magic_points *= triangular(self.level) + 1
            self.defense *= triangular(self.level) + 1
            self.attack_speed += 3
            for rune in temp_runes.values():
                self.place_rune(rune)
            self.restore()
            return True
        return False

    def restore(self):
        # type: () -> None
        self.curr_hp = self.max_hp * (1 + self.max_hp_percentage_up / 100)
        self.curr_magic_points = self.max_magic_points * (1 + self.max_magic_points_percentage_up / 100)
        self.glancing_hit_chance = self.MIN_GLANCING_HIT_CHANCE
        self.max_hp_percentage_up = self.DEFAULT_MAX_HP_PERCENTAGE_UP
        self.max_magic_points_percentage_up = self.DEFAULT_MAX_MAGIC_POINTS_PERCENTAGE_UP
        self.attack_power_percentage_up = self.DEFAULT_ATTACK_POWER_PERCENTAGE_UP
        self.attack_power_percentage_down = mpf("0")
        self.attack_speed_percentage_up = self.DEFAULT_ATTACK_SPEED_PERCENTAGE_UP
        self.attack_speed_percentage_down = mpf("0")
        self.defense_percentage_up = self.DEFAULT_DEFENSE_PERCENTAGE_UP
        self.defense_percentage_down = mpf("0")
        self.crit_rate_up = mpf("0")
        self.crit_damage_up = self.DEFAULT_CRIT_DAMAGE_UP
        self.resistance_up = mpf("0")
        self.accuracy_up = mpf("0")
        self.extra_turn_chance_up = mpf("0")
        self.counterattack_chance_up = mpf("0")
        self.reflected_damage_percentage_up = mpf("0")
        self.life_drain_percentage_up = mpf("0")
        self.crit_resist_up = mpf("0")
        self.shield_percentage = mpf("0")
        self.damage_percentage_per_turn = mpf("0")
        self.heal_percentage_per_turn = mpf("0")
        self.can_move = True
        self.can_be_healed = True
        self.can_receive_beneficial_effect = True
        self.can_receive_damage = True
        self.can_receive_harmful_effect = True
        self.can_die = True
        self.damage_received_percentage_up = mpf("0")
        self.__beneficial_effects = []
        self.__harmful_effects = []
        self.attack_gauge: mpf = self.MIN_ATTACK_GAUGE
        self.can_use_skills_with_cooltime: bool = True
        self.can_use_passive_skills: bool = True

    def use_passive_skills(self):
        # type: () -> bool
        if self.can_use_passive_skills and not self.passive_skills_activated:
            for skill in self.__skills:
                if isinstance(skill, PassiveSkill):
                    self.max_hp_percentage_up += skill.passive_skill_effect.max_hp_percentage_up
                    self.max_magic_points_percentage_up += skill.passive_skill_effect.max_magic_points_percentage_up
                    self.attack_power_percentage_up += skill.passive_skill_effect.attack_power_percentage_up
                    self.defense_percentage_up += skill.passive_skill_effect.defense_percentage_up
                    self.attack_speed_percentage_up += skill.passive_skill_effect.attack_speed_percentage_up
                    self.crit_rate_up += skill.passive_skill_effect.crit_rate_up
                    self.crit_damage_up += skill.passive_skill_effect.crit_damage_up
                    self.resistance_up += skill.passive_skill_effect.resistance_up
                    self.accuracy_up += skill.passive_skill_effect.accuracy_up
                    self.extra_turn_chance_up += skill.passive_skill_effect.extra_turn_chance_up
            self.passive_skills_activated = True
            return True
        return False

    def deactivate_passive_skills(self):
        # type: () -> bool
        if self.passive_skills_activated:
            for skill in self.__skills:
                if isinstance(skill, PassiveSkill):
                    self.max_hp_percentage_up -= skill.passive_skill_effect.max_hp_percentage_up
                    self.max_magic_points_percentage_up -= skill.passive_skill_effect.max_magic_points_percentage_up
                    self.attack_power_percentage_up -= skill.passive_skill_effect.attack_power_percentage_up
                    self.defense_percentage_up -= skill.passive_skill_effect.defense_percentage_up
                    self.attack_speed_percentage_up -= skill.passive_skill_effect.attack_speed_percentage_up
                    self.crit_rate_up -= skill.passive_skill_effect.crit_rate_up
                    self.crit_damage_up -= skill.passive_skill_effect.crit_damage_up
                    self.resistance_up -= skill.passive_skill_effect.resistance_up
                    self.accuracy_up -= skill.passive_skill_effect.accuracy_up
                    self.extra_turn_chance_up -= skill.passive_skill_effect.extra_turn_chance_up
            self.passive_skills_activated = False
            return True
        return False

    def use_leader_skills(self):
        # type: () -> bool
        if not self.leader_skills_activated:
            for legendary_creature in self.corresponding_team.get_legendary_creatures():
                for skill in self.__skills:
                    if isinstance(skill, LeaderSkill):
                        legendary_creature.max_hp_percentage_up += skill.leader_skill_effect.max_hp_percentage_up
                        legendary_creature.max_magic_points_percentage_up += \
                            skill.leader_skill_effect.max_magic_points_percentage_up
                        legendary_creature.attack_power_percentage_up += \
                            skill.leader_skill_effect.attack_power_percentage_up
                        legendary_creature.defense_percentage_up += skill.leader_skill_effect.defense_percentage_up
                        legendary_creature.attack_speed_percentage_up += \
                            skill.leader_skill_effect.attack_speed_percentage_up
                        legendary_creature.crit_rate_up += skill.leader_skill_effect.crit_rate_up
                        legendary_creature.crit_damage_up += skill.leader_skill_effect.crit_damage_up
                        legendary_creature.resistance_up += skill.leader_skill_effect.resistance_up
                        legendary_creature.accuracy_up += skill.leader_skill_effect.accuracy_up
            self.leader_skills_activated = True
            return True
        return False

    def deactivate_leader_skills(self):
        # type: () -> bool
        if self.leader_skills_activated:
            for legendary_creature in self.corresponding_team.get_legendary_creatures():
                for skill in self.__skills:
                    if isinstance(skill, LeaderSkill):
                        legendary_creature.max_hp_percentage_up -= skill.leader_skill_effect.max_hp_percentage_up
                        legendary_creature.max_magic_points_percentage_up -= \
                            skill.leader_skill_effect.max_magic_points_percentage_up
                        legendary_creature.attack_power_percentage_up -= \
                            skill.leader_skill_effect.attack_power_percentage_up
                        legendary_creature.defense_percentage_up -= skill.leader_skill_effect.defense_percentage_up
                        legendary_creature.attack_speed_percentage_up -= \
                            skill.leader_skill_effect.attack_speed_percentage_up
                        legendary_creature.crit_rate_up -= skill.leader_skill_effect.crit_rate_up
                        legendary_creature.crit_damage_up -= skill.leader_skill_effect.crit_damage_up
                        legendary_creature.resistance_up -= skill.leader_skill_effect.resistance_up
                        legendary_creature.accuracy_up -= skill.leader_skill_effect.accuracy_up
            self.leader_skills_activated = False
            return True
        return False

    def get_is_alive(self):
        # type: () -> bool
        return self.curr_hp > 0

    def recover_magic_points(self):
        # type: () -> None
        self.curr_magic_points += self.max_magic_points / 12
        if self.curr_magic_points >= self.max_magic_points:
            self.curr_magic_points = self.max_magic_points

    def get_beneficial_effects(self):
        # type: () -> list
        return self.__beneficial_effects

    def get_harmful_effects(self):
        # type: () -> list
        return self.__harmful_effects

    def add_beneficial_effect(self, beneficial_effect):
        # type: (BeneficialEffect) -> bool
        if len(self.__beneficial_effects) < self.MAX_BENEFICIAL_EFFECTS:
            if beneficial_effect.name in [b.name for b in self.__beneficial_effects] and not \
                    beneficial_effect.can_be_stacked:
                return False
            self.attack_power_percentage_up += beneficial_effect.attack_power_percentage_up
            self.attack_speed_percentage_up += beneficial_effect.attack_speed_percentage_up
            self.defense_percentage_up += beneficial_effect.defense_percentage_up
            self.crit_rate_up += beneficial_effect.crit_rate_up
            if beneficial_effect.prevents_damage:
                self.can_receive_damage = False
            if beneficial_effect.blocks_debuffs:
                self.can_receive_harmful_effect = False
            if beneficial_effect.prevents_death:
                self.can_die = False
            self.heal_percentage_per_turn += beneficial_effect.heal_percentage_per_turn
            self.counterattack_chance_up += beneficial_effect.counterattack_chance_up
            self.reflected_damage_percentage_up += beneficial_effect.reflected_damage_percentage_up
            self.life_drain_percentage_up += beneficial_effect.life_drain_percentage_up
            self.crit_resist_up += beneficial_effect.crit_resist_up
            self.shield_percentage += beneficial_effect.shield_percentage_up
            self.__beneficial_effects.append(beneficial_effect)
            return True
        return False

    def remove_beneficial_effect(self, beneficial_effect):
        # type: (BeneficialEffect) -> bool
        if beneficial_effect in self.__beneficial_effects:
            self.attack_power_percentage_up -= beneficial_effect.attack_power_percentage_up
            self.attack_speed_percentage_up -= beneficial_effect.attack_speed_percentage_up
            self.defense_percentage_up -= beneficial_effect.defense_percentage_up
            self.crit_rate_up -= beneficial_effect.crit_rate_up
            if beneficial_effect.prevents_damage:
                self.can_receive_damage = True
            if beneficial_effect.blocks_debuffs:
                self.can_receive_harmful_effect = True
            if beneficial_effect.prevents_death:
                self.can_die = True
            self.heal_percentage_per_turn -= beneficial_effect.heal_percentage_per_turn
            self.counterattack_chance_up -= beneficial_effect.counterattack_chance_up
            self.reflected_damage_percentage_up -= beneficial_effect.reflected_damage_percentage_up
            self.life_drain_percentage_up -= beneficial_effect.life_drain_percentage_up
            self.crit_resist_up -= beneficial_effect.crit_resist_up
            self.shield_percentage -= beneficial_effect.shield_percentage_up
            self.__beneficial_effects.remove(beneficial_effect)
            return True
        return False

    def add_harmful_effect(self, harmful_effect):
        # type: (HarmfulEffect) -> bool
        if len(self.__harmful_effects) < self.MAX_HARMFUL_EFFECTS:
            if harmful_effect.name in [h.name for h in self.__harmful_effects] and not \
                    harmful_effect.can_be_stacked:
                return False
            self.attack_power_percentage_down += harmful_effect.attack_power_percentage_down
            self.attack_speed_percentage_down += harmful_effect.attack_speed_percentage_down
            self.defense_percentage_down += harmful_effect.defense_percentage_down
            self.glancing_hit_chance += harmful_effect.glancing_hit_chance_up
            if harmful_effect.blocks_beneficial_effects:
                self.can_receive_beneficial_effect = False
            self.damage_received_percentage_up += harmful_effect.damage_received_percentage_up
            if harmful_effect.blocks_heal:
                self.can_be_healed = False
            if harmful_effect.blocks_passive_skills:
                self.can_use_passive_skills = False
                self.deactivate_passive_skills()
            if harmful_effect.blocks_skills_with_cooltime:
                self.can_use_skills_with_cooltime = False
            self.damage_percentage_per_turn += harmful_effect.damage_percentage_per_turn
            if harmful_effect.prevents_moves:
                self.can_move = False
            self.__harmful_effects.append(harmful_effect)
            return True
        return False

    def remove_harmful_effect(self, harmful_effect):
        # type: (HarmfulEffect) -> bool
        if harmful_effect in self.__harmful_effects:
            self.attack_power_percentage_down -= harmful_effect.attack_power_percentage_down
            self.attack_speed_percentage_down -= harmful_effect.attack_speed_percentage_down
            self.defense_percentage_down -= harmful_effect.defense_percentage_down
            self.glancing_hit_chance -= harmful_effect.glancing_hit_chance_up
            if harmful_effect.blocks_beneficial_effects:
                self.can_receive_beneficial_effect = True
            self.damage_received_percentage_up -= harmful_effect.damage_received_percentage_up
            if harmful_effect.blocks_heal:
                self.can_be_healed = True
            if harmful_effect.blocks_passive_skills:
                self.can_use_passive_skills = True
                self.use_passive_skills()
            if harmful_effect.blocks_skills_with_cooltime:
                self.can_use_skills_with_cooltime = True
            self.damage_percentage_per_turn -= harmful_effect.damage_percentage_per_turn
            if harmful_effect.prevents_moves:
                self.can_move = True
            self.__harmful_effects.remove(harmful_effect)
            return True
        return False

    def get_skills(self):
        # type: () -> list
        return self.__skills

    def add_skill(self, skill):
        # type: (Skill) -> None
        self.__skills.append(skill)

    def get_runes(self):
        # type: () -> dict
        return self.__runes

    def place_rune(self, rune):
        # type: (Rune) -> bool
        if rune.already_placed:
            return False
        if rune.slot_number in self.__runes.keys():
            self.remove_rune(rune.slot_number)
        self.__runes[rune.slot_number] = rune
        self.max_hp *= 1 + (rune.stat_increase.max_hp_percentage_up / 100)
        self.max_hp += rune.stat_increase.max_hp_up
        self.max_magic_points *= 1 + (rune.stat_increase.max_magic_points_percentage_up / 100)
        self.max_magic_points += rune.stat_increase.max_magic_points_up
        self.attack_power *= 1 + (rune.stat_increase.attack_percentage_up / 100)
        self.attack_power += rune.stat_increase.attack_up
        self.defense *= 1 + (rune.stat_increase.defense_percentage_up / 100)
        self.defense += rune.stat_increase.defense_up
        self.attack_speed += rune.stat_increase.attack_speed_up
        self.crit_rate += rune.stat_increase.crit_rate_up
        self.crit_damage += rune.stat_increase.crit_damage_up
        self.resistance += rune.stat_increase.resistance_up
        if self.resistance >= self.MAX_RESISTANCE:
            self.resistance = self.MAX_RESISTANCE
        self.accuracy += rune.stat_increase.accuracy_up
        if self.accuracy >= self.MAX_ACCURACY:
            self.accuracy = self.MAX_ACCURACY

        # Try to activate the set effect of the rune if possible.
        matching_runes: int = sum(1 for curr_rune in self.__runes.values() if curr_rune.set_name == rune.set_name)
        if matching_runes >= rune.set_size and not rune.set_effect_is_active:
            self.max_hp *= 1 + (rune.set_effect.max_hp_percentage_up / 100)
            self.max_magic_points *= 1 + (rune.set_effect.max_magic_points_percentage_up / 100)
            self.attack_power *= 1 + (rune.set_effect.attack_percentage_up / 100)
            self.defense *= 1 + (rune.set_effect.defense_percentage_up / 100)
            self.attack_speed *= 1 + (rune.set_effect.attack_speed_percentage_up / 100)
            self.crit_rate += rune.set_effect.crit_rate_up
            self.crit_damage += rune.set_effect.crit_damage_up
            self.resistance += rune.set_effect.resistance_up
            if self.resistance >= self.MAX_RESISTANCE:
                self.resistance = self.MAX_RESISTANCE
            self.accuracy += rune.set_effect.accuracy_up
            if self.accuracy >= self.MAX_ACCURACY:
                self.accuracy = self.MAX_ACCURACY
            self.extra_turn_chance += rune.set_effect.extra_turn_chance_up
            if self.extra_turn_chance >= self.MAX_EXTRA_TURN_CHANCE:
                self.extra_turn_chance = self.MAX_EXTRA_TURN_CHANCE
            self.counterattack_chance += rune.set_effect.counterattack_chance_up
            if self.counterattack_chance >= self.MAX_COUNTERATTACK_CHANCE:
                self.counterattack_chance = self.MAX_COUNTERATTACK_CHANCE
            self.reflected_damage_percentage += rune.set_effect.reflected_damage_percentage_up
            self.life_drain_percentage += rune.set_effect.life_drain_percentage_up
            self.crit_resist += rune.set_effect.crit_resist_up
            if self.crit_resist >= self.MAX_CRIT_RESIST:
                self.crit_resist = self.MAX_CRIT_RESIST
            self.stun_rate += rune.set_effect.stun_rate_up
            rune.set_effect_is_active = True
            count: int = 0
            while count < rune.set_size:
                for other_rune in self.__runes.values():
                    if other_rune.set_name == rune.set_name:
                        other_rune.set_effect_is_active = True
                        count += 1
        self.restore()
        rune.already_placed = True
        return True

    def level_up(self):
        # type: () -> None
        while self.exp >= self.required_exp and self.level < self.max_level:
            self.level += 1
            self.required_exp *= mpf("10") ** self.level
            # Copy the runes first; remove_rune() mutates self.__runes, so iterating
            # over a snapshot of the keys avoids a RuntimeError and keeps the copy intact.
            temp_runes: dict = dict(self.__runes)
            for slot_number in list(self.__runes.keys()):
                self.remove_rune(slot_number)
            self.attack_power *= triangular(self.level)
            self.max_hp *= triangular(self.level)
            self.max_magic_points *= triangular(self.level)
            self.defense *= triangular(self.level)
            self.attack_speed += 2
            for rune in temp_runes.values():
                self.place_rune(rune)
            self.restore()

    def level_up_rune(self, slot_number):
        # type: (int) -> bool
        if slot_number not in self.__runes.keys():
            return False
        current_rune: Rune = self.__runes[slot_number]
        self.remove_rune(slot_number)
        success: bool = current_rune.level_up()
        self.place_rune(current_rune)
        return success

    def remove_rune(self, slot_number):
        # type: (int) -> bool
        if slot_number in self.__runes.keys():
            # Remove the rune at slot number 'slot_number'.
            current_rune: Rune = self.__runes[slot_number]
            self.max_hp -= current_rune.stat_increase.max_hp_up
            self.max_hp /= 1 + (current_rune.stat_increase.max_hp_percentage_up / 100)
            self.max_magic_points -= current_rune.stat_increase.max_magic_points_up
            self.max_magic_points /= 1 + (current_rune.stat_increase.max_magic_points_percentage_up / 100)
            self.attack_power -= current_rune.stat_increase.attack_up
            self.attack_power /= 1 + (current_rune.stat_increase.attack_percentage_up / 100)
            self.defense -= current_rune.stat_increase.defense_up
            self.defense /= 1 + (current_rune.stat_increase.defense_percentage_up / 100)
            self.attack_speed -= current_rune.stat_increase.attack_speed_up
            self.crit_rate -= current_rune.stat_increase.crit_rate_up
            if self.crit_rate <= self.MIN_CRIT_RATE:
                self.crit_rate = self.MIN_CRIT_RATE
            self.crit_damage -= current_rune.stat_increase.crit_damage_up
            if self.crit_damage <= self.MIN_CRIT_DAMAGE:
                self.crit_damage = self.MIN_CRIT_DAMAGE
            self.resistance -= current_rune.stat_increase.resistance_up
            if self.resistance <= self.MIN_RESISTANCE:
                self.resistance = self.MIN_RESISTANCE
            self.accuracy -= current_rune.stat_increase.accuracy_up
            if self.accuracy <= self.MIN_ACCURACY:
                self.accuracy = self.MIN_ACCURACY

            # Try to deactivate the set effect of the rune if possible.
            matching_runes: int = sum(1 for rune in self.__runes.values() if rune.set_name == current_rune.set_name)
            if matching_runes >= current_rune.set_size and current_rune.set_effect_is_active:
                # Undo the set effect using the rune's set-effect bonuses, mirroring place_rune() above.
                self.max_hp /= 1 + (current_rune.set_effect.max_hp_percentage_up / 100)
                self.max_magic_points /= 1 + (current_rune.set_effect.max_magic_points_percentage_up / 100)
                self.attack_power /= 1 + (current_rune.set_effect.attack_percentage_up / 100)
                self.defense /= 1 + (current_rune.set_effect.defense_percentage_up / 100)
                self.attack_speed /= 1 + (current_rune.set_effect.attack_speed_percentage_up / 100)
                self.crit_rate -= current_rune.set_effect.crit_rate_up
                if self.crit_rate <= self.MIN_CRIT_RATE:
                    self.crit_rate = self.MIN_CRIT_RATE
                self.crit_damage -= current_rune.set_effect.crit_damage_up
                if self.crit_damage <= self.MIN_CRIT_DAMAGE:
                    self.crit_damage = self.MIN_CRIT_DAMAGE
                self.resistance -= current_rune.set_effect.resistance_up
                if self.resistance <= self.MIN_RESISTANCE:
                    self.resistance = self.MIN_RESISTANCE
                self.accuracy -= current_rune.set_effect.accuracy_up
                if self.accuracy <= self.MIN_ACCURACY:
                    self.accuracy = self.MIN_ACCURACY
                self.extra_turn_chance -= current_rune.set_effect.extra_turn_chance_up
                if self.extra_turn_chance <= self.MIN_EXTRA_TURN_CHANCE:
                    self.extra_turn_chance = self.MIN_EXTRA_TURN_CHANCE
                self.counterattack_chance -= current_rune.set_effect.counterattack_chance_up
                if self.counterattack_chance <= self.MIN_COUNTERATTACK_CHANCE:
                    self.counterattack_chance = self.MIN_COUNTERATTACK_CHANCE
                self.reflected_damage_percentage -= current_rune.set_effect.reflected_damage_percentage_up
                self.life_drain_percentage -= current_rune.set_effect.life_drain_percentage_up
                self.crit_resist -= current_rune.set_effect.crit_resist_up
                if self.crit_resist <= self.MIN_CRIT_RESIST:
                    self.crit_resist = self.MIN_CRIT_RESIST
                self.stun_rate -= current_rune.set_effect.stun_rate_up
                current_rune.set_effect_is_active = False
                count: int = 0
                while count < current_rune.set_size:
                    for other_rune in self.__runes.values():
                        if other_rune.set_name == current_rune.set_name:
                            other_rune.set_effect_is_active = False
                            count += 1
            self.restore()
            self.__runes.pop(current_rune.slot_number)
            current_rune.already_placed = False
            return True
        return False

    def have_turn(self, other, active_skill, action_name):
        # type: (LegendaryCreature, ActiveSkill or None, str) -> bool
        if self.can_use_passive_skills and not self.passive_skills_activated:
            self.use_passive_skills()

        # Iterate over copies: removing an effect mutates the underlying lists.
        for beneficial_effect in list(self.get_beneficial_effects()):
            beneficial_effect.number_of_turns -= 1
            if beneficial_effect.number_of_turns <= 0:
                self.remove_beneficial_effect(beneficial_effect)

        for harmful_effect in list(self.get_harmful_effects()):
            harmful_effect.number_of_turns -= 1
            if harmful_effect.number_of_turns <= 0:
                self.remove_harmful_effect(harmful_effect)

        if self.can_move:
            if action_name == "NORMAL ATTACK":
                self.normal_attack(other)
            elif action_name == "NORMAL HEAL":
                self.normal_heal(other)
            elif action_name == "USE SKILL" and isinstance(active_skill, ActiveSkill):
                self.use_skill(other, active_skill)
            else:
                pass
            return True
        return False

    def counterattack(self, other):
        # type: (LegendaryCreature) -> bool
        if self.can_move:
            first_attacking_active_skill: ActiveSkill or None = None  # initial value
            for skill in self.get_skills():
                if isinstance(skill, ActiveSkill):
                    if skill.active_skill_type == "ATTACK":
                        first_attacking_active_skill = skill

            if first_attacking_active_skill is None:
                self.normal_attack(other)
            else:
                assert isinstance(first_attacking_active_skill, ActiveSkill)
                if self.curr_magic_points < first_attacking_active_skill.magic_points_cost:
                    self.normal_attack(other)
                else:
                    self.use_skill(other, first_attacking_active_skill)
            return True
        else:
            return False

    def normal_attack(self, other):
        # type: (LegendaryCreature) -> None
        action: Action = Action("NORMAL ATTACK")
        action.execute(self, other)

    def normal_heal(self, other):
        # type: (LegendaryCreature) -> None
        action: Action = Action("NORMAL HEAL")
        action.execute(self, other)

    def use_skill(self, other, active_skill):
        # type: (LegendaryCreature, ActiveSkill) -> bool
        if active_skill not in self.__skills:
            return False
        if self.curr_magic_points < active_skill.magic_points_cost:
            return False
        action: Action = Action("USE SKILL")
        action.execute(self, other, active_skill)
        self.curr_magic_points -= active_skill.magic_points_cost
        return True

    def clone(self):
        # type: () -> LegendaryCreature
        return copy.deepcopy(self)


class FusionLegendaryCreature(LegendaryCreature):
    """
    This class contains attributes of a fusion legendary creature.
    """

    def __init__(self, name, element, rating, legendary_creature_type, max_hp, max_magic_points, attack_power,
                 defense, attack_speed, skills, awaken_bonus, material_legendary_creatures):
        # type: (str, str, int, str, mpf, mpf, mpf, mpf, mpf, list, AwakenBonus, list) -> None
        LegendaryCreature.__init__(self, name, element, rating, legendary_creature_type, max_hp, max_magic_points,
                                   attack_power, defense, attack_speed, skills, awaken_bonus)
        self.__material_legendary_creatures: list = material_legendary_creatures

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )

    def get_material_legendary_creatures(self):
        # type: () -> list
        return self.__material_legendary_creatures


class Skill:
    """
    This class contains attributes of a skill legendary creatures have.
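
    Skill is the common base class; concrete behaviour lives in ActiveSkill,
    PassiveSkill and LeaderSkill below. A minimal, illustrative construction:

        skill = Skill("Strike", "A basic strike.", mpf("100"))
        skill.level   # -> 1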
""" def __init__(self, name, description, magic_points_cost): # type: (str, str, mpf) -> None self.name: str = name self.description: str = description self.magic_points_cost: mpf = magic_points_cost self.level: int = 1 self.is_active: bool = True def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None pass def clone(self): # type: () -> Skill return copy.deepcopy(self) class ActiveSkill(Skill): """ This class contains attributes of an active skill which is manually used. """ POSSIBLE_ACTIVE_SKILL_TYPES: list = ["ATTACK", "HEAL", "ALLIES EFFECT", "ENEMIES EFFECT"] def __init__(self, name, description, active_skill_type, is_aoe, magic_points_cost, max_cooltime, damage_multiplier, beneficial_effects_to_allies, harmful_effects_to_enemies, allies_attack_gauge_up, enemies_attack_gauge_down, heal_amount_to_allies, does_ignore_enemies_defense, does_ignore_shield, does_ignore_invincibility): # type: (str, str, str, bool, mpf, int, DamageMultiplier, list, list, mpf, mpf, mpf, bool, bool, bool) -> None Skill.__init__(self, name, description, magic_points_cost) self.active_skill_type: str = active_skill_type if active_skill_type in self.POSSIBLE_ACTIVE_SKILL_TYPES \ else self.POSSIBLE_ACTIVE_SKILL_TYPES[0] self.is_aoe: bool = is_aoe self.cooltime: int = max_cooltime self.max_cooltime: int = max_cooltime self.damage_multiplier: DamageMultiplier = damage_multiplier if self.active_skill_type == "ATTACK" else \ DamageMultiplier() self.__beneficial_effects_to_allies: list = beneficial_effects_to_allies if self.active_skill_type == \ "ATTACK" or \ self.active_skill_type == \ "ALLIES EFFECT" else [] self.__harmful_effects_to_enemies: list = harmful_effects_to_enemies if self.active_skill_type == "ATTACK" or \ self.active_skill_type == \ "ENEMIES EFFECT" else [] self.allies_attack_gauge_up: mpf = allies_attack_gauge_up if self.active_skill_type == \ "ALLIES EFFECT" else mpf("0") self.enemies_attack_gauge_down: mpf = enemies_attack_gauge_down if self.active_skill_type == "ATTACK" or \ self.active_skill_type == "ENEMIES EFFECT" \ else mpf("0") self.heal_amount_to_allies: mpf = heal_amount_to_allies if self.active_skill_type == \ "HEAL" else mpf("0") self.does_ignore_enemies_defense: bool = does_ignore_enemies_defense self.does_ignore_shield: bool = does_ignore_shield self.does_ignore_invincibility: bool = does_ignore_invincibility def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_beneficial_effects_to_allies(self): # type: () -> list return self.__beneficial_effects_to_allies def get_harmful_effects_to_enemies(self): # type: () -> list return self.__harmful_effects_to_enemies def level_up(self): # type: () -> None self.level += 1 self.damage_multiplier.multiplier_to_self_max_hp *= mpf("1.25") self.damage_multiplier.multiplier_to_enemy_max_hp *= mpf("1.25") self.damage_multiplier.multiplier_to_self_attack_power *= mpf("1.25") self.damage_multiplier.multiplier_to_enemy_attack_power *= mpf("1.25") self.damage_multiplier.multiplier_to_self_defense *= mpf("1.25") self.damage_multiplier.multiplier_to_enemy_defense *= mpf("1.25") self.damage_multiplier.multiplier_to_self_max_magic_points *= mpf("1.25") self.damage_multiplier.multiplier_to_enemy_max_magic_points *= mpf("1.25") self.damage_multiplier.multiplier_to_self_attack_speed *= mpf("1.25") self.damage_multiplier.multiplier_to_enemy_attack_speed *= mpf("1.25") 
        self.damage_multiplier.multiplier_to_self_current_hp_percentage *= mpf("1.25")
        self.damage_multiplier.multiplier_to_self_hp_percentage_loss *= mpf("1.25")
        self.damage_multiplier.multiplier_to_enemy_current_hp_percentage *= mpf("1.25")


class PassiveSkill(Skill):
    """
    This class contains attributes of a passive skill which is automatically used.
    """

    def __init__(self, name, description, passive_skill_effect):
        # type: (str, str, PassiveSkillEffect) -> None
        Skill.__init__(self, name, description, mpf("0"))
        self.passive_skill_effect: PassiveSkillEffect = passive_skill_effect

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )


class PassiveSkillEffect:
    """
    This class contains attributes of the effect of a passive skill.
    """

    def __init__(self, max_hp_percentage_up=mpf("0"), max_magic_points_percentage_up=mpf("0"),
                 attack_power_percentage_up=mpf("0"), defense_percentage_up=mpf("0"),
                 attack_speed_percentage_up=mpf("0"), crit_rate_up=mpf("0"), crit_damage_up=mpf("0"),
                 resistance_up=mpf("0"), accuracy_up=mpf("0"), extra_turn_chance_up=mpf("0"),
                 beneficial_effects_to_allies=None, harmful_effects_to_enemies=None,
                 allies_attack_gauge_up=mpf("0"), enemies_attack_gauge_down=mpf("0"),
                 heal_amount_to_allies=mpf("0")):
        # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, list, list, mpf, mpf, mpf) -> None
        # None is used as the default for the two list parameters so that one mutable default list is
        # not shared across instances.
        if beneficial_effects_to_allies is None:
            beneficial_effects_to_allies = []
        if harmful_effects_to_enemies is None:
            harmful_effects_to_enemies = []
        self.max_hp_percentage_up: mpf = max_hp_percentage_up
        self.max_magic_points_percentage_up: mpf = max_magic_points_percentage_up
        self.attack_power_percentage_up: mpf = attack_power_percentage_up
        self.defense_percentage_up: mpf = defense_percentage_up
        self.attack_speed_percentage_up: mpf = attack_speed_percentage_up
        self.crit_rate_up: mpf = crit_rate_up
        self.crit_damage_up: mpf = crit_damage_up
        self.resistance_up: mpf = resistance_up
        self.accuracy_up: mpf = accuracy_up
        self.extra_turn_chance_up: mpf = extra_turn_chance_up
        self.__beneficial_effects_to_allies: list = beneficial_effects_to_allies
        self.__harmful_effects_to_enemies: list = harmful_effects_to_enemies
        self.allies_attack_gauge_up: mpf = allies_attack_gauge_up
        self.enemies_attack_gauge_down: mpf = enemies_attack_gauge_down
        self.heal_amount_to_allies: mpf = heal_amount_to_allies

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )

    def get_beneficial_effects_to_allies(self):
        # type: () -> list
        return self.__beneficial_effects_to_allies

    def get_harmful_effects_to_enemies(self):
        # type: () -> list
        return self.__harmful_effects_to_enemies

    def clone(self):
        # type: () -> PassiveSkillEffect
        return copy.deepcopy(self)


class LeaderSkill(Skill):
    """
    This class contains attributes of a leader skill.
    """

    def __init__(self, name, description, magic_points_cost, leader_skill_effect):
        # type: (str, str, mpf, LeaderSkillEffect) -> None
        Skill.__init__(self, name, description, magic_points_cost)
        self.leader_skill_effect: LeaderSkillEffect = leader_skill_effect

    def __str__(self):
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join('%s=%s' % item for item in vars(self).items())
        )


class LeaderSkillEffect:
    """
    This class contains attributes of the effect of a leader skill.
""" def __init__(self, max_hp_percentage_up=mpf("0"), max_magic_points_percentage_up=mpf("0"), attack_power_percentage_up=mpf("0"), defense_percentage_up=mpf("0"), attack_speed_percentage_up=mpf("0"), crit_rate_up=mpf("0"), crit_damage_up=mpf("0"), resistance_up=mpf("0"), accuracy_up=mpf("0")): # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf) -> None self.max_hp_percentage_up: mpf = max_hp_percentage_up self.max_magic_points_percentage_up: mpf = max_magic_points_percentage_up self.attack_power_percentage_up: mpf = attack_power_percentage_up self.defense_percentage_up: mpf = defense_percentage_up self.attack_speed_percentage_up: mpf = attack_speed_percentage_up self.crit_rate_up: mpf = crit_rate_up self.crit_damage_up: mpf = crit_damage_up self.resistance_up: mpf = resistance_up self.accuracy_up: mpf = accuracy_up def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> LeaderSkillEffect return copy.deepcopy(self) class DamageMultiplier: """ This class contains attributes of the damage multiplier of a skill. """ def __init__(self, multiplier_to_self_max_hp=mpf("0"), multiplier_to_enemy_max_hp=mpf("0"), multiplier_to_self_attack_power=mpf("0"), multiplier_to_enemy_attack_power=mpf("0"), multiplier_to_self_defense=mpf("0"), multiplier_to_enemy_defense=mpf("0"), multiplier_to_self_max_magic_points=mpf("0"), multiplier_to_enemy_max_magic_points=mpf("0"), multiplier_to_self_attack_speed=mpf("0"), multiplier_to_enemy_attack_speed=mpf("0"), multiplier_to_self_current_hp_percentage=mpf("0"), multiplier_to_self_hp_percentage_loss=mpf("0"), multiplier_to_enemy_current_hp_percentage=mpf("0")): # type: (mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf, mpf) -> None self.multiplier_to_self_max_hp: mpf = multiplier_to_self_max_hp self.multiplier_to_enemy_max_hp: mpf = multiplier_to_enemy_max_hp self.multiplier_to_self_attack_power: mpf = multiplier_to_self_attack_power self.multiplier_to_enemy_attack_power: mpf = multiplier_to_enemy_attack_power self.multiplier_to_self_defense: mpf = multiplier_to_self_defense self.multiplier_to_enemy_defense: mpf = multiplier_to_enemy_defense self.multiplier_to_self_max_magic_points: mpf = multiplier_to_self_max_magic_points self.multiplier_to_enemy_max_magic_points: mpf = multiplier_to_enemy_max_magic_points self.multiplier_to_self_attack_speed: mpf = multiplier_to_self_attack_speed self.multiplier_to_enemy_attack_speed: mpf = multiplier_to_enemy_attack_speed self.multiplier_to_self_current_hp_percentage: mpf = multiplier_to_self_current_hp_percentage self.multiplier_to_self_hp_percentage_loss: mpf = multiplier_to_self_hp_percentage_loss self.multiplier_to_enemy_current_hp_percentage: mpf = multiplier_to_enemy_current_hp_percentage def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def calculate_raw_damage_without_enemy_defense_invincibility_shield(self, user, target): # type: (LegendaryCreature, LegendaryCreature) -> mpf self_current_hp_percentage: mpf = (user.curr_hp / user.max_hp) * 100 self_hp_percentage_loss: mpf = 100 - self_current_hp_percentage target_current_hp_percentage: mpf = (target.curr_hp / target.max_hp) * 100 return (user.max_hp * (1 + user.max_hp_percentage_up / 100) * self.multiplier_to_self_max_hp + target.max_hp * self.multiplier_to_enemy_max_hp * (1 + target.max_hp_percentage_up / 100) + user.attack_power * (1 + user.attack_power_percentage_up / 100 - 
user.attack_power_percentage_down / 100) *
                # The (1 + ...) speed factors keep the attack terms nonzero when a skill's speed
                # multipliers are zero; a duplicated bare target attack term has been dropped.
                (1 + self.multiplier_to_self_attack_speed * user.attack_speed *
                 (1 + user.attack_speed_percentage_up / 100 - user.attack_speed_percentage_down / 100)) *
                self.multiplier_to_self_attack_power +
                target.attack_power * (1 + target.attack_power_percentage_up / 100 -
                                       target.attack_power_percentage_down / 100) *
                (1 + self.multiplier_to_enemy_attack_speed * target.attack_speed *
                 (1 + target.attack_speed_percentage_up / 100 - target.attack_speed_percentage_down / 100)) *
                self.multiplier_to_enemy_attack_power +
                user.defense * (1 + user.defense_percentage_up / 100 - user.defense_percentage_down / 100) *
                self.multiplier_to_self_defense +
                target.defense * (1 + target.defense_percentage_up / 100 - target.defense_percentage_down / 100) *
                self.multiplier_to_enemy_defense +
                user.max_magic_points * (1 + user.max_magic_points_percentage_up / 100) *
                self.multiplier_to_self_max_magic_points +
                target.max_magic_points * (1 + target.max_magic_points_percentage_up / 100) *
                self.multiplier_to_enemy_max_magic_points) * \
               (1 + self_current_hp_percentage * self.multiplier_to_self_current_hp_percentage) * \
               (1 + self_hp_percentage_loss * self.multiplier_to_self_hp_percentage_loss) * \
               (1 + target_current_hp_percentage * self.multiplier_to_enemy_current_hp_percentage) * \
               (1 + target.damage_received_percentage_up / 100)

    def calculate_raw_damage(self, user, target, does_ignore_defense=False, does_ignore_shield=False,
                             does_ignore_invincibility=False):
        # type: (LegendaryCreature, LegendaryCreature, bool, bool, bool) -> mpf
        damage_reduction_factor: mpf = mpf("1") if does_ignore_defense else \
            mpf("1e8") / (mpf("1e8") + 3.5 * target.defense)
        raw_damage: mpf = self.calculate_raw_damage_without_enemy_defense_invincibility_shield(user, target)
        if not does_ignore_shield and target.shield_percentage > 0:
            raw_damage *= (1 - target.shield_percentage / 100)

        if not (does_ignore_invincibility or target.can_receive_damage):
            return mpf("0")

        # Checking for glancing hits
        glancing_chance: mpf = user.glancing_hit_chance + glancing_hit_chance_by_elements(user.element,
                                                                                          target.element)
        is_glancing: bool = random.random() < glancing_chance
        if is_glancing:
            return raw_damage * damage_reduction_factor * mpf("0.7")

        # Checking for crushing hits (passing elements, consistent with the glancing-hit check above)
        crushing_chance: mpf = crushing_hit_chance_by_elements(user.element, target.element)
        is_crushing: bool = random.random() < crushing_chance
        if is_crushing:
            return raw_damage * damage_reduction_factor * mpf("1.3")

        # Checking for critical hits
        crit_chance: mpf = user.crit_rate + user.crit_rate_up - target.crit_resist - target.crit_resist_up
        if crit_chance < LegendaryCreature.MIN_CRIT_RATE:
            crit_chance = LegendaryCreature.MIN_CRIT_RATE
        is_crit: bool = random.random() < crit_chance
        return raw_damage * damage_reduction_factor if not is_crit else \
            raw_damage * (user.crit_damage + user.crit_damage_up) * damage_reduction_factor

    def clone(self):
        # type: () -> DamageMultiplier
        return copy.deepcopy(self)


class BeneficialEffect:
    """
    This class contains attributes of a beneficial effect a legendary creature has.
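    Each effect name maps to a fixed bonus (e.g. INCREASE_ATK grants +50% attack power); only
    HEAL_OVER_TIME effects can be stacked, as indicated by the can_be_stacked attribute.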
""" POSSIBLE_NAMES: list = ["INCREASE_ATK", "INCREASE_DEF", "INCREASE_SPD", "INCREASE_CRIT_RATE", "IMMUNITY", "INVINCIBILITY", "HEAL_OVER_TIME", "COUNTER", "REFLECT", "VAMPIRE", "INCREASE_CRIT_RESIST", "SHIELD", "ENDURE"] def __init__(self, name, number_of_turns): # type: (str, int) -> None self.name: str = name if name in self.POSSIBLE_NAMES else self.POSSIBLE_NAMES[0] self.number_of_turns: int = number_of_turns self.attack_power_percentage_up: mpf = mpf("50") if self.name == "INCREASE_ATK" else mpf("0") self.attack_speed_percentage_up: mpf = mpf("33") if self.name == "INCREASE_SPD" else mpf("0") self.defense_percentage_up: mpf = mpf("50") if self.name == "INCREASE_DEF" else mpf("0") self.crit_rate_up: mpf = mpf("0.3") if self.name == "INCREASE_CRIT_RATE" else mpf("0") self.prevents_damage: bool = self.name == "INVINCIBILITY" self.blocks_debuffs: bool = self.name == "IMMUNITY" self.prevents_death: bool = self.name == "ENDURE" self.heal_percentage_per_turn: mpf = mpf("15") if self.name == "HEAL_OVER_TIME" else mpf("0") self.counterattack_chance_up: mpf = mpf("1") if self.name == "COUNTER" else mpf("0") self.reflected_damage_percentage_up: mpf = mpf("33") if self.name == "REFLECT" else mpf("0") self.life_drain_percentage_up: mpf = mpf("33") if self.name == "VAMPIRE" else mpf("0") self.crit_resist_up: mpf = mpf("0.5") if self.name == "INCREASE_CRIT_RESIST" else mpf("0") self.shield_percentage_up: mpf = mpf("15") if self.name == "SHIELD" else mpf("0") self.can_be_stacked: bool = self.name == "HEAL_OVER_TIME" def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> BeneficialEffect return copy.deepcopy(self) class HarmfulEffect: """ This class contains attributes of a harmful effect a legendary creature has. """ POSSIBLE_NAMES: list = ["DECREASE_ATK", "DECREASE_DEF", "GLANCING", "DECREASE_SPD", "BLOCK_BENEFICIAL_EFFECTS", "BRAND", "UNRECOVERABLE", "OBLIVION", "SILENCE", "DAMAGE_OVER_TIME", "STUN"] def __init__(self, name, number_of_turns): # type: (str, int) -> None self.name: str = name if name in self.POSSIBLE_NAMES else self.POSSIBLE_NAMES[0] self.number_of_turns: int = number_of_turns self.attack_power_percentage_down: mpf = mpf("50") if self.name == "DECREASE_ATK" else mpf("0") self.attack_speed_percentage_down: mpf = mpf("33") if self.name == "DECREASE_SPD" else mpf("0") self.defense_percentage_down: mpf = mpf("50") if self.name == "DECREASE_DEF" else mpf("0") self.glancing_hit_chance_up: mpf = mpf("0.5") if self.name == "GLANCING" else mpf("0") self.blocks_beneficial_effects: bool = self.name == "BLOCK_BENEFICIAL_EFFECTS" self.damage_received_percentage_up: mpf = mpf("25") if self.name == "BRAND" else mpf("0") self.blocks_heal: bool = self.name == "UNRECOVERABLE" self.blocks_passive_skills: bool = self.name == "OBLIVION" self.blocks_skills_with_cooltime: bool = self.name == "SILENCE" self.damage_percentage_per_turn: mpf = mpf("5") if self.name == "DAMAGE_OVER_TIME" else mpf("0") self.prevents_moves: bool = self.name == "STUN" self.can_be_stacked: bool = self.name == "DAMAGE_OVER_TIME" def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def clone(self): # type: () -> HarmfulEffect return copy.deepcopy(self) class PlayerBase: """ This class contains attributes of the player's base. 
""" def __init__(self): # type: () -> None self.__islands: list = [Island()] # initial value self.island_build_gold_cost: mpf = mpf("1e8") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def add_island(self): # type: () -> None self.island_build_gold_cost *= mpf("10") ** (triangular(len(self.__islands))) self.__islands.append(Island()) def get_islands(self): # type: () -> list return self.__islands def clone(self): # type: () -> PlayerBase return copy.deepcopy(self) class Island: """ This class contains attributes of an island in a player's base. """ ISLAND_WIDTH: int = 10 ISLAND_HEIGHT: int = 10 def __init__(self): # type: () -> None self.__tiles: list = [] # initial value for i in range(self.ISLAND_WIDTH): new = [] # initial value for k in range(self.ISLAND_HEIGHT): # Ensuring that obstacles are not placed at the edges of the island place_obstacle: bool = random.random() <= 0.3 if place_obstacle and not self.is_edge(i, k): new.append(IslandTile(Obstacle())) else: new.append(IslandTile()) self.__tiles.append(new) def is_edge(self, x, y): # type: (int, int) -> bool return (x == 0 and y == 0) or (x == 0 and y == self.ISLAND_HEIGHT - 1) or \ (x == self.ISLAND_WIDTH - 1 and y == 0) or (x == self.ISLAND_WIDTH - 1 and y == self.ISLAND_HEIGHT - 1) def get_tiles(self): # type: () -> list return self.__tiles def get_tile_at(self, x, y): # type: (int, int) -> IslandTile or None if x < 0 or x >= self.ISLAND_WIDTH or y < 0 or y >= self.ISLAND_HEIGHT: return None return self.__tiles[y][x] def __str__(self): # type: () -> str return str(tabulate(self.__tiles, headers='firstrow', tablefmt='fancy_grid')) def clone(self): # type: () -> Island return copy.deepcopy(self) class IslandTile: """ This class contains attributes of a tile on an island. """ def __init__(self, building=None): # type: (Building or None) -> None self.building: Building or None = building def __str__(self): # type: () -> str if isinstance(self.building, Building): return "IslandTile(" + str(self.building.name) + ")" return "IslandTile(GRASS)" def add_building(self, building): # type: (Building) -> bool if self.building is None: self.building = building return True return False def remove_building(self): # type: () -> None self.building = None def clone(self): # type: () -> IslandTile return copy.deepcopy(self) class Building: """ This class contains attributes of a building to be built on an island tile. """ def __init__(self, name, description, gold_cost, gem_cost): # type: (str, str, mpf, mpf) -> None self.name: str = name self.description: str = description self.gold_cost: mpf = gold_cost self.gem_cost: mpf = gem_cost self.sell_gold_gain: mpf = gold_cost / 5 self.sell_gem_gain: mpf = gem_cost / 5 self.upgrade_gold_cost: mpf = gold_cost self.upgrade_gem_cost: mpf = gem_cost self.level: int = 1 def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None pass def clone(self): # type: () -> Building return copy.deepcopy(self) class TrainingArea(Building): """ This class contains attributes of a training area to automatically increase the EXP of legendary creatures. 
""" MAX_LEGENDARY_CREATURES: int = 5 def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "TRAINING AREA", "A training area to increase the EXP of legendary creatures.", gold_cost, gem_cost) self.legendary_creature_exp_per_second: mpf = self.gold_cost / mpf("1e5") self.__legendary_creatures_placed: list = [] # initial value def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_exp_per_second *= mpf("10") ** self.level self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level def get_legendary_creatures_placed(self): # type: () -> list return self.__legendary_creatures_placed def add_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if len(self.__legendary_creatures_placed) < self.MAX_LEGENDARY_CREATURES: self.__legendary_creatures_placed.append(legendary_creature) return True return False def remove_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if legendary_creature in self.__legendary_creatures_placed: self.__legendary_creatures_placed.remove(legendary_creature) return True return False class Tree(Building): """ This class contains attributes of a tree used to decorate an island. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "TREE", "A tree.", gold_cost, gem_cost) def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class Guardstone(Building): """ This class contains attributes of a building used to increase the defense of all legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "GUARDSTONE", "A building used to increase the defense of all legendary creatures.", gold_cost, gem_cost) self.legendary_creature_defense_percentage_up: mpf = mpf("3") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_defense_percentage_up += mpf("3") self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class LegendaryCreatureSanctuary(Building): """ This class contains attributes of a building used to increase the attack power of all legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "LEGENDARY CREATURE SANCTUARY", "A building used to increase the attack power of all " "legendary creatures.", gold_cost, gem_cost) self.legendary_creature_attack_power_percentage_up: mpf = mpf("3") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_attack_power_percentage_up += mpf("3") self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class SurvivalAltar(Building): """ This class contains attributes of a building used to increase the maximum HP of all legendary creatures. 
""" def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "SURVIVAL ALTAR", "A building used to increase the maximum HP of all legendary " "creatures.", gold_cost, gem_cost) self.legendary_creature_max_hp_percentage_up: mpf = mpf("3") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_max_hp_percentage_up += mpf("3") self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class MagicAltar(Building): """ This class contains attributes of a building used to increase the maximum magic points of all legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "MAGIC ALTAR", "A building used to increase the maximum magic points of all " "legendary creatures.", gold_cost, gem_cost) self.legendary_creature_max_magic_points_percentage_up: mpf = mpf("3") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_max_magic_points_percentage_up += mpf("3") self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class BoosterTower(Building): """ This class contains attributes of a building used to increase the attack speed of all legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "BOOSTER TOWER", "A building used to increase the attack speed of all legendary " "creatures.", gold_cost, gem_cost) self.legendary_creature_attack_speed_percentage_up: mpf = mpf("3") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.legendary_creature_attack_speed_percentage_up += mpf("3") self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class PlayerEXPTower(Building): """ This class contains attributes of a tower producing EXP for the player. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "PLAYER EXP TOWER", "A tower producing EXP for the player.", gold_cost, gem_cost) self.exp_per_second: mpf = self.gold_cost / mpf("1e5") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.exp_per_second *= mpf("10") ** self.level self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class GoldMine(Building): """ This class contains attributes of a gold mine producing gold. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "GOLD MINE", "A mine producing gold.", gold_cost, gem_cost) self.gold_per_second: mpf = self.gold_cost / mpf("1e5") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.gold_per_second *= mpf("10") ** self.level self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class GemMine(Building): """ This class contains attributes of a gem mine producing gems. 
""" def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "GEM MINE", "A mine producing gems.", gold_cost, gem_cost) self.gem_per_second: mpf = self.gold_cost / mpf("1e7") def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def level_up(self): # type: () -> None self.level += 1 self.gem_per_second *= mpf("10") ** self.level self.upgrade_gold_cost *= mpf("10") ** self.level self.upgrade_gem_cost *= mpf("10") ** self.level class PowerUpCircle(Building): """ This class contains attributes of a power-up circle used to power up and evolve legendary creatures. """ MAX_MATERIAL_LEGENDARY_CREATURES: int = 5 def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "POWER UP CIRCLE", "A building used to power up and evolve legendary creatures.", gold_cost, gem_cost) self.legendary_creature_to_power_up: LegendaryCreature or None = None self.__material_legendary_creatures: list = [] # initial value def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def execute_power_up(self): # type: () -> LegendaryCreature or None if isinstance(self.legendary_creature_to_power_up, LegendaryCreature): curr_legendary_creature: LegendaryCreature = self.legendary_creature_to_power_up for legendary_creature in self.__material_legendary_creatures: curr_legendary_creature.exp += legendary_creature.rating * legendary_creature.exp curr_legendary_creature.level_up() self.deselect_legendary_creature_to_power_up() self.set_material_legendary_creatures([]) return curr_legendary_creature return None def execute_evolution(self): # type: () -> LegendaryCreature or None if isinstance(self.legendary_creature_to_power_up, LegendaryCreature): curr_legendary_creature: LegendaryCreature = self.legendary_creature_to_power_up # Evolve the legendary creature if there are sufficient material legendary creatures of the # same or higher rating as the legendary creature to be evolved num_materials: int = sum(1 for legendary_creature in self.__material_legendary_creatures if legendary_creature.rating >= curr_legendary_creature.rating) if len(self.__material_legendary_creatures) == curr_legendary_creature.rating - 1 and \ num_materials == curr_legendary_creature.rating - 1: curr_legendary_creature.evolve() self.deselect_legendary_creature_to_power_up() self.set_material_legendary_creatures([]) return curr_legendary_creature return None def get_material_legendary_creatures(self): # type: () -> list return self.__material_legendary_creatures def set_material_legendary_creatures(self, material_legendary_creatures): # type: (list) -> None self.__material_legendary_creatures = material_legendary_creatures def select_legendary_creature_to_power_up(self, legendary_creature): # type: (LegendaryCreature) -> bool if self.legendary_creature_to_power_up is None: self.legendary_creature_to_power_up = legendary_creature return True return False def deselect_legendary_creature_to_power_up(self): # type: () -> bool if isinstance(self.legendary_creature_to_power_up, LegendaryCreature): self.legendary_creature_to_power_up = None return True return False def add_legendary_creature(self, legendary_creature): # type: (LegendaryCreature) -> bool if len(self.__material_legendary_creatures) < self.MAX_MATERIAL_LEGENDARY_CREATURES: self.__material_legendary_creatures.append(legendary_creature) return True return False def remove_legendary_creature(self, 
legendary_creature): # type: (LegendaryCreature) -> bool if legendary_creature in self.__material_legendary_creatures: self.__material_legendary_creatures.remove(legendary_creature) return True return False class Summonhenge(Building): """ This class contains attributes of a building used to summon legendary creatures. """ def __init__(self, gold_cost, gem_cost): # type: (mpf, mpf) -> None Building.__init__(self, "SUMMONHENGE", "A building used to summon legendary creatures.", gold_cost, gem_cost) def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class FusionCenter(Building): """ This class contains attributes of a fusion center used to fuse legendary creatures. """ def __init__(self, gold_cost, gem_cost, fusion_legendary_creatures): # type: (mpf, mpf, list) -> None Building.__init__(self, "FUSION CENTER", "A building used to fuse legendary creatures into a stronger one.", gold_cost, gem_cost) self.__fusion_legendary_creatures: list = fusion_legendary_creatures def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_fusion_legendary_creatures(self): # type: () -> list return self.__fusion_legendary_creatures class Obstacle(Building): """ This class contains attributes of an obstacle which the player can remove from the island. """ def __init__(self): # type: () -> None Building.__init__(self, "OBSTACLE", "A removable obstacle.", mpf("0"), mpf("0")) self.remove_gold_gain: mpf = mpf("10") ** random.randint(5, 10) self.remove_gem_gain: mpf = mpf("10") ** random.randint(2, 6) def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) class TempleOfWishes(Building): """ This class contains attributes of a temple of wishes where the player can make wishes to get random rewards. """ def __init__(self, gold_cost, gem_cost, obtainable_objects): # type: (mpf, mpf, list) -> None Building.__init__(self, "TEMPLE OF WISHES", "A building where the player can make wishes to get random rewards", gold_cost, gem_cost) self.__obtainable_objects: list = obtainable_objects self.wishes_left: int = 3 # The number of wishes a player can make in a day. self.already_reset: bool = False def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def reset_wishes_left(self): # type: () -> bool time_now: datetime = datetime.now() if not self.already_reset and time_now.hour > 0: self.already_reset = True self.wishes_left = 3 return True return False def restore(self): # type: () -> None self.already_reset = False def get_obtainable_objects(self): # type: () -> list return self.__obtainable_objects class ItemShop: """ This class contains attributes of a shop selling items. """ def __init__(self, items_sold): # type: (list) -> None self.name: str = "ITEM SHOP" self.__items_sold: list = items_sold def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_items_sold(self): # type: () -> list return self.__items_sold def clone(self): # type: () -> ItemShop return copy.deepcopy(self) class BuildingShop: """ This class contains attributes of a shop selling buildings. 
""" def __init__(self, buildings_sold): # type: (list) -> None self.name: str = "BUILDING SHOP" self.__buildings_sold: list = buildings_sold def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_buildings_sold(self): # type: () -> list return self.__buildings_sold def clone(self): # type: () -> BuildingShop return copy.deepcopy(self) class Reward: """ This class contains attributes of the reward gained for doing something in this game. """ def __init__(self, player_reward_exp=mpf("0"), player_reward_gold=mpf("0"), player_reward_gems=mpf("0"), legendary_creature_reward_exp=mpf("0"), player_reward_items=None): # type: (mpf, mpf, mpf, mpf, list) -> None if player_reward_items is None: player_reward_items = [] self.player_reward_exp: mpf = player_reward_exp self.player_reward_gold: mpf = player_reward_gold self.player_reward_gems: mpf = player_reward_gems self.legendary_creature_reward_exp: mpf = legendary_creature_reward_exp self.__player_reward_items: list = player_reward_items def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_player_reward_items(self): # type: () -> list return self.__player_reward_items def clone(self): # type: () -> Reward return copy.deepcopy(self) class Game: """ This class contains attributes of the saved game data. """ def __init__(self, player_data, potential_legendary_creatures, fusion_legendary_creatures, item_shop, building_shop, battle_arena, battle_areas): # type: (Player, list, list, ItemShop, BuildingShop, Arena, list) -> None self.player_data: Player = player_data self.__potential_legendary_creatures: list = potential_legendary_creatures self.__fusion_legendary_creatures: list = fusion_legendary_creatures self.item_shop: ItemShop = item_shop self.building_shop: BuildingShop = building_shop self.battle_arena: Arena = battle_arena self.__battle_areas: list = battle_areas def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def get_potential_legendary_creatures(self): # type: () -> list return self.__potential_legendary_creatures def get_fusion_legendary_creatures(self): # type: () -> list return self.__fusion_legendary_creatures def get_battle_areas(self): # type: () -> list return self.__battle_areas def clone(self): # type: () -> Game return copy.deepcopy(self) # Creating main function used to run the game. def main(): # type: () -> int """ This main function is used to run the game. :return: None """ print("Welcome to 'Ancient Invasion' by 'NativeApkDev'.") print("This game is a turn-based strategy RPG where the player brings legendary creatures to battles where ") print("legendary creatures take turns in making moves.") # Initialising a list of skills that all legendary creatures have. 
    skills_list: list = [
        ActiveSkill("SINGLE-TARGET ATTACK SKILL #1", "Normal Single-Target Attack Skill", "ATTACK", False,
                    mpf("1e3"), 2, DamageMultiplier(multiplier_to_self_attack_power=mpf("3.5")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("SINGLE-TARGET ATTACK SKILL #2", "Strong Single-Target Attack Skill", "ATTACK", False,
                    mpf("1e10"), 4, DamageMultiplier(multiplier_to_self_attack_power=mpf("10.5")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("SINGLE-TARGET ATTACK SKILL #3", "Ultimate Single-Target Attack Skill", "ATTACK", False,
                    mpf("1e30"), 8, DamageMultiplier(multiplier_to_self_attack_power=mpf("31.5")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("MULTI-TARGET ATTACK SKILL #1", "Normal Multi-Target Attack Skill", "ATTACK", True,
                    mpf("1e3"), 2, DamageMultiplier(multiplier_to_self_attack_power=mpf("0.7")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("MULTI-TARGET ATTACK SKILL #2", "Strong Multi-Target Attack Skill", "ATTACK", True,
                    mpf("1e10"), 4, DamageMultiplier(multiplier_to_self_attack_power=mpf("2.1")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("MULTI-TARGET ATTACK SKILL #3", "Ultimate Multi-Target Attack Skill", "ATTACK", True,
                    mpf("1e30"), 8, DamageMultiplier(multiplier_to_self_attack_power=mpf("6.3")), [], [],
                    mpf("0"), mpf("0"), mpf("0"), False, False, False),
        ActiveSkill("HEAL SKILL #1", "First Heal Skill", "HEAL", True, mpf("1e3"), 2, DamageMultiplier(),
                    [], [], mpf("0"), mpf("0"), mpf("2e4"), False, False, False),
        ActiveSkill("HEAL SKILL #2", "Better Heal Skill", "HEAL", True, mpf("1e10"), 4, DamageMultiplier(),
                    [], [], mpf("0"), mpf("0"), mpf("2e12"), False, False, False),
        ActiveSkill("HEAL SKILL #3", "Ultimate Heal Skill", "HEAL", True, mpf("1e30"), 8, DamageMultiplier(),
                    [], [], mpf("0"), mpf("0"), mpf("2e36"), False, False, False),
        PassiveSkill("EXTRA TURN PASSIVE SKILL", "Increase player's extra turn chance by 15%.",
                     PassiveSkillEffect(extra_turn_chance_up=mpf("0.15"))),
        LeaderSkill("ATTACK LEADER SKILL", "Increase all allies' attack power by 20%.", mpf("0"),
                    LeaderSkillEffect(attack_power_percentage_up=mpf("20")))
    ]

    # Initialising potential legendary creatures in this game.
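    # One summonable creature is defined for each element (FIRE, WATER, WIND, LIGHT, DARK and NEUTRAL);
    # all of them share skills_list and carry their own awaken bonus.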
potential_legendary_creatures: list = [ LegendaryCreature("Hellchnoth", "FIRE", 1, "NORMAL", mpf("4.95e4"), mpf("4.78e4"), mpf("9.33e3"), mpf("8.74e3"), mpf("109"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0.15"), mpf("0"), mpf("0"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))), LegendaryCreature("Chichoo", "WATER", 1, "NORMAL", mpf("5.14e4"), mpf("5.07e4"), mpf("8.12e3"), mpf("8.87e3"), mpf("107"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0"), mpf("0.15"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))), LegendaryCreature("Hylso", "WIND", 1, "NORMAL", mpf("4.78e4"), mpf("4.53e4"), mpf("9.47e3"), mpf("9.01e3"), mpf("108"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0.5"), mpf("0"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))), LegendaryCreature("Banngod", "LIGHT", 1, "NORMAL", mpf("4.57e4"), mpf("5.13e4"), mpf("9.6e3"), mpf("8.47e3"), mpf("111"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0.5"), mpf("0"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))), LegendaryCreature("Manrud", "DARK", 1, "NORMAL", mpf("5.24e4"), mpf("5.17e4"), mpf("8.08e3"), mpf("8.27e3"), mpf("110"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0"), mpf("0.15"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))), LegendaryCreature("Avaffaip", "NEUTRAL", 1, "NORMAL", mpf("5.19e4"), mpf("5.07e4"), mpf("8.57e3"), mpf("8.66e3"), mpf("112"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0"), mpf("0.15"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf("94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False))) ] # Initialising legendary creatures which can be obtained from fusions. 
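    # Fusion creatures have roughly five times the base stats of the summonable creatures and list the
    # material creatures required to fuse them.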
fusion_legendary_creatures: list = [ FusionLegendaryCreature("Meppee", "LIGHT", 1, "NORMAL", mpf("2.5e5"), mpf("2.47e5"), mpf("4.43e4"), mpf("4.35e4"), mpf("109"), skills_list, AwakenBonus(mpf("125"), mpf("125"), mpf("125"), mpf("125"), mpf("0"), mpf("0"), mpf("0.5"), mpf("0"), mpf("0"), ActiveSkill("SINGLE-TARGET ATTACK SKILL #4", "Extreme Single-Target Attack Skill", "ATTACK", False, mpf("1e90"), 8, DamageMultiplier( multiplier_to_self_attack_power=mpf( "94.5")), [], [], mpf("0"), mpf("0"), mpf("0"), False, False, False)), [potential_legendary_creatures[x] for x in range(1, len(potential_legendary_creatures))]) ] # Initialising the item shop item_shop: ItemShop = ItemShop([ Rune("1-STAR ENERGY RUNE - SLOT 1", "An Energy rune of rating 1 at slot 1", mpf("1e6"), mpf("0"), 1, 1, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "RES"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ACC"), Rune("1-STAR ENERGY RUNE - SLOT 3", "An Energy rune of rating 1 at slot 3", mpf("1e6"), mpf("0"), 1, 3, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 
at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "RES"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ACC"), Rune("1-STAR ENERGY RUNE - SLOT 5", "An Energy rune of rating 1 at slot 5", mpf("1e6"), mpf("0"), 1, 5, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "RES"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ACC"), AwakenShard(mpf("1e6"), mpf("10"), "Hellchnoth"), AwakenShard(mpf("1e6"), mpf("10"), "Chichoo"), AwakenShard(mpf("1e6"), mpf("10"), "Hylso"), AwakenShard(mpf("1e6"), mpf("10"), "Banngod"), AwakenShard(mpf("1e6"), mpf("10"), "Manrud"), AwakenShard(mpf("1e6"), mpf("10"), "Avaffaip"), AwakenShard(mpf("1e6"), mpf("10"), "Meppee"), EXPShard(mpf("1e6"), mpf("10"), mpf("1e5")), LevelUpShard(mpf("1e6"), mpf("10")), SkillLevelUpShard(mpf("1e6"), mpf("10")), Scroll("UNKNOWN", "A scroll to summon 1-STAR to 3-STAR legendary creatures.", mpf("1e6"), mpf("10"), potential_legendary_creatures) ]) # Initialising the building shop building_shop: BuildingShop = BuildingShop([ TrainingArea(mpf("1e8"), mpf("1000")), Tree(mpf("1e4"), mpf("0")), Guardstone(mpf("1e7"), mpf("100")), LegendaryCreatureSanctuary(mpf("1e7"), mpf("100")), SurvivalAltar(mpf("1e7"), mpf("100")), MagicAltar(mpf("1e7"), mpf("100")), BoosterTower(mpf("1e7"), mpf("100")), PlayerEXPTower(mpf("1e7"), mpf("100")), GoldMine(mpf("1e6"), mpf("10")), GemMine(mpf("1e6"), mpf("10")), PowerUpCircle(mpf("1e5"), mpf("1")), Summonhenge(mpf("1e5"), mpf("1")), FusionCenter(mpf("1e8"), mpf("1000"), fusion_legendary_creatures), TempleOfWishes(mpf("1e5"), mpf("1"), [ Reward(player_reward_exp=mpf("1e6")), Reward(player_reward_exp=mpf("5e6")), 
Reward(player_reward_gold=mpf("1e5")), Reward(player_reward_gold=mpf("5e5")), Reward(player_reward_gems=mpf("10")), Reward(player_reward_gems=mpf("50")), Reward(legendary_creature_reward_exp=mpf("1e6")), Reward(legendary_creature_reward_exp=mpf("5e6")), Rune("1-STAR ENERGY RUNE - SLOT 1", "An Energy rune of rating 1 at slot 1", mpf("1e6"), mpf("0"), 1, 1, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "RES"), Rune("1-STAR ENERGY RUNE - SLOT 2", "An Energy rune of rating 1 at slot 2", mpf("1e6"), mpf("0"), 1, 2, "ENERGY", "ACC"), Rune("1-STAR ENERGY RUNE - SLOT 3", "An Energy rune of rating 1 at slot 3", mpf("1e6"), mpf("0"), 1, 3, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "RES"), 
Rune("1-STAR ENERGY RUNE - SLOT 4", "An Energy rune of rating 1 at slot 4", mpf("1e6"), mpf("0"), 1, 4, "ENERGY", "ACC"), Rune("1-STAR ENERGY RUNE - SLOT 5", "An Energy rune of rating 1 at slot 5", mpf("1e6"), mpf("0"), 1, 5, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "HP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "HP%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "MP"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "MP%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ATK"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ATK%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "DEF"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "DEF%"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "SPD"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "CR"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "CD"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "RES"), Rune("1-STAR ENERGY RUNE - SLOT 6", "An Energy rune of rating 1 at slot 6", mpf("1e6"), mpf("0"), 1, 6, "ENERGY", "ACC"), AwakenShard(mpf("1e6"), mpf("10"), "Hellchnoth"), AwakenShard(mpf("1e6"), mpf("10"), "Chichoo"), AwakenShard(mpf("1e6"), mpf("10"), "Hylso"), AwakenShard(mpf("1e6"), mpf("10"), "Banngod"), AwakenShard(mpf("1e6"), mpf("10"), "Manrud"), AwakenShard(mpf("1e6"), mpf("10"), "Avaffaip"), AwakenShard(mpf("1e6"), mpf("10"), "Meppee"), EXPShard(mpf("1e6"), mpf("10"), mpf("1e5")), LevelUpShard(mpf("1e6"), mpf("10")), SkillLevelUpShard(mpf("1e6"), mpf("10")), Scroll("UNKNOWN", "A scroll to summon 1-STAR to 3-STAR legendary creatures.", mpf("1e6"), mpf("10"), potential_legendary_creatures) ]), ]) # Initialising potential CPU players the player can face potential_cpu_players: list = [ Player("CPU #1"), Player("CPU #2"), Player("CPU #3") ] potential_cpu_players[0].battle_team = Team([potential_legendary_creatures[0:5]]) potential_cpu_players[1].battle_team = Team([potential_legendary_creatures[1:6]]) potential_cpu_players[2].battle_team = Team([potential_legendary_creatures[x] for x in [0, 2, 3, 4, 5]]) # Initialising the battle arena battle_arena: Arena = Arena(potential_cpu_players) # Initialising a list of battle areas in this game. 
    battle_areas: list = [
        MapArea("DHUULOW BUSH", [
            Level("DHUULOW BUSH - ENTRANCE", [
                Stage(potential_legendary_creatures[1:6]),
                Stage(potential_legendary_creatures[0:5])
            ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")))
        ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")), "EASY"),
        Dungeon("ITEM DUNGEON 1", [
            Level("ID1 PART 1", [
                Stage(potential_legendary_creatures[1:6]),
                Stage(potential_legendary_creatures[0:5])
            ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")))
        ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")), "ITEM"),
        Dungeon("RESOURCE DUNGEON 1", [
            Level("RD1 PART 1", [
                Stage(potential_legendary_creatures[1:6]),
                Stage(potential_legendary_creatures[0:5])
            ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")))
        ], Reward(mpf("1e5"), mpf("1e5"), mpf("1"), mpf("1e5")), "RESOURCE")
    ]

    # Initialising variable for the saved game data
    # Asking the user to enter his/her name to check whether saved game data exists or not
    player_name: str = input("Please enter your name: ")
    file_name: str = "SAVED ANCIENT INVASION GAME DATA - " + str(player_name).upper()

    new_game: Game
    try:
        new_game = load_game_data(file_name)

        # Clearing up the command line window
        clear()

        print("Current game progress:\n", str(new_game))
    except FileNotFoundError:
        # Clearing up the command line window
        clear()

        print("Sorry! No saved game data with player name '" + str(player_name) + "' is available!")
        name: str = input("Please enter your name: ")
        player_data: Player = Player(name)
        new_game = Game(player_data, potential_legendary_creatures, fusion_legendary_creatures, item_shop,
                        building_shop, battle_arena, battle_areas)

    # Getting the current date and time
    old_now: datetime = datetime.now()
    print("Enter 'Y' for yes.")
    print("Enter anything else for no.")
    continue_playing: str = input("Do you want to continue playing 'Ancient Invasion'? ")
    while continue_playing == "Y":
        # Clearing up the command line window
        clear()

        # Measuring how much real time has passed since the previous iteration;
        # total_seconds() is used so that gaps longer than a day are not truncated.
        new_now: datetime = datetime.now()
        time_difference = new_now - old_now
        seconds: int = int(time_difference.total_seconds())

        # Resetting all temples of wishes if the day has changed; this check must happen
        # before old_now is overwritten with the current time.
        if new_now.day != old_now.day:
            for island in new_game.player_data.player_base.get_islands():
                for x in range(island.ISLAND_WIDTH):
                    for y in range(island.ISLAND_HEIGHT):
                        curr_tile: IslandTile = island.get_tile_at(x, y)
                        if isinstance(curr_tile.building, TempleOfWishes):
                            temple_of_wishes: TempleOfWishes = curr_tile.building
                            temple_of_wishes.restore()
                            temple_of_wishes.reset_wishes_left()
        old_now = new_now

        # Increase player's EXP, gold, and gems
        new_game.player_data.exp += new_game.player_data.exp_per_second * seconds
        new_game.player_data.level_up()
        new_game.player_data.gold += new_game.player_data.gold_per_second * seconds
        new_game.player_data.gems += new_game.player_data.gems_per_second * seconds

        # Increase the exp of all legendary creatures owned by the player
        for legendary_creature in new_game.player_data.legendary_creature_inventory.get_legendary_creatures():
            legendary_creature.exp += legendary_creature.exp_per_second * seconds
            legendary_creature.level_up()

        # Asking the player what he/she wants to do in the game.
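        # Entering anything outside this list saves the current game data and exits the program.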
allowed: list = ["PLAY ADVENTURE MODE", "MANAGE PLAYER BASE", "MANAGE BATTLE TEAM", "MANAGE LEGENDARY CREATURE INVENTORY", "MANAGE ITEM INVENTORY", "MAKE A WISH", "FUSE LEGENDARY CREATURES", "SUMMON LEGENDARY CREATURE", "GIVE ITEM", "POWER UP LEGENDARY CREATURE", "EVOLVE LEGENDARY CREATURE", "MANAGE TRAINING AREA", "PLACE RUNE", "REMOVE RUNE", "BUY ITEM", "VIEW STATS"] print("Enter 'PLAY ADVENTURE MODE' to play in adventure mode.") print("Enter 'MANAGE PLAYER BASE' to manage your player base.") print("Enter 'MANAGE BATTLE TEAM' to manage your battle team.") print("Enter 'MANAGE LEGENDARY CREATURE INVENTORY' to manage your legendary creature inventory.") print("Enter 'MANAGE ITEM INVENTORY' to manage your item inventory.") print("Enter 'MAKE A WISH' to make a wish in a temple of wishes.") print("Enter 'FUSE LEGENDARY CREATURES' to fuse legendary creatures using a fusion center.") print("Enter 'SUMMON LEGENDARY CREATURE' to summon a legendary creature.") print("Enter 'GIVE ITEM' to give an item to a legendary creature.") print("Enter 'POWER UP LEGENDARY CREATURE' to power up legendary creatures.") print("Enter 'EVOLVE LEGENDARY CREATURE' to evolve legendary creatures.") print("Enter 'MANAGE TRAINING AREA' to manage your training area.") print("Enter 'PLACE RUNE' to place a rune on a legendary creature.") print("Enter 'REMOVE RUNE' to remove a rune from a legendary creature.") print("Enter 'BUY ITEM' to purchase an item from the item shop.") print("Enter 'VIEW STATS' to view your stats.") action: str = input("What do you want to do? ") if action not in allowed: # Saving game data and quitting the game save_game_data(new_game, file_name) sys.exit() else: if action == "VIEW STATS": # Clearing the command line window clear() # Display player's stats print(new_game.player_data) elif action == "BUY ITEM": # Clearing the command line window clear() # Show a list of items which the player can buy item_list: list = new_game.item_shop.get_items_sold() curr_item_index: int = 1 # initial value for item in item_list: print("ITEM #" + str(curr_item_index)) print(str(item) + "\n") curr_item_index += 1 item_index: int = int(input("Please enter the index of the item you want to buy (1 - " + str(len(item_list)) + "): ")) while item_index < 1 or item_index > len(item_list): item_index: int = int(input("Sorry, invalid input! Please enter the index of the item you want " "to buy (1 - " + str(len(item_list)) + "): ")) item_to_buy: Item = item_list[item_index - 1] if new_game.player_data.purchase_item(item_to_buy): print("You have successfully bought " + str(item_to_buy.name)) else: print("Sorry, you have insufficient gold and/or gems!") elif action == "REMOVE RUNE": # Clearing up the command line window clear() # Allow the player to remove a rune if there are legendary creatures in the legendary creature # inventory. if len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0: print("Below is a list of legendary creatures you have.\n") curr_legendary_creature_index: int = 1 # initial value for legendary_creature in new_game.player_data.legendary_creature_inventory.get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index)) print(str(legendary_creature) + "\n") curr_legendary_creature_index += 1 legendary_creature_index: int = int(input("Please enter the index of the legendary creature " "you want to remove a rune from (1 - " + str(len(new_game.player_data.legendary_creature_inventory. 
get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()): legendary_creature_index = int(input("Sorry, invalid input! Please enter the index of the " "legendary creature you want to remove a rune from " "(1 - " + str(len(new_game.player_data.legendary_creature_inventory. get_legendary_creatures())) + "): ")) chosen_legendary_creature: LegendaryCreature = \ new_game.player_data.legendary_creature_inventory.get_legendary_creatures() \ [legendary_creature_index - 1] print(str(chosen_legendary_creature.name) + " has runes placed in slots as below.") for i in chosen_legendary_creature.get_runes().keys(): print("SLOT NUMBER #" + str(i)) slot_number: int = int(input("Please enter the slot number of the rune you want to remove " "(1 - 6): ")) while slot_number < 1 or slot_number > 6: slot_number = int( input("Sorry, invalid input! Please enter the slot number of the rune you want to " "remove (1 - 6): ")) chosen_legendary_creature.remove_rune(slot_number) elif action == "PLACE RUNE": # Clearing up the command line window clear() # Allow the player to place a rune if there are legendary creatures in the legendary creature # inventory. if len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0: print("Below is a list of legendary creatures you have.\n") curr_legendary_creature_index: int = 1 # initial value for legendary_creature in new_game.player_data.legendary_creature_inventory.get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index)) print(str(legendary_creature) + "\n") curr_legendary_creature_index += 1 legendary_creature_index: int = int(input("Please enter the index of the legendary creature " "you want to place a rune on (1 - " + str(len(new_game.player_data.legendary_creature_inventory. get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()): legendary_creature_index = int(input("Sorry, invalid input! Please enter the index of the " "legendary creature you want to place a rune on " "(1 - " + str(len(new_game.player_data.legendary_creature_inventory. get_legendary_creatures())) + "): ")) chosen_legendary_creature: LegendaryCreature = \ new_game.player_data.legendary_creature_inventory.get_legendary_creatures() \ [legendary_creature_index - 1] # Getting a list of runes which can be placed to the legendary creature runes: list = [] # initial value for item in new_game.player_data.item_inventory.get_items(): if isinstance(item, Rune): if not item.already_placed: runes.append(item) print("Enter 'Y' for yes.") print("Enter anything else for no.") place_rune: str = input( "Do you want to place a rune to " + str(chosen_legendary_creature.name) + "? ") if place_rune == "Y": if len(runes) > 0: print("Below is a list of runes you have.\n") curr_rune_index: int = 1 # initial value for rune in runes: print("RUNE #" + str(curr_rune_index)) print(str(rune) + "\n") curr_rune_index += 1 rune_index: int = int(input("Please enter the index of the rune you want to place to " "this legendary creature (1 - " + str(len(runes)) + "): ")) while rune_index < 1 or rune_index > len(runes): rune_index = int(input( "Sorry, invalid input! 
Please enter the index of the rune you want to place to " "this legendary creature (1 - " + str(len(runes)) + "): ")) chosen_rune: Rune = runes[rune_index - 1] chosen_legendary_creature.place_rune(chosen_rune) elif action == "MANAGE TRAINING AREA": # Clearing up the command line window clear() # Getting a list of training areas in the player's base training_areas: list = [] # initial value for island in new_game.player_data.player_base.get_islands(): for x in range(island.ISLAND_WIDTH): for y in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if isinstance(curr_tile.building, TrainingArea): training_areas.append(curr_tile.building) # If there are training areas, ask the player which training area he/she wants to manage. if len(training_areas) > 0: print("Below is a list of training areas that you have.") curr_training_area_index: int = 1 # initial value for training_area in training_areas: print("TRAINING AREA #" + str(curr_training_area_index)) print(str(training_area) + "\n") curr_training_area_index += 1 training_area_index: int = int(input("Please enter the index of the training area you want to " "manage (1 - " + str(len(training_areas)) + "): ")) while training_area_index < 1 or training_area_index > len(training_areas): training_area_index = int(input("Sorry, invalid input! Please enter the index of the training " "area " "you want to manage (1 - " + str(len(training_areas)) + "): ")) chosen_training_area: TrainingArea = training_areas[training_area_index - 1] # Checking whether a legendary creature can be added to the chosen training area or not. if len(chosen_training_area.get_legendary_creatures_placed()) < \ chosen_training_area.MAX_LEGENDARY_CREATURES: # Printing a list of legendary creatures the player can add to the training area available_legendary_creatures: list = [] # initial value for legendary_creature in new_game.player_data.legendary_creature_inventory.get_legendary_creatures(): if legendary_creature not in new_game.player_data.battle_team.get_legendary_creatures() and \ not legendary_creature.placed_in_training_area: available_legendary_creatures.append(legendary_creature) if len(available_legendary_creatures) > 0: print("Enter 'Y' for yes.") print("Enter anything else for no.") add_legendary_creature: str = input("Do you want to add a legendary creature to the " "training area? ") if add_legendary_creature == "Y": print("Below is a list of legendary creatures which you can add to the training area.") for legendary_creature in available_legendary_creatures: print(str(legendary_creature) + "\n") legendary_creature_index: int = int( input("Please enter the index of the legendary creature " "you want to add to the training area (1 - " + str(len(available_legendary_creatures)) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(available_legendary_creatures): legendary_creature_index = int( input("Sorry, invalid input! Please enter the index of the " "legendary creature you want to add to the training " "area (1 - " + str(len(available_legendary_creatures)) + "): ")) legendary_creature_to_add: LegendaryCreature = \ available_legendary_creatures[legendary_creature_index - 1] new_game.player_data.add_legendary_creature_to_training_area(legendary_creature_to_add, chosen_training_area) # Checking whether a legendary creature can be removed from the chosen training area or not. 
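                # (This removal flow mirrors the add flow above. Its in-range checks could
                # likewise be collapsed with the hypothetical prompt_valid_index helper
                # sketched near the top of the main loop, e.g.:
                #     legendary_creature_index = prompt_valid_index(
                #         "Please enter the index of the legendary creature ... : ",
                #         len(chosen_training_area.get_legendary_creatures_placed()))
                # Left as a comment so the original prompts below remain unchanged.)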
                if len(chosen_training_area.get_legendary_creatures_placed()) > 0:
                    print("Enter 'Y' for yes.")
                    print("Enter anything else for no.")
                    remove_legendary_creature: str = input("Do you want to remove a legendary creature from the "
                                                           "training area? ")
                    if remove_legendary_creature == "Y":
                        # Printing a list of legendary creatures in the chosen training area
                        curr_legendary_creature_index: int = 1
                        for legendary_creature in chosen_training_area.get_legendary_creatures_placed():
                            print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index))
                            print(str(legendary_creature) + "\n")
                            curr_legendary_creature_index += 1

                        legendary_creature_index: int = int(input("Please enter the index of the legendary creature "
                                                                  "you want to remove from the training area (1 - " +
                                                                  str(len(chosen_training_area.
                                                                          get_legendary_creatures_placed())) + "): "))
                        while legendary_creature_index < 1 or legendary_creature_index > \
                                len(chosen_training_area.get_legendary_creatures_placed()):
                            legendary_creature_index = int(input("Sorry, invalid input! Please enter the index of the "
                                                                 "legendary creature "
                                                                 "you want to remove from the training area (1 - " +
                                                                 str(len(chosen_training_area.
                                                                         get_legendary_creatures_placed())) + "): "))

                        legendary_creature_to_remove: LegendaryCreature = \
                            chosen_training_area.get_legendary_creatures_placed()[legendary_creature_index - 1]
                        new_game.player_data.remove_legendary_creature_from_training_area(
                            legendary_creature_to_remove, chosen_training_area)
        elif action == "EVOLVE LEGENDARY CREATURE":
            # Clearing up the command line window
            clear()

            # Getting a list of power-up circles in the player's base
            power_up_circles: list = []  # initial value
            for island in new_game.player_data.player_base.get_islands():
                for x in range(island.ISLAND_WIDTH):
                    for y in range(island.ISLAND_WIDTH):
                        curr_tile: IslandTile = island.get_tile_at(x, y)
                        if isinstance(curr_tile.building, PowerUpCircle):
                            power_up_circles.append(curr_tile.building)

            # If there are power up circles, ask the player which power-up circle he/she wants to use
            if len(power_up_circles) > 0:
                print("Below is a list of power up circles that you have.")
                curr_power_up_circle_index: int = 1  # initial value
                for power_up_circle in power_up_circles:
                    print("POWER UP CIRCLE #" + str(curr_power_up_circle_index))
                    print(str(power_up_circle) + "\n")
                    curr_power_up_circle_index += 1

                power_up_circle_index: int = int(input("Please enter the index of the power-up circle you want to "
                                                       "use (1 - " + str(len(power_up_circles)) + "): "))
                while power_up_circle_index < 1 or power_up_circle_index > len(power_up_circles):
                    power_up_circle_index = int(
                        input("Sorry, invalid input! Please enter the index of the power-up circle you want to "
                              "use (1 - " + str(len(power_up_circles)) + "): "))

                chosen_power_up_circle: PowerUpCircle = power_up_circles[power_up_circle_index - 1]

                # Ask the player to choose the legendary creature to be evolved and the materials used if
                # possible
                if len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0:
                    # Printing all the legendary creatures the player has, with their indices.
                    curr_legendary_creature_index: int = 1  # initial value
                    for legendary_creature in \
                            new_game.player_data.legendary_creature_inventory.get_legendary_creatures():
                        print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index))
                        print(str(legendary_creature) + "\n")
                        curr_legendary_creature_index += 1

                    # Ask the player to choose the legendary creature to be evolved
                    to_be_evolved_index: int = int(input("Please enter the index of the legendary creature "
                                                         "you want to evolve (1 - " + str(len(new_game.
player_data.legendary_creature_inventory.get_legendary_creatures())) + "): "))
                    while to_be_evolved_index < 1 or to_be_evolved_index > \
                            len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()):
                        to_be_evolved_index = int(
                            input("Sorry, invalid input! Please enter the index of the legendary creature "
                                  "you want to evolve (1 - " + str(len(new_game.
                                                                       player_data.legendary_creature_inventory.
                                                                       get_legendary_creatures())) + "): "))

                    to_be_evolved: LegendaryCreature = new_game.player_data.legendary_creature_inventory. \
                        get_legendary_creatures()[to_be_evolved_index - 1]
                    materials_to_use: list = []  # initial value
                    num_materials: int = int(input("How many material legendary creatures do you want to place "
                                                   "(0 - " +
                                                   str(min(5, len(new_game.player_data.legendary_creature_inventory.
                                                                  get_legendary_creatures()) - 1)) + "): "))
                    while num_materials < 0 or num_materials > 5 or num_materials > \
                            len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) - 1:
                        num_materials = int(input("Sorry, invalid input! How many material legendary creatures do "
                                                  "you want to place "
                                                  "(0 - " +
                                                  str(min(5, len(new_game.player_data.legendary_creature_inventory.
                                                                 get_legendary_creatures()) - 1)) + "): "))

                    # Copy the inventory list so that removing selection options below does not
                    # mutate the player's legendary creature inventory itself.
                    legendary_creature_options: list = list(new_game.player_data.legendary_creature_inventory.
                                                            get_legendary_creatures())
                    legendary_creature_options.remove(to_be_evolved)
                    for i in range(num_materials):
                        print("Below is a list of legendary creatures you can choose as a material.\n")
                        curr_legendary_creature_index: int = 1  # initial value
                        for legendary_creature in legendary_creature_options:
                            print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index))
                            print(str(legendary_creature) + "\n")
                            curr_legendary_creature_index += 1

                        chosen_legendary_creature_index: int = int(input("Please enter the index of the legendary "
                                                                         "creature you want to use as a material "
                                                                         "(1 - " +
                                                                         str(len(legendary_creature_options)) + "): "))
                        while chosen_legendary_creature_index < 1 or chosen_legendary_creature_index > \
                                len(legendary_creature_options):
                            chosen_legendary_creature_index = int(
                                input("Sorry, invalid input! 
Please enter the index of the legendary " "creature you want to use as a material " "(1 - " + str(len(legendary_creature_options)) + ": ")) chosen_material: LegendaryCreature = legendary_creature_options \ [chosen_legendary_creature_index - 1] materials_to_use.append(chosen_material) legendary_creature_options.remove(chosen_material) new_game.player_data.evolve_legendary_creature(to_be_evolved, materials_to_use, chosen_power_up_circle) elif action == "POWER UP LEGENDARY CREATURE": # Clearing up the command line window clear() # Getting a list of power-up circles in the player's base power_up_circles: list = [] # initial value for island in new_game.player_data.player_base.get_islands(): for x in range(island.ISLAND_WIDTH): for y in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if isinstance(curr_tile.building, PowerUpCircle): power_up_circles.append(curr_tile.building) # If there are power up circles, ask the player which power-up circle he/she wants to use if len(power_up_circles) > 0: print("Below is a list of power up circles that you have.") curr_power_up_circle_index: int = 1 # initial value for power_up_circle in power_up_circles: print("POWER UP CIRCLE #" + str(curr_power_up_circle_index)) print(str(power_up_circle) + "\n") curr_power_up_circle_index += 1 power_up_circle_index: int = int(input("Please enter the index of the power-up circle you want to " "use (1 - " + str(len(power_up_circles)) + "): ")) while power_up_circle_index < 1 or power_up_circle_index > len(power_up_circles): power_up_circle_index = int( input("Sorry, invalid input! Please enter the index of the power-up circle you want to " "use (1 - " + str(len(power_up_circles)) + "): ")) chosen_power_up_circle: PowerUpCircle = power_up_circles[power_up_circle_index - 1] # Ask the player to choose the legendary creature to be powered up and the materials used if # possible if len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0: # Printing all the legendary creatures the player has. curr_legendary_creature_index: int = 1 # initial value for legendary_creature in \ new_game.player_data.legendary_creature_inventory.get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index)) print(str(legendary_creature) + "\n") curr_legendary_creature_index += 1 # Ask the player to choose the legendary creature to be powered up to_be_powered_up_index: int = int(input("Please enter the index of the legendary creature " "you want to power-up (1 - " + str(len(new_game. player_data.legendary_creature_inventory.get_legendary_creatures())) + "): ")) while to_be_powered_up_index < 1 or to_be_powered_up_index > \ len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()): to_be_powered_up_index = int( input("Sorry, invalid input! Please enter the index of the legendary creature " "you want to power-up (1 - " + str(len(new_game. player_data.legendary_creature_inventory.get_legendary_creatures())) + "): ")) to_be_powered_up: LegendaryCreature = new_game.player_data.legendary_creature_inventory. \ get_legendary_creatures()[to_be_powered_up_index - 1] materials_to_use: list = [] num_materials: int = int(input("How many material legendary creatures do you want to place " "(0-" + str(min(5, len(new_game.player_data.legendary_creature_inventory. 
get_legendary_creatures()) - 1)) + "): "))
                    while num_materials < 0 or num_materials > 5 or num_materials > \
                            len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) - 1:
                        num_materials = int(input("Sorry, invalid input! How many material legendary creatures do "
                                                  "you want to place "
                                                  "(0 - " +
                                                  str(min(5, len(new_game.player_data.legendary_creature_inventory.
                                                                 get_legendary_creatures()) - 1)) + "): "))

                    # Copy the inventory list so that removing selection options below does not
                    # mutate the player's legendary creature inventory itself.
                    legendary_creature_options: list = list(new_game.player_data.legendary_creature_inventory.
                                                            get_legendary_creatures())
                    legendary_creature_options.remove(to_be_powered_up)
                    for i in range(num_materials):
                        print("Below is a list of legendary creatures you can choose as a material.\n")
                        curr_legendary_creature_index: int = 1  # initial value
                        for legendary_creature in legendary_creature_options:
                            print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index))
                            print(str(legendary_creature) + "\n")
                            curr_legendary_creature_index += 1

                        chosen_legendary_creature_index: int = int(input("Please enter the index of the legendary "
                                                                         "creature you want to use as a material "
                                                                         "(1 - " +
                                                                         str(len(legendary_creature_options)) + "): "))
                        while chosen_legendary_creature_index < 1 or chosen_legendary_creature_index > \
                                len(legendary_creature_options):
                            chosen_legendary_creature_index = int(
                                input("Sorry, invalid input! Please enter the index of the legendary "
                                      "creature you want to use as a material "
                                      "(1 - " + str(len(legendary_creature_options)) + "): "))

                        chosen_material: LegendaryCreature = legendary_creature_options \
                            [chosen_legendary_creature_index - 1]
                        materials_to_use.append(chosen_material)
                        legendary_creature_options.remove(chosen_material)

                    new_game.player_data.power_up_legendary_creature(to_be_powered_up, materials_to_use,
                                                                     chosen_power_up_circle)
        elif action == "GIVE ITEM":
            # Clearing up the command line window
            clear()

            # Getting a list of items which are not runes in the player's item inventory
            non_rune_items: list = [item for item in new_game.player_data.item_inventory.get_items()
                                    if not isinstance(item, Rune)]

            # If non-rune items exist and there are legendary creatures in the legendary creature inventory, ask
            # the player to choose which item is to be given to a legendary creature.
            if len(non_rune_items) > 0 and \
                    len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0:
                print("Below is a list of non-rune items that you have.\n")
                curr_item_index: int = 1  # initial value
                for item in non_rune_items:
                    print("ITEM #" + str(curr_item_index))
                    print(str(item) + "\n")
                    curr_item_index += 1

                item_index: int = int(input("Please enter the index of the item you want to give (1 - " +
                                            str(len(non_rune_items)) + "): "))
                while item_index < 1 or item_index > len(non_rune_items):
                    item_index = int(input("Sorry, invalid input! Please enter the index of the item you want to "
                                           "give (1 - " + str(len(non_rune_items)) + "): "))

                item_to_give: Item = non_rune_items[item_index - 1]
                print("Below is a list of legendary creatures you have.\n")
                curr_legendary_creature_index: int = 1  # initial value
                for legendary_creature in new_game.player_data.legendary_creature_inventory. \
                        get_legendary_creatures():
                    print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index))
                    print(str(legendary_creature) + "\n")
                    curr_legendary_creature_index += 1

                legendary_creature_index: int = int(input("Please enter the index of the legendary creature you "
                                                          "want to give the item to (1 - " +
                                                          str(len(new_game.player_data.legendary_creature_inventory.
get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > len( new_game.player_data.legendary_creature_inventory. get_legendary_creatures()): legendary_creature_index = int( input("Sorry, invalid input! Please enter the index of the legendary creature you " "want to give the item to (1 - " + str(len(new_game.player_data.legendary_creature_inventory. get_legendary_creatures())) + "): ")) chosen_legendary_creature: LegendaryCreature = new_game.player_data.legendary_creature_inventory. \ get_legendary_creatures()[legendary_creature_index - 1] # Give the item to the chosen legendary creature if new_game.player_data.give_item_to_legendary_creature(item_to_give, chosen_legendary_creature): print("You have successfully given " + str(item_to_give.name) + " to " + str(chosen_legendary_creature.name) + ".") else: print("Sorry! Item " + str(item_to_give.name) + " cannot be given to " + str(chosen_legendary_creature.name) + ".") elif action == "SUMMON LEGENDARY CREATURE": # Clearing up the command line window clear() # Getting a list of summonhenges in the player's base summonhenges: list = [] # initial value for island in new_game.player_data.player_base.get_islands(): for x in range(island.ISLAND_WIDTH): for y in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if isinstance(curr_tile.building, Summonhenge): summonhenges.append(curr_tile.building) # Getting a list of scrolls in the player's item inventory scrolls: list = [] # initial value for item in new_game.player_data.item_inventory.get_items(): if isinstance(item, Scroll): scrolls.append(item) # If there are summonhenges and scrolls, ask the player which summonhenge and scroll he/she wants to use if len(summonhenges) > 0 and len(scrolls) > 0: print("Below is a list of summonhenges that you have.") curr_summonhenge_index: int = 1 # initial value for summonhenge in summonhenges: print("SUMMONHENGE #" + str(curr_summonhenge_index)) print(str(summonhenge) + "\n") curr_summonhenge_index += 1 summonhenge_index: int = int(input("Please enter the index of the summonhenge you want to " "use (1 - " + str(len(summonhenges)) + "): ")) while summonhenge_index < 1 or summonhenge_index > len(summonhenges): summonhenge_index = int( input("Sorry, invalid input! Please enter the index of the summonhenge you want to " "use (1 - " + str(len(summonhenges)) + "): ")) chosen_summonhenge: Summonhenge = summonhenges[summonhenge_index - 1] print("Below is a list of scrolls that you have.") curr_scroll_index: int = 1 # initial value for scroll in scrolls: print("SCROLL #" + str(curr_scroll_index)) print(str(scroll) + "\n") curr_scroll_index += 1 scroll_index: int = int(input("Please enter the index of the scroll you want to use " "(1 - " + str(len(scrolls)) + "): ")) while scroll_index < 1 or scroll_index > len(scrolls): scroll_index = int(input("Sorry, invalid input! 
Please enter the index of the scroll "
                                             "you want to use "
                                             "(1 - " + str(len(scrolls)) + "): "))

                chosen_scroll: Scroll = scrolls[scroll_index - 1]
                new_game.player_data.summon_legendary_creature(chosen_scroll, chosen_summonhenge)
        elif action == "FUSE LEGENDARY CREATURES":
            # Clearing up the command line window
            clear()

            # Getting a list of fusion centers in the player's base
            fusion_centers: list = []  # initial value
            for island in new_game.player_data.player_base.get_islands():
                for x in range(island.ISLAND_WIDTH):
                    for y in range(island.ISLAND_WIDTH):
                        curr_tile: IslandTile = island.get_tile_at(x, y)
                        if isinstance(curr_tile.building, FusionCenter):
                            fusion_centers.append(curr_tile.building)

            potential_material_legendary_creatures: list = [legendary_creature for legendary_creature in
                                                            new_game.player_data.legendary_creature_inventory.
                                                            get_legendary_creatures() if legendary_creature not in
                                                            new_game.player_data.battle_team.
                                                            get_legendary_creatures() and not
                                                            legendary_creature.placed_in_training_area]

            # If there are fusion centers and legendary creatures to choose from, ask the user to choose which
            # fusion center to use.
            if len(fusion_centers) > 0 and len(potential_material_legendary_creatures) > 0:
                print("Below is a list of fusion centers that you have.")
                curr_fusion_center_index: int = 1  # initial value
                for fusion_center in fusion_centers:
                    print("FUSION CENTER #" + str(curr_fusion_center_index))
                    print(str(fusion_center) + "\n")
                    curr_fusion_center_index += 1

                fusion_center_index: int = int(input("Please enter the index of the fusion center you want "
                                                     "to use (1 - " + str(len(fusion_centers)) + "): "))
                while fusion_center_index < 1 or fusion_center_index > len(fusion_centers):
                    fusion_center_index = int(input("Sorry, invalid input! Please enter the index of the fusion "
                                                    "center you want "
                                                    "to use (1 - " + str(len(fusion_centers)) + "): "))

                chosen_fusion_center: FusionCenter = fusion_centers[fusion_center_index - 1]
                print("Below is a list of legendary creatures you can fuse to.")
                curr_fusion_legendary_creature_index: int = 1  # initial value
                for fusion_legendary_creature in chosen_fusion_center.get_fusion_legendary_creatures():
                    print("FUSION LEGENDARY CREATURE #" + str(curr_fusion_legendary_creature_index))
                    print(str(fusion_legendary_creature) + "\n")
                    curr_fusion_legendary_creature_index += 1

                fusion_legendary_creature_index: int = int(input("Please enter the index of the fusion legendary "
                                                                 "creature you want to fuse to (1 - " +
                                                                 str(len(chosen_fusion_center.
                                                                         get_fusion_legendary_creatures())) + "): "))
                while fusion_legendary_creature_index < 1 or fusion_legendary_creature_index > len(
                        chosen_fusion_center.get_fusion_legendary_creatures()):
                    fusion_legendary_creature_index = int(
                        input("Sorry, invalid input! Please enter the index of the fusion legendary "
                              "creature you want to fuse to (1 - " +
                              str(len(chosen_fusion_center.get_fusion_legendary_creatures())) + "): "))

                chosen_fusion_legendary_creature: FusionLegendaryCreature = chosen_fusion_center.
\ get_fusion_legendary_creatures()[fusion_legendary_creature_index - 1] print("Below is a list of material legendary creatures for fusion to " + str(chosen_fusion_legendary_creature.name) + ".") for material in chosen_fusion_legendary_creature.get_material_legendary_creatures(): print(str(material) + "\n") chosen_material_legendary_creatures: list = [] # initial value num_materials: int = int(input("How many material legendary creatures do you want to place (0 - " + str(min(5, len(potential_material_legendary_creatures))) + "): ")) for i in range(num_materials): print("Below is a list of legendary creatures which you can use as the materials.") curr_material_index: int = 1 for material_legendary_creature in potential_material_legendary_creatures: print("MATERIAL LEGENDARY CREATURE #" + str(curr_material_index)) print(str(material_legendary_creature) + "\n") curr_material_index += 1 material_index: int = int(input("Please enter the index of the material legendary creature " "you want to select (1 - " + str(len(potential_material_legendary_creatures)) + "): ")) while material_index < 1 or material_index > len(potential_material_legendary_creatures): material_index = int(input("Sorry, invalid input! Please enter the index of the " "material legendary creature " "you want to select (1 - " + str(len(potential_material_legendary_creatures)) + "): ")) chosen_material_legendary_creature: LegendaryCreature = potential_material_legendary_creatures \ [material_index - 1] if chosen_material_legendary_creature.name not in [legendary_creature.name for legendary_creature in chosen_fusion_legendary_creature. get_material_legendary_creatures()]: break else: chosen_material_legendary_creatures.append(chosen_material_legendary_creature) potential_material_legendary_creatures.remove(chosen_material_legendary_creature) new_game.player_data.fuse_legendary_creatures(chosen_material_legendary_creatures, chosen_fusion_legendary_creature, chosen_fusion_center) elif action == "MAKE A WISH": # Clearing up the command line window clear() # Getting a list of temples of wishes in the player's base temples_of_wishes: list = [] # initial value for island in new_game.player_data.player_base.get_islands(): for x in range(island.ISLAND_WIDTH): for y in range(island.ISLAND_WIDTH): curr_tile: IslandTile = island.get_tile_at(x, y) if isinstance(curr_tile.building, TempleOfWishes): temples_of_wishes.append(curr_tile.building) # If there are temples of wishes, ask the player to choose which temple of wishes he/she wants to use if len(temples_of_wishes) > 0: print("Below is a list of temples of wishes you can use.") curr_temple_of_wishes_index: int = 1 # initial value for temple_of_wishes in temples_of_wishes: print("TEMPLE OF WISHES #" + str(curr_temple_of_wishes_index)) print(str(temple_of_wishes) + "\n") curr_temple_of_wishes_index += 1 temple_of_wishes_index: int = int(input("Please enter the index of the temple of wishes " "you want to use (1 - " + str(len(temples_of_wishes)) + "): ")) while temple_of_wishes_index < 1 or temple_of_wishes_index > len(temples_of_wishes): temple_of_wishes_index = int(input("Sorry, invalid input! 
Please enter the index of the " "temple of wishes " "you want to use (1 - " + str(len(temples_of_wishes)) + "): ")) chosen_temple_of_wishes: TempleOfWishes = temples_of_wishes[temple_of_wishes_index - 1] new_game.player_data.make_a_wish(chosen_temple_of_wishes) elif action == "MANAGE ITEM INVENTORY": # Clearing up the command line window clear() if len(new_game.player_data.item_inventory.get_items()) > 0: print("Below is a list of items in your item inventory.\n") curr_item_index: int = 1 for item in new_game.player_data.item_inventory.get_items(): print("ITEM #" + str(curr_item_index)) print(str(item) + "\n") curr_item_index += 1 print("Enter 'Y' for yes.") print("Enter anything else for no.") sell_item: str = input("Do you want to sell an item? ") if sell_item == "Y": item_index: int = int(input("Please enter the index of the item you want to sell (1 - " + str(len(new_game.player_data.item_inventory.get_items())) + "): ")) while item_index < 1 or item_index > len(new_game.player_data.item_inventory.get_items()): item_index = int(input("Sorry, invalid input! Please enter the index of the item you " "want to sell (1 - " + str(len(new_game.player_data.item_inventory.get_items())) + "): ")) to_be_sold: Item = new_game.player_data.item_inventory.get_items()[item_index - 1] if new_game.player_data.sell_item(to_be_sold): print("Congratulations! You have earned " + str(to_be_sold.sell_gold_gain) + " gold and " + str(to_be_sold.sell_gem_gain) + " gems for selling " + str(to_be_sold.name) + "!") else: print("Sorry! " + str(to_be_sold.name) + " cannot be sold!") runes: list = [] # initial value for item in new_game.player_data.item_inventory.get_items(): if isinstance(item, Rune): runes.append(item) # Ask the player which rune to level up if there are runes in the item inventory if len(runes) > 0: print("Below is a list of runes you have.\n") curr_rune_index: int = 1 # initial value for rune in runes: print("RUNE #" + str(curr_rune_index)) print(str(rune) + "\n") curr_rune_index += 1 print("Enter 'Y' for yes.") print("Enter anything else for no.") level_up_rune: str = input("Do you want to level up a rune? ") if level_up_rune == "Y": rune_index: int = int(input("Please enter the index of the rune you want to level " "up (1 - " + str(len(runes)) + "): ")) while rune_index < 1 or rune_index > len(runes): rune_index = int(input("Sorry, invalid input! Please enter the index of the rune you " "want to level " "up (1 - " + str(len(runes)) + "): ")) chosen_rune: Rune = runes[rune_index - 1] new_game.player_data.level_up_rune(chosen_rune) elif action == "MANAGE LEGENDARY CREATURE INVENTORY": # Clearing up the command line window clear() if len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()) > 0: print("Below is a list of legendary creatures in your legendary creature inventory.\n") curr_legendary_creature_index: int = 1 # initial value for legendary_creature in new_game.player_data.legendary_creature_inventory. \ get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(curr_legendary_creature_index)) print(str(legendary_creature) + "\n") curr_legendary_creature_index += 1 legendary_creature_index: int = int(input("Please enter the index of the legendary creature " "you want to remove (1 - " + str(len(new_game.player_data. legendary_creature_inventory. 
get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()): legendary_creature_index = int(input("Sorry, invalid input! Please enter the " "index of the legendary creature " "you want to remove (1 - " + str(len(new_game.player_data. legendary_creature_inventory. get_legendary_creatures())) + "): ")) to_be_removed: LegendaryCreature = \ new_game.player_data.legendary_creature_inventory.get_legendary_creatures() \ [legendary_creature_index - 1] new_game.player_data.remove_legendary_creature(to_be_removed) elif action == "MANAGE BATTLE TEAM": # Clearing up the command line window clear() if len(new_game.player_data.battle_team.get_legendary_creatures()) > 0: print("Below is a list of legendary creatures in your battle team.\n") current_legendary_creature_index: int = 1 # initial value for legendary_creature in new_game.player_data.battle_team.get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(current_legendary_creature_index)) print(str(legendary_creature) + "\n") current_legendary_creature_index += 1 print("Enter 'Y' for yes.") print("Enter anything else for no.") remove_legendary_creature: str = input("Do you want to remove a legendary creature from " "your team? ") if remove_legendary_creature == "Y": legendary_creature_index: int = int(input("Please enter the index of the legendary " "creature you want to remove from " "your battle team (1 - " + str(len(new_game.player_data. battle_team.get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(new_game.player_data.battle_team.get_legendary_creatures()): legendary_creature_index = int(input("Sorry, invalid input! Please enter the index of the " "legendary " "creature you want to remove from " "your battle team (1 - " + str(len(new_game.player_data. battle_team.get_legendary_creatures())) + "): ")) to_be_removed: LegendaryCreature = new_game.player_data.battle_team.get_legendary_creatures() \ [legendary_creature_index - 1] new_game.player_data.remove_legendary_creature_from_team(to_be_removed) if len(new_game.player_data.battle_team.get_legendary_creatures()) < Team.MAX_LEGENDARY_CREATURES: print("Below is a list of legendary creatures you have.\n") current_legendary_creature_index: int = 1 # initial value for legendary_creature in new_game.player_data.legendary_creature_inventory.get_legendary_creatures(): print("LEGENDARY CREATURE #" + str(current_legendary_creature_index)) print(str(legendary_creature) + "\n") current_legendary_creature_index += 1 print("Enter 'Y' for yes.") print("Enter anything else for no.") add_legendary_creature: str = input("Do you want to add a legendary creature to your team? ") if add_legendary_creature == "Y": legendary_creature_index: int = int(input("Please enter the index of the legendary " "creature you want to add to your " "battle team (1 - " + str(len(new_game.player_data. legendary_creature_inventory. get_legendary_creatures())) + "): ")) while legendary_creature_index < 1 or legendary_creature_index > \ len(new_game.player_data.legendary_creature_inventory.get_legendary_creatures()): legendary_creature_index = int(input("Sorry, invalid input! Please enter the index " "of the legendary " "creature you want to add to your " "battle team (1 - " + str(len(new_game.player_data. legendary_creature_inventory. 
get_legendary_creatures())) + "): ")) to_be_added: LegendaryCreature = \ new_game.player_data.legendary_creature_inventory.get_legendary_creatures() \ [legendary_creature_index - 1] new_game.player_data.add_legendary_creature_to_team(to_be_added) elif action == "MANAGE PLAYER BASE": # Clearing up the command line window clear() # Asking whether the player wants to add a new island to the player base or not print("Enter 'Y' for yes.") print("Enter anything else for no.") add_island: str = input("Do you want to add a new island to your player base for " + str(new_game.player_data.player_base.island_build_gold_cost) + " gold? ") if add_island == "Y": new_game.player_data.add_island_to_player_base() # Showing the islands in the player's base if len(new_game.player_data.player_base.get_islands()) > 0: island_count: int = 1 for island in new_game.player_data.player_base.get_islands(): print("----------ISLAND #" + str(island_count) + "----------") print(str(island) + "\n") island_count += 1 chosen_island_index: int = int(input("Enter the index of the island you want to manage (1 - " + str(len(new_game.player_data.player_base.get_islands())) + "): ")) while chosen_island_index < 1 or chosen_island_index > \ len(new_game.player_data.player_base.get_islands()): chosen_island_index = int(input("Sorry, invalid input! Enter the index of the island " "you want to manage (1 - " + str(len( new_game.player_data.player_base.get_islands())) + "): ")) chosen_island: Island = new_game.player_data.player_base.get_islands()[chosen_island_index - 1] print("Enter 'LEVEL UP BUILDING' to level up a building at an island tile.") print("Enter 'BUILD BUILDING' to build at an island tile.") print("Enter 'REMOVE BUILDING' to remove a building from an island tile.") valid_sub_actions: list = ["LEVEL UP BUILDING", "BUILD BUILDING", "REMOVE BUILDING"] sub_action: str = input("What do you want to do? ") while sub_action not in valid_sub_actions: print("Enter 'LEVEL UP BUILDING' to level up a building at an island tile.") print("Enter 'BUILD BUILDING' to build at an island tile.") print("Enter 'REMOVE BUILDING' to remove a building from an island tile.") sub_action = input("Sorry, invalid input! What do you want to do? 
") if sub_action == "LEVEL UP BUILDING": tile_x: int = int(input("Please enter x-coordinates of the building to be levelled up: ")) tile_y: int = int(input("Please enter y-coordinates of the building to be levelled up: ")) if new_game.player_data.level_up_building_at_island_tile(chosen_island_index - 1, tile_x, tile_y): print("You have successfully levelled up " + str(chosen_island.get_tile_at(tile_x, tile_y).building.name) + "!") else: print("Building level up failed!") elif sub_action == "BUILD BUILDING": tile_x: int = int(input("Please enter x-coordinates of the tile to build at: ")) tile_y: int = int(input("Please enter y-coordinates of the tile to build at: ")) if isinstance(chosen_island.get_tile_at(tile_x, tile_y), IslandTile): curr_tile: IslandTile = chosen_island.get_tile_at(tile_x, tile_y) if curr_tile.building is None: print("Below is a list of buildings you can build on the tile.") building_count: int = 1 for building in building_shop.get_buildings_sold(): print("BUILDING #" + str(building_count)) print(str(building) + "\n") building_count += 1 building_index: int = int(input("Please enter the index of the building you " "want to build (1 - " + str(len(building_shop.get_buildings_sold())) + "): ")) while building_index < 1 or building_index > len(building_shop.get_buildings_sold()): building_index = int(input("Sorry, invalid input! Please enter the index of " "the building you " "want to build (1 - " + str(len(building_shop.get_buildings_sold())) + "): ")) to_build: Building = building_shop.get_buildings_sold()[building_index - 1] if new_game.player_data.build_at_island_tile(chosen_island_index - 1, tile_x, tile_y, to_build): print("You have successfully built " + str(to_build.name) + "!") else: print("Sorry, you cannot build " + str(to_build.name) + "!") else: print("Sorry, you cannot build here!") else: print("Sorry, you cannot build here!") elif sub_action == "REMOVE BUILDING": tile_x: int = int(input("Please enter x-coordinates of the tile to remove building from: ")) tile_y: int = int(input("Please enter y-coordinates of the tile to remove building from: ")) if new_game.player_data.remove_building_from_island_tile(chosen_island_index - 1, tile_x, tile_y): print("You have successfully removed a building!") else: print("You failed to remove a building!") elif action == "PLAY ADVENTURE MODE": # Clearing up the command line window clear() # Asking the player what he/she wants to do allowed_sub_actions: list = ["MAP AREA", "DUNGEON", "BATTLE ARENA"] print("Enter 'MAP AREA' to play in a map area") print("Enter 'DUNGEON' to play in a dungeon.") print("Enter 'BATTLE ARENA' to battle in the battle arena.") sub_action: str = input("What do you want to do? ") while sub_action not in allowed_sub_actions: print("Enter 'MAP AREA' to play in a map area") print("Enter 'DUNGEON' to play in a dungeon.") print("Enter 'BATTLE ARENA' to battle in the battle arena.") sub_action = input("Sorry, invalid input! What do you want to do? 
") if sub_action == "MAP AREA": # Clearing up the command line window clear() # Getting a list of map areas for the player to choose from map_areas: list = [battle_area for battle_area in new_game.get_battle_areas() if isinstance(battle_area, MapArea)] # Showing a list of map areas the player can battle in map_area_index: int = 1 # initial value for map_area in map_areas: print("MAP AREA #" + str(map_area_index)) print(str(map_area) + "\n") map_area_index += 1 chosen_map_area_index: int = int(input("Please enter the index of the map area you want " "to battle in (1 - " + str(len(map_areas)) + "): ")) while chosen_map_area_index < 1 or chosen_map_area_index > len(map_areas): chosen_map_area_index = int(input("Sorry, invalid input! Please enter the index of " "the map area you want " "to battle in (1 - " + str(len(map_areas)) + "): ")) chosen_map_area: MapArea = map_areas[chosen_map_area_index - 1] # Displaying a list of levels in the map area which the player can play at level_list: list = chosen_map_area.get_levels() curr_level_index: int = 1 # initial value for level in level_list: print("LEVEL #" + str(curr_level_index)) print(str(level) + "\n") curr_level_index += 1 level_index: int = int(input("Please enter the index of the level you want to " "battle in (1 - " + str(len(level_list)) + "): ")) while level_index < 1 or level_index > len(level_list): level_index = int(input("Sorry, invalid input! Please enter the index of the level you want to " "battle in (1 - " + str(len(level_list)) + "): ")) chosen_level: Level = level_list[level_index - 1] # Start the battle and battle until all stages are cleared curr_stage_number: int = 0 current_stage: Stage = chosen_level.curr_stage(curr_stage_number) while chosen_level.next_stage(curr_stage_number) is not None and \ not new_game.player_data.battle_team.all_died(): # Clearing up the command line window clear() # Show the current stage print("--------------------STAGE #" + str(curr_stage_number + 1) + "--------------------") curr_battle: Battle = Battle(new_game.player_data.battle_team, Team(current_stage.get_enemies_list())) while curr_battle.winner is None: # Printing out the stats of legendary creatures in both teams print("Below are the stats of all legendary creatures in player's team.\n") for legendary_creature in curr_battle.team1.get_legendary_creatures(): print(str(legendary_creature) + "\n") print("Below are the stats of all legendary creatures in enemy's team.\n") for legendary_creature in curr_battle.team2.get_legendary_creatures(): print(str(legendary_creature) + "\n") # Make a legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), "Cannot proceed with battle!" if not curr_battle.whose_turn.can_move: # Skip turn curr_battle.whose_turn.have_turn(curr_battle.whose_turn, None, "NORMAL HEAL") # Make another legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), \ "Cannot proceed with battle!" 
# Checking which legendary creature moves if curr_battle.whose_turn in curr_battle.team1.get_legendary_creatures(): moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn # Asking the player what he/she wants to do print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") print("Enter anything else to use a skill (only applicable if you have usable skills).") usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] possible_actions: list = ["NORMAL ATTACK", "NORMAL HEAL"] trainer_battle_action: str = input("What do you want to do? ") while len(usable_skills) == 0 and trainer_battle_action not in possible_actions: print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") trainer_battle_action = input("Sorry, invalid input! What do you want to do? ") if trainer_battle_action not in possible_actions: # Use skill trainer_battle_action = "USE SKILL" # Show a list of skills the player can use print("Below is a list of skills you can use.\n") curr_skill_index: int = 1 # initial value for skill in usable_skills: print("SKILL #" + str(curr_skill_index)) print(str(skill) + "\n") curr_skill_index += 1 skill_index: int = int(input("Please enter the index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) while skill_index < 1 or skill_index > len(usable_skills): skill_index = int(input("Sorry, invalid input! Please enter the " "index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) skill_to_use: ActiveSkill = usable_skills[skill_index - 1] if skill_to_use.active_skill_type == "ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif skill_to_use.active_skill_type == "HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! 
" "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ALLIES EFFECT": # Asking the user to select who to apply ally effect on print("Below is a list of allies you can apply ally effect on.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ENEMIES EFFECT": # Asking the user to select who to apply enemy effect on print("Below is a list of enemies you can apply enemy effect on.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) elif trainer_battle_action == "NORMAL ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. 
\ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, None, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif trainer_battle_action == "NORMAL HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, None, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < moving_legendary_creature.extra_turn_chance + \ moving_legendary_creature.extra_turn_chance_up and \ moving_legendary_creature.can_move: curr_battle.whose_turn = moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() elif curr_battle.whose_turn in curr_battle.team2.get_legendary_creatures(): curr_moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn chance: float = random.random() trainer_battle_action: str = "NORMAL ATTACK" if chance <= 1 / 3 else \ "NORMAL HEAL" if 1 / 3 < chance <= 2 / 3 else "USE SKILL" usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] # If there are no usable skills and 'trainer_battle_action' is set to "USE SKILL", # change the value of 'trainer_battle_action' if len(usable_skills) == 0: trainer_battle_action = "NORMAL ATTACK" if random.random() < 0.5 else "NORMAL HEAL" if trainer_battle_action == "NORMAL ATTACK": # A normal attack occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) elif trainer_battle_action == "NORMAL HEAL": # A normal heal occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) elif trainer_battle_action == "USE SKILL": # A skill is used moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn skill_to_use: ActiveSkill = usable_skills[random.randint(0, len(usable_skills) - 1)] if skill_to_use.active_skill_type == "ATTACK" or \ skill_to_use.active_skill_type == 
"ENEMIES EFFECT": target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) if skill_to_use.active_skill_type == "ATTACK": if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) else: target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < curr_moving_legendary_creature.extra_turn_chance + \ curr_moving_legendary_creature.extra_turn_chance_up and \ curr_moving_legendary_creature.can_move: curr_battle.whose_turn = curr_moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() # Recovering magic points curr_battle.whose_turn.recover_magic_points() if curr_battle.winner == curr_battle.team1: print("Congratulations! You won the battle!") new_game.player_data.claim_reward(curr_battle.reward) current_stage.is_cleared = True # Checking whether the next stage is None or not. If yes, the player has cleared the level if chosen_level.next_stage(curr_stage_number) is None: new_game.player_data.claim_reward(chosen_level.clear_reward) chosen_level.is_cleared = True if chosen_map_area.mode != "EASY": chosen_level.strengthen_enemies() chosen_level.times_beaten += 1 else: # Move on to the next stage current_stage = chosen_level.next_stage(curr_stage_number) curr_stage_number += 1 elif curr_battle.winner == curr_battle.team2: print("You lost the battle! Please come back stronger!") # Restore all legendary creatures curr_battle.team1.recover_all() curr_battle.team2.recover_all() elif sub_action == "DUNGEON": # Clearing up the command line window clear() # Getting a list of dungeons for the player to choose from dungeons: list = [battle_area for battle_area in new_game.get_battle_areas() if isinstance(battle_arena, Dungeon)] # Showing a list of dungeons the player can battle in dungeon_index: int = 1 # initial value for dungeon in dungeons: print("DUNGEON #" + str(dungeon_index)) print(str(dungeon) + "\n") dungeon_index += 1 chosen_dungeon_index: int = int(input("Please enter the index of the dungeon you want " "to battle in (1 - " + str(len(dungeons)) + "): ")) while chosen_dungeon_index < 1 or chosen_dungeon_index > len(dungeons): chosen_dungeon_index = int(input("Sorry, invalid input! Please enter the index of " "the dungeon you want " "to battle in (1 - " + str(len(dungeons)) + "): ")) chosen_dungeon: Dungeon = dungeons[chosen_dungeon_index - 1] # Displaying a list of levels in the dungeon which the player can play at level_list: list = chosen_dungeon.get_levels() curr_level_index: int = 1 # initial value for level in level_list: print("LEVEL #" + str(curr_level_index)) print(str(level) + "\n") curr_level_index += 1 level_index: int = int(input("Please enter the index of the level you want to " "battle in (1 - " + str(len(level_list)) + "): ")) while level_index < 1 or level_index > len(level_list): level_index = int(input("Sorry, invalid input! 
Please enter the index of the level you want to " "battle in (1 - " + str(len(level_list)) + "): ")) chosen_level: Level = level_list[level_index - 1] # Start the battle and battle until all stages are cleared curr_stage_number: int = 0 current_stage: Stage = chosen_level.curr_stage(curr_stage_number) while chosen_level.next_stage(curr_stage_number) is not None and \ not new_game.player_data.battle_team.all_died(): # Clearing up the command line window clear() # Show the current stage print("--------------------STAGE #" + str(curr_stage_number + 1) + "--------------------") curr_battle: Battle = Battle(new_game.player_data.battle_team, Team(current_stage.get_enemies_list())) while curr_battle.winner is None: # Printing out the stats of legendary creatures in both teams print("Below are the stats of all legendary creatures in player's team.\n") for legendary_creature in curr_battle.team1.get_legendary_creatures(): print(str(legendary_creature) + "\n") print("Below are the stats of all legendary creatures in enemy's team.\n") for legendary_creature in curr_battle.team2.get_legendary_creatures(): print(str(legendary_creature) + "\n") # Make a legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), "Cannot proceed with battle!" if not curr_battle.whose_turn.can_move: # Skip turn curr_battle.whose_turn.have_turn(curr_battle.whose_turn, None, "NORMAL HEAL") # Make another legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), \ "Cannot proceed with battle!" # Checking which legendary creature moves if curr_battle.whose_turn in curr_battle.team1.get_legendary_creatures(): moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn # Asking the player what he/she wants to do print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") print("Enter anything else to use a skill (only applicable if you have usable skills).") usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] possible_actions: list = ["NORMAL ATTACK", "NORMAL HEAL"] trainer_battle_action: str = input("What do you want to do? ") while len(usable_skills) == 0 and trainer_battle_action not in possible_actions: print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") trainer_battle_action = input("Sorry, invalid input! What do you want to do? ") if trainer_battle_action not in possible_actions: # Use skill trainer_battle_action = "USE SKILL" # Show a list of skills the player can use print("Below is a list of skills you can use.\n") curr_skill_index: int = 1 # initial value for skill in usable_skills: print("SKILL #" + str(curr_skill_index)) print(str(skill) + "\n") curr_skill_index += 1 skill_index: int = int(input("Please enter the index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) while skill_index < 1 or skill_index > len(usable_skills): skill_index = int(input("Sorry, invalid input! 
Please enter the " "index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) skill_to_use: ActiveSkill = usable_skills[skill_index - 1] if skill_to_use.active_skill_type == "ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif skill_to_use.active_skill_type == "HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ALLIES EFFECT": # Asking the user to select who to apply ally effect on print("Below is a list of allies you can apply ally effect on.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. 
\ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ENEMIES EFFECT": # Asking the user to select who to apply enemy effect on print("Below is a list of enemies you can apply enemy effect on.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) elif trainer_battle_action == "NORMAL ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, None, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif trainer_battle_action == "NORMAL HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. 
\ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, None, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < moving_legendary_creature.extra_turn_chance + \ moving_legendary_creature.extra_turn_chance_up and \ moving_legendary_creature.can_move: curr_battle.whose_turn = moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() elif curr_battle.whose_turn in curr_battle.team2.get_legendary_creatures(): curr_moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn chance: float = random.random() trainer_battle_action: str = "NORMAL ATTACK" if chance <= 1 / 3 else \ "NORMAL HEAL" if 1 / 3 < chance <= 2 / 3 else "USE SKILL" usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] # If there are no usable skills and 'trainer_battle_action' is set to "USE SKILL", # change the value of 'trainer_battle_action' if len(usable_skills) == 0: trainer_battle_action = "NORMAL ATTACK" if random.random() < 0.5 else "NORMAL HEAL" if trainer_battle_action == "NORMAL ATTACK": # A normal attack occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) elif trainer_battle_action == "NORMAL HEAL": # A normal heal occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) elif trainer_battle_action == "USE SKILL": # A skill is used moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn skill_to_use: ActiveSkill = usable_skills[random.randint(0, len(usable_skills) - 1)] if skill_to_use.active_skill_type == "ATTACK" or \ skill_to_use.active_skill_type == "ENEMIES EFFECT": target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) if skill_to_use.active_skill_type == "ATTACK": if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) else: target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < curr_moving_legendary_creature.extra_turn_chance + \ curr_moving_legendary_creature.extra_turn_chance_up and \ curr_moving_legendary_creature.can_move: curr_battle.whose_turn = curr_moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() # Recovering magic points 
curr_battle.whose_turn.recover_magic_points() if curr_battle.winner == curr_battle.team1: print("Congratulations! You won the battle!") new_game.player_data.claim_reward(curr_battle.reward) current_stage.is_cleared = True # Checking whether the next stage is None or not. If yes, the player has cleared the level if chosen_level.next_stage(curr_stage_number) is None: new_game.player_data.claim_reward(chosen_level.clear_reward) chosen_level.is_cleared = True chosen_level.times_beaten += 1 else: # Move on to the next stage current_stage = chosen_level.next_stage(curr_stage_number) curr_stage_number += 1 elif curr_battle.winner == curr_battle.team2: print("You lost the battle! Please come back stronger!") # Restore all legendary creatures curr_battle.team1.recover_all() curr_battle.team2.recover_all() elif sub_action == "BATTLE ARENA": # Clearing up the command line window clear() # Showing a list of CPU players the player can battle against in the arena. cpu_index: int = 1 for opponent in new_game.battle_arena.get_potential_opponents(): print("OPPONENT NUMBER #" + str(cpu_index)) print(str(opponent) + "\n") cpu_index += 1 chosen_cpu_index: int = int(input("Please enter the index of the CPU player you " "want to attack (1 - " + str(len(new_game.battle_arena. get_potential_opponents())) + "): ")) while chosen_cpu_index < 1 or chosen_cpu_index > len(new_game.battle_arena. get_potential_opponents()): chosen_cpu_index = int(input("Sorry, invalid input! Please enter the index of " "the CPU player you " "want to attack (1 - " + str(len(new_game.battle_arena. get_potential_opponents())) + "): ")) chosen_cpu: Player = new_game.battle_arena.get_potential_opponents()[chosen_cpu_index - 1] # Start the battle and battle until there is a winner # Clearing up the command line window clear() print("--------------------" + str(new_game.player_data.name) + " VS. " + str(chosen_cpu.name) + "--------------------") curr_battle: Battle = Battle(new_game.player_data.battle_team, chosen_cpu.battle_team) while curr_battle.winner is None: # Printing out the stats of legendary creatures in both teams print("Below are the stats of all legendary creatures in player's team.\n") for legendary_creature in curr_battle.team1.get_legendary_creatures(): print(str(legendary_creature) + "\n") print("Below are the stats of all legendary creatures in enemy's team.\n") for legendary_creature in curr_battle.team2.get_legendary_creatures(): print(str(legendary_creature) + "\n") # Make a legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), "Cannot proceed with battle!" if not curr_battle.whose_turn.can_move: # Skip turn curr_battle.whose_turn.have_turn(curr_battle.whose_turn, None, "NORMAL HEAL") # Make another legendary creature move curr_battle.get_someone_to_move() assert isinstance(curr_battle.whose_turn, LegendaryCreature), \ "Cannot proceed with battle!" 
# Checking which legendary creature moves if curr_battle.whose_turn in curr_battle.team1.get_legendary_creatures(): moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn # Asking the player what he/she wants to do print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") print("Enter anything else to use a skill (only applicable if you have usable skills).") usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] possible_actions: list = ["NORMAL ATTACK", "NORMAL HEAL"] trainer_battle_action: str = input("What do you want to do? ") while len(usable_skills) == 0 and trainer_battle_action not in possible_actions: print("Enter 'NORMAL ATTACK' for normal attack.") print("Enter 'NORMAL HEAL' for normal heal.") trainer_battle_action = input("Sorry, invalid input! What do you want to do? ") if trainer_battle_action not in possible_actions: # Use skill trainer_battle_action = "USE SKILL" # Show a list of skills the player can use print("Below is a list of skills you can use.\n") curr_skill_index: int = 1 # initial value for skill in usable_skills: print("SKILL #" + str(curr_skill_index)) print(str(skill) + "\n") curr_skill_index += 1 skill_index: int = int(input("Please enter the index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) while skill_index < 1 or skill_index > len(usable_skills): skill_index = int(input("Sorry, invalid input! Please enter the " "index of the skill " "you want to use (1 - " + str(len(usable_skills)) + "): ")) skill_to_use: ActiveSkill = usable_skills[skill_index - 1] if skill_to_use.active_skill_type == "ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif skill_to_use.active_skill_type == "HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! 
" "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ALLIES EFFECT": # Asking the user to select who to apply ally effect on print("Below is a list of allies you can apply ally effect on.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to apply ally effect on (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, skill_to_use, trainer_battle_action) elif skill_to_use.active_skill_type == "ENEMIES EFFECT": # Asking the user to select who to apply enemy effect on print("Below is a list of enemies you can apply enemy effect on.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to apply enemy effect on" " (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. \ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, skill_to_use, trainer_battle_action) elif trainer_battle_action == "NORMAL ATTACK": # Asking the user to select a target print("Below is a list of enemies you can attack.") enemy_index: int = 1 # initial value for enemy in curr_battle.team2.get_legendary_creatures(): print("ENEMY #" + str(enemy_index)) print(str(enemy) + "\n") enemy_index += 1 chosen_enemy_index: int = int(input("Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) while chosen_enemy_index < 1 or chosen_enemy_index > len(curr_battle. team2.get_legendary_creatures()): chosen_enemy_index = int(input("Sorry, invalid input! " "Please enter the index of the " "enemy you want to attack (1 - " + str(len(curr_battle. team2.get_legendary_creatures())) + "): ")) chosen_enemy_target: LegendaryCreature = curr_battle.team2. 
\ get_legendary_creatures()[chosen_enemy_index - 1] curr_battle.whose_turn.have_turn(chosen_enemy_target, None, trainer_battle_action) if random.random() < chosen_enemy_target.counterattack_chance + \ chosen_enemy_target.counterattack_chance_up: chosen_enemy_target.counterattack(curr_battle.whose_turn) elif trainer_battle_action == "NORMAL HEAL": # Asking the user to select who to heal print("Below is a list of allies you can heal.") ally_index: int = 1 # initial value for ally in curr_battle.team1.get_legendary_creatures(): print("ALLY #" + str(ally_index)) print(str(ally) + "\n") ally_index += 1 chosen_ally_index: int = int(input("Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) while chosen_ally_index < 1 or chosen_ally_index > len(curr_battle. team1.get_legendary_creatures()): chosen_ally_index = int(input("Sorry, invalid input! " "Please enter the index of the " "ally you want to heal (1 - " + str(len(curr_battle. team1.get_legendary_creatures())) + "): ")) chosen_ally_target: LegendaryCreature = curr_battle.team1. \ get_legendary_creatures()[chosen_ally_index - 1] curr_battle.whose_turn.have_turn(chosen_ally_target, None, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < moving_legendary_creature.extra_turn_chance + \ moving_legendary_creature.extra_turn_chance_up and \ moving_legendary_creature.can_move: curr_battle.whose_turn = moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() elif curr_battle.whose_turn in curr_battle.team2.get_legendary_creatures(): curr_moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn chance: float = random.random() trainer_battle_action: str = "NORMAL ATTACK" if chance <= 1 / 3 else \ "NORMAL HEAL" if 1 / 3 < chance <= 2 / 3 else "USE SKILL" usable_skills: list = [skill for skill in curr_battle.whose_turn.get_skills() if curr_battle.whose_turn.curr_magic_points >= skill.magic_points_cost and isinstance(skill, ActiveSkill)] # If there are no usable skills and 'trainer_battle_action' is set to "USE SKILL", # change the value of 'trainer_battle_action' if len(usable_skills) == 0: trainer_battle_action = "NORMAL ATTACK" if random.random() < 0.5 else "NORMAL HEAL" if trainer_battle_action == "NORMAL ATTACK": # A normal attack occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) elif trainer_battle_action == "NORMAL HEAL": # A normal heal occurs moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, None, trainer_battle_action) elif trainer_battle_action == "USE SKILL": # A skill is used moving_legendary_creature: LegendaryCreature = curr_battle.whose_turn skill_to_use: ActiveSkill = usable_skills[random.randint(0, len(usable_skills) - 1)] if skill_to_use.active_skill_type == "ATTACK" or \ skill_to_use.active_skill_type == 
"ENEMIES EFFECT": target: LegendaryCreature = curr_battle.team1.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team1.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) if skill_to_use.active_skill_type == "ATTACK": if random.random() < target.counterattack_chance + \ target.counterattack_chance_up: target.counterattack(moving_legendary_creature) else: target: LegendaryCreature = curr_battle.team2.get_legendary_creatures() \ [random.randint(0, len(curr_battle.team2.get_legendary_creatures()) - 1)] moving_legendary_creature.have_turn(target, skill_to_use, trainer_battle_action) else: pass # Checking the case where the moving legendary creature gets an extra turn if random.random() < curr_moving_legendary_creature.extra_turn_chance + \ curr_moving_legendary_creature.extra_turn_chance_up and \ curr_moving_legendary_creature.can_move: curr_battle.whose_turn = curr_moving_legendary_creature # Recovering magic points curr_battle.whose_turn.recover_magic_points() else: curr_battle.get_someone_to_move() # Recovering magic points curr_battle.whose_turn.recover_magic_points() if curr_battle.winner == curr_battle.team1: print("Congratulations! You won the battle!") new_game.player_data.claim_reward(curr_battle.reward) new_game.player_data.arena_wins += 1 chosen_cpu.arena_losses += 1 if new_game.player_data.arena_points > chosen_cpu.arena_points: new_game.player_data.arena_points += 5 chosen_cpu.arena_points -= 2 else: new_game.player_data.arena_points += 10 chosen_cpu.arena_points -= 5 elif curr_battle.winner == curr_battle.team2: print("You lost the battle! Please come back stronger!") new_game.player_data.arena_losses += 1 chosen_cpu.arena_wins += 1 if new_game.player_data.arena_points > chosen_cpu.arena_points: new_game.player_data.arena_points -= 5 chosen_cpu.arena_points += 10 else: new_game.player_data.arena_points -= 2 chosen_cpu.arena_points += 5 # Restore all legendary creatures curr_battle.team1.recover_all() curr_battle.team2.recover_all() else: pass else: pass print("Enter 'Y' for yes.") print("Enter anything else for no.") continue_playing: str = input("Do you want to continue playing 'Ancient Invasion'? ") # Saving game data and quitting the game. save_game_data(new_game, file_name) return 0 if __name__ == '__main__': main()
ANCIENT-INVASION
/ANCIENT_INVASION-1.tar.gz/ANCIENT_INVASION-1/ANCIENT_INVASION/ancient_invasion.py
ancient_invasion.py
.. -*- mode: rst -*-

|Travis| |Wheel| |GithubRepo| |GithubRelease| |PyPiPackage| |License| |Maintenance| |PyPiStatus|

.. |Travis| image:: https://travis-ci.com/m1ghtfr3e/ANF-Feed.svg?branch=main
   :target: https://travis-ci.com/m1ghtfr3e/ANF-Feed

.. |License| image:: https://img.shields.io/github/license/m1ghtfr3e/ANF-Feed?style=plastic
   :alt: License

.. |Wheel| image:: https://img.shields.io/pypi/wheel/ANF-Feed?style=plastic
   :alt: PyPI - Wheel

.. |GithubRepo| image:: https://img.shields.io/github/repo-size/m1ghtfr3e/ANF-Feed?style=plastic
   :alt: GitHub repo size

.. |Maintenance| image:: https://img.shields.io/maintenance/yes/2021?style=plastic
   :alt: Maintenance

.. |PyPiStatus| image:: https://img.shields.io/pypi/status/ANF-Feed?style=plastic
   :alt: PyPI - Status

.. |GithubRelease| image:: https://img.shields.io/github/v/release/m1ghtfr3e/ANF-Feed?color=purple&include_prereleases&style=plastic
   :alt: GitHub release (latest by date including pre-releases)

.. |PyPiPackage| image:: https://badge.fury.io/py/ANF-Feed.svg
   :target: https://badge.fury.io/py/ANF-Feed

========
ANF Feed
========

This is an application to read RSS feeds from `ANFNews <https://anfenglishmobile.com>`__.

Currently the following languages are supported:

- English (default)
- German
- Kurmanjî
- Spanish
- Arab

*Languages can be changed during usage in the menu bar (upper left corner of the window).*

Installation
------------

- **Via PyPI**

  The easiest installation is over PyPI, via ``pip``::

    $ pip install ANF-Feed

- **Cloning this repo**

  You can also install it by cloning this repository::

    $ git clone https://github.com/m1ghtfr3e/ANF-Feed.git

  or via the GitHub CLI::

    $ gh repo clone m1ghtfr3e/ANF-Feed

  then::

    $ cd ANF-Feed
    $ pip install -r requirements.txt

  Optionally you can pip install it locally::

    $ pip install .

Usage
-----

**After installation you have two options to start**:

- Calling the ``__main__`` of the package::

    $ python3 -m anfrss

  or::

    $ python -m anfrss

- Or using the entry point. In this case you can just enter::

    $ anfrss

**There is also a dark mode which can be used**::

    $ python -m anfrss dark

or::

    $ anfrss dark

Issues / Bugs / Problems
------------------------

**Open an issue, preferably on the** `Issue Tracker of the GitHub Repository`_.

.. _Issue Tracker of the GitHub Repository: https://github.com/m1ghtfr3e/ANF-Feed/issues

Meta
----

:Authors:
    m1ghtfr3e
:Version:
    0.0.2
ANF-Feed
/ANF-Feed-0.0.2.3.tar.gz/ANF-Feed-0.0.2.3/README.rst
README.rst
import sys from pathlib import Path from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QWidget, QListWidget, QVBoxLayout, QLabel, QTextEdit, QSplitter, QMenuBar, QMessageBox, ) from PyQt5.QtGui import QIcon, QFont from PyQt5.QtCore import Qt, pyqtSignal try: import qdarkstyle except ImportError: print('qdarkstyle not installed! "pip install qdarkstyle"') pass try: from ..parser.anffeed import ANFFeed except ImportError: from ..parser.anffeed import ANFFeed # Get the Parent of the current directory # to set the Icon. # DIR = Path(__file__).parents[1] # FEEDS = ANFFeed() class ArticleWidget(QWidget): ''' Article Widget ============== This widget is holding a :class: QTextEdit as read-only, so there is no edit enabled for the User. ''' def __init__(self, *args) -> None: super().__init__(*args) self.setGeometry(0, 0, 400, 600) self.initUi() def initUi(self) -> None: ''' Defines UI of the :class: ArticleWidget The Layout is a :class: QVBoxLayout There is a :class: QLabel over the Text Box Central Widget of this class is the :class: QTextEdit - Read-onldy so user can not change or delete text by acci- dent - Font is set to: Times, and size 12 Text to the QTextEdit is added in the :class: ANFApp: It catches the signal if a title is clicked and appends the: - Summary of the content - Link of the article - The article (just text, no pictures etc.) ''' self.hbox = QVBoxLayout(self) self.setLayout(self.hbox) self.label = QLabel('Your chosen Feed (Summary, Link and Article):') self.hbox.addWidget(self.label) self.text = QTextEdit() self.text.setReadOnly(True) font = QFont('Times', 12) self.text.setFont(font) self.text.setPlaceholderText('Click on a title to read the article') self.hbox.addWidget(self.text) class TitleWidget(QWidget): ''' Title Widget ============ This widget is presenting the Feed titles of the :class: ANFFeed ; It is also containing a :class: pyqtSignal on double click which will be responsible to present the linked feed in the :class: ArticleWidget ''' TitleClicked = pyqtSignal([list]) def __init__(self, *args) -> None: super().__init__(*args) self.setGeometry(0, 0, 350, 600) self.initUi() def initUi(self) -> None: ''' Defines UI of the :class: TitleWidget The structure of this Widget: The Layout is a :class: QVBoxLayout :class: QLabel :class: QListWidget ''' self.hbox = QVBoxLayout() self.setLayout(self.hbox) self.label = QLabel('Titles of available Feeds:') self.hbox.addWidget(self.label) self.titleList = QListWidget() self.titleList.itemPressed.connect(self.onClicked) self.newsFeed() def newsFeed(self, language: str = None) -> None: ''' Set ANF Feeds ============= This method is interacting with the :class: ANFFeed It is getting the RSS Feeds and is representing the Titles of each Feed. Furthermore, it is changing the language if the User is interacting with the "Language" option of the Menu. -> See more in the :class: ANFApp :param language: The language to be set (The ANFFeed is setting to English by default) Default here is None, so it is able to track if a language was chosen by the User or not :type language: str, optional ''' self.news = ANFFeed() if language: self.news.set_language(language) for item in self.news.all_feeds: self.titleList.addItem(item[0]) self.titleList.addItem('') font = QFont('Times') font.setBold(True) self.titleList.setFont(font) self.hbox.addWidget(self.titleList) def onClicked(self, item) -> None: ''' Emit Content ============ This method will be called on double click on one of the titles. 
Depending on the Title clicked on, it gets the Summary, Link and the article's text. After the pyqtSignal TitleClicked is emitting the content. :param item: Item contained by the article clicked on :type item: PyQt Obj ''' feeds = self.news.all_feeds id = 0 for elem in range(len(feeds)): if feeds[elem][0] == item.text(): id = elem self.id = id summary = feeds[id][1] + '\n\n' link = feeds[id][2] detailed = feeds[id][3] self.TitleClicked.emit([summary, link, detailed]) class ANFApp(QMainWindow): ''' Main Window =========== All other Widgets and Elements are organized. Referring objets: - :class: TitleWidget - :class: ArticleWidget General Layout: - QStatusBar - QSplitter() - TitleWidget - ArticleWidget - QMenuBar - QPushButton (Exit) ''' def __init__(self, *args) -> None: super().__init__(*args) self.setWindowState(Qt.WindowMaximized) self.setWindowIcon(QIcon(f'{DIR}/assets/anf.png')) self.setAutoFillBackground(True) self.setWindowTitle('ANF RSS Reader') self.statusBar() self.anfInit() self.show() def anfInit(self) -> None: ''' Defines UI of the :class: ANFApp (Main Window) Both, the Article and the Title Widget are organized inside :class: QSplitter Moreover there is: :class: QMenuBar :class: QPushButton (Exit Button) ''' self.central_widget = QSplitter() self.title_widget = TitleWidget() self.article_widget = ArticleWidget() self.setCentralWidget(self.central_widget) # Define Menu Bar # Main Options: # - Edit # - Settings # - Download # - Help # - Language # self.menu_bar = QMenuBar() self.actionEdit = self.menu_bar.addMenu('Edit') self.actionEdit.addAction('Size +') self.actionEdit.addAction('Size -') self.actionEdit.addSeparator() self.actionEdit.addAction('Settings') self.actionDownload = self.menu_bar.addMenu('Download') self.actionDownload.addAction('Download as Text file.') self.actionDownload.triggered.connect(self.download_article) self.actionHelp = self.menu_bar.addMenu('Help') # Set / Change Language # The String of the Language names needs to be the # same as the "set_language"- method in ANFFeed # is expecting it as parameter # self.actionLang = self.menu_bar.addMenu('Language') self.actionLang.addAction('german') self.actionLang.addAction('english') self.actionLang.addAction('kurmanjî') self.actionLang.addAction('spanish') self.actionLang.addAction('arab') self.actionLang.hovered.connect(self.languageAction) self.central_widget.addWidget(self.menu_bar) self.central_widget.addWidget(self.title_widget) self.central_widget.addWidget(self.article_widget) self.exitBtn = QPushButton(self) self.exitBtn.setGeometry(50, 600, 100, 55) self.exitBtn.setText('Exit') self.exitBtn.setStyleSheet("background-color: red") self.exitBtn.setStatusTip('Exit the Application') self.exitBtn.clicked.connect(self.exit) # Catch Slot Signal from the TitleWidget self.title_widget.TitleClicked.connect(self.title_click) self.show() def languageAction(self, lang) -> None: ''' Change Language =============== Changing the Language of the Feeds if Menu Option is hovered. :param lang: The Language Text given by Menu Option :type lang: PyQt obj ''' self.title_widget.titleList.clear() self.title_widget.newsFeed(lang.text()) self.title_widget.update() def download_article(self) -> None: ''' Download Article ================ ''' download_dialog = QMessageBox(self) download_dialog.setGeometry(0, 0, 800, 700) download_dialog.exec_() ... 
def title_click(self, feed: list) -> None: ''' Signal Catcher ============== Catches the Slot Signal of the :class: TitleWidget and sets the Text for the :class: ArticleWidget; :param feed: The Signal in the TitleWidget emits a list with the contents; :type feed: list ''' # Title = feed[0] # Link = feed[1] # Detailed = feed[2] # Set Title with Italic Font. self.article_widget.text.setFontItalic(True) self.article_widget.text.setText(feed[0]) self.article_widget.text.setFontItalic(False) # Underline & Append Link. self.article_widget.text.setFontUnderline(True) self.article_widget.text.append(feed[1]) self.article_widget.text.setFontUnderline(False) # Append Detailed self.article_widget.text.append('\n\n') self.article_widget.text.append(feed[2]) # Make sure that scroll bar starts at top # because it is set to the widget area # where the last line was appended. self.article_widget.text.scrollToAnchor('https*') def exit(self) -> None: ''' Exit the Application ==================== Called when Exit Button is clicked. ''' self.close() def run(*args) -> None: ''' Run the App =========== Default Style is set to "Breeze" ''' app = QApplication(sys.argv) # Switch into Dark Mode # if it is part of command. # for arg in args: if 'dark' in arg: app.setStyleSheet(qdarkstyle.load_stylesheet()) else: app.setStyle('breeze') window = ANFApp() sys.exit(app.exec_()) if __name__ == '__main__': pass
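The module guard above is a bare `pass`, so the GUI is meant to be started through `run()`. A minimal launcher sketch (assuming the installed package exposes the `anfrss.gui.guiapp` module, as the path below suggests; pass `dark` to enable the qdarkstyle theme handled inside `run()`):

```python
import sys

from anfrss.gui.guiapp import run

if __name__ == '__main__':
    # Forward CLI arguments, e.g. `python launcher.py dark`.
    run(*sys.argv[1:])
```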
ANF-Feed
/ANF-Feed-0.0.2.3.tar.gz/ANF-Feed-0.0.2.3/anfrss/gui/guiapp.py
guiapp.py
import re

import feedparser

# Define available languages:
# the matching feed link is assigned to each.
ENGLISH = 'https://anfenglishmobile.com/feed.rss'
GERMAN = 'https://anfdeutsch.com/feed.rss'
KURMANJI = 'https://anfkurdi.com/feed.rss'
SPANISH = 'https://anfespanol.com/feed.rss'
ARAB = 'https://anfarabic.com/feed.rss'

# RegEx tag used to strip HTML tags from article bodies.
HTML_TAG = re.compile(r'<[^>]+>')


class ANFFeed:
    '''
    ANF Feed Parser

    This class fetches the news posts from one of the links
    defined above (depending on the chosen language,
    default="english").

    Parameters
    ----------
    source : str
        Link to parse; depends on the chosen language.
    '''
    source = ENGLISH

    def __init__(self) -> None:
        self.feed = feedparser.parse(self.source)
        self.entries = self.feed.entries

    @classmethod
    def set_language(cls, language: str) -> None:
        '''
        Set language of link
        ====================

        :param language: Language to set
        :type language: str
        '''
        if language == 'english':
            cls.source = ENGLISH
        elif language == 'german':
            cls.source = GERMAN
        elif language == 'kurmanjî':
            cls.source = KURMANJI
        elif language == 'spanish':
            cls.source = SPANISH
        elif language == 'arab':
            cls.source = ARAB
        else:
            # We should not reach this, as the GUI
            # only shows the available options.
            raise NotImplementedError()

    @property
    def title(self) -> list:
        '''
        Titles Attribute
        ================
        '''
        return [entry.title for entry in self.entries]

    @property
    def summary(self) -> list:
        '''
        Summary Attribute
        =================
        '''
        return [entry.summary for entry in self.entries]

    @property
    def detailed(self) -> list:
        '''
        Detailed Attribute
        ==================
        '''
        detailed = []
        for entry in self.entries:
            text = entry.content[0]['value']
            text = HTML_TAG.sub('', text)   # Remove HTML tags
            detailed.append(text)
        return detailed

    @property
    def link(self) -> list:
        '''
        Links Attribute
        ===============
        '''
        return [entry.link for entry in self.entries]

    @property
    def all_feeds(self) -> list:
        '''
        All Feeds Attribute
        ===================
        '''
        return list(zip(self.title, self.summary, self.link, self.detailed))

    def download_article(self, ident, target):
        '''
        Download Article
        ================

        Requests a chosen article and writes it to a file.

        :param ident: Identifier; index of the entry which
            identifies the article to download
        :type ident: int
        :param target: Directory to write to
        :type target: str
        '''
        query = self.entries[ident]
        result = query.content[0]['value']
        result = HTML_TAG.sub('', result)   # Remove HTML tags
        link = self.link[ident]
        title = self.title[ident]
        file_name = target + title + '.txt'
        with open(file_name, 'a') as f:
            f.write(title)
            f.write('\n\n')
            f.write(link)
            f.write('\n\n')
            f.write(result)
        return f'\n{title} written successfully to {target}.\n'

    def __repr__(self) -> str:
        return (f'Spider: {self.__class__.__name__}\n'
                f'URL: {self.source!r}\n'
                f'Available articles: {len(self.entries)}\n'
                )


if __name__ == '__main__':
    anf = ANFFeed()
    article = anf.download_article(1, '/home/n0name/')
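A short usage sketch of the parser above (requires network access; the import path follows the package layout):

```python
from anfrss.parser.anffeed import ANFFeed

ANFFeed.set_language('german')   # class-level switch of the feed URL
feed = ANFFeed()

# Each entry of all_feeds is a (title, summary, link, detailed) tuple.
for title, summary, link, text in feed.all_feeds[:3]:
    print(title)
    print(link)
```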
ANF-Feed
/ANF-Feed-0.0.2.3.tar.gz/ANF-Feed-0.0.2.3/anfrss/parser/anffeed.py
anffeed.py
# ANI-1 dataset support repository

This repository contains the scripts needed to access the ANI-1 data set. The structure of this repository has been updated so the tools can be installed easily using pip.

This modified repository is a fork of https://github.com/isayev/ANI1_dataset created by [Olexandr Isayev](https://github.com/isayev)

##### If you use the ANI-1 dataset, please cite the following two papers:

Justin S. Smith, Olexandr Isayev, Adrian E. Roitberg. *ANI-1: An extensible neural network potential with DFT accuracy at force field computational cost.* Chemical Science, 2017, DOI: 10.1039/C6SC05720A

Justin S. Smith, Olexandr Isayev, Adrian E. Roitberg. *ANI-1, A data set of 20 million calculated off-equilibrium conformations for organic molecules.* Scientific Data, 4, Article number: 170193, DOI: 10.1038/sdata.2017.193 https://www.nature.com/articles/sdata2017193

### Required software

Python 3.5 or newer
NumPy
h5py

### Included extraction software

pyanitools.py - Contains a class called "anidataloader" for loading and parsing the ANI-1 data set.

example_data_sampler.py - Example of how to sample data from the anidataloader class.

### Installation instructions

1) run `pip install ANI1datatools`

### Description

The downloaded file (https://doi.org/10.6084/m9.figshare.c.3846712) can be extracted on a Unix-based system with the “tar -xzf ani-1_dataset.tar.gz” command. Once extracted, a folder named “ANI-1_release” is the root directory for all files. The individual data files are separated into 8 HDF5 files (extension .h5) named ani_gdb_s0x.h5, where x is a number between 1 and 8 representing the number of heavy atoms (CNO) in the molecules contained in the file. The README file contains information about the data set and the scripts included. The folder named “readers” has a code sample for reading the HDF5 files called “example_data_sampler.py” and “lib/pyanitools.py”, which contains classes for loading and storing data in our in-house format.

### File format

The ANI-1 data set is stored in the HDF5 [http://www.hdfgroup.org/HDF5] file format. Two Python classes are included with the data set’s compressed archive in the Python file “ANI-1_release/readers/lib/pyanitools.py”. These classes are only tested for Python version 3.5 and greater, and require the h5py library [http://www.h5py.org/]. An example script for reading the data from the HDF5 files is given in “ANI-1_release/readers/example_data_sampler.py”.

### Data Units

Coordinates: Angstroms
Energies: Hartrees

### Self-interaction atomic energies

H = -0.500607632585
C = -37.8302333826
N = -54.5680045287
O = -75.0362229210

## Related work

### Developing the ANI-1x potential with active learning:

Justin S. Smith, Ben Nebgen, Nicholas Lubbers, Olexandr Isayev, Adrian E. Roitberg. *Less is more: sampling chemical space with active learning*. arXiv, 2018, DOI: [arXiv:1801.09319](https://arxiv.org/abs/1801.09319)
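A minimal reading sketch, assuming the `anidataloader` interface shipped in `readers/lib/pyanitools.py` (iterating a loader yields per-molecule dictionaries; the key names follow the upstream examples and should be verified against `example_data_sampler.py`):

```python
from pyanitools import anidataloader  # assumed import, see readers/lib/

adl = anidataloader('ANI-1_release/ani_gdb_s01.h5')
for molecule in adl:
    species = molecule['species']        # atomic symbols, e.g. ['C', 'H', ...]
    coords = molecule['coordinates']     # (conformations, atoms, 3), Angstroms
    energies = molecule['energies']      # (conformations,), Hartrees
    print(len(species), coords.shape, energies.shape)
adl.cleanup()                            # close the underlying HDF5 file
```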
ANI1datatools
/ANI1datatools-0.0.3.tar.gz/ANI1datatools-0.0.3/README.md
README.md
# ANIAnimator [![PyPI version](https://badge.fury.io/py/ANIAnimator.svg)](https://badge.fury.io/py/ANIAnimator) [![PyPI pyversions](https://img.shields.io/pypi/pyversions/ANIAnimator.svg)](https://pypi.python.org/pypi/ANIAnimator/) [![Python package](https://github.com/eftalgezer/ANIAnimator/actions/workflows/python-package.yml/badge.svg)](https://github.com/eftalgezer/ANIAnimator/actions/workflows/python-package.yml) [![codecov](https://codecov.io/gh/eftalgezer/ANIAnimator/branch/main/graph/badge.svg?token=Q9TJFIN1U1)](https://codecov.io/gh/eftalgezer/ANIAnimator) [![Codacy Badge](https://app.codacy.com/project/badge/Coverage/b0a14b0216c4451a8743ebb9712eac64)](https://www.codacy.com/gh/eftalgezer/ANIAnimator/dashboard?utm_source=github.com&utm_medium=referral&utm_content=eftalgezer/ANIAnimator&utm_campaign=Badge_Coverage) [![PyPI download month](https://img.shields.io/pypi/dm/ANIAnimator.svg)](https://pypi.python.org/pypi/ANIAnimator/) [![PyPI download week](https://img.shields.io/pypi/dw/ANIAnimator.svg)](https://pypi.python.org/pypi/ANIAnimator/) [![PyPI download day](https://img.shields.io/pypi/dd/ANIAnimator.svg)](https://pypi.python.org/pypi/ANIAnimator/) ![GitHub all releases](https://img.shields.io/github/downloads/eftalgezer/ANIAnimator/total?style=flat) [![GitHub contributors](https://img.shields.io/github/contributors/eftalgezer/ANIAnimator.svg)](https://github.com/eftalgezer/ANIAnimator/graphs/contributors/) [![CodeFactor](https://www.codefactor.io/repository/github/eftalgezer/ANIAnimator/badge)](https://www.codefactor.io/repository/github/eftalgezer/ANIAnimator) [![Codacy Badge](https://app.codacy.com/project/badge/Grade/b0a14b0216c4451a8743ebb9712eac64)](https://www.codacy.com/gh/eftalgezer/ANIAnimator/dashboard?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=eftalgezer/ANIAnimator&amp;utm_campaign=Badge_Grade) [![PyPI license](https://img.shields.io/pypi/l/ANIAnimator.svg)](https://pypi.python.org/pypi/ANIAnimator/) [![DOI](https://zenodo.org/badge/541231117.svg)](https://zenodo.org/badge/latestdoi/541231117) ANIAnimator makes the GIF file from a given chemical ANI file. ## Installation Use the package manager [pip](https://pip.pypa.io/en/stable/) to install ANIAnimator. 
```bash
$ pip install ANIAnimator

# to make sure you have the latest version
$ pip install -U ANIAnimator

# latest available code base
$ pip install -U git+https://github.com/eftalgezer/ANIAnimator.git
```

## Tutorial

- [ANIAnimator v0.0.1 tutorial](https://beyondthearistotelian.blogspot.com/2022/09/anianimator-v001-tutorial.html)
- [What is new in ANIAnimator v0.1.0?](https://beyondthearistotelian.blogspot.com/2022/10/what-is-new-in-anianimator-v010.html)

## Usage

### In code

#### Simple usage

```python
ANIAnimator.animate(anifile="graphene.ANI")
```

#### Advanced usage

```python
ANIAnimator.animate(anifile="graphene.ANI", width=1920, height=1080)  # defaults are 1920 × 1080, respectively
ANIAnimator.animate(anifile="graphene.ANI", loop=1)  # default is 0; 0 means loop, 1 means no loop
ANIAnimator.animate(anifile="graphene.ANI", bonds_param=1.3)  # default is 1.3, sets the bonds between atoms
ANIAnimator.animate(anifile="graphene.ANI", camera=((40, 0, 0), (0, 0, 0), (0, 1, 0)))  # if you need to set the camera
```

### In terminal

#### Simple usage

```sh
$ python -m ANIAnimator graphene.ANI
```

#### Advanced usage

```sh
$ python -m ANIAnimator <ANI file> <width> <height> <loop> <bonds_param> camera=<param>
$ python -m ANIAnimator graphene.ANI 1920 1080 1 1.3
$ python -m ANIAnimator graphene.ANI 1920 1080 1 1.3 camera=40,0,0,0,0,0,0,1,0  # if you need to set the camera
```

### About the `bonds_param` parameter

ANIAnimator uses [mogli](https://github.com/sciapp/mogli) to create PNG images of the steps in ANI files. The default of the mogli package is `1.0`, which sets the bonds between atoms. The ANIAnimator default is `1.3`, since experience shows that `1.3` works better. For details, see the [README.md of the mogli package](https://github.com/sciapp/mogli/blob/master/README.md).

## Contributing

Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.

Please make sure to update tests as appropriate.

## Citation

If you are using ANIAnimator, please cite the relevant version. You can find the relevant citation [here](https://doi.org/10.5281/zenodo.7112024).

```bibtex
@software{eftal_gezer_2023_7182193,
  author       = {Eftal Gezer},
  title        = {eftalgezer/ANIAnimator: v0.2.2},
  month        = jan,
  year         = 2023,
  publisher    = {Zenodo},
  version      = {v0.2.2},
  doi          = {10.5281/zenodo.7577421},
  url          = {https://doi.org/10.5281/zenodo.7577421}
}
```

## License

[GNU General Public License v3.0](https://github.com/eftalgezer/ANIAnimator/blob/master/LICENSE)
ANIAnimator
/ANIAnimator-0.2.2.tar.gz/ANIAnimator-0.2.2/README.md
README.md
import pandas as pd from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt from .graph_show import GraphShow import networkx as nx from operator import itemgetter import warnings import os from os.path import join import sys dic_design_default = {} dic_design_default["color_link"] = "#ee4b7e" dic_design_default["color_text"] = "#680726" dic_design_default["color_label"] = "#680726" dic_design_default["shape"] = 'circle' dic_design_default["mobility"] = True dic_design_default["legend_bool"] = True dic_items_default = {} def _graph_generation(events_list, save_localisation, color_link, color_text, color_label, shape, file_name, mobility, legend, legend_bool, title): """ Creation of the html page using the design Args: events_list(list): a list of 2 nodes, 1 labels and 2 groups to create the graph. save_localisation (string): a string to find the directory to save the html page. color_link(str): the color of the edges color_text(str): the color of the text color_label(str): the color of the label shape(str): the shape of the nodes file_name (string): the name you want to give to the html file. mobility(bool): A bool to decide if the graph move. legend(str): It take the name of the legend. legend_bool(str): A bool to decide if there is a legend or not. title (str): The title of the html page if you want no title use "No" which is the default parameter. Return: Nothing but create the html page. """ graph_shower = GraphShow() graph_shower.create_page(events_list, save_localisation, color_link, color_text, color_label, shape, file_name, mobility, legend, legend_bool, title) def _cut_legend(legend): """ Creation of the new_image by cutting the legend Args: legend(str): It take the name of the legend. """ from PIL import Image # Download Image: im = Image.open(legend) # Check Image Size im_size = im.size # Define box inside image left = (im.size[0]/2) - (im.size[0]*3/8) top = (im.size[0]/2) - (im.size[0]/3) width = (im.size[0]*3/4) height = (im.size[0]/3) # Create Box box = (left, top, left+width, top+height) # Crop Image area = im.crop(box) #area.show() # Save Image area.save(legend, "PNG") def _groups_graph(file_name): """ It load the different groups of the graph Args: file_name(str): It take the name of the file Return: Nothing """ # Loading the file f = open(file_name, "r") # Recuperation of the list of groups for lines in f: if "var nodes" in lines: L_ligne = lines break List_groups = L_ligne List_groups = List_groups[19:] List_groups = List_groups.split("},") Group = [] no_group = 0 #it's use to know if whether there are groups or not new_List_groups = [] for i in List_groups: list_ap = [] list_ap.append(i.split(",")[0].split(":")[1][1:]) list_ap.append(int(i.split(",")[1].split(":")[1][1:])) new_List_groups.append(list_ap) new_List_groups = sorted(new_List_groups, key=itemgetter(1)) for i in new_List_groups: group = i[0] if group[0] == "'": group = group[1:-1] if group != '' and group not in Group: Group.append(group) elif group == '' and no_group == 0: no_group = 1 Group_total = [] if no_group == 1: Group_total.append("No group") for i in Group: Group_total.append(i) return Group_total def _index_generator(file_name): """ It create the index of the graph using the list of nodes and groups Args: file_name(str): It take the name of the file Return: Nothing but create and cut the legend. 
""" # Recuperation of groups Group = _groups_graph(file_name) if len(Group) > 20: warnings.warn("Too many groups, there's overlay beyond 20") name_graph = file_name.split("/")[-1] # Selection of good colors using the groups or not color = ['#f5f3e1', '#97C2FC', '#FFFF00', '#FB7E81', '#7BE141', '#EB7DF4', "#AD85E4", "#ffe0b2", "#6E6EFD", "#FFC0CB", "#C2FABC", "#bcaaa4", "#9e9d24", "#fd7c32", "#26a69a", "#e91e63", "#ff1744", "#fdd835", "#db883d", "#79fbe7", "#c5e1a5", "#BACA03"] color1 = ['black','#97C2FC', '#FFFF00', '#FB7E81', '#7BE141', '#EB7DF4', "#AD85E4", "#ffe0b2", "#6E6EFD", "#FFC0CB", "#C2FABC", "#bcaaa4", "#9e9d24", "#fd7c32", "#26a69a", "#e91e63", "#ff1744", "#fdd835", "#db883d", "#79fbe7", "#c5e1a5", "#BACA03"] if Group[0] != "No group": color = color[1:len(Group)+1] color1 = color1[1:len(Group)+1] else: color = color[0:len(Group)] color1 = color1[0:len(Group)] # Realization of the index plt.figure() L=[] for i in range(len(color)): L.append(i*3) #i's use for the separation between nodes fig = plt.figure(1, figsize=(10, 3)) ax = fig.add_subplot(111) ax.set_aspect(aspect=4.6) plt.scatter(L, [1 for x in range(len(color))], s = 2000/len(L), color = color, marker = 'D') plt.xlim(-1, (len(color)-1)*3+1) plt.ylim(0.7, 1.3) plt.axis('off') def label_node(xy, text): """ Creation of the label of the Nodes Args: xy(list): The list of position x an y. text(str): The text to write on the picture. Returns: Nothing """ y = xy[1] plt.text(xy[0], y, text, ha = "center", family = 'sans-serif', size = 12) if Group[0] != "No group": for i in L: label_node([i,1], str(i//3 + 1)) else: for i in L: label_node([i,1], str(i//3)) def label_number_under(xy, text, col): """ Creation of the label of the Nodes under Args: xy(list): The list of position x an y. text(str): The text to write on the picture. col(str): The color of the nodes Returns: Nothing """ y = xy[1] - 0.25 plt.text(xy[0], y, text, ha = "center", family = 'sans-serif', size = 12, color = col) def label_number_upper(xy, text, col): """ Creation of the label of the Nodes upper Args: xy(list): The list of position x an y. text(str): The text to write on the picture. col(str): The color of the nodes Returns: Nothing """ y = xy[1] + 0.2 plt.text(xy[0], y, text, ha = "center", family='sans-serif', size = 14, color = col) # Writing in the good colors in alternance for i in range(len(L)): if i%2 == 0: label_number_under([L[i],1], Group[L[i]//3][:], color1[i]) else: label_number_upper([L[i],1], Group[L[i]//3][:], color1[i]) # Saving and adjusting the size plt.savefig(file_name + '.png', dpi = 100) plt.close legend = file_name + '.png' #_cut_legend(legend) def draw_from_list(events_list, dic_design = dic_design_default, saving_localisation = "./graph.html", title = "No"): """ Creation of one html graph using the list of edges and the design wanted. Args: events_list (list): a list of 2 nodes, 1 labels and 2 groups to create the graph. dic_design (dictionnary): A dictionnary with the parameters for the design saving_localisation (string): a string to find the directory to save the html page. title (str): The title of the html page if you want no title use "No" which is the default parameter. Returns: Nothing but create the html page. 
Examples: >>> from tools_graph import draw_from_list >>> events_list = [['no_group', 'group_0', '',1 , 1], ['group_0', 'group_1', '',1 , 2], ['group_1', 'group_2', '',2 , 3]] >>> draw_from_list(events_list) """ # Recuperation of the name and the localisation store = saving_localisation.split("/") save_localisation = "" for i in store[:-2]: save_localisation += i + "/" save_localisation += store[-2] file_name = store[-1] # Recuperation of the data color_link = dic_design['color_link'] color_text = dic_design["color_text"] color_label = dic_design["color_label"] shape = dic_design["shape"] mobility = dic_design["mobility"] legend = file_name + ".png" legend_bool = dic_design["legend_bool"] # Generation of the graph and indexation if events_list != []: _graph_generation(events_list, save_localisation, color_link, color_text, color_label, shape, file_name , mobility, legend, legend_bool, title) _index_generator(save_localisation+ "/" + file_name) else: warnings.warn("Your list is empty") def _adding_graphs(events_list_multi, dic_design, saving_localisation, title): """ Function to create union of list to create a graph. Args: events_list_multi (list): a list of list of events. dic_design (dictionnary): A dictionnary with the parameters for the design saving_localisation (string): a string to find the directory to save the html page. title (str): The title of the html page if you want no title use "No" which is the default parameter. Returns: Nothing but create the html page. """ events_list = events_list_multi[0] # Adding which is not already in the list for j in range (1,len(events_list_multi)): for i in events_list_multi[j]: if i not in events_list: events_list.append(i) # Visualisation draw_from_list(events_list, dic_design, saving_localisation, title) def _choice_attributs(graph, dic_items): """ It takes a graph from networkx and the item to keep and return a list in good format for creating a html graph Args: graph (list): It takes a graph from networkx. dic_items (dictionnary): a dictionnary with the items to select. As an exemple use the dictionnary in the example section. The items correspond to the name you want for the nodes with "label_node", it's a list of name, they will be concatenate with an underscore. The same principle is used to create the group in "group_node". You also have the possibility to put a name to the label using a list of name in "label_edge". It's important to know that you haven't to fill all the categories in the dictionnary, by default the categories are empty. "cut" is to only put an int of letters in the name. The option with separators are to create different separators in the name, label and group. Returns: A list with edges. 
""" # To fill the data if the user don't put the section for i in ["size_distinction", "label_edge", "group_node", "label_node"]: if i not in dic_items: dic_items[i] = [] for i in ["size_name_A", "size_name_B"]: if i not in dic_items: dic_items[i] = "" if "cut" not in dic_items: dic_items["cut"] = "No" for i in ["separator_group", "separator_name", "separator_label"]: if i not in dic_items: dic_items[i] = "_" # Recuperation of the items to keep size_distinction = [] size_name_A = "" size_name_B = "" label_edge = dic_items["label_edge"] group_nodes_A = dic_items["group_node"] group_nodes_B = dic_items["group_node"] item_name_1 = dic_items["label_node"] item_name_2 = dic_items["label_node"] cut = dic_items["cut"] sep_gr = dic_items["separator_group"] sep_nam = dic_items["separator_name"] sep_lab = dic_items["separator_label"] # Recuperation of names if item_name_1 != []: Names = [] for i in range(len(item_name_1)): name = [] for (u,v, d) in graph.edges(data=True): if item_name_1[i] in graph.nodes[u]: name.append(graph.nodes[u][item_name_1[i]]) else: name.append("") Names.append(name) Big_list_1 = [] for j in range(len(Names[0])): a = "" for i in range(len(item_name_1)): a += sep_nam if Names[i][j] != "": # selection of names or cutting version if cut != "No": a += str(Names[i][j]).lower()[:int(cut)] else: a += str(Names[i][j]).lower()[:] else: a += sep_nam Big_list_1.append(a) if item_name_2 != []: Names = [] for i in range(len(item_name_2)): name = [] for (u, v, d) in graph.edges(data=True): if item_name_2[i] in graph.nodes[v]: name.append(graph.nodes[v][item_name_2[i]]) else: name.append("") Names.append(name) Big_list_2 = [] for j in range(len(Names[0])): a = "" for i in range(len(item_name_2)): a += sep_nam if Names[i][j] != "": # selection of names or cutting version if cut != "No": a += str(Names[i][j]).lower()[:int(cut)] else: a += str(Names[i][j]).lower()[:] else: a += sep_nam Big_list_2.append(a) list_html = [] edges = [(str(u), str(v)) for (u, v, d) in graph.edges(data=True)] # Load the data for i in range(len(edges)): L = [] for j in range(len(edges[i])): if j == 0 and item_name_1 != []: L.append(edges[i][j].lower() + Big_list_1[i]) elif j == 1 and item_name_2 != []: L.append(edges[i][j].lower() + Big_list_2[i]) else: L.append(edges[i][j]) L.append("") L.append("") L.append("") list_html.append(L) # Put in descending order so that the largest sizes appear this way. 
Size_1 = [9,5] Size_2 = [10,6,4] Size_3 = [12, 9, 7, 5] Size_4 = [12,10, 8, 6, 4] # Management of size and puting some limits if size_name_A != "": size_A = [(d[size_name_A]) for (u, v, d) in graph.edges(data=True)] if len(size_distinction) == 1: Size = Size_1 for i in range(len(list_html)): if size_A[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[0]] + "_"*M else: M = Size[1] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[1]] + "_"*M if len(size_distinction) == 2: Size = Size_2 for i in range(len(list_html)): if size_A[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[0]] + "_"*M elif (size_A[i] < size_distinction[0] and size_A[i] > size_distinction[1]): M = Size[1] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[1]] + "_"*M else: M = Size[2] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[2]] + "_"*M if len(size_distinction) == 3: Size = Size_3 for i in range(len(list_html)): if size_A[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[0]] + "_"*M elif size_A[i] < size_distinction[0] and size_A[i] > size_distinction[1]: M = Size[1] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[1]] + "_"*M elif size_A[i] < size_distinction[1] and size_A[i] > size_distinction[2]: M = Size[2] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[2]] + "_"*M else: M = Size[3] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[3]] + "_"*M if len(size_distinction) == 4: Size = Size_4 for i in range(len(list_html)): if size_A[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[0]] + "_"*M elif size_A[i] < size_distinction[0] and size_A[i] > size_distinction[1]: M = Size[1] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[1]] + "_"*M elif size_A[i] < size_distinction[1] and size_A[i] > size_distinction[2]: M = Size[2] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[2]] + "_"*M elif size_A[i] < size_distinction[2] and size_A[i] > size_distinction[3]: M = Size[3] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[3]] + "_"*M else: M = Size[4] - len(list_html[i][0]) list_html[i][0] = list_html[i][0][:Size[4]] + "_"*M if size_name_B != "": size_B = [(d[size_name_B]) for (u, v, d) in graph.edges(data=True)] if len(size_distinction) == 1: Size = Size_1 for i in range(len(list_html)): if size_B[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[0]] + "_"*M else: M = Size[1] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[1]] + "_"*M if len(size_distinction) == 2: Size = Size_2 for i in range(len(list_html)): if size_B[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[0]] + "_"*M elif (size_B[i] < size_distinction[0] and size_B[i] >= size_distinction[1]): M = Size[1] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[1]] + "_"*M else: M = Size[2] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[2]]+"_"*M if len(size_distinction) == 3: Size = Size_3 for i in range(len(list_html)): if size_B[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[0]] + "_"*M elif size_B[i] < size_distinction[0] and size_B[i] >= size_distinction[1]: M = Size[1] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[1]] 
+ "_"*M elif size_B[i] < size_distinction[1] and size_B[i] >= size_distinction[2]: M = Size[2] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[2]] + "_"*M else: M = Size[3] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[3]] + "_"*M if len(size_distinction) == 4: Size = Size_4 for i in range(len(list_html)): if size_B[i] >= size_distinction[0]: M = Size[0] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[0]] + "_"*M elif size_B[i] < size_distinction[0] and size_B[i] >= size_distinction[1]: M = Size[1] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[1]] + "_"*M elif size_B[i] < size_distinction[1] and size_B[i] >= size_distinction[2]: M = Size[2] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[2]] + "_"*M elif size_B[i] < size_distinction[2] and size_B[i] >= size_distinction[3]: M = Size[3] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[3]] + "_"*M else: M = Size[4] - len(list_html[i][1]) list_html[i][1] = list_html[i][1][:Size[4]] + "_"*M # Management of Groups if group_nodes_A != []: Group_all = [] for i in range(len(group_nodes_A)): group_it = [] for (u, v, d) in graph.edges(data=True): if group_nodes_A[i] in graph.nodes[u]: group_it.append(graph.nodes[u][group_nodes_A[i]]) else: group_it.append("") Group_all.append(group_it) Big_list_group_A =[] for j in range(len(Group_all[0])): a = "" for i in range(len(group_nodes_A)): if Group_all[i][j] != "": a += str(Group_all[i][j]).lower() else: a += " " if i != len(group_nodes_A)-1: a += sep_gr Big_list_group_A.append(a) if group_nodes_B != []: Group_all = [] for i in range(len(group_nodes_B)): group_it = [] for (u, v, d) in graph.edges(data=True): if group_nodes_B[i] in graph.nodes[v]: group_it.append(graph.nodes[v][group_nodes_B[i]]) else: group_it.append("") Group_all.append(group_it) Big_list_group_B = [] for j in range(len(Group_all[0])): a = "" for i in range(len(group_nodes_B)): if Group_all[i][j] != "": a += str(Group_all[i][j]).lower() else: a += " " if i != len(group_nodes_B)-1: a += sep_gr Big_list_group_B.append(a) # Management of labels if label_edge != []: Group_all = [] for i in range(len(label_edge)): group_it = [] for (u, v, d) in graph.edges(data=True): if label_edge[i] in d: group_it.append(d[label_edge[i]]) else: group_it.append("") Group_all.append(group_it) Big_list_label_edge = [] for j in range(len(Group_all[0])): a = "" for i in range(len(label_edge)): if Group_all[i][j] != "": a += str(Group_all[i][j]).lower() else: a += " " if i != len(label_edge)-1: a += sep_lab Big_list_label_edge.append(a) # Complete the matrix with groups and label for i in range(len(list_html)): if group_nodes_A != []: list_html[i][3] = Big_list_group_A[i] if group_nodes_B != []: list_html[i][4] = Big_list_group_B[i] if label_edge != []: list_html[i][2] = Big_list_label_edge[i] return list_html def draw_from_networkx(graph, dic_items = dic_items_default, saving_localisation = "./graphs.html", dic_design = dic_design_default, title = "No"): """ Creation of the graph using the good items to keep from a networkx graph Args: graph (list): It takes a graph from networkx. dic_items (dictionnary): a dictionnary with the items to select. As an exemple use the dictionnary in the example section. The items correspond to the name you want for the nodes with "label_node", it's a list of name, they will be concatenate with an underscore. The same principle is used to create the group in "group_node". 
You also have the possibility to put a name to the label using a list of name in "label_edge". It's important to know that you haven't to fill all the categories in the dictionnary, by default the categories are empty. "cut" is to only put an int of letters in the name. The option with separators are to create different separators in the name, label and group. file_name (string): the name you want to give to the html file. saving_localisation (string): a string to find the directory to save the html page. dic_design (dictionnary): a dictionnary with the design you want for the visualisation. title (str): The title of the html page if you want no title use "No" which is the default parameter. Returns: Nothing but create the html page. Examples: >>> from tools_graph import draw_from_networkx >>> import networkx as nx >>> G1 = nx.Graph() >>> G1.add_edge('0', '1', weight=13, group1 = "Flowers", group2 = "Flowers", details_link = "bordeau", name_1 = "Pivoine", name_2 = "Tulip", color_1 = "bordeau", color_2 = "red", size_1 = 18, size_2 =20) >>> G1.add_edge('0', '2', weight=13, group1 = "Flowers", group2 = "People", details_link = "beautiful", name_1 = "Pivoine", name_2 = "Anne", color_1 = "bordeau", color_2 = "white", size_1 = 18, size_2 =180) >>> dic_items = {} >>> dic_items["label_node"] = [] >>> dic_items["group_node"] = ["group1"] >>> dic_items["label_edge"] = ["details_link"] >>> draw_from_networkx(G1, dic_items) """ # Choosing attributs events_list = _choice_attributs(graph, dic_items) # Generation of the graph draw_from_list(events_list, dic_design, saving_localisation, title) def draw_from_networkx_graphs_list(graph_list, dic_items_list = [], saving_localisation = "./graph.html", dic_design = dic_design_default, title = "No"): """ Creation of one graph using different networkx graph. Args: graph_list (list): it's a list of graphs from networkx. dic_items_list (list): it's a list of dictionnary to select the items to generate the list from the networkx graph. With a dictionnary with the items to select. As an exemple use the dictionnary in the example section. The items correspond to the name you want for the nodes with "label_node", it's a list of name, they will be concatenate with an underscore. The same principle is used to create the group in "group_node". You also have the possibility to put a name to the label using a list of name in "label_edge". It's important to know that you haven't to fill all the categories in the dictionnary, by default the categories are empty. "cut" is to only put an int of letters in the name. The option with separators are to create different separators in the name, label and group. file_name (string): the name you want to give to the html file. saving_localisation (string): a string to find the directory to save the html page. dic_design (dictionnary): a dictionnary with the design you want for the visualisation. title (str): The title of the html page if you want no title use "No" which is the default parameter. Returns: Nothing but create the html page. 
Examples: >>> from tools_graph import draw_from_networkx_graphs_list >>> import networkx as nx >>> G1 = nx.Graph() >>> G1.add_edge('0', '1', weight=13, group1 = "Flowers", group2 = "Flowers", details_link = "bordeau", name_1 = "Pivoine", name_2 = "Tulip", color_1 = "bordeau", color_2 = "red", size_1 = 18, size_2 =20) >>> G1.add_edge('0', '2', weight=13, group1 = "Flowers", group2 = "People", details_link = "beautiful", name_1 = "Pivoine", name_2 = "Anne", color_1 = "bordeau", color_2 = "white", size_1 = 18, size_2 =180) >>> G2 = nx.Graph() >>> G2.add_edge('0', '1', weight=13, group1 = "Flowers", group2 = "Flowers", details_link = "bordeau", name_1 = "Pivoine", name_2 = "Tulip", color_1 = "bordeau", color_2 = "red", size_1 = 18, size_2 =20) >>> G2.add_edge('0', '2', weight=13, group1 = "Flowers", group2 = "People", details_link = "beautiful", name_1 = "Pivoine", name_2 = "Anne", color_1 = "bordeau", color_2 = "white", size_1 = 18, size_2 =180) >>> graph_list = [G1, G2] >>> dic_items = {} >>> dic_items["label_node"] = [] >>> dic_items["group_node"] = ["group1"] >>> dic_items["label_edge"] = ["details_link"] >>> dic_items_list = [dic_items, dic_items] >>> draw_from_networkx_graphs_list(graph_list, dic_items_list) """ if dic_items_list == []: for i in range(len(graph_list)): dic_items_list.append({}) # Creation of the first list graph_1_list = _choice_attributs(graph_list[0], dic_items_list[0]) for j in range (1,len(graph_list)): # Creation of others list graph_2_list = _choice_attributs(graph_list[j], dic_items_list[j]) # Add of Nodes and link if the lines of the list was not in the previous one for i in graph_2_list: if i not in graph_1_list: graph_1_list.append(i) # Generation of the graph events_list = graph_1_list draw_from_list(events_list, dic_design, saving_localisation, title) def adjusting_someone_graph(path_file, path_data_VIS = ""): """ Modification of an html page to correct the pass to the js directory Args: path_file (str): The path of the file to correct. path_data_VIS (string): The path of the VIS directory or if you use the package use default = "". Returns: Nothing but correct the html page. Examples: >>> path_file = "/home/manie/Documents/Stage/graph_visualization/graphs/graph_list_design.html" >>> adjusting_someone_graph(path_file, path_data_VIS ="/home/manie/Documents/ENTER/data_VIS/") """ if path_data_VIS != "": new_js = path_data_VIS + "/vis.js" new_css = path_data_VIS + "/vis.css" stop_bool = True else: # recuperation with the package new_js = join(sys.prefix , 'data_VIS/vis.js') new_css = join(sys.prefix , 'data_VIS/vis.css') if not os.path.isfile(new_js): warnings.warn("Please select a VIS folder or install the package") stop_bool = False else: stop_bool = True if stop_bool: my_file = open(path_file, "r") text = my_file.readlines() text2 = [] for i in text: if '<script type="text/javascript" src=' in i: a = '<script type="text/javascript" src=' b = '></script>' text2.append(a + new_js + b) elif '<link href=' in i: a = '<link href=' b = ' rel="stylesheet" type="text/css">' text2.append(a + new_css + b) else: text2.append(i) my_file.close() my_file_2 = open(path_file, "w") for i in text2: my_file_2.write(i) my_file_2.close()
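# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a custom design dictionary
# built only from the keys that draw_from_list actually reads (color_link,
# color_text, color_label, shape, mobility, legend_bool). The concrete values
# below are illustrative assumptions, not the package defaults.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_design = {
        "color_link": "#37c84f",   # edge color
        "color_text": "#06040a",   # node label color
        "color_label": "#c8374f",  # edge label color
        "shape": "dot",            # any node shape supported by vis.js
        "mobility": True,          # enable the physics engine
        "legend_bool": False,      # do not embed a legend image
    }
    example_events = [["no_group", "group_0", "", 1, 1],
                      ["group_0", "group_1", "", 1, 2]]
    draw_from_list(example_events, example_design, "./graphs/demo.html", "Demo")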
ANIBBLE-X
/ANIBBLE_X-0.7.22-py3-none-any.whl/ANIBBLE_X/graph_visualisation_toolskit.py
graph_visualisation_toolskit.py
import sys import os from os.path import join import platform class GraphShow(): def __init__(self): """ Creation of the structure of the html page """ self.base = """ <html> <head> <script type="text/javascript" src="VIS/dist/vis.js"></script> <link href="VIS/dist/vis.css" rel="stylesheet" type="text/css"> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title> Knowledge graphs </title> </head> <body> <p style ="position:absolute;top:0px;left:02px"> &nbsp; &nbsp; Image &nbsp; &nbsp; &nbsp; &nbsp; &emsp; &emsp; &emsp; &emsp; &emsp; &emsp; &emsp; &emsp;<FONT face="Verdana" color="#06040a" size="6" > ~It's my title~ </FONT> </p> <div id="VIS_draw"></div> <script type="text/javascript"> var nodes = data_nodes; var edges = data_edges; var container = document.getElementById("VIS_draw"); var data = { nodes: nodes, edges: edges }; var options = { nodes: { borderWidth: 2, borderWidthSelected: 6, shape: 'Choose_your_shape', size: 15, color: {border: "#141411", background: "#f5f3e1", highlight: { border: "#141411", background: "#f5f3e1"}, hover: {border: "#141411", background: "#f5f3e1"}}, font: { color: 'Choose_your_color_text', size: 15 } }, edges: { font: { size: 12, align: 'top', color: 'Choose_your_color_label' }, color: 'Choose_your_color_node', arrows: { to: {enabled: true, scaleFactor: 1.2} }, size: 20, smooth: {enabled: true} }, physics: { enabled: mobility }, }; var network = new vis.Network(container, data, options); </script> </body> </html> """ def create_page(self, events, save_localisation, color_node, color_text, color_label, shape, file_name, mobility, legend, legend_bool, title): """ Creating of link with the list, it send the nodes and edges in good format """ # Recuperation of nodes and creation of the dictionnary nodes = [] for event in events: nodes.append(event[0]) nodes.append(event[1]) node_dict = {node: index for index, node in enumerate(nodes)} # Creation of informations of the nodes data_nodes = [] data_edges = [] for node, id in node_dict.items(): data = {} # Gestion of groups for node A and B if id%2 == 1: data['group'] = events[id//2][4] else: data['group'] = events[id//2][3] data["id"] = id data["label"] = node data_nodes.append(data) for edge in events: data = {} data['from'] = node_dict.get(edge[0]) data['label'] = edge[2] data['to'] = node_dict.get(edge[1]) data_edges.append(data) # Creation of the html page using this information self.create_html(data_nodes, data_edges, save_localisation, color_node, color_text, color_label, shape, file_name, mobility, legend, legend_bool, title) def create_html(self, data_nodes, data_edges, save_localisation, color_node, color_text, color_label, shape, file_name, mobility, legend, legend_bool, title): """ Replace the good values in the html file """ if platform.system() == "Linux": if sys.prefix.split("/")[1] == "home": data_js = join(sys.prefix , 'data_VIS/vis.js') data_css = join(sys.prefix , 'data_VIS/vis.css') if not os.path.isfile(data_js): graph_show_path = os.path.abspath(__file__) folder = "/".join(graph_show_path.split("/")[:-2]) data_js = folder + "/data/vis.js" data_css= folder + "/data/vis.css" else: graph_show_path = os.path.abspath(__file__) folder = "/".join(graph_show_path.split("/")[:4]) data_js = join(folder, 'data_VIS/vis.js') data_css = join(folder , 'data_VIS/vis.css') elif platform.system() == "Windows": data_js = join(sys.prefix , "data_VIS\\vis.js") data_js = "file:///" + data_js data_css = join(sys.prefix , "data_VIS\\vis.css") data_css = "file:///" + data_css if not 
os.path.isfile(data_js): graph_show_path = os.path.abspath(__file__) folder = "\\".join(graph_show_path.split("\\")[:-2]) else: data_js = join(sys.prefix , 'data_VIS/vis.js') data_css = join(sys.prefix , 'data_VIS/vis.css') if not os.path.isfile(data_js): graph_show_path = os.path.abspath(__file__) folder = "/".join(graph_show_path.split("/")[:-2]) data_js = folder + "/data/vis.js" data_css= folder + "/data/vis.css" # Generation of the html file by remplacing node save_localisation_file = save_localisation + "/" + file_name save_legend = legend f = open(save_localisation_file, 'w+') # Gestion of the replacement of options selected and the nodes and edges html = self.base.replace('data_nodes', str(data_nodes)).replace('data_edges', str(data_edges)).replace('Choose_your_color_node', color_node).replace('Choose_your_color_text', color_text).replace('Choose_your_color_label', color_label).replace('Choose_your_shape', shape).replace("VIS/dist/vis.js", str(data_js)).replace("VIS/dist/vis.css", str(data_css)) # Gestion of the mobility or not if mobility : html = html.replace("enabled: mobility","enabled: true") else: html = html.replace("enabled: mobility","enabled: false") # Gestion of the possibility to have a legend or not if legend_bool : Text = """ <img style ="position:absolute;top:-48px;left:02px" src="Choose your legend" alt="[No legend]" height ="200" />""" Text = Text.replace("Choose your legend", save_legend) html = html.replace("Image", Text) else: html = html.replace("Image", "") # Title if title != "No": html = html.replace("It's my title", title) else: html = html.replace("~It's my title~", "") f.write(html) f.close()
ANIBBLE-X
/ANIBBLE_X-0.7.22-py3-none-any.whl/ANIBBLE_X/graph_show.py
graph_show.py
import argparse
from ANN_Implementation.src.utils.common import read_config
from ANN_Implementation.src.utils.data_mgmt import (get_data, save_model, plot_data, predict,
                                                    get_log_path, create_log, callback_function,
                                                    train_model_checkpoint)
from ANN_Implementation.src.utils.models import create_model


def training_data(config_path):
    config = read_config(config_path)

    validation_datasize = config['params']['validation_datasize']
    NUM_CLASSES = config['params']['no_classes']
    OPTIMIZER = config['params']['optimizer']
    METRICS = config['params']['metrics']
    LOSS_FN = config['params']['loss_function']
    EPOCHS = config['params']['epochs']
    MODEL_DIR = config['artifacts']['model_dir']
    ARTIFACT_DIR = config['artifacts']['artifacts_dir']
    MODEL_NAME = config['artifacts']['model_name']
    PLOT_DIR = config['artifacts']['plots_dir']
    PLOT_NAME = config['artifacts']['plot_name']
    PREDICTION_IMAGE = config['artifacts']['prediction_image_dir']
    CKPT_MODEL = config['artifacts']['checkpoint_model']

    (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = get_data(validation_datasize)

    log_dir = get_log_path()
    create_log(log_dir, x_train)
    CallBack_list = callback_function(log_dir, ARTIFACT_DIR, CKPT_MODEL)

    model = create_model(LOSS_FN, OPTIMIZER, METRICS, NUM_CLASSES)
    VALIDATION = (x_valid, y_valid)

    history = model.fit(x_train, y_train, epochs=EPOCHS,
                        validation_data=VALIDATION, callbacks=CallBack_list)
    history2 = train_model_checkpoint(ARTIFACT_DIR, CKPT_MODEL, EPOCHS, x_train,
                                      y_train, VALIDATION, CallBack_list)

    file_name = save_model(model, ARTIFACT_DIR, MODEL_DIR, MODEL_NAME)
    plot_data(history, ARTIFACT_DIR, PLOT_DIR, PLOT_NAME)
    predict(ARTIFACT_DIR, MODEL_DIR, file_name, PLOT_DIR, PREDICTION_IMAGE, x_test, y_test)


if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument("--config", "-c", default="config.yml")
    parsed_args = args.parse_args()
    training_data(config_path=parsed_args.config)
ANN-Implementation-AmanGupta0112
/ANN_Implementation_AmanGupta0112-0.0.1-py3-none-any.whl/ANN_Implementation/src/training.py
training.py
import time import tensorflow as tf import os import matplotlib.pyplot as plt import pandas as pd from tensorflow.keras import models import numpy as np def get_data(validation_datasize): mnist = tf.keras.datasets.mnist (x_train_full, y_train_full), (x_test, y_test) = mnist.load_data() x_valid, x_train = x_train_full[:validation_datasize]/255, x_train_full[validation_datasize:]/255 y_valid, y_train = y_train_full[:validation_datasize], y_train_full[validation_datasize:] x_test = x_test/255 return (x_train, y_train),(x_valid,y_valid),(x_test,y_test) def filename_unique(filename): filename = time.strftime(f"{filename}_%Y%m%d%H%M%S") return filename def save_model(model, ARTIFACT_DIR, MODEL_DIR, MODEL_NAME): model_dir = f"{ARTIFACT_DIR}/{MODEL_DIR}" os.makedirs(model_dir, exist_ok=True) file_name = filename_unique(MODEL_NAME) filepath = os.path.join(model_dir, f"{file_name}.h5") model.save(filepath) return f"{file_name}.h5" def plot_data(history, ARTIFACT_DIR, PLOT_DIR, PLOT_NAME): df = pd.DataFrame(history.history) plot_dir = f"{ARTIFACT_DIR}/{PLOT_DIR}" os.makedirs(plot_dir, exist_ok=True) # Only create if dir doesn't exist file_name = filename_unique(PLOT_NAME) filepath = os.path.join(plot_dir, f'{file_name}.png') df.plot(figsize=(10, 7)) plt.grid(True) plt.savefig(filepath) def predict(ARTIFACT_DIR, MODEL_DIR, MODEL_NAME, PLOT_DIR, PREDICTION_IMAGE, x_test, y_test): model = models.load_model( f"{ARTIFACT_DIR}/{MODEL_DIR}/{MODEL_NAME}") model.evaluate(x_test, y_test) x_new = x_test[:30] y_prob = model.predict(x_new) y_prob.round(3) y_pred = np.argmax(y_prob, axis=-1) counter = 0 for img_array,pred,actual in zip(x_new,y_pred,y_test[:30]): os.makedirs(f"{ARTIFACT_DIR}/{PLOT_DIR}/{PREDICTION_IMAGE}", exist_ok=True) plt.imshow(img_array,cmap="binary") plt.title(f'predicted: {pred}, Actual: {actual}') plt.axis('off') plt.savefig( f"{ARTIFACT_DIR}/{PLOT_DIR}/{PREDICTION_IMAGE}/Image{counter}.png") counter += 1 def get_log_path(Log_dir="logs/fit"): uniqueName = time.strftime("log_%Y_%m_%d_%H_%M_%S") log_path = os.path.join(Log_dir, uniqueName) return log_path def create_log(log_dir, x_train): log_dir = get_log_path() file_writer = tf.summary.create_file_writer(logdir=log_dir) with file_writer.as_default(): images = np.reshape(x_train[10:30],(-1,28,28,1)) tf.summary.image("20 hand written digit sample", images, max_outputs=25,step=0) def callback_function(log_dir, ARTIFACT_DIR, CKPT_MODEL): tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir) early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=5,restore_best_weights=True) CKPT_path = f"{ARTIFACT_DIR}/checkpoint/{CKPT_MODEL}.h5" checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(CKPT_path, save_best_only=True) return [tensorboard_callback, early_stopping_cb, checkpoint_cb] def train_model_checkpoint(ARTIFACT_DIR, CKPT_MODEL, EPOCHS, x_train, y_train, VALIDATION, CallBack_list): CKPT_path = f"{ARTIFACT_DIR}/checkpoint/{CKPT_MODEL}.h5" ckpt_model = tf.keras.models.load_model(CKPT_path) history = ckpt_model.fit(x_train, y_train, epochs=EPOCHS, validation_data=VALIDATION, callbacks=CallBack_list) return history
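# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): load MNIST with a
# 5000-image validation split, the same value training.py reads from its
# configuration file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = get_data(5000)
    print(x_train.shape, x_valid.shape, x_test.shape)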
ANN-Implementation-AmanGupta0112
/ANN_Implementation_AmanGupta0112-0.0.1-py3-none-any.whl/ANN_Implementation/src/utils/data_mgmt.py
data_mgmt.py
# ANN - Implementation using tensorflow

INTRODUCTION
In this article, I will explain the basics of neural networks and their code. Nowadays many students just learn how to code neural networks without understanding the core concepts behind them and how they work internally. First, let's understand what a neural network is.

What is Neural Network?
A Neural Network is a series of algorithms that try to mimic the human brain and find relationships between sets of data. It is used in various use cases such as regression, classification, image recognition and many more.
As mentioned above, neural networks try to mimic the human brain, so there are differences as well as similarities between the two. Let us discuss them briefly. Some major differences are that a biological neural network does parallel processing whereas an artificial neural network does serial processing; also, in the former, processing is slower (on the order of milliseconds) while in the latter it is faster (on the order of nanoseconds).

Architecture Of ANN
A neural network has many layers, and each layer performs a specific function. As the complexity of the model increases, the number of layers also increases, which is why it is known as a multi-layer perceptron.
The purest form of a neural network has three layers: the input layer, the hidden layer, and the output layer. The input layer picks up the input signals and transfers them to the next layer, and finally the output layer gives the final prediction. Like machine learning algorithms, these neural networks have to be trained with training data before being applied to a particular problem. Now, let's understand perceptrons in more detail.

About Perceptron
As discussed above, the multi-layer perceptron consists of hidden (or dense) layers. These are made up of many neurons, and neurons are the primary units that work together to form a perceptron. In simple words, as you can see in the above picture, each circle represents a neuron, and a vertical stack of neurons forms a perceptron, which is basically a dense layer.
In the above picture you can also see each neuron's detailed view. Each neuron has some weights (in the picture w1, w2, w3) and a bias, and based on these the computation is done as: combination = bias + weights * input (F = w1*x1 + w2*x2 + w3*x3). Finally an activation function is applied, output = activation(combination); in the picture the activation is the sigmoid, represented by 1/(1 + e^-F). There are other activation functions as well, such as ReLU, Leaky ReLU, tanh, and many more.

Working Of ANN
At first, information is fed into the input layer, which transfers it to the hidden layers; the interconnection between these two layers assigns random weights to each input at the initial point. A bias is then added to each input neuron, and the weighted sum (the combination of weights and bias) is passed through the activation function. The activation function decides which nodes to fire for feature extraction, and finally the output is calculated. This whole process is known as Forward Propagation. After getting the output, the model compares it with the expected output to compute the error, and the weights are updated in Backward Propagation to reduce that error. This process continues for a certain number of epochs (iterations). Finally, the model weights are updated and predictions are made.
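The single-neuron computation described above fits in a few lines. Below is a minimal sketch assuming NumPy; the input and weight values are made up for illustration:

```python
import numpy as np

def sigmoid(f):
    # the activation used in the example above: 1 / (1 + e^-F)
    return 1.0 / (1.0 + np.exp(-f))

x = np.array([0.5, -1.2, 3.0])   # inputs x1, x2, x3 (illustrative values)
w = np.array([0.4, 0.7, -0.2])   # weights w1, w2, w3 (illustrative values)
bias = 0.1

combination = bias + np.dot(w, x)  # F = bias + w1*x1 + w2*x2 + w3*x3
output = sigmoid(combination)      # output = activation(combination)
print(output)
```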
# How to use this Module

## The code is written so that you don't have to rebuild it; you just need to change the values in the configuration file (YAML)

### A glimpse of what is present in the configuration file is shown below

```yaml
params:
  epochs: 5
  batch_size: 32
  no_classes: 10
  input_shape: [28,28]
  loss_function: sparse_categorical_crossentropy
  metrics: accuracy
  optimizer: SGD
  validation_datasize: 5000

artifacts:
  artifacts_dir: artifacts
  model_dir: model
  plots_dir: plots
  checkoint_dir: checkpoints
  model_name: model.h5
  plots_name: plot.png

logs:
  logs_dir: logs_dir
  general_logs: general_logs
  tensorboard_logs: tensorboard_logs
```

## A glimpse of the Layers

```python
LAYERS = [
    tf.keras.layers.Flatten(input_shape=[28,28], name="inputlayer"),
    tf.keras.layers.Dense(300, activation="relu", name="hiddenlayer1"),
    tf.keras.layers.Dense(100, activation="relu", name="hiddenlayer2"),
    tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax", name="outputlayer")]
```
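As a sketch of how these pieces fit together, the layer stack above can be compiled with the parameters from the configuration file. This is a minimal illustration, not the package's own training entry point; loading the YAML with PyYAML and the file name "config.yaml" are assumptions:

```python
import tensorflow as tf
import yaml

# Load the configuration shown above; "config.yaml" is an assumed file name.
with open("config.yaml") as f:
    config = yaml.safe_load(f)

params = config["params"]
OUTPUT_CLASSES = params["no_classes"]

LAYERS = [
    tf.keras.layers.Flatten(input_shape=params["input_shape"], name="inputlayer"),
    tf.keras.layers.Dense(300, activation="relu", name="hiddenlayer1"),
    tf.keras.layers.Dense(100, activation="relu", name="hiddenlayer2"),
    tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax", name="outputlayer")]

model = tf.keras.models.Sequential(LAYERS)
model.compile(loss=params["loss_function"],
              optimizer=params["optimizer"],
              metrics=[params["metrics"]])
```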
ANN-Implementation-kkkumar2
/ANN---Implementation-kkkumar2-0.0.2.tar.gz/ANN---Implementation-kkkumar2-0.0.2/README.md
README.md
import os import csv import math import copy import numpy as np from annogesiclib.gff3 import Gff3Parser from annogesiclib.helper import Helper from annogesiclib.coverage_detection import coverage_comparison from annogesiclib.coverage_detection import replicate_comparison from annogesiclib.lib_reader import read_wig, read_libs def modify_attributes(pre_srna, srna, srna_type, input_type): if (srna_type == "UTR") or (srna_type == "both"): if pre_srna.attributes["sRNA_type"] != srna.attributes["sRNA_type"]: if input_type == "pre": if "antisense" in pre_srna.attributes["sRNA_type"]: pre_srna.attributes["sRNA_type"] = ( srna.attributes["sRNA_type"]) else: if "antisense" not in pre_srna.attributes["sRNA_type"]: srna.attributes["sRNA_type"] = ( pre_srna.attributes["sRNA_type"]) def del_attributes(feature, entry): attributes = {} for key, value in entry.attributes.items(): if feature not in key: attributes[key] = value return attributes def detect_overlap(srna, pre_srna, srna_type, overlap): '''check the sRNA is overlap with other sRNA or not''' if (srna.seq_id == pre_srna.seq_id) and ( srna.strand == pre_srna.strand): if (pre_srna.start >= srna.start) and ( pre_srna.end <= srna.end): modify_attributes(pre_srna, srna, srna_type, None) overlap = True elif (pre_srna.start >= srna.start) and ( pre_srna.start <= srna.end) and ( pre_srna.end >= srna.end): modify_attributes(pre_srna, srna, srna_type, None) overlap = True elif (pre_srna.start <= srna.start) and ( pre_srna.end >= srna.start) and ( pre_srna.end <= srna.end): modify_attributes(pre_srna, srna, srna_type, None) overlap = True elif (pre_srna.start <= srna.start) and ( pre_srna.end >= srna.end): overlap = True modify_attributes(pre_srna, srna, srna_type, "pre") return overlap def merge_tss_pro(pre_srna, srna, feature): if (feature not in pre_srna.attributes.keys()) and ( feature in srna.attributes.keys()): if srna.attributes[feature] != "NA": pre_srna.attributes[feature] = srna.attributes[feature] elif (feature in pre_srna.attributes.keys()) and ( feature in srna.attributes.keys()): if (pre_srna.attributes[feature] == "NA") and ( srna.attributes[feature] != "NA"): pre_srna.attributes[feature] = srna.attributes[feature] elif (srna.attributes[feature] not in pre_srna.attributes[feature]) and ( srna.attributes[feature] != "NA"): pre_srna.attributes[feature] = ",".join( [pre_srna.attributes[feature], srna.attributes[feature]]) def modify_overlap(pre_srna, srna): '''If the sRNA is overlap with other sRNA, it will modify the position and attributes of gff file''' merge_tss_pro(pre_srna, srna, "with_TSS") merge_tss_pro(pre_srna, srna, "end_cleavage") if (srna.attributes["sRNA_type"] == "5utr") or ( srna.attributes["sRNA_type"] == "3utr") or ( srna.attributes["sRNA_type"] == "interCDS"): merge_tss_pro(pre_srna, srna, "start_cleavage") if (srna.start < pre_srna.start): pre_srna.start = srna.start if (srna.end > pre_srna.end): pre_srna.end = srna.end return pre_srna def merge_srna(srnas, srna_type): '''Merge the overlaped sRNA''' final_srnas = [] first = True pre_srna = "" for srna in srnas: if srna.feature != "ncRNA": srna.feature = "ncRNA" if "with_TSS" in srna.attributes.keys(): if srna.attributes["with_TSS"] == "False": srna.attributes["with_TSS"] = "NA" else: srna.attributes["with_TSS"] = "NA" if "end_cleavage" in srna.attributes.keys(): if srna.attributes["end_cleavage"] == "False": srna.attributes["end_cleavage"] = "NA" else: srna.attributes["end_cleavage"] = "NA" overlap = False if first: first = False pre_srna = srna else: if (srna.seq_id != 
pre_srna.seq_id): if not overlap: if pre_srna not in final_srnas: final_srnas.append(pre_srna) pre_srna = srna continue overlap = detect_overlap(srna, pre_srna, srna_type, overlap) if overlap: pre_srna = modify_overlap(pre_srna, srna) if (srna.attributes["sRNA_type"] != "antisense") and ( pre_srna.attributes["sRNA_type"] == "antisense"): pre_srna = srna else: if pre_srna not in final_srnas: final_srnas.append(pre_srna) pre_srna = srna srna.source = "ANNOgesic" if overlap: pre_srna = modify_overlap(pre_srna, srna) if pre_srna not in final_srnas: final_srnas.append(pre_srna) else: if srna not in final_srnas: final_srnas.append(srna) return final_srnas def read_gff(gff_file, type_, ex_srna): datas = [] if os.path.exists(gff_file): for entry in Gff3Parser().entries(open(gff_file)): if type_ == "sRNA": datas.append(entry) elif type_ == "tss": datas.append(entry) else: if (Helper().feature_without_notgene(entry)): if (ex_srna) and (entry.feature != "ncRNA"): datas.append(entry) elif not ex_srna: datas.append(entry) datas = sorted(datas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return datas def read_table(table_file, file_type): datas = [] if os.path.exists(table_file): f_h = open(table_file, "r") for row in csv.reader(f_h, delimiter='\t'): datas.append(import_data(row, file_type)) return datas def merge_incds_utr(utrs, inters): '''merge the sRNA within CDS and UTR-derived sRNA''' new_inters = [] for inter in inters: remove = False for utr in utrs: if inter.source == "in_CDS": if (inter.seq_id == utr.seq_id) and ( inter.strand == utr.strand): if ((inter.end < utr.end) and ( inter.end > utr.start) and ( inter.start <= utr.start)) or ( (inter.start > utr.start) and ( inter.start < utr.end) and ( inter.end >= utr.end)) or ( (inter.end >= utr.end) and ( inter.start <= utr.start)) or ( (inter.end <= utr.end) and ( inter.start >= utr.start)): utr.start = min(inter.start, utr.start) utr.end = max(inter.end, utr.end) remove = True if not remove: new_inters.append(inter) return new_inters def compare_srna_cds(srna, cdss, cutoff_overlap): '''compare sRNA and CDS to get the information of overlap between sRNA and CDS''' detect = False overlap = False for cds in cdss: if (srna.seq_id == cds.seq_id) and ( srna.strand == cds.strand): if ((srna.end < cds.end) and ( srna.end > cds.start) and ( srna.start <= cds.start)) or ( (srna.start > cds.start) and ( srna.start < cds.end) and ( srna.end >= cds.end)) or ( (srna.end >= cds.end) and ( srna.start <= cds.start)) or ( (srna.end <= cds.end) and ( srna.start >= cds.start)): overlap = True per_c = float(min(srna.end, cds.end) - max( srna.start, cds.start)) / float(cds.end - cds.start) if per_c <= cutoff_overlap: if "product" in cds.attributes.keys(): cds_name = "".join([ cds.feature, ":", str(cds.start), "-", str(cds.end), "_", cds.strand, "(", cds.attributes["product"], ")"]) else: cds_name = "".join([ cds.feature, ":", str(cds.start), "-", str(cds.end), "_", cds.strand]) if "overlap_cds" not in srna.attributes.keys(): srna.attributes["overlap_cds"] = cds_name srna.attributes["overlap_percent"] = str(per_c) else: srna.attributes["overlap_cds"] = ( ",".join([srna.attributes["overlap_cds"], cds_name])) srna.attributes["overlap_percent"] = ( ",".join([srna.attributes["overlap_percent"], str(per_c)])) detect = True if not overlap: srna.attributes["overlap_cds"] = "NA" srna.attributes["overlap_percent"] = "NA" return srna elif overlap and detect: return srna else: return None def merge_srna_gff(gffs, in_cds, cutoff_overlap, gff_file, ex_srna): '''merge all 
types of sRNA and print to one gff files''' out = open(gffs["merge"], "w") out.write("##gff-version 3\n") utrs = read_gff(gffs["utr"], "sRNA", ex_srna) inters = read_gff(gffs["normal"], "sRNA", ex_srna) cdss = read_gff(gff_file, "CDS", ex_srna) num_srna = 0 srnas = None if (in_cds) and (len(utrs) != 0) and (len(inters) != 0): inters = merge_incds_utr(utrs, inters) if (len(utrs) != 0) and (len(inters) != 0): pre_srnas = inters + utrs pre_srnas = sorted(pre_srnas, key=lambda x: ( x.seq_id, x.start, x.end, x.strand)) srnas = merge_srna(pre_srnas, "both") elif len(utrs) != 0: srnas = merge_srna(utrs, "UTR") elif len(inters) != 0: srnas = merge_srna(inters, "inter") if srnas is not None: sort_srnas = sorted(srnas, key=lambda x: (x.seq_id, x.start, x.end, x.strand)) else: sort_srnas = None for srna in sort_srnas: new_srna = compare_srna_cds(srna, cdss, cutoff_overlap) if new_srna: new_srna.attributes["ID"] = ( new_srna.seq_id + "_srna" + str(num_srna)) name = '%0*d' % (5, num_srna) new_srna.attributes["Name"] = "sRNA_" + str(name) new_srna.attributes = del_attributes("best_high_coverage", new_srna) new_srna.attributes = del_attributes("best_low_coverage", new_srna) new_srna.attributes = del_attributes("best_avg_coverage", new_srna) attribute_string = ";".join([ "=".join(items) for items in new_srna.attributes.items()]) new_srna.info_without_attributes = ( "\t".join([str(field) for field in [ new_srna.seq_id, new_srna.source, new_srna.feature, new_srna.start, new_srna.end, new_srna.score, new_srna.strand, new_srna.phase]])) out.write(new_srna.info_without_attributes + "\t" + attribute_string + "\n") num_srna += 1 out.close() def import_data(row, type_): if type_ == "inter": return {"strain": row[0], "name": row[1], "start": int(row[2]), "end": int(row[3]), "strand": row[4], "libs": row[5], "detect": row[6], "avg": row[7], "high": row[8], "low": row[9], "detail": row[11], "tss": row[10]} if type_ == "utrr": return {"strain": row[0], "name": row[1], "start": int(row[2]), "end": int(row[3]), "strand": row[4], "libs": row[5], "detect": row[6], "avg": row[7], "high": row[8], "low": row[9], "detail": row[10]} def check_real_cut(inter_cuts, tss_type, cut): for tss, value in inter_cuts.items(): if tss in tss_type.lower(): if cut is None: cut = inter_cuts[tss] else: if cut > inter_cuts[tss]: cut = inter_cuts[tss] if cut is None: if "no_tss" not in inter_cuts.keys(): cut = 0 else: cut = inter_cuts["no_tss"] return cut def get_cutoff(srna, tsss, type_, tables, args_srna): if type_ == "inter": tss_type = None inter_cuts = {"frag": {}, "tex": {}, "notex": {}} fh = open(os.path.join(args_srna.out_folder, "tmp_cutoff_inter"), "r") for row in csv.reader(fh, delimiter='\t'): inter_cuts[row[0]][row[1]] = float(row[2]) if tsss is not None: for tss in tsss: if (srna.seq_id == tss.seq_id) and ( srna.strand == tss.strand): if srna.strand == "+": if math.fabs(srna.start - tss.start) <= args_srna.fuzzy_inter: tss_type = tss.attributes["type"] if srna.start == tss.start: break else: if (math.fabs(srna.end - tss.start) <= args_srna.fuzzy_inter): tss_type = tss.attributes["type"] if srna.end == tss.start: break cut = {"frag": None, "tex": None, "notex": None} if tss_type is None: tss_type = "no_tss" for key, types in inter_cuts.items(): cut[key] = check_real_cut(types, tss_type, cut[key]) elif type_ == "utr": cut = {} fh = open(os.path.join(args_srna.out_folder, "tmp_median"), "r") for row in csv.reader(fh, delimiter='\t'): if (row[0] == srna.seq_id) and ( row[1] == srna.attributes["sRNA_type"]): if row[1] not in 
cut.keys(): cut[row[1]] = {} cut[row[1]][row[2]] = {"median": float(row[3])} fh.close() return cut def devide_covers(covers): frag_covers = {} tex_covers = {} for cond, tracks in covers.items(): if "frag" in cond: frag_covers[cond] = tracks elif "tex" in cond: tex_covers[cond] = tracks return frag_covers, tex_covers def merge_srna_datas(srna_datas_tex, srna_datas_frag): if (len(srna_datas_tex["conds"]) != 0) and ( len(srna_datas_frag["conds"]) != 0): srna_datas = copy.deepcopy(srna_datas_tex) for key, values in srna_datas_frag.items(): if key == "conds": srna_datas["conds"] = dict(srna_datas["conds"], **values) elif key == "best": if srna_datas["best"] < values: srna_datas["best"] = values srna_datas["high"] = srna_datas_frag["high"] srna_datas["low"] = srna_datas_frag["low"] srna_datas["track"] = srna_datas_frag["track"] elif key == "detail": srna_datas["detail"] = srna_datas["detail"] + values elif len(srna_datas_tex["conds"]) != 0: srna_datas = copy.deepcopy(srna_datas_tex) elif len(srna_datas_frag["conds"]) != 0: srna_datas = copy.deepcopy(srna_datas_frag) else: srna_datas = copy.deepcopy(srna_datas_tex) return srna_datas def compare_table(srna, tables, type_, wigs_f, wigs_r, texs, out, tsss, args_srna): detect = False tss_pro, end_pro = get_tss_pro(type_, srna) if not detect: if type_ == "inter": if srna.strand == "+": covers = get_coverage(wigs_f, srna) else: covers = get_coverage(wigs_r, srna) cut = get_cutoff(srna, tsss, type_, tables, args_srna) frag_covers, tex_covers = devide_covers(covers) srna_datas_tex = replicate_comparison( args_srna, tex_covers, srna.strand, "normal", None, None, None, cut["notex"], cut["tex"], texs) srna_datas_frag = replicate_comparison( args_srna, frag_covers, srna.strand, "normal", None, None, None, None, cut["frag"], texs) srna_datas = merge_srna_datas(srna_datas_tex, srna_datas_frag) elif type_ == "utr": if srna.strand == "+": covers = get_coverage(wigs_f, srna) else: covers = get_coverage(wigs_r, srna) cut = get_cutoff(srna, tsss, type_, tables, args_srna) srna_datas = replicate_comparison( args_srna, covers, srna.strand, "sRNA_utr_derived", cut[srna.attributes["sRNA_type"]], cut, srna.attributes["sRNA_type"], None, cut, texs) if len(srna_datas["conds"]) != 0: out.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}" "\t{8}\t{9}\t".format( srna.seq_id, srna.attributes["Name"], srna.start, srna.end, srna.strand, ";".join(srna_datas["conds"].keys()), ";".join(srna_datas["conds"].values()), tss_pro, end_pro, srna_datas["best"])) first = True for data in srna_datas["detail"]: if first: out.write("{0}({1})".format( data["track"], data["avg"])) first = False else: out.write(";{0}({1})".format( data["track"], data["avg"])) out.write("\t{0}\t{1}\n".format( srna.attributes["overlap_cds"].replace(",", ";"), srna.attributes["overlap_percent"].replace(",", ";"))) def check_start_and_end(start, end, covers): if (start - 2) < 0: c_start = 0 else: c_start = start - 2 if (end + 2) > len(covers): c_end = len(covers) else: c_end = end + 2 return c_start, c_end def get_coverage(wigs, srna): cover_sets = {"high": -1, "low": -1, "total": 0, "diff": 0} poss = {"high": 0, "low": 0, "pos": 0} srna_covers = {} for wig_strain, conds in wigs.items(): if wig_strain == srna.seq_id: for cond, tracks in conds.items(): srna_covers[cond] = [] for lib_name, covers in tracks.items(): track = lib_name.split("|")[-3] lib_strand = lib_name.split("|")[-2] lib_type = lib_name.split("|")[-1] cover_sets["total"] = 0 cover_sets["diff"] = 0 first = True c_start, c_end = check_start_and_end( 
srna.start, srna.end, covers) covers = covers[c_start: c_end] if srna.strand == "-": covers = covers[::-1] pos = 0 for cover in covers: if (lib_strand == srna.strand): if srna.strand == "+": cover_pos = c_start + pos else: cover_pos = c_end - pos if (srna.start <= cover_pos) and ( srna.end >= cover_pos): cover_sets["total"] = (cover_sets["total"] + cover) first = coverage_comparison( cover, cover_sets, poss, first, srna.strand, cover_pos) else: if (srna.strand == "+") and ( cover_pos > srna.end): cover_sets_pos = cover_pos break elif (srna.strand == "-") and ( cover_pos < srna.start): cover_sets["pos"] = cover_pos break pos += 1 avg = cover_sets["total"] / float( srna.end - srna.start + 1) srna_covers[cond].append({"track": track, "high": cover_sets["high"], "low": cover_sets["low"], "avg": avg, "pos": poss["pos"], "type": lib_type, "final_start": srna.start, "final_end": srna.end}) return srna_covers def get_tss_pro(type_, srna): '''check the sRNA is associated with TSS or processing site''' if type_ == "utr": if "start_cleavage" not in srna.attributes.keys(): srna.attributes["start_cleavage"] = "NA" if "with_TSS" not in srna.attributes.keys(): srna.attributes["with_TSS"] = "NA" if "end_cleavage" not in srna.attributes.keys(): srna.attributes["end_cleavage"] = "NA" if (srna.attributes["with_TSS"] != "NA") and ( srna.attributes["start_cleavage"] != "NA"): tss_pro = ";".join([srna.attributes["with_TSS"], srna.attributes["start_cleavage"]]) elif (srna.attributes["with_TSS"] != "NA"): tss_pro = srna.attributes["with_TSS"] elif srna.attributes["start_cleavage"] != "NA": tss_pro = srna.attributes["start_cleavage"] else: tss_pro = "NA" if (srna.attributes["end_cleavage"] != "NA"): end_pro = srna.attributes["end_cleavage"] else: end_pro = "NA" tss_pro = tss_pro.replace(",", ";") end_pro = end_pro.replace(",", ";") elif type_ == "inter": tss_pro = "" end_pro = "" if (srna.attributes["with_TSS"] != "NA"): tss_pro = srna.attributes["with_TSS"].replace(",", ";") else: tss_pro = "NA" if (srna.attributes["end_cleavage"] != "NA"): end_pro = srna.attributes["end_cleavage"].replace(",", ";") else: end_pro = "NA" return tss_pro, end_pro def free_memory(paras): for data in paras: del(data) def merge_srna_table(srna_file, csvs, wigs_f, wigs_r, tss_file, args_srna): libs, texs = read_libs(args_srna.libs, args_srna.merge_wigs) srnas = read_gff(srna_file, "sRNA", args_srna.ex_srna) if tss_file is not None: tsss = read_gff(tss_file, "tss", args_srna.ex_srna) else: tsss = None inters = read_table(csvs["normal"], "inter") utrs = read_table(csvs["utr"], "utr") out = open(csvs["merge"], "w") for srna in srnas: if ("5utr" in srna.attributes["sRNA_type"]) or ( "3utr" in srna.attributes["sRNA_type"]) or ( "interCDS" in srna.attributes["sRNA_type"]): compare_table(srna, utrs, "utr", wigs_f, wigs_r, texs, out, tsss, args_srna) elif ("intergenic" in srna.attributes["sRNA_type"]) or ( "in_CDS" in srna.attributes["sRNA_type"]) or ( "antisense" in srna.attributes["sRNA_type"]): compare_table(srna, inters, "inter", wigs_f, wigs_r, texs, out, tsss, args_srna) out.close() paras = [wigs_r, wigs_f, srnas, tsss, inters, utrs] free_memory(paras)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/merge_sRNA.py
merge_sRNA.py
class WigParser(object):
    '''Parse a wiggle file into (strain, track, position, coverage) records'''

    def parser(self, wig_fh, strand):
        track = ""
        strain = ""
        for line in wig_fh.readlines():
            line = line.strip()
            if (len(line) != 0) and (not line.startswith("#")):
                datas = line.split(" ")
                if (datas[0] == "variableStep") or (datas[0] == "fixedStep"):
                    # a step declaration starts a new strain section
                    strain = datas[1].split("=")
                    strain = strain[1].strip()
                    pre_pos = 0
                    first = True
                if (datas[0] == "track"):
                    track = datas[2].split("=")
                    track = track[1].replace("\"", "")
                    pre_pos = 0
                    first = True
                if (datas[0] != "track") and (
                        datas[0] != "variableStep") and (
                        datas[0] != "fixedStep"):
                    if len(datas) != 2:
                        datas = line.split("\t")
                    if int(datas[0]) - 1 != pre_pos:
                        # fill positions missing from the wig with zero coverage
                        for pos in range(pre_pos + 1, int(datas[0])):
                            yield AssignValue(pos, 0, strand, strain, track)
                        pre_pos = int(datas[0])
                        first = True
                    if (int(datas[0]) - 1 == pre_pos) or (first):
                        pre_pos = int(datas[0])
                        first = False
                        yield AssignValue(datas[0], datas[1], strand,
                                          strain, track)


class AssignValue(object):

    def __init__(self, pos, coverage, strand, strain, track):
        self.pos = int(pos)
        if strand == "+":
            self.coverage = float(coverage)
        else:
            # reverse-strand wigs may store negative values; flip the sign
            if float(coverage) < 0:
                self.coverage = -1 * float(coverage)
            else:
                self.coverage = float(coverage)
        self.strand = strand
        self.strain = strain
        self.track = track

    def __str__(self):
        return "{0} {1} {2} {3} {4}".format(
            self.pos, self.coverage, self.strand, self.strain, self.track)
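# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): stream coverage values
# from a forward-strand wiggle file; "forward.wig" is an illustrative name.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with open("forward.wig") as wig_fh:
        for value in WigParser().parser(wig_fh, "+"):
            print(value)  # prints "pos coverage strand strain track"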
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/parser_wig.py
parser_wig.py
import os def print_track(track_num, svg_out, figure_width): id_num = 3067 x = 2.0744663 y = 131 for track in range(track_num): if (track % 2) == 0: svg_out.write(" <rect\n") svg_out.write(" width=\"{0}\"\n".format(figure_width)) svg_out.write(" height=\"40\"\n") svg_out.write(" x=\"{0}\"\n".format(x)) if track == 0: svg_out.write(" y=\"{0}\"\n".format(y)) else: y = y + 40 svg_out.write(" y=\"{0}\"\n".format(y)) svg_out.write(" id=\"rect{0}\"\n".format(id_num)) svg_out.write(" style=\"opacity:0.25;fill:#37c84f;" "fill-opacity:0.25;fill-rule:evenodd;") svg_out.write("stroke:#000000;stroke-width:1px;" "stroke-linecap:butt;stroke-linejoin:miter;" "stroke-opacity:0.25\" />\n") if (track % 2) == 1: svg_out.write(" <rect\n") svg_out.write(" width=\"{0}\"\n".format(figure_width)) svg_out.write(" height=\"40\"\n") svg_out.write(" x=\"{0}\"\n".format(x)) y = y + 40 svg_out.write(" y=\"{0}\"\n".format(y)) svg_out.write(" id=\"rect{0}\"\n".format(id_num)) svg_out.write(" style=\"opacity:0.25;fill:#c8374f;" "fill-opacity:0.25;fill-rule:evenodd;") svg_out.write("stroke:#000000;stroke-width:1px;" "stroke-linecap:butt;stroke-linejoin:miter;" "stroke-opacity:0.25\" />\n") id_num += 1 def gen_svg(input_png, track_num, figure_height, figure_width): svg_out = open(input_png[:-4] + ".svg", "w") svg_out.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!-- Created with Inkscape (http://www.inkscape.org/) --> <svg xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" """) svg_out.write(" width=\"{0}\"\n".format(figure_width)) svg_out.write(" height=\"{0}\"\n".format(figure_height)) svg_out.write(" viewBox=\"0 0 1860 {0}\"\n".format(figure_height)) svg_out.write(""" id="svg3055"> <metadata id="metadata3061"> <rdf:RDF> <cc:Work rdf:about=""> <dc:format>image/svg+xml</dc:format> <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> <dc:title></dc:title> </cc:Work> </rdf:RDF> </metadata> <defs id="defs3059" /> <image """) svg_out.write(" xlink:href=\"file://{0}/{1}\"\n".format( os.getcwd(), input_png)) svg_out.write(""" width="100%" height="100%" preserveAspectRatio="xMidYMin meet" id="image3063" />\n""") print_track(track_num, svg_out, figure_width) svg_out.write("</svg>") svg_out.close()
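# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): wrap an existing screenshot
# in an SVG with four alternating track bands. The file name and dimensions
# are illustrative; the output lands next to the PNG with a .svg extension.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    gen_svg("expression_profile.png", track_num=4,
            figure_height=1500, figure_width=1860)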
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gen_svg.py
gen_svg.py
from annogesiclib.gff3 import Gff3Parser def get_proteins(datas, checks, blast_f, score_n): '''filter and import the protein hit of blast''' proteins = [] nums = {"index": 0, "hypo": 0} for data in datas: if (nums["index"] % 4 == 0) and (nums["index"] != 0): if "[" in data: data1 = data.split("[") data2 = data1[1].split("]") strain = data2[0].strip() else: data1 = data.split("Length") strain = "NA" name = data1[0].strip() tag = datas[nums["index"] - 1] if ("hypothetical" in name) or ( "Hypothetical" in name) or ( "unknown" in name) or ( "Unknown" in name) or ( "predicted coding region" in name) or ( "Predicted coding region" in name) or ( "PREDICTED:" in name) or ( "putative" in name) or ("Putative" in name): nums["hypo"] += 1 if not checks["detect"]: for line in blast_f: line = line.strip() if ("Expect" in line) and ("Score" in line) and ( "Method" in line): e_value = line.split(",")[1].split(" ")[-1] score = line.split("Score = ")[-1].split(" bits ")[0].strip() if score_n is None: checks["detect"] = True checks["print"] = True proteins.append({"name": name, "strain": strain, "e": e_value, "tag": tag, "score": score}) elif (score_n is not None) and (float(score) >= score_n): checks["detect"] = True checks["print"] = True proteins.append({"name": name, "strain": strain, "e": e_value, "tag": tag, "score": score}) break else: if score_n is None: proteins.append({"name": name, "strain": strain, "e": e_value, "tag": tag, "score": score}) elif (score_n is not None) and (float(score) >= score_n): proteins.append({"name": name, "strain": strain, "e": e_value, "tag": tag, "score": score}) nums["index"] += 1 return proteins, nums def detect_hypo(proteins, blasts, type_): '''remove the hit which is hypothetical protein or unknown''' protein_names = {} for protein in proteins: name = protein["name"].replace("\n", "") if ("hypothetical" not in name) and ( "Hypothetical" not in name) and ( "Unknown" not in name) and ( "unknown" not in name) and ( "Predicted coding region" not in name) and ( "predicted coding region" not in name) and ( "PREDICTED:" not in name) and ("putative" not in name) and ( "Putative" not in name): if (name not in protein_names.keys()): protein_names[name] = [] protein_names[name].append(protein["tag"]) if type_ != "equal": blasts["blast"] = True return protein_names, protein["e"], protein["score"] def detect_nr(line, blast_f, out_t, blasts, prefix, score_n): '''detect the hit in nr database''' checks = {"print": False, "detect": False} if line.startswith(">"): info = line.replace(">", "") for line in blast_f: line = line.strip() if len(line) != 0: info = " ".join([info, line]) else: break datas = info.split("|") proteins, nums = get_proteins(datas, checks, blast_f, score_n) if checks["print"]: if blasts["hit_num"] < 3: protein_names, e, score = detect_hypo(proteins, blasts, "low") if len(protein_names) != 0: for key, value in protein_names.items(): out_t.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format( prefix, key, ",".join(value), e, score)) blasts["hit_num"] += 1 if checks["print"]: return 2 else: return 1 def detect_srna(line, blast_f, out_t, blasts, prefix, score_s): '''detect hit in sRNA database''' print_ = False blasts["name"] = "" if line[0] == ">": name_complete = False blasts["name"] = line[1:] blasts["hit_num"] += 1 for line in blast_f: line.strip() if line.startswith("Length="): name_complete = True if not name_complete: blasts["name"] = " ".join([blasts["name"], line]) if "Expect =" in line: e_value = line.split(" ")[-1].strip() score = line.split("Score = ")[-1].split(" 
bits ")[0].strip() if score_s is None: print_ = True elif (score_s is not None) and (float(score) >= score_s): print_ = True break if print_: blasts["name"] = blasts["name"].lstrip().replace("\n", "") out_t.write("{0}\t{1}\t{2}\t{3}\n".format( prefix, blasts["name"], e_value, score)) blasts["blast"] = True if print_: return 2 else: return 1 def read_gff(srna_file, data_type): srnas = [] srna_f = open(srna_file, "r") for entry in Gff3Parser().entries(srna_f): attributes = {} for key, value in entry.attributes.items(): if (data_type == "sRNA") and ( "sRNA_hit" not in key): attributes[key] = value elif (data_type == "nr") and ( "nr_hit" not in key): attributes[key] = value entry.attributes = attributes attribute_string = ";".join( ["=".join(items) for items in entry.attributes.items()]) entry.info = "\t".join([entry.info_without_attributes, attribute_string]) srnas.append(entry) srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return srnas def print_file(database, out_f, info, srna_hit, nr_hit): if database == "sRNA": out_f.write("{0};sRNA_hit={1}\n".format(info, srna_hit)) elif database == "nr": out_f.write("{0};nr_hit={1}\n".format(info, nr_hit)) def gen_out_flie(blasts, out_t, prefix, out_f, database, srna, names): if not blasts["blast"]: out_t.write("{0}\tNA\n".format(prefix)) print_file(database, out_f, srna.info, "NA", "NA") else: print_file(database, out_f, srna.info, len(names), blasts["hit_num"]) def get_whole_query(line, blast_f): whole_line = line if (not line.endswith("+")) and ( not line.endswith("-")): for line in blast_f: line = line.strip() whole_line = whole_line + line if (line.endswith("+")) or ( line.endswith("-")): return whole_line else: return whole_line def extract_blast(blast_result, srna_file, output_file, output_table, database, score_s, score_n): '''extract the result of blast''' out_f = open(output_file, "w") out_t = open(output_table, "w") out_f.write("##gff-version 3\n") srnas = read_gff(srna_file, database) for srna in srnas: blasts = {"hit_num": 0, "blast": False, "name": ""} names = [] prefix = "\t".join([srna.seq_id, srna.attributes["ID"], srna.strand, str(srna.start), str(srna.end)]) print_ = 0 with open(blast_result, "r") as blast_f: for line in blast_f: line = line.strip() if line.startswith("Query= "): if print_ == 2: print_ = 0 elif print_ == 1: print_ = 0 out_t.write("{0}\tNA\n".format(prefix)) line = get_whole_query(line, blast_f) go_out = False query = line.split("=")[1].strip() if (query == ("|".join([ srna.attributes["ID"], srna.seq_id, str(srna.start), str(srna.end), srna.strand]))): for line in blast_f: line = line.strip() if line.find("No hits found") != -1: print_file(database, out_f, srna.info, "NA", "NA") out_t.write("{0}\tNA\n".format(prefix)) break elif line.find("Sequences producing " "significant alignments:") != -1: for line in blast_f: line = line.strip() if len(line) != 0: if line.startswith( "Effective search space"): go_out = True break if database == "sRNA": p = detect_srna( line, blast_f, out_t, blasts, prefix, score_s) if p: print_ = p if (len(blasts["name"]) > 0): if blasts["name"] not in names: names.append( blasts["name"]) elif database == "nr": p = detect_nr( line, blast_f, out_t, blasts, prefix, score_n) if p: print_ = p gen_out_flie(blasts, out_t, prefix, out_f, database, srna, names) blasts["hit_num"] = 0 break if go_out: break out_f.close() out_t.close() def extract_energy(srna_file, sec_file, out_file): '''extract the folding energy of sRNA''' s_f = open(srna_file, "r") check = False get_length = 
False out = open(out_file, "w") out.write("##gff-version 3\n") for srna in Gff3Parser().entries(s_f): with open(sec_file, "r") as d_f: for structure in d_f: structure = structure.rstrip('\n') if get_length: length = len(structure) get_length = False if (structure.startswith(">")): if (("|".join([srna.attributes["ID"], srna.seq_id, str(srna.start), str(srna.end), srna.strand])) == structure[1:]) or ( ("|".join([srna.feature, srna.seq_id, str(srna.start), str(srna.end), srna.strand])) == structure[1:]): check = True get_length = True if (check) and ( (structure[0] == "(") or ( structure[0] == ")") or ( structure[0] == ".")) and ( structure[-1] == ")"): check = False data = structure.split(" ") if (data[-1].find("(") == -1): energy = float(data[-1][0:-1]) else: energy = float(data[-1][1:-1]) out.write("{0};2d_energy={1:.4f}\n".format( srna.info, (energy / float(length)))) break s_f.close() out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/extract_sRNA_info.py
extract_sRNA_info.py
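A minimal usage sketch for the two entry points above. The file names and cutoffs are hypothetical and only illustrate how a caller might wire these functions together; they are not ANNOgesic's documented command-line interface.

from annogesiclib.extract_sRNA_info import extract_blast, extract_energy

# Hypothetical inputs: a plain-text BLAST report against nr and the sRNA
# GFF3 file produced earlier in the pipeline.
extract_blast("blast_nr.txt", "sRNAs.gff", "sRNAs_with_nr.gff",
              "nr_table.csv", "nr", None, None)
# Annotate each sRNA with its length-normalized folding energy, taken
# from an RNAfold-style secondary-structure file.
extract_energy("sRNAs.gff", "sRNAs_structure.txt", "sRNAs_energy.gff")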
import sys
import os, gc
import numpy as np
from annogesiclib.parser_wig import WigParser


def read_libs(input_libs, wig_folder):
    libs = []
    texs = {}
    for lib in input_libs:
        datas = lib.split(":")
        name = None
        for wig in os.listdir(wig_folder):
            if wig == datas[0]:
                with open(os.path.join(wig_folder, wig), "r") as w_h:
                    for line in w_h:
                        line = line.strip()
                        if line.startswith("track"):
                            name = line.split("=")[-1].replace("\"", "")
                            break
        if name is None:
            print("Error: {0} can not be found in the wig "
                  "folder!".format(datas[0]))
        if (datas[1] == "tex") or (datas[1] == "notex"):
            cond = "texnotex"
        else:
            cond = datas[1]
        libs.append({"name": name, "type": datas[1],
                     "cond": "_".join([datas[2], cond]),
                     "rep": datas[3], "strand": datas[4]})
    for lib1 in libs:
        if lib1["type"] == "frag":
            pass
        elif (lib1["type"] == "tex") or (lib1["type"] == "notex"):
            prefix1 = lib1["cond"].split("_")[0]
            for lib2 in libs:
                prefix2 = lib2["cond"].split("_")[0]
                if (prefix1 == prefix2) and \
                   (lib1["rep"] == lib2["rep"]) and \
                   (lib1["type"] == "tex") and \
                   (lib2["type"] == "notex") and \
                   (lib1["strand"] == lib2["strand"]):
                    texs[lib1["name"] + "@AND@" + lib2["name"]] = 0
        else:
            print("Error: Wrong library types are detected, "
                  "please assign frag, tex or notex.")
            sys.exit()
    return libs, texs


def read_wig(filename, strand, libs):
    wig_parser = WigParser()
    wigs = {}
    if filename is not False:
        wig_fh = open(filename)
        for entry in wig_parser.parser(wig_fh, strand):
            if entry.strain not in wigs.keys():
                wigs[entry.strain] = {}
                for lib in libs:
                    if lib["cond"] not in wigs[entry.strain]:
                        wigs[entry.strain][lib["cond"]] = {}
            for lib in libs:
                if (lib["name"] == entry.track) and (
                        lib["strand"] == entry.strand):
                    lib_name = "|".join([
                        entry.track, entry.strand, lib["type"]])
                    if lib_name not in wigs[entry.strain][lib["cond"]].keys():
                        wigs[entry.strain][lib["cond"]][lib_name] = []
                    wigs[entry.strain][lib["cond"]][lib_name].append(
                        entry.coverage)
        wig_fh.close()
    for strain, conds in wigs.items():
        for cond, lib_names in conds.items():
            for lib_name, cover_list in lib_names.items():
                wigs[strain][cond][lib_name] = np.array(
                    wigs[strain][cond][lib_name])
    return wigs
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/lib_reader.py
lib_reader.py
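A short sketch of how these two helpers fit together. The library strings follow the wig_name:type:condition:replicate:strand convention that the parsing code above expects; the concrete names and paths are hypothetical.

from annogesiclib.lib_reader import read_libs, read_wig

# Hypothetical TEX+/TEX- library pair on the forward strand.
input_libs = ["TSB_OD_0.2_TEX_forward.wig:tex:1:a:+",
              "TSB_OD_0.2_notex_forward.wig:notex:1:a:+"]
libs, texs = read_libs(input_libs, "wigs")
# Load forward-strand coverage into numpy arrays, keyed by
# strain -> condition -> "track|strand|type".
wigs_f = read_wig("wigs/forward.wig", "+", libs)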
import os
import sys
import csv
import shutil
from glob import glob
from subprocess import call
from annogesiclib.multiparser import Multiparser
from annogesiclib.seq_editer import SeqEditer
from annogesiclib.transcript_SNP import snp_detect
from annogesiclib.helper import Helper


class SNPCalling(object):
    '''detection of SNPs'''

    def __init__(self, args_snp):
        self.multiparser = Multiparser()
        self.seq_editer = SeqEditer()
        self.helper = Helper()
        if args_snp.types == "related_genome":
            file_type = "compare_related_and_reference_genomes"
        else:
            file_type = "mutations_of_reference_genomes"
        self.seq_path = os.path.join(args_snp.out_folder, file_type, "seqs")
        self.stat_path = os.path.join(args_snp.out_folder, file_type,
                                      "statistics")
        self.fig_path = os.path.join(self.stat_path, "figs")
        self.helper.check_make_folder(self.fig_path)
        self.outputs = {"table": os.path.join(
                            args_snp.out_folder, file_type, "SNP_tables"),
                        "raw": os.path.join(
                            args_snp.out_folder, file_type,
                            "SNP_raw_outputs"),
                        "tmp": os.path.join(args_snp.out_folder, "tmp_bcf"),
                        "depth": os.path.join(args_snp.out_folder,
                                              "tmp_depth")}
        self.bams = {"whole": os.path.join(args_snp.out_folder,
                                           "whole_reads.bam"),
                     "sort": os.path.join(args_snp.out_folder,
                                          "whole_reads_sorted.bam"),
                     "bams": []}
        self.header = os.path.join(args_snp.out_folder, "header")
        self.baqs = {"with": "with_BAQ", "without": "without_BAQ",
                     "extend": "extend_BAQ"}

    def _transcript_snp(self, fasta, out_table_prefix, type_, prefix,
                        bam_datas, table_path, args_snp):
        seq_path = os.path.join(self.seq_path, self.baqs[type_], prefix)
        for bam in bam_datas:
            stat_prefix = os.path.join(self.stat_path, "_".join([
                "stat", "_".join([prefix, self.baqs[type_],
                                  bam["sample"]]), "SNP"]))
            snp_file = os.path.join(self.outputs["raw"], prefix, "_".join(
                [prefix, self.baqs[type_], bam["sample"] + ".vcf"]))
            snp_detect(
                fasta, snp_file, self.outputs["depth"] + bam["sample"],
                "_".join([out_table_prefix, bam["sample"]]),
                os.path.join(seq_path, "_".join([prefix, bam["sample"]])),
                bam["bam_number"], stat_prefix, args_snp, bam["rep"])
            self.helper.move_all_content(table_path, self.fig_path, [".png"])

    def _get_para(self, args_snp):
        if args_snp.caller == "c":
            bcf_para = "-vcO"
        else:
            bcf_para = "-vmO"
        return bcf_para

    def _run_tools(self, fasta_file, type_, args_snp, bam_datas, log):
        bcf_para = self._get_para(args_snp)
        for bam in bam_datas:
            bam_file = os.path.join(args_snp.out_folder,
                                    bam["sample"] + ".bam")
            if type_ == "with":
                command = [args_snp.samtools_path, "mpileup", "-t", "DP"]
            elif type_ == "without":
                command = [args_snp.samtools_path, "mpileup",
                           "-t", "DP", "-B"]
            elif type_ == "extend":
                command = [args_snp.samtools_path, "mpileup",
                           "-t", "DP", "-E"]
            if args_snp.rg:
                command = command + ["-ugf", fasta_file, bam_file]
            else:
                command = command + ["--ignore-RG", "-ugf",
                                     fasta_file, bam_file]
            log.write(" ".join(command) + ">" + self.outputs["tmp"] + "\n")
            os.system(" ".join(command) + ">" + self.outputs["tmp"])
            bam["vcf"] = os.path.join(self.outputs["raw"], "_".join(
                [self.baqs[type_], bam["sample"] + ".vcf"]))
            if args_snp.chrom == "1":
                log.write(" ".join([
                    args_snp.bcftools_path, "call", "--ploidy",
                    args_snp.chrom, self.outputs["tmp"], bcf_para,
                    "v", "-o", bam["vcf"]]) + "\n")
                call([args_snp.bcftools_path, "call", "--ploidy",
                      args_snp.chrom, self.outputs["tmp"], bcf_para,
                      "v", "-o", bam["vcf"]])
            elif args_snp.chrom == "2":
                log.write(" ".join([args_snp.bcftools_path, "call",
                                    self.outputs["tmp"], bcf_para, "v",
                                    "-o", bam["vcf"]]) + "\n")
                call([args_snp.bcftools_path, "call",
                      self.outputs["tmp"], bcf_para, "v",
                      "-o", bam["vcf"]])
        log.write("Done!\n")
        log.write("The following files are generated:\n")
        for file_ in os.listdir(self.outputs["raw"]):
            log.write("\t" + os.path.join(self.outputs["raw"], file_) + "\n")

    def _parse_vcf_by_fa(self, args_snp, type_, num_prog, log):
        seq_names = []
        fa_prefixs = []
        log.write("Parsing VCF files by comparing fasta information.\n")
        for fa in os.listdir(args_snp.fastas):
            if (fa != "all.fa") and (not fa.endswith(".fai")):
                with open(os.path.join(args_snp.fastas, fa)) as fh:
                    for line in fh:
                        line = line.strip()
                        if line.startswith(">"):
                            seq_names.append(line[1:])
                fa_prefix = ".".join(fa.split(".")[:-1])
                fa_prefixs.append(fa_prefix)
                vcf_folder = os.path.join(
                    self.outputs["raw"], fa_prefix)
                if num_prog == 0:
                    self.helper.check_make_folder(vcf_folder)
                    self.helper.check_make_folder(os.path.join(
                        self.outputs["table"], fa_prefix))
                self.helper.check_make_folder(
                    os.path.join(self.seq_path, self.baqs[type_], fa_prefix))
                for vcf in os.listdir(self.outputs["raw"]):
                    if vcf.endswith(".vcf"):
                        out = open(os.path.join(vcf_folder, "_".join(
                            [fa_prefix, vcf])), "w")
                        with open(os.path.join(self.outputs["raw"],
                                               vcf)) as vh:
                            for line in vh:
                                line = line.strip()
                                if line.startswith("#"):
                                    out.write(line + "\n")
                                else:
                                    if line.split("\t")[0] in seq_names:
                                        out.write(line + "\n")
                        out.close()
                        log.write("\t" + os.path.join(vcf_folder, "_".join(
                            [fa_prefix, vcf])) + " is generated.\n")
        for vcf in os.listdir(self.outputs["raw"]):
            if vcf.endswith(".vcf"):
                os.remove(os.path.join(self.outputs["raw"], vcf))
        return fa_prefixs

    def _run_sub(self, args_snp, all_fasta, type_, bam_datas, num_prog, log):
        self._run_tools(all_fasta, type_, args_snp, bam_datas, log)
        fa_prefixs = self._parse_vcf_by_fa(args_snp, type_, num_prog, log)
        log.write("Running transcript_SNP.py to do statistics, filter SNPs, "
                  "and generate potential sequences.\n")
        log.write("The following files are generated:\n")
        for fa_prefix in fa_prefixs:
            for fasta in os.listdir(args_snp.fastas):
                if fa_prefix in fasta:
                    fasta_file = os.path.join(args_snp.fastas, fasta)
                    table_path = os.path.join(self.outputs["table"],
                                              fa_prefix)
                    table_prefix = os.path.join(table_path, "_".join(
                        [fa_prefix, self.baqs[type_]]))
                    self._transcript_snp(
                        fasta_file, table_prefix, type_, fa_prefix,
                        bam_datas, table_path, args_snp)
                    seq_path = os.path.join(self.seq_path,
                                            self.baqs[type_], fa_prefix)
                    for folder in (table_path, self.stat_path, seq_path,
                                   self.fig_path):
                        for file_ in os.listdir(folder):
                            if os.path.isfile(os.path.join(folder, file_)):
                                log.write("\t" + os.path.join(
                                    folder, file_) + "\n")

    def _run_program(self, all_fasta, bam_datas, args_snp, log):
        num_prog = 0
        log.write("Running Samtools to mpileup, and using Bcftools to "
                  "call SNPs.\n")
        log.write("Please make sure the versions of Samtools and Bcftools "
                  "are both at least 1.3.1.\n")
        for index in args_snp.program:
            if index == "with_BAQ":
                type_ = "with"
                print("Running SNP calling with BAQ")
                log.write("Running SNP calling with BAQ.\n")
            elif index == "without_BAQ":
                type_ = "without"
                print("Running SNP calling without BAQ")
                log.write("Running SNP calling without BAQ.\n")
            elif index == "extend_BAQ":
                print("Running SNP calling with extended BAQ")
                log.write("Running SNP calling with extended BAQ.\n")
                type_ = "extend"
            else:
                print("Error: No correct program, please assign "
                      "\"with_BAQ\", \"without_BAQ\", or \"extend_BAQ\"!")
                log.write("No valid program can be found, please assign "
                          "\"with_BAQ\", \"without_BAQ\", or "
                          "\"extend_BAQ\".\n")
                sys.exit()
            self._run_sub(args_snp, all_fasta, type_, bam_datas,
                          num_prog, log)
            num_prog += 1

    def _run_bam(self, samtools_path, sub_command, bam_file, type_file, log):
        if sub_command == "merge":
            command = (" ".join([samtools_path, sub_command,
                                 self.bams["whole"], bam_file]))
        elif sub_command == "sort":
            if type_file == "all":
                command = (" ".join([samtools_path, sub_command, "-o",
                                     bam_file, self.bams["whole"]]))
            else:
                command = (" ".join([samtools_path, sub_command, "-o",
                                     bam_file, type_file]))
        log.write(command + "\n")
        os.system(command)

    def _merge_bams(self, args_snp, bam_datas, log):
        bams = []
        num_normal = 0
        num_frag = 0
        log.write("Using Samtools to merge and sort BAM files.\n")
        log.write("Please make sure the version of Samtools is "
                  "at least 1.3.1.\n")
        for bam in bam_datas:
            bam["bam_number"] = 0
            out_bam = os.path.join(args_snp.out_folder,
                                   bam["sample"] + ".bam")
            if len(bam["bams"]) == 1:
                print("Sorting BAM files of " + bam["sample"])
                self._run_bam(
                    args_snp.samtools_path, "sort",
                    out_bam, bam["bams"][0], log)
                bam["bam_number"] = 1
            else:
                print("Merging BAM files of " + bam["sample"])
                self._run_bam(args_snp.samtools_path, "merge",
                              " ".join(bam["bams"]), "all", log)
                print("Sorting BAM files of " + bam["sample"])
                self._run_bam(
                    args_snp.samtools_path, "sort", out_bam, "all", log)
                bam["bam_number"] += 1
            if os.path.exists(self.bams["whole"]):
                os.remove(self.bams["whole"])
            out_depth = open(self.outputs["depth"] + bam["sample"], "w")
            log.write(" ".join([args_snp.samtools_path, "index",
                                out_bam]) + "\n")
            call([args_snp.samtools_path, "index", out_bam])
            log.write(" ".join([args_snp.samtools_path, "depth",
                                out_bam]) + "\n")
            call([args_snp.samtools_path, "depth", out_bam],
                 stdout=out_depth)
            out_depth.close()
        log.write("Done!\n")
        log.write("The following files are generated:\n")
        log.write("\t" + self.bams["whole"] + " is temporarily generated "
                  "(it will be deleted afterward).\n")
        for file_ in os.listdir(args_snp.out_folder):
            if os.path.isfile(os.path.join(args_snp.out_folder, file_)):
                log.write("\t" + os.path.join(args_snp.out_folder,
                                              file_) + "\n")

    def _modify_header(self, fastas):
        for fasta in os.listdir(fastas):
            if fasta.endswith("fasta") or \
               fasta.endswith("fa") or \
               fasta.endswith("fna"):
                self.seq_editer.modify_header(os.path.join(fastas, fasta))

    def _get_header(self, samtools_path, bam, seq_names):
        command = " ".join([samtools_path, "view", "-H", bam])
        os.system(">".join([command, self.header]))
        fh = open(self.header, "r")
        for row in csv.reader(fh, delimiter="\t"):
            if row[0] == "@SQ":
                if row[1].split(":")[1] not in seq_names:
                    seq_names.append(row[1].split(":")[1])
        fh.close()

    def _get_genome_name(self, args_snp, bam_datas):
        seq_names = []
        for bam in bam_datas:
            bam_file = os.path.join(args_snp.out_folder,
                                    bam["sample"] + ".bam")
            self._get_header(args_snp.samtools_path,
                             bam_file, seq_names)
        return seq_names

    def _remove_bams(self, bam_datas, args_snp):
        for bam in bam_datas:
            bam_file = os.path.join(args_snp.out_folder,
                                    bam["sample"] + ".bam")
            if os.path.exists(bam_file):
                os.remove(bam_file)
            if os.path.exists(bam_file + ".bai"):
                os.remove(bam_file + ".bai")
            if os.path.exists(self.header):
                os.remove(self.header)
            os.remove(self.outputs["depth"] + bam["sample"])

    def _extract_bams(self, bams, log):
        bam_datas = []
        for bam in bams:
            datas = bam.split(":")
            if len(datas) != 2:
                log.write("The format of --bam_files is wrong!\n")
                print("Error: the format of --bam_files is wrong!")
                sys.exit()
            for file_ in datas[-1].split(","):
                if not os.path.exists(file_):
                    print("Error: some BAM files do not exist!")
                    log.write(file_ + " is not found.\n")
                    sys.exit()
            bam_datas.append({"sample": datas[0],
                              "rep": len(datas[-1].split(",")),
                              "bams": datas[-1].split(",")})
        return bam_datas

    def _merge_fasta(self, fastas, log):
        all_fasta = os.path.join(fastas, "all.fa")
        names = []
        out = open(all_fasta, "w")
        print_ = False
        for fasta in os.listdir(fastas):
            if (fasta.endswith(".fa")) or (
                    fasta.endswith(".fasta")) or (
                    fasta.endswith(".fna")):
                with open(os.path.join(fastas, fasta)) as fh:
                    for line in fh:
                        line = line.strip()
                        if line.startswith(">"):
                            if line not in names:
                                print_ = True
                                names.append(line)
                            else:
                                print_ = False
                        if print_:
                            out.write(line + "\n")
                log.write(os.path.join(fastas, fasta) + " is loaded.\n")
        out.close()
        return all_fasta

    def run_snp_calling(self, args_snp, log):
        self._modify_header(args_snp.fastas)
        all_fasta = self._merge_fasta(args_snp.fastas, log)
        bam_datas = self._extract_bams(args_snp.bams, log)
        self._merge_bams(args_snp, bam_datas, log)
        if ("with_BAQ" not in args_snp.program) and (
                "without_BAQ" not in args_snp.program) and (
                "extend_BAQ" not in args_snp.program):
            print("Error: Please assign correct programs: "
                  "\"with_BAQ\", \"without_BAQ\", or \"extend_BAQ\".")
            sys.exit()
        else:
            print("Detecting mutations now")
            self._run_program(all_fasta, bam_datas, args_snp, log)
            os.remove(self.outputs["tmp"])
            os.remove(all_fasta)
            os.remove(all_fasta + ".fai")
        self.helper.remove_tmp_dir(args_snp.fastas)
        self._remove_bams(bam_datas, args_snp)
        log.write("Remove all the temporary files.\n")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/snp.py
snp.py
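A rough sketch of driving the class above. In ANNOgesic the args_snp object is filled in by the controller, so the Namespace below only lists a few illustrative fields (all attribute values are assumptions, not the real defaults), and the run would still require valid BAM and fasta inputs on disk.

from argparse import Namespace
from annogesiclib.snp import SNPCalling

# Illustrative arguments only; the real controller supplies many more fields.
args_snp = Namespace(
    types="reference_genome", out_folder="SNP_out",
    program=["with_BAQ"], fastas="fastas",
    bams=["sample1:a.bam,b.bam"],
    samtools_path="samtools", bcftools_path="bcftools",
    caller="m", chrom="1", rg=True)
log = open("snp_log.txt", "w")
SNPCalling(args_snp).run_snp_calling(args_snp, log)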
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def get_type(inter, gffs):
    '''get the types of sORF'''
    utr5 = False
    utr3 = False
    anti = False
    for gff in gffs:
        if (gff.seq_id == inter["strain"]) and \
           (gff.strand == inter["strand"]):
            if gff.strand == "+":
                if inter["end"] + 1 == gff.start:
                    utr5 = True
                if inter["start"] - 1 == gff.end:
                    utr3 = True
            else:
                if inter["end"] + 1 == gff.start:
                    utr3 = True
                if inter["start"] - 1 == gff.end:
                    utr5 = True
        elif (gff.seq_id == inter["strain"]) and \
             (gff.strand != inter["strand"]):
            if ((inter["start"] <= gff.start) and (
                    inter["end"] >= gff.end)) or (
                    (inter["start"] >= gff.start) and (
                    inter["end"] <= gff.end)) or (
                    (inter["start"] <= gff.start) and (
                    inter["end"] <= gff.end) and (
                    inter["end"] >= gff.start)) or (
                    (inter["start"] >= gff.start) and (
                    inter["start"] <= gff.end) and (
                    inter["end"] >= gff.end)):
                anti = True
    if utr3 and utr5:
        inter["source"] = "interCDS"
    elif utr3:
        inter["source"] = "3utr"
    elif utr5:
        inter["source"] = "5utr"
    elif anti:
        inter["source"] = "antisense"
    else:
        inter["source"] = "intergenic"


def read_gff(gff_file, tran_file, hypo):
    trans = []
    gffs = []
    gh = open(gff_file)
    for entry in Gff3Parser().entries(gh):
        if (Helper().feature_without_notgene(entry)) and (
                entry.feature != "sORF"):
            if ("product" in entry.attributes.keys()) and (hypo):
                if "hypothetical protein" not in entry.attributes["product"]:
                    gffs.append(entry)
            else:
                gffs.append(entry)
    th = open(tran_file)
    for entry in Gff3Parser().entries(th):
        trans.append(entry)
    gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    trans = sorted(trans, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    gh.close()
    th.close()
    return gffs, trans


def compare_tran_cds(trans, gffs):
    '''compare transcript and CDS to find the intergenic region'''
    inters = []
    for tran in trans:
        poss = [{"start": tran.start, "end": tran.end}]
        for pos in poss:
            exclude = False
            for gff in gffs:
                if (tran.seq_id == gff.seq_id) and \
                   (tran.strand == gff.strand):
                    if (gff.start <= pos["start"]) and \
                       (gff.end >= pos["start"]) and \
                       (gff.end < pos["end"]):
                        pos["start"] = gff.end + 1
                    elif (gff.start > pos["start"]) and \
                         (gff.start <= pos["end"]) and \
                         (gff.end >= pos["end"]):
                        pos["end"] = gff.start - 1
                    elif (gff.start <= pos["start"]) and \
                         (gff.end >= pos["end"]):
                        exclude = True
                        break
                    elif (gff.start > pos["start"]) and \
                         (gff.end < pos["end"]):
                        poss.append({"start": gff.end + 1,
                                     "end": pos["end"]})
                        pos["end"] = gff.start - 1
            if not exclude:
                inters.append({"strain": tran.seq_id,
                               "strand": tran.strand,
                               "start": pos["start"],
                               "end": pos["end"]})
    return inters


def get_intergenic(gff_file, tran_file, out_file, utr_detect, hypo,
                   extend_5, extend_3):
    gffs, trans = read_gff(gff_file, tran_file, hypo)
    inters = compare_tran_cds(trans, gffs)
    num = 0
    out = open(out_file, "w")
    for inter in inters:
        get_type(inter, gffs)
        name = '%0*d' % (5, num)
        if (inter["source"] != "intergenic") and (
                inter["source"] != "antisense"):
            source = "UTR_derived"
            if utr_detect:
                attribute_string = ";".join(
                    ["=".join(items) for items in (
                        ["ID", inter["strain"] + "_sorf" + str(num)],
                        ["Name", "sORF_" + name],
                        ["UTR_type", inter["source"]])])
        else:
            if inter["source"] == "intergenic":
                source = "intergenic"
            elif inter["source"] == "antisense":
                source = "antisense"
            attribute_string = ";".join(
                ["=".join(items) for items in (
                    ["ID", inter["strain"] + "_sorf" + str(num)],
                    ["Name", "sORF_" + name])])
        if ((source == "UTR_derived") and (utr_detect)) or (
                source == "intergenic") or (source == "antisense"):
            if inter["strand"] == "+":
                start = inter["start"] - extend_5
                end = inter["end"] + extend_3
            else:
                start = inter["start"] - extend_3
                end = inter["end"] + extend_5
            out.write("\t".join([str(field) for field in [
                inter["strain"], source, "sORF", str(start),
                str(end), ".", inter["strand"], ".",
                attribute_string]]) + "\n")
            num += 1
    out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sORF_intergenic.py
sORF_intergenic.py
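A minimal call to the entry point above, with hypothetical file names; it writes a GFF3 file of sORF candidate regions classified as UTR-derived, intergenic, or antisense, with the coordinates extended by the given numbers of nucleotides.

from annogesiclib.sORF_intergenic import get_intergenic

# Hypothetical inputs: an annotation GFF3 and a transcript GFF3.
get_intergenic("annotation.gff", "transcripts.gff", "sORF_candidates.gff",
               utr_detect=True, hypo=False, extend_5=30, extend_3=30)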
from annogesiclib.gff3 import Gff3Parser


class FormatFixer(object):
    '''Fix file formats that do not fit ANNOgesic'''

    def _read_gff(self, gff_file, genes, datas, strain):
        gene_num = 0
        fh = open(gff_file, "r")
        for entry in Gff3Parser().entries(fh):
            entry.seq_id = strain
            entry.info_without_attributes = "\t".join([
                str(field) for field in [
                    entry.seq_id, entry.source, entry.feature,
                    entry.start, entry.end, entry.score,
                    entry.strand, entry.phase]])
            datas.append(entry)
            if entry.feature == "gene":
                if "locus_tag" in entry.attributes.keys():
                    name = entry.attributes["locus_tag"]
                if "gene" in entry.attributes.keys():
                    name = entry.attributes["gene"]
                entry.attribute_string = ";".join(["ID=gene" + str(gene_num),
                                                   "Name=" + name,
                                                   entry.attribute_string])
                gene_id = "gene" + str(gene_num)
                entry.attributes["ID"] = gene_id
                genes.append(entry)
                gene_num += 1
        fh.close()

    def fix_ratt(self, gff_file, strain, out_file):
        out = open(out_file, "w")
        out.write("##gff-version 3\n")
        nums = {"cds": 0, "rna": 0, "gene": 0}
        genes = []
        datas = []
        check_parent = False
        self._read_gff(gff_file, genes, datas, strain)
        check_parent = False
        for data in datas:
            if data.feature == "gene":
                data = genes[nums["gene"]]
                nums["gene"] += 1
            elif (data.feature == "rRNA") or \
                 (data.feature == "tRNA"):
                name = data.attributes["locus_tag"]
                data.attribute_string = ";".join([
                    "ID=rna" + str(nums["rna"]),
                    "Name=" + name, data.attribute_string])
                nums["rna"] += 1
            elif data.feature == "CDS":
                if "protein_id" in data.attributes.keys():
                    name = data.attributes["protein_id"]
                for gene in genes:
                    if ((gene.start <= data.start) and (
                            gene.end >= data.end)) or (
                            gene.attributes["locus_tag"] ==
                            data.attributes["locus_tag"]):
                        data.attribute_string = ";".join([
                            "ID=cds" + str(nums["cds"]),
                            "Name=" + name,
                            "Parent=" + gene.attributes["ID"],
                            data.attribute_string])
                        check_parent = True
                        break
                if check_parent:
                    check_parent = False
                else:
                    data.attribute_string = ";".join([
                        "ID=cds" + str(nums["cds"]),
                        "Name=" + name, data.attribute_string])
                nums["cds"] += 1
            if "group" in data.attributes.keys():
                ref_f = open(gff_file, "r")
                for ref in Gff3Parser().entries(ref_f):
                    if "group" in ref.attributes.keys():
                        if (data.attributes["group"] ==
                                ref.attributes["group"]):
                            if (data.strand != ref.strand):
                                data.strand = ref.strand
                            break
                ref_f.close()
            out.write("\t".join([data.info_without_attributes,
                                 data.attribute_string]) + "\n")
        out.close()

    def fix_rnaplex(self, rnaplex_file, out_file):
        out = open(out_file, "w")
        with open(rnaplex_file, "r") as f_h:
            for line in f_h:
                line = line.strip()
                if line != ("Error during initialization of "
                            "the duplex in duplexfold_XS"):
                    out.write(line + "\n")
        out.close()

    def fix_emboss(self, input_file, out_file):
        out = open(out_file, "w")
        with open(input_file, "r") as f_h:
            for line in f_h:
                line = line.strip()
                if line.startswith(">"):
                    out.write(line[:-2] + "\n")
                else:
                    out.write(line + "\n")
        out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/format_fixer.py
format_fixer.py
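A short usage sketch for the fixer class; the input GFF3 and the strain name are placeholders.

from annogesiclib.format_fixer import FormatFixer

fixer = FormatFixer()
# Rewrite a RATT-generated GFF3 so genes, RNAs and CDSs carry
# ID/Name/Parent attributes; "NC_000915.1" is a placeholder strain name.
fixer.fix_ratt("ratt_output.gff", "NC_000915.1", "fixed_annotation.gff")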
import math
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def import_candidate(cands, term_features, strain, start, end, ut, name,
                     total_length, strand, parent_p, parent_m,
                     p_pos, m_pos):
    cands.append({"strain": strain, "start": start, "end": end,
                  "print": False, "ut": ut, "name": name,
                  "miss": term_features["real_miss"],
                  "loop": term_features["loop"],
                  "length": total_length,
                  "r_stem": term_features["r_stem"], "strand": strand,
                  "l_stem": term_features["l_stem"],
                  "parent_p": parent_p, "parent_m": parent_m,
                  "detect_p": False, "detect_m": False,
                  "p_pos": p_pos, "m_pos": m_pos})


def get_feature(gene):
    if "Name" in gene.attributes.keys():
        feature = gene.attributes["Name"]
    elif "locus_tag" in gene.attributes.keys():
        feature = gene.attributes["locus_tag"]
    else:
        strand = Helper().get_strand_name(gene.strand)
        feature = "".join([gene.feature, ":", str(gene.start),
                           "-", str(gene.end), "_", strand])
    return feature


def check_miss(cand1, cand2, cutoff_miss):
    stem_len = (cand2["r_stem"] + cand2["l_stem"] - cand2["miss"])
    if (float(cand2["miss"]) / float(stem_len)) <= cutoff_miss:
        cand1["miss"] = cand2["miss"]
        cand1["r_stem"] = cand2["r_stem"]
        cand1["l_stem"] = cand2["l_stem"]


def filter_term(cands, terms, miss_rate):
    '''remove the low-possibility terminators'''
    cutoff_miss = miss_rate
    for cand1 in cands:
        stem_len = (cand1["r_stem"] + cand1["l_stem"] - cand1["miss"])
        if (not cand1["print"]) and \
           ((float(cand1["miss"]) / float(stem_len)) <= cutoff_miss):
            tmp_term = cand1.copy()
            for cand2 in cands:
                if (tmp_term["strain"] == cand2["strain"]) and (
                        tmp_term["miss"] >= cand2["miss"]):
                    if (tmp_term["start"] >= cand2["start"]) and (
                            tmp_term["start"] < cand2["end"]) and (
                            tmp_term["end"] > cand2["end"]):
                        tmp_term["start"] = cand2["start"]
                        check_miss(tmp_term, cand2, cutoff_miss)
                        cand2["print"] = True
                    elif (cand2["start"] > tmp_term["start"]) and (
                            cand2["start"] < tmp_term["end"]) and (
                            cand2["end"] >= tmp_term["end"]):
                        tmp_term["end"] = cand2["end"]
                        check_miss(tmp_term, cand2, cutoff_miss)
                        cand2["print"] = True
                    elif (tmp_term["start"] >= cand2["start"]) and (
                            tmp_term["end"] <= cand2["end"]):
                        tmp_term["start"] = cand2["start"]
                        tmp_term["end"] = cand2["end"]
                        check_miss(tmp_term, cand2, cutoff_miss)
                        cand2["print"] = True
                    elif (cand2["start"] >= tmp_term["start"]) and (
                            cand2["end"] <= tmp_term["end"]):
                        cand2["print"] = True
                        check_miss(tmp_term, cand2, cutoff_miss)
            terms.append(tmp_term)


def check_sec(sec, nts):
    '''check the criteria of the secondary structure of the terminator'''
    term_features = {"st_pos": 0, "rights": 0, "lefts": 0, "tmp_miss": 0,
                     "real_miss": 0, "loop": 0, "r_stem": 0, "l_stem": 0}
    detects = {"detect_r": False, "detect_l": False, "conflict": False}
    for s_t in reversed(sec[0:nts]):
        term_features["st_pos"] += 1
        if s_t == ")":
            if not detects["detect_l"]:
                term_features["rights"] += 1
                term_features["real_miss"] = term_features["tmp_miss"]
                detects["detect_r"] = True
            else:
                detects["conflict"] = True
                break
        elif s_t == ".":
            if detects["detect_r"]:
                term_features["tmp_miss"] += 1
        elif s_t == "(":
            term_features["lefts"] += 1
            if not detects["detect_l"]:
                term_features["loop"] = (
                    term_features["tmp_miss"] - term_features["real_miss"])
                term_features["tmp_miss"] = term_features["real_miss"]
                term_features["r_stem"] = (
                    term_features["rights"] + term_features["real_miss"])
            else:
                term_features["real_miss"] = term_features["tmp_miss"]
            detects["detect_l"] = True
            if term_features["lefts"] == term_features["rights"]:
                break
    return term_features, detects


def check_u(seq, num_sec, nts, args_term):
    if (len(seq) - num_sec) >= args_term.at_tail:
        tmp_seq_num = 0
        for nt in (seq[(len(seq) - num_sec):]):
            nts["seq_num"] += 1
            if (nt == "U") or (nt == "T"):
                nts["ut"] += 1
                tmp_seq_num = nts["seq_num"]
            else:
                nts["no_ut"] += 1
                if nts["no_ut"] > args_term.mut_u:
                    break
        nts["no_ut"] = nts["no_ut"] - (nts["seq_num"] - tmp_seq_num)
        nts["seq_num"] = tmp_seq_num - 1


def detect_candidates(seq, sec, name, strain, start, end, parent_p,
                      parent_m, strand, args_term, p_pos, m_pos):
    '''check the criteria of the secondary structure of the terminator'''
    term_len = 2 * args_term.max_stem + 2 * (
        args_term.max_stem * args_term.miss_rate) + args_term.max_loop
    cands = []
    nts = {"ut": 0, "no_ut": 0, "seq_num": 0, "detect": False}
    num_sec = 0
    for st in reversed(sec):
        if (st == "(") or (not nts["detect"]):
            nts = {"ut": 0, "no_ut": 0, "seq_num": 0, "detect": False}
        if (st == ")") and (not nts["detect"]):
            check_u(seq, num_sec, nts, args_term)
            if nts["ut"] >= args_term.at_tail:
                stop = len(seq) - num_sec + nts["seq_num"]
                if stop > 10:
                    term_features = {"st_pos": 0, "rights": 0, "lefts": 0,
                                     "tmp_miss": 0, "real_miss": 0,
                                     "loop": 0, "r_stem": 0, "l_stem": 0}
                    detects = {"detect_r": False, "detect_l": False,
                               "conflict": False}
                    term_features, detects = check_sec(sec, stop + 1)
                    if detects["conflict"] is False:
                        total_length = (term_features["st_pos"] -
                                        nts["seq_num"])
                        term_features["l_stem"] = (
                            total_length - term_features["r_stem"] -
                            term_features["loop"])
                        if (total_length <= term_len) and (
                                term_features["loop"] <=
                                args_term.max_loop) and (
                                term_features["loop"] >=
                                args_term.min_loop) and (
                                ((term_features["r_stem"] +
                                  term_features["l_stem"] -
                                  term_features["real_miss"]) / 2) >=
                                args_term.min_stem) and (
                                ((term_features["r_stem"] +
                                  term_features["l_stem"] -
                                  term_features["real_miss"]) / 2) <=
                                args_term.max_stem):
                            nts["detect"] = True
                            if strand == "+":
                                import_candidate(
                                    cands, term_features, strain,
                                    start + (len(sec[0:stop + 1]) -
                                             term_features["st_pos"]) - 1,
                                    start + stop, nts["ut"], name,
                                    total_length, strand, parent_p,
                                    parent_m, p_pos, m_pos)
                            else:
                                import_candidate(
                                    cands, term_features, strain,
                                    end - (stop),
                                    end - (len(sec[0:stop + 1]) -
                                           term_features["st_pos"]) + 1,
                                    nts["ut"], name, total_length, strand,
                                    parent_p, parent_m, p_pos, m_pos)
        num_sec += 1
    return cands


def check_parent(genes, term, detects, strand, fuzzy_up, fuzzy_down, type_):
    tmp = None
    for gene in genes:
        if (term["strain"] == gene.seq_id) and (
                gene.strand == strand):
            if type_ == "parent_p":
                if ((term["start"] - fuzzy_down) <= gene.end) and (
                        term["start"] >= gene.end):
                    detects[type_] = True
                    tmp = get_feature(gene)
                elif ((gene.end - term["end"]) <= fuzzy_up) and (
                        (gene.end - term["end"]) >= 0):
                    detects[type_] = True
                    tmp = get_feature(gene)
                elif ((gene.end - term["start"]) > fuzzy_up) and (
                        (gene.end - term["start"]) >= 0):
                    break
            elif type_ == "parent_m":
                if ((term["end"] + fuzzy_down) >= gene.start) and (
                        term["end"] <= gene.start):
                    detects[type_] = True
                    tmp = get_feature(gene)
                elif ((term["start"] - gene.start) <= fuzzy_up) and (
                        (term["start"] - gene.start) >= 0):
                    detects[type_] = True
                    tmp = get_feature(gene)
                elif (gene.start - term["end"] > fuzzy_down):
                    break
    return tmp


def parents(terms, genes, args_term):
    '''assign the associated gene to the terminator'''
    for term in terms:
        detects = {"parent_p": False, "parent_m": False}
        if "tran" in term["parent_p"]:
            tmp_p = check_parent(genes, term, detects, "+",
                                 args_term.fuzzy_up_gene,
                                 args_term.fuzzy_down_gene, "parent_p")
            pos = term["p_pos"].split("-")[-1]
            if ((term["start"] - int(pos) <= args_term.fuzzy_down_ta) and (
                    term["start"] - int(pos) >= 0)) or (
                    (int(pos) - term["end"] <= args_term.fuzzy_up_ta) and (
                    int(pos) - term["end"] >= 0)):
                pass
            else:
                term["parent_p"] = ""
        if "tran" in term["parent_m"]:
            tmp_m = check_parent(genes, term, detects, "-",
                                 args_term.fuzzy_up_gene,
                                 args_term.fuzzy_down_gene, "parent_m")
            pos = term["m_pos"].split("-")[0]
            if ((int(pos) - term["end"] <= args_term.fuzzy_down_ta) and (
                    int(pos) - term["end"] >= 0)) or (
                    (term["start"] - int(pos) <= args_term.fuzzy_up_ta) and (
                    term["start"] - int(pos) >= 0)):
                pass
            else:
                term["parent_m"] = ""
        if detects["parent_p"]:
            if len(term["parent_p"]) == 0:
                term["parent_p"] = tmp_p
            else:
                term["parent_p"] = ",".join([term["parent_p"], tmp_p])
        if detects["parent_m"]:
            if len(term["parent_m"]) == 0:
                term["parent_m"] = tmp_m
            else:
                term["parent_m"] = ",".join([term["parent_m"], tmp_m])


def read_gff(seq_file, gff_file, tran_file):
    genome = {}
    genes = []
    trans = []
    for entry in Gff3Parser().entries(open(gff_file)):
        if (entry.feature == "gene"):
            genes.append(entry)
    for entry in Gff3Parser().entries(open(tran_file)):
        trans.append(entry)
    with open(seq_file, "r") as q_h:
        for line in q_h:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                genome[strain] = ""
            else:
                genome[strain] = genome[strain] + line
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    trans = sorted(trans, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    return genes, genome, trans


def compare_anno(gffs, cands, fuzzy_up, fuzzy_down):
    '''compare the terminator candidates with the annotation (CDS)'''
    detect = False
    new_cands = []
    for cand in cands:
        for gff in gffs:
            if (gff.seq_id == cand["strain"]) and (
                    gff.strand == cand["strand"]):
                if cand["strand"] == "+":
                    if (gff.start <= cand["start"]) and (
                            gff.end >= cand["start"]) and (
                            gff.end <= cand["end"]):
                        detect = True
                        break
                    elif (math.fabs(gff.end - cand["end"]) <=
                          fuzzy_up) and (gff.end >= cand["end"]):
                        detect = True
                        break
                    elif (math.fabs(gff.end - cand["start"]) <=
                          fuzzy_down) and (gff.end <= cand["start"]):
                        detect = True
                        break
                else:
                    if (gff.start >= cand["start"]) and (
                            gff.start <= cand["end"]) and (
                            gff.end >= cand["end"]):
                        detect = True
                        break
                    elif (math.fabs(gff.start - cand["end"]) <=
                          fuzzy_down) and (gff.start >= cand["end"]):
                        detect = True
                        break
                    elif (math.fabs(gff.start - cand["start"]) <=
                          fuzzy_up) and (cand["start"] >= gff.start):
                        detect = True
                        break
        if detect:
            detect = False
            new_cands.append(cand)
    return new_cands


def merge_cands(new_cands_gene, new_cands_ta):
    new_cands = []
    for cand_gene in new_cands_gene:
        new_cands.append(cand_gene)
    for cand_ta in new_cands_ta:
        if cand_ta not in new_cands:
            new_cands.append(cand_ta)
    new_cands = sorted(new_cands, key=lambda k: (k["strain"], k["start"],
                                                 k["end"], k["strand"]))
    return new_cands


def get_seq_sec(s_h, sec_seq):
    '''extract the secondary structure information'''
    for line in s_h:
        line = line.strip()
        if ("(" in line) or ("." in line) or (")" in line):
            line = line.split(" ")
            sec_seq["sec"] = line[0]
            break
        else:
            sec_seq["seq"] = line


def print_term(terms, out):
    for term in terms:
        print_ = False
        if (term["strand"] == "+") and (len(term["parent_p"]) != 0):
            print_ = True
        elif (term["strand"] == "-") and (len(term["parent_m"]) != 0):
            print_ = True
        if print_:
            out.write("\t".join([term["strain"], str(term["start"]),
                                 str(term["end"]), term["name"],
                                 str(term["miss"]), str(term["loop"]),
                                 str(term["length"]), str(term["r_stem"]),
                                 term["strand"], str(term["l_stem"]),
                                 term["parent_p"], term["parent_m"],
                                 str(term["ut"])]) + "\n")


def poly_t(seq_file, sec_file, gff_file, tran_file, out_file, args_term):
    '''detect the secondary structure of the terminators'''
    terms = []
    genes, genome, trans = read_gff(seq_file, gff_file, tran_file)
    out = open(out_file, "w")
    with open(sec_file, "r") as s_h:
        for line in s_h:
            line = line.strip()
            if line.startswith(">"):
                line = line[1:]
                name = line.split("|")[0]
                start = int(line.split("|")[1])
                end = int(line.split("|")[2])
                strain = line.split("|")[3]
                parent_p = line.split("|")[4]
                parent_m = line.split("|")[5]
                p_pos = line.split("|")[6]
                m_pos = line.split("|")[7]
                strand = line.split("|")[-1]
                sec_seq = {"sec": "", "seq": ""}
                get_seq_sec(s_h, sec_seq)
                if len(sec_seq["seq"]) <= 6:
                    continue
                else:
                    cands = detect_candidates(
                        sec_seq["seq"], sec_seq["sec"], name, strain,
                        start, end, parent_p, parent_m, strand,
                        args_term, p_pos, m_pos)
                cands = sorted(cands, key=lambda x: (x["miss"], x["start"]))
                new_cands_gene = compare_anno(genes, cands,
                                              args_term.fuzzy_up_gene,
                                              args_term.fuzzy_down_gene)
                new_cands_ta = compare_anno(trans, cands,
                                            args_term.fuzzy_up_ta,
                                            args_term.fuzzy_down_ta)
                new_cands = merge_cands(new_cands_gene, new_cands_ta)
                filter_term(new_cands, terms, args_term.miss_rate)
    parents(terms, genes, args_term)
    print_term(terms, out)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/get_polyT.py
get_polyT.py
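A sketch of invoking the poly_t entry point. Every attribute on the Namespace below is referenced somewhere in the module above, but the numeric values and file names are assumptions chosen for illustration.

from argparse import Namespace
from annogesiclib.get_polyT import poly_t

# Illustrative stem-loop and fuzzy-matching parameters.
args_term = Namespace(max_stem=20, min_stem=4, max_loop=10, min_loop=3,
                      miss_rate=0.25, at_tail=3, mut_u=2,
                      fuzzy_up_gene=10, fuzzy_down_gene=10,
                      fuzzy_up_ta=10, fuzzy_down_ta=10)
poly_t("genome.fa", "structures.txt", "annotation.gff",
       "transcripts.gff", "terminators.csv", args_term)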
import os
from subprocess import call
from annogesiclib.gen_svg import gen_svg
from annogesiclib.helper import Helper


class ColorPNG(object):

    def _convert_svg(self, imagemagick_path, out_path, screenshot,
                     svg_file, log):
        call([imagemagick_path, os.path.join(out_path, screenshot),
              os.path.join(out_path, svg_file)])
        log.write("\t" + " ".join([
            imagemagick_path, os.path.join(out_path, screenshot),
            os.path.join(out_path, svg_file)]) + "\n")

    def _convert_png(self, imagemagick_path, out_path, screenshot,
                     png_file, log):
        call([imagemagick_path, "-background", "none",
              os.path.join(out_path, screenshot),
              os.path.join(out_path, png_file)])
        log.write("\t" + " ".join([
            imagemagick_path, "-background", "none",
            os.path.join(out_path, screenshot),
            os.path.join(out_path, png_file)]) + "\n")

    def generate_color_png(self, track_num, out_folder, imagemagick_path,
                           log):
        '''generation of color png based on tracks'''
        out_folder = os.path.join(out_folder, "screenshots")
        for strain in os.listdir(out_folder):
            if os.path.isdir(os.path.join(out_folder, strain)):
                for strand in ["forward", "reverse"]:
                    print("Running for {0}_{1}".format(strain, strand))
                    out_path = os.path.join(out_folder, strain, strand)
                    # convert original png to svg and give color on it.
                    log.write("Converting png file in {0} to svg.\n".format(
                        out_path))
                    log.write("Colorizing svg files.\n"
                              "Make sure the version of ImageMagick is "
                              "at least 6.9.0-0.\n")
                    for screenshot in os.listdir(out_path):
                        if screenshot.endswith(".png"):
                            print("Converting {0} to svg files and "
                                  "Painting tracks now".format(screenshot))
                            svg_file = screenshot.replace(".png", ".svg")
                            self._convert_svg(imagemagick_path, out_path,
                                              screenshot, svg_file, log)
                            with open(os.path.join(
                                    out_path, svg_file), "r") as f_h:
                                for line in f_h:
                                    line = line.strip()
                                    if line.startswith("<svg"):
                                        line = line.split(" ")
                                        height = line[-1].split("=")[-1][1:-2]
                                        width = line[1].split("=")[-1][1:-1]
                                        break
                            gen_svg(os.path.join(out_path, screenshot),
                                    track_num, height, width)
                    log.write("All colorization for {0} is done.\n".format(
                        out_path))
                    # convert to png file again
                    log.write("Converting svg file in {0} to png.\n".format(
                        out_path))
                    for screenshot in os.listdir(out_path):
                        if screenshot.endswith(".svg"):
                            print("Converting {0} to png files now...".format(
                                screenshot))
                            png_file = screenshot.replace(".svg", ".png")
                            self._convert_png(imagemagick_path, out_path,
                                              screenshot, png_file, log)
                    Helper().remove_all_content(out_path, ".svg", "file")
                    log.write("All conversion for {0} is done.\n".format(
                        out_path))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/color_png.py
color_png.py
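A minimal call for the class above; the output folder and binary path are placeholders. The method expects screenshots under out_folder/screenshots/<strain>/<forward|reverse>, and "convert" stands in for an ImageMagick binary (at least 6.9.0-0).

from annogesiclib.color_png import ColorPNG

log = open("color_png.log", "w")
# Colorize the screenshots of 4 tracks per image.
ColorPNG().generate_color_png(4, "output", "convert", log)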
import csv
from annogesiclib.gff3 import Gff3Parser


def read_file(seq_file, seqs):
    with open(seq_file, "r") as s_h:
        for line in s_h:
            line = line.strip()
            if line.startswith(">"):
                header = line[1:]
            else:
                seqs.append({"name": header, "seq": line})


def import_ribo(line, ribos, seq_name):
    if line.startswith("("):
        num = 0
        datas = line.split(" ")
        for data in datas:
            if len(data) != 0:
                num += 1
                if (num == 2):
                    detect = data
                elif (num == 3):
                    e = float(data)
                elif (num == 4):
                    score = float(data)
                elif (num == 6):
                    name = data
                elif (num == 7):
                    start = int(data)
                elif (num == 8):
                    end = int(data)
                elif (num == 9):
                    if start > end:
                        tmp_start = start
                        start = end
                        end = tmp_start
                    ribos.append({"name": name, "detect": detect,
                                  "e": e, "score": score,
                                  "seq_name": seq_name,
                                  "start": start, "end": end})


def print_file(ribos, out_t, out_s, seq_name, seqs):
    if len(ribos) != 0:
        for rbs in ribos:
            if rbs["detect"] == "!":
                out_t.write("\t".join(
                    seq_name.split("|") + [
                        rbs["name"], str(rbs["e"]), str(rbs["score"]),
                        str(rbs["start"]), str(rbs["end"])]) + "\n")
                for seq in seqs:
                    if rbs["seq_name"] == seq["name"]:
                        tags = seq["name"].split("|")
                        if (rbs["end"] > (rbs["start"] - 1)):
                            if tags[2] == "+":
                                out_s.write(">" + "|".join([
                                    "|".join(tags[0:-2]),
                                    str(int(tags[-2]) + rbs["start"] - 1),
                                    str(int(tags[-2]) + rbs["end"] - 1)]) +
                                    "\n")
                            else:
                                out_s.write(">" + "|".join([
                                    "|".join(tags[0:-2]),
                                    str(int(tags[-1]) - rbs["end"] + 1),
                                    str(int(tags[-1]) - rbs["start"] + 1)]) +
                                    "\n")
                            out_s.write(seq["seq"][
                                (rbs["start"] - 1):(rbs["end"])] + "\n")


def regenerate_seq(align_file, seq_file, out_table, out_seq):
    hit = False
    seqs = []
    out_t = open(out_table, "w")
    out_s = open(out_seq, "w")
    read_file(seq_file, seqs)
    with open(align_file, "r") as a_h:
        for line in a_h:
            line = line.strip()
            if line.startswith("#"):
                continue
            else:
                if line.startswith("Query:"):
                    datas = line.split("[")[0]
                    ribos = []
                    seq_name = datas.strip().split(" ")[-1]
                    hit = False
                elif line.startswith("Hit scores:"):
                    hit = True
                elif hit:
                    import_ribo(line, ribos, seq_name)
                if line.startswith("Hit alignments:"):
                    hit = False
                    print_file(ribos, out_t, out_s, seq_name, seqs)
    out_t.close()
    out_s.close()


def check_cutoff(cutoff):
    if cutoff.split("_")[0] == "e":
        return "e"
    elif cutoff.split("_")[0] == "s":
        return "score"


def compare_first_result(ribos, firsts, seq_name, out, extras, cutoff):
    if len(ribos) != 0:
        for rbs in ribos:
            check = False
            same = False
            info = ""
            if rbs["detect"] == "!":
                for first in firsts:
                    if first["seq_name"] == "|".join(
                            rbs["seq_name"].split("|")[0:4]):
                        same = True
                        type_ = check_cutoff(cutoff)
                        if (first["acc"] == rbs["name"]) and (
                                first[type_] > rbs[type_]):
                            first["print"] = True
                            first["e"] = rbs["e"]
                            first["score"] = rbs["score"]
                            out.write("\t".join(seq_name.split("|") + [
                                rbs["name"], str(rbs["e"]),
                                str(rbs["score"]), str(rbs["start"]),
                                str(rbs["end"])]) + "\n")
                            if len(info) != 0:
                                info = ""
                        elif (first["acc"] != rbs["name"]):
                            info = "\t".join(seq_name.split("|") + [
                                rbs["name"], str(rbs["e"]),
                                str(rbs["score"]), str(rbs["start"]),
                                str(rbs["end"])])
                if len(info) != 0:
                    out.write(info + "\n")
                if not same:
                    if (len(extras) == 0):
                        extras.append(rbs)
                    else:
                        for extra in extras:
                            if (("|".join(
                                    extra["seq_name"].split("|")[0:4])) == (
                                    "|".join(
                                        rbs["seq_name"].split("|")[0:4]))):
                                check = True
                                if (extra["name"] == rbs["name"]):
                                    type_ = check_cutoff(cutoff)
                                    if extra[type_] > rbs[type_]:
                                        extra["seq_name"] = rbs["seq_name"]
                                        extra["e"] = rbs["e"]
                                        extra["score"] = rbs["score"]
                                        extra["start"] = rbs["start"]
                                        extra["end"] = rbs["end"]
                                else:
                                    extras.append(rbs)
                        if not check:
                            extras.append(rbs)


def reextract_rbs(align_file, first_file, output_file, cutoff):
    '''based on the first detection, extract the RBS and run again'''
    hit = False
    extras = []
    out = open(output_file, "w")
    f_h = open(first_file, "r")
    firsts = []
    for row in csv.reader(f_h, delimiter="\t"):
        firsts.append({"seq_name": "|".join(row[0:4]), "acc": row[6],
                       "e": float(row[7]), "score": float(row[8]),
                       "start": int(row[9]), "end": int(row[10]),
                       "print": False, "pre_start": int(row[4]),
                       "pre_end": int(row[5])})
    with open(align_file, "r") as a_h:
        for line in a_h:
            line = line.strip()
            if line.startswith("#"):
                continue
            else:
                if line.startswith("Query:"):
                    datas = line.split("[")[0]
                    seq_name = datas.strip().split(" ")[-1]
                    ribos = []
                    hit = False
                elif line.startswith("Hit scores:"):
                    hit = True
                elif hit:
                    import_ribo(line, ribos, seq_name)
                if line.startswith("Hit alignments:"):
                    hit = False
                    compare_first_result(ribos, firsts, seq_name,
                                         out, extras, cutoff)
    if len(extras) != 0:
        for extra in extras:
            out.write("\t".join(extra["seq_name"].split("|") + [
                extra["name"], str(extra["e"]), str(extra["score"]),
                str(extra["start"]), str(extra["end"])]) + "\n")
    for first in firsts:
        if not first["print"]:
            out.write("\t".join(first["seq_name"].split("|") + [
                str(first["pre_start"]), str(first["pre_end"]),
                first["acc"], str(first["e"]), str(first["score"]),
                str(first["start"]), str(first["end"])]) + "\n")
    out.close()
    f_h.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/recompute_RBS.py
recompute_RBS.py
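A sketch of the two-pass workflow these functions implement. The report format is the "Hit scores:" text layout parsed above (as produced by Infernal-style scans); all file names are hypothetical, and the cutoff string follows the "e_..." or "s_..." convention read by check_cutoff.

from annogesiclib.recompute_RBS import regenerate_seq, reextract_rbs

# First pass: pull hits out of a text scan report and write the
# corresponding subsequences for re-scanning.
regenerate_seq("scan_round1.txt", "candidates.fa",
               "hits_round1.csv", "hits_round1.fa")
# Second pass: merge the re-scanned hits with the first-round table,
# keeping the better hit by e-value (cutoff "e_0.001").
reextract_rbs("scan_round2.txt", "hits_round1.csv",
              "final_hits.csv", "e_0.001")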
import csv
import shutil
from annogesiclib.lib_reader import read_libs


def import_covers(row):
    cover_names = []
    covers = []
    for data in row.split("("):
        if ")" not in data:
            cover_names.append(data)
        else:
            covers.append(data.split(")")[0])
            if len(data.split(");")) == 2:
                cover_names.append(data.split(");")[-1])
    return cover_names, covers


def get_lib_name(libs):
    tracks = []
    double_tracks = []
    track_list = []
    for lib1 in libs:
        for lib2 in libs:
            if (lib1["cond"] == lib2["cond"]) and (
                    lib1["type"] == lib2["type"]) and (
                    lib1["rep"] == lib2["rep"]) and (
                    lib1["strand"] != lib2["strand"]):
                track = "/".join([lib1["name"], lib2["name"]])
                if track not in double_tracks:
                    double_tracks.append(track)
                    double_tracks.append("/".join([lib2["name"],
                                                   lib1["name"]]))
                    tracks.append(track)
                    track_list.append([lib1["name"], lib2["name"]])
    return tracks, track_list


def reorganize_table(input_libs, wigs, cover_header, table_file):
    libs, texs = read_libs(input_libs, wigs)
    fh = open(table_file, "r")
    first = True
    headers = []
    tracks, track_list = get_lib_name(libs)
    out = open(table_file + "tmp", "w")
    for row in csv.reader(fh, delimiter='\t'):
        if first:
            detect = False
            header_num = 0
            for header in row:
                if header == cover_header:
                    index = header_num
                    detect = True
                header_num += 1
                if not detect:
                    headers.append(header)
                else:
                    detect = False
            first = False
            for track in tracks:
                headers.append("Avg_coverage:" + track)
            out.write("\t".join(headers) + "\n")
        else:
            if len(row) < (index + 1):
                cover_names = []
                covers = []
            else:
                cover_names, covers = import_covers(row[index])
            if len(row) == index + 1:
                row = row[:index]
            else:
                row = row[:index] + row[index + 1:]
            detects = ["Not_detect"] * len(tracks)
            for name, cover in zip(cover_names, covers):
                num_track = 0
                for track in track_list:
                    if name in track:
                        detects[num_track] = cover
                    num_track += 1
            out.write("\t".join(row + detects) + "\n")
    out.close()
    shutil.move(table_file + "tmp", table_file)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/reorganize_table.py
reorganize_table.py
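A small sketch of calling the table reorganizer; it splits one combined coverage column into one Avg_coverage:<track> column per paired-strand library and rewrites the table in place. The library strings, folder, and coverage header name are all assumptions for illustration.

from annogesiclib.reorganize_table import reorganize_table

input_libs = ["TSB_OD_0.2_TEX_forward.wig:tex:1:a:+",
              "TSB_OD_0.2_TEX_reverse.wig:tex:1:a:-"]
# "Avg_coverage:all_libs" is the hypothetical name of the combined column.
reorganize_table(input_libs, "wigs", "Avg_coverage:all_libs",
                 "sRNA_table.csv")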
import os
import shutil
import sys
import random
import csv
from subprocess import Popen, STDOUT
import math
import time
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.converter import Converter
import copy


def compute_stat(stat_value, best, best_para, cores, list_num, out_path,
                 indexs, strain):
    if indexs["change"]:
        indexs["change"] = False
        best = stat_value
        best_para = copy.deepcopy(list_num[-1 * cores + indexs["count"]])
        best_out = open(out_path + "/best_" + strain + ".csv", "w")
        para_line = "_".join(["he", str(best_para["height"]),
                              "rh", str(best_para["re_height"]),
                              "fa", str(best_para["factor"]),
                              "rf", str(best_para["re_factor"]),
                              "bh", str(best_para["base_height"]),
                              "ef", str(best_para["enrichment"]),
                              "pf", str(best_para["processing"])])
        best_out.write("{0}\t{1}\tTP={2}\tTP_rate={3}\tFP={4}\tFP_rate={5}\t"
                       "FN={6}\tMissing_ratio={7}\t".format(
                           (indexs["step"] - cores + 1 + indexs["count"]),
                           para_line, best["tp"], best["tp_rate"],
                           best["fp"], best["fp_rate"], best["fn"],
                           best["missing_ratio"]))
        best_out.close()
    print(", ".join(["Current genome={0}", "Current Parameters:step={1}",
                     "height={2}", "height_reduction={3}", "factor={4}",
                     "factor_reduction={5}", "base_height={6}",
                     "enrichment_factor={7}",
                     "processing_factor={8}"]).format(
        strain, indexs["step"] - cores + 1 + indexs["count"],
        list_num[-1 * cores + indexs["count"]]["height"],
        list_num[-1 * cores + indexs["count"]]["re_height"],
        list_num[-1 * cores + indexs["count"]]["factor"],
        list_num[-1 * cores + indexs["count"]]["re_factor"],
        list_num[-1 * cores + indexs["count"]]["base_height"],
        list_num[-1 * cores + indexs["count"]]["enrichment"],
        list_num[-1 * cores + indexs["count"]]["processing"]))
    print("Current:TP={0}\tTP_rate={1}\tFP={2}\t"
          "FP_rate={3}\tFN={4}\tMissing_ratio={5}".format(
              stat_value["tp"], stat_value["tp_rate"], stat_value["fp"],
              stat_value["fp_rate"], stat_value["fn"],
              stat_value["missing_ratio"]))
    print("\t".join(["Best Parameters:height={0}", "height_reduction={1}",
                     "factor={2}", "factor_reduction={3}",
                     "base_height={4}", "enrichment_factor={5}",
                     "processing_factor={6}"]).format(
        best_para["height"], best_para["re_height"], best_para["factor"],
        best_para["re_factor"], best_para["base_height"],
        best_para["enrichment"], best_para["processing"]))
    print("Best:TP={0}\tTP_rate={1}\tFP={2}\tFP_rate={3}"
          "\tFN={4}\tMissing_ratio={5}".format(
              best["tp"], best["tp_rate"], best["fp"], best["fp_rate"],
              best["fn"], best["missing_ratio"]))
    indexs["count"] += 1
    return (best_para, best)


def scoring_function(best, stat_value, indexs, num_manual):
    '''main scoring function'''
    indexs["change"] = False
    if (stat_value["tp_rate"] == best["tp_rate"]) and (
            stat_value["fp_rate"] == best["fp_rate"]):
        pass
    else:
        if (stat_value["tp_rate"] - best["tp_rate"]) >= 0.1:
            indexs["change"] = True
        elif (best["tp_rate"] - stat_value["tp_rate"]) <= 0.1:
            if (best["tp_rate"] <= stat_value["tp_rate"]) and (
                    best["fp_rate"] >= stat_value["fp_rate"]):
                indexs["change"] = True
            elif num_manual > 100:
                if (stat_value["tp_rate"] - best["tp_rate"] >= 0.01) and (
                        stat_value["fp_rate"] - best["fp_rate"] <= 0.00005):
                    indexs["change"] = True
                elif (best["tp_rate"] - stat_value["tp_rate"] <= 0.01) and (
                        best["fp_rate"] - stat_value["fp_rate"] >= 0.00005):
                    indexs["change"] = True
        tp_diff = float(best["tp"] - stat_value["tp"])
        if tp_diff > 0:
            if float(best["fp"] - stat_value["fp"]) >= 5 * tp_diff:
                indexs["change"] = True
        elif tp_diff < 0:
            tp_diff = tp_diff * -1
            if float(stat_value["fp"] - best["fp"]) <= 5 * tp_diff:
                indexs["change"] = True


def check_overlap(overlap, pre_tss, nums, length, manual, predict, pre_pos):
    if overlap:
        if pre_tss:
            pre_tss.attributes["print"] = True
            tss = pre_tss
        else:
            tss = predict
        if (tss.start <= int(length)):
            if (pre_pos != -1):
                if (tss.start - pre_pos != 0):
                    nums["overlap"] += 1
                else:
                    nums["overlap"] += 1
            else:
                nums["overlap"] += 1
        overlap = False
        pre_pos = tss.start
    else:
        if (manual.start <= int(length)):
            nums["manual"] += 1
    return (overlap, pre_pos)


def comparison(manuals, predicts, nums, args_ops, length):
    overlap = False
    pre_pos = -1
    for tss_m in manuals:
        pre_tss = None
        for tss_p in predicts:
            if (tss_p.strand == tss_m.strand) and (
                    (tss_p.seq_id == tss_m.seq_id) or (
                    (tss_p.seq_id == tss_m.seq_id[:-2]) and (
                    tss_m.seq_id[-2] == "."))):
                if (tss_p.start == tss_m.start):
                    tss_p.attributes["print"] = True
                    overlap = True
                    pre_tss = None
                    break
                elif (math.fabs(tss_p.start - tss_m.start) <=
                      args_ops.cluster):
                    overlap = True
                    pre_tss = tss_p
        try:
            datas = check_overlap(overlap, pre_tss, nums, length,
                                  tss_m, tss_p, pre_pos)
            overlap = datas[0]
            pre_pos = datas[1]
        except UnboundLocalError:
            nums = {"overlap": -1, "predict": -1, "manual": -1}
    for tss_p in predicts:
        if tss_p.attributes["print"] is False:
            if (tss_p.start <= int(length)):
                nums["predict"] += 1


def read_predict_manual_gff(gff_file, length):
    num = 0
    gffs = []
    f_h = open(gff_file, "r")
    for entry in Gff3Parser().entries(f_h):
        if (entry.start <= int(length)):
            num += 1
            entry.attributes["print"] = False
            gffs.append(entry)
    f_h.close()
    return num, gffs


def compare_manual_predict(total_step, para_list, gff_files, out_path,
                           out, args_ops, manuals, num_manual, length):
    '''compare the manually detected set with the predicted set and
    print the results to stat.csv'''
    # manual_fh = open(manual, "r")
    stats = []
    count = 0
    total_step = total_step - int(args_ops.cores) + 1
    if num_manual != 0:
        for gff_file in gff_files:
            nums = {"overlap": 0, "predict": 0, "manual": 0}
            para = "_".join(["he", str(para_list[count]["height"]),
                             "rh", str(para_list[count]["re_height"]),
                             "fa", str(para_list[count]["factor"]),
                             "rf", str(para_list[count]["re_factor"]),
                             "bh", str(para_list[count]["base_height"]),
                             "ef", str(para_list[count]["enrichment"]),
                             "pf", str(para_list[count]["processing"])])
            num_predict, predicts = read_predict_manual_gff(gff_file, length)
            comparison(manuals, predicts, nums, args_ops, length)
            out.write("{0}\t{1}\tTP={2}\tTP_rate={3}\t".format(
                total_step, para, nums["overlap"],
                float(nums["overlap"]) / float(num_manual)))
            out.write("FP={0}\tFP_rate={1}\tFN={2}"
                      "\tmissing_ratio={3}\n".format(
                          nums["predict"],
                          float(nums["predict"]) / float(
                              int(length) - num_manual),
                          nums["manual"],
                          float(nums["manual"]) / float(num_manual)))
            if nums["overlap"] == -1:
                out.write("No TSS is detected within the range\n")
            stats.append({"tp": nums["overlap"],
                          "tp_rate": float(
                              nums["overlap"]) / float(num_manual),
                          "fp": nums["predict"],
                          "fp_rate": float(nums["predict"]) / float(
                              length - num_manual),
                          "fn": nums["manual"],
                          "missing_ratio": float(
                              nums["manual"]) / float(num_manual)})
            total_step += 1
            count += 1
    return stats


def convert2gff(out_path, gff_files, args_ops, strain):
    for core in range(1, args_ops.cores + 1):
        output_folder = os.path.join(
            out_path, "_".join(["MasterTable", str(core)]))
        gff_file = os.path.join(
            output_folder, "_".join(["TSSpredator", str(core) + ".gff"]))
        Converter().convert_mastertable2gff(
            os.path.join(output_folder, "MasterTable.tsv"),
            "TSSpredator", args_ops.program, strain, gff_file)
        gff_files.append(gff_file)


def run_TSSpredator(tsspredator_path, config_file):
    folders = config_file.split("/")
    out_path = "/".join(folders[:-1])
    out = open(os.path.join(
        out_path, "TSSpredator_log.txt"), "w")
    p = Popen(["java", "-jar", tsspredator_path, config_file],
              stdout=out, stderr=STDOUT)
    return p


def run_TSSpredator_paralle(config_files, tsspredator_path, processes):
    '''run TSSpredator in parallel'''
    for config_file in config_files:
        process = run_TSSpredator(tsspredator_path, config_file)
        processes.append(process)
    for p in processes:
        p.wait()
        if p.stdout:
            p.stdout.close()
        if p.stdin:
            p.stdin.close()
        if p.stderr:
            p.stderr.close()
        try:
            p.kill()
        except OSError:
            pass
    time.sleep(5)


def print_lib(lib_num, lib_list, out, wig_folder, prefix, rep_set):
    for num_id in range(1, lib_num + 1):
        cond_list = []
        for lib in lib_list:
            if num_id == lib["condition"]:
                cond_list.append(lib)
        cond_sort_list = sorted(cond_list, key=lambda k: k['replicate'])
        reps = []
        for cond in cond_sort_list:
            out.write("{0}_{1}{2} = {3}/{4}\n".format(
                prefix, cond["condition"], cond["replicate"],
                wig_folder, cond["wig"]))
            reps.append(cond["replicate"])
        for rep in sorted(rep_set):
            if rep not in reps:
                out.write("{0}_{1}{2} = \n".format(
                    prefix, cond["condition"], rep))


def assign_dict(lib_datas):
    return {"wig": lib_datas[0], "tex": lib_datas[1],
            "condition": int(lib_datas[2]), "replicate": lib_datas[3],
            "strand": lib_datas[4]}


def import_lib(wig_folder, rep_set, lib_dict, out, gff, list_num_id,
               fasta, args_ops, strain):
    lib_num = 0
    for lib in args_ops.libs:
        lib_datas = lib.split(":")
        if lib_datas[0].endswith(".wig") is not True:
            print("Error: A file which is not a proper wig file exists!")
            sys.exit()
        for wig in os.listdir(wig_folder):
            filename = wig.split("_STRAIN_")
            if (filename[0] == lib_datas[0][:-4]) and \
               (filename[1][:-4] == strain):
                lib_datas[0] = wig
            elif (filename[0] == lib_datas[0][:-4]) and \
                 ("." == filename[1][-6]) and \
                 (filename[1][:-6] == strain):
                lib_datas[0] = wig
        if int(lib_datas[2]) > lib_num:
            lib_num = int(lib_datas[2])
        if lib_datas[3] not in rep_set:
            rep_set.add(lib_datas[3])
        if (lib_datas[1] == "tex") and \
           (lib_datas[4] == "+"):
            lib_dict["fp"].append(assign_dict(lib_datas))
        elif (lib_datas[1] == "tex") and \
             (lib_datas[4] == "-"):
            lib_dict["fm"].append(assign_dict(lib_datas))
        elif (lib_datas[1] == "notex") and \
             (lib_datas[4] == "+"):
            lib_dict["np"].append(assign_dict(lib_datas))
        elif (lib_datas[1] == "notex") and \
             (lib_datas[4] == "-"):
            lib_dict["nm"].append(assign_dict(lib_datas))
    for num_id in range(1, lib_num + 1):
        os.system("echo '##gff-version 3' > tmp")
        g = open(gff, "r")
        for row in csv.reader(g, delimiter='\t'):
            if not row[0].startswith("#"):
                seq_name = row[0]
                break
        os.system("echo '##sequence-region '" + seq_name + " >> tmp")
        os.system("cat " + gff + ">> tmp")
        g.close()
        shutil.move("tmp", gff)
        out.write("annotation_{0} = {1}\n".format(num_id, gff))
    if args_ops.program.lower() == "tss":
        print_lib(lib_num, lib_dict["fm"], out, wig_folder,
                  "fivePrimeMinus", rep_set)
        print_lib(lib_num, lib_dict["fp"], out, wig_folder,
                  "fivePrimePlus", rep_set)
    elif args_ops.program.lower() == "ps":
        print_lib(lib_num, lib_dict["nm"], out, wig_folder,
                  "fivePrimeMinus", rep_set)
        print_lib(lib_num, lib_dict["np"], out, wig_folder,
                  "fivePrimePlus", rep_set)
    else:
        print("Error: The program name is wrong!")
        sys.exit()
    for num_id in range(1, lib_num + 1):
        out.write("genome_%s = %s\n" % (str(num_id), fasta))
    for num_id in range(1, lib_num + 1):
        list_num_id.append(str(num_id))
    return lib_num


def print_repmatch(args_ops, out):
    '''deal with the replicate match'''
    detect_all = False
    for rep in args_ops.replicate:
        if "all" in args_ops.replicate:
            detect_all = True
            match = args_ops.replicate.split("_")[-1]
            out.write("minNumRepMatches = {0}\n".format(match))
            break
    if not detect_all:
        nums = {}
        matchs = {}
        for match in args_ops.replicate:
            lib = match.split("_")[0]
            rep = match.split("_")[-1]
            matchs[lib] = rep
            if rep not in nums.keys():
                nums[rep] = 1
            else:
                nums[rep] += 1
        for rep, num in nums.items():
            if num == max(nums.values()):
                out.write("minNumRepMatches = {0}\n".format(rep))
                max_rep = rep
                break
        for lib, rep in matchs.items():
            if rep != max_rep:
                out.write("minNumRepMatches_{0} = {1}\n".format(
                    lib, rep))


def gen_config(para_list, out_path, core, wig, fasta, gff, args_ops, strain):
    '''generate the config file for TSSpredator'''
    files = os.listdir(out_path)
    if "MasterTable_" + str(core) not in files:
        os.mkdir(os.path.join(out_path, "MasterTable_" + str(core)))
    lib_dict = {"fp": [], "fm": [], "nm": [], "np": []}
    rep_set = set()
    list_num_id = []
    filename = os.path.join(out_path, "config_" + str(core) + ".ini")
    out = open(filename, "w")
    out.write("TSSinClusterSelectionMethod = HIGHEST\n")
    out.write("allowedCompareShift = 1\n")
    out.write("allowedRepCompareShift = 1\n")
    lib_num = import_lib(wig, rep_set, lib_dict, out, gff,
                         list_num_id, fasta, args_ops, strain)
    out.write("idList = ")
    out.write(",".join(list_num_id) + "\n")
    out.write("maxASutrLength = 100\n")
    out.write("maxGapLengthInGene = 500\n")
    out.write("maxNormalTo5primeFactor = {0}\n".format(
        para_list["processing"]))
    out.write("maxTSSinClusterDistance = {0}\n".format(args_ops.cluster + 1))
    out.write("maxUTRlength = {0}\n".format(args_ops.utr))
    out.write("min5primeToNormalFactor = {0}\n".format(
        para_list["enrichment"]))
    out.write("minCliffFactor = {0}\n".format(para_list["factor"]))
    out.write("minCliffFactorDiscount = {0}\n".format(para_list["re_factor"]))
    out.write("minCliffHeight = {0}\n".format(para_list["height"]))
    out.write("minCliffHeightDiscount = {0}\n".format(para_list["re_height"]))
    out.write("minNormalHeight = {0}\n".format(para_list["base_height"]))
    print_repmatch(args_ops, out)
    out.write("minPlateauLength = 0\n")
    out.write("mode = cond\n")
    out.write("normPercentile = 0.9\n")
    if (args_ops.program.lower() == "tss"):
        print_lib(lib_num, lib_dict["nm"], out, wig, "normalMinus", rep_set)
        print_lib(lib_num, lib_dict["np"], out, wig, "normalPlus", rep_set)
    elif (args_ops.program.lower() == "ps"):
        print_lib(lib_num, lib_dict["fm"], out, wig, "normalMinus", rep_set)
        print_lib(lib_num, lib_dict["fp"], out, wig, "normalPlus", rep_set)
    out.write("numReplicates = {0}\n".format(len(rep_set)))
    out.write("numberOfDatasets = {0}\n".format(lib_num))
    out.write("outputDirectory = {0}\n".format(
        os.path.join(out_path, "_".join(["MasterTable", str(core)]))))
    for prefix_id in range(len(args_ops.replicate_name)):
        out.write("outputPrefix_{0} = {1}\n".format(
            prefix_id + 1, args_ops.replicate_name[prefix_id]))
        out.write("outputID_{0} = {1}\n".format(
            prefix_id + 1, args_ops.output_id))
    out.write("projectName = {0}\n".format(strain))
    out.write("superGraphCompatibility = igb\n")
    out.write("texNormPercentile = 0.5\n")
    out.write("writeGraphs = 0\n")
    out.write("writeNocornacFiles = 0\n")
    out.close()
    return filename


def run_tss_and_stat(indexs, list_num, seeds, diff_h, diff_f, out_path,
                     stat_out, best_para, current_para, wig, fasta, gff,
                     best, num_manual, args_ops, strain, manuals, length,
                     log, set_config, run_tss):
    '''run TSSpredator and do the statistics'''
    if indexs["step"] > args_ops.steps + int(args_ops.cores):
        return (True, best_para)
    elif len(list_num) == indexs["length"]:
        indexs["step"] = indexs["step"] - 1
        seeds["pre_seed"].append(seeds["seed"])
    elif (diff_h <= 0) or \
         (diff_f <= 0):
        indexs["step"] = indexs["step"] - 1
        list_num = list_num[:-1]
    else:
        indexs["num"] += 1
        seeds["pre_seed"] = []
        if indexs["num"] == args_ops.cores:
            index = 0
            config_files = []
            gff_files = []
            for para in list_num[-1 * args_ops.cores:]:
                index += 1
                if not set_config:
                    log.write("Checking the process of generating "
                              "config files.\n")
                config_files.append(gen_config(para, out_path, index, wig,
                                               fasta, gff, args_ops, strain))
            if not set_config:
                set_config = True
                log.write("config files can be generated and stored in "
                          "{0} successfully.\n".format(out_path))
            indexs["count"] = 0
            processes = []
            if not run_tss:
                log.write("Checking the setup of TSSpredator.\n")
                log.write("Please make sure your version of TSSpredator "
                          "is at least 1.06.\n")
            run_TSSpredator_paralle(config_files, args_ops.tsspredator_path,
                                    processes)
            if not run_tss:
                run_tss = True
                log.write("TSSpredator is running successfully.\n")
            convert2gff(out_path, gff_files, args_ops, strain)
            stat_values = compare_manual_predict(
                indexs["step"], list_num[-1 * args_ops.cores:],
                gff_files, out_path, stat_out, args_ops, manuals,
                num_manual, length)
            for stat_value in stat_values:
                if indexs["first"]:
                    indexs["first"] = False
                    best = stat_value
                    best_para = copy.deepcopy(
                        list_num[-1 * args_ops.cores + indexs["count"]])
                else:
                    scoring_function(best, stat_value, indexs, num_manual)
                datas = compute_stat(stat_value, best, best_para,
                                     args_ops.cores, list_num, out_path,
                                     indexs, strain)
                best_para = datas[0]
                best = datas[1]
            indexs["switch"] += 1
            stat_values = []
            indexs["num"] = 0
    return (False, best_para, best), set_config, run_tss


def minus_process(num_type, new_para, max_num, best_num, actions,
                  list_num, compare):
    '''minus one unit in the small-change part'''
    if num_type == "base_height":
        new_para[num_type] = new_para[num_type] - 0.001
        new_para[num_type] = float('%.3f' % new_para[num_type])
        while True:
            if new_para[num_type] < 0.0:
                new_para[num_type] = 0.0
                if new_para in list_num:
                    new_para[num_type] = best_num
                    actions["minus"] = True
                    actions["in_or_de"] = 2
                    break
                else:
                    list_num.append(new_para)
                    return new_para[num_type]
            elif (new_para in list_num):
                new_para[num_type] = new_para[num_type] - 0.001
                new_para[num_type] = float('%.3f' % new_para[num_type])
                continue
            else:
                list_num.append(copy.deepcopy(new_para))
                return new_para[num_type]
    else:
        new_para[num_type] = new_para[num_type] - 0.1
        new_para[num_type] = float('%.1f' % new_para[num_type])
        while True:
            if new_para[num_type] <= 0.0:
                new_para[num_type] = best_num
                actions["in_or_de"] = 2
                actions["minus"] = True
                break
            elif (new_para in list_num):
                new_para[num_type] = new_para[num_type] - 0.1
                new_para[num_type] = float('%.1f' % new_para[num_type])
                continue
            elif ((num_type == "factor") or (
                    num_type == "height")) and (
                    new_para[num_type] <= compare):
                new_para[num_type] = best_num
                actions["in_or_de"] = 2
                actions["minus"] = True
                break
            else:
                list_num.append(copy.deepcopy(new_para))
                return new_para[num_type]
    return None


def plus_process(num_type, new_para, max_num, best_num, actions,
                 list_num, compare):
    '''plus one unit in the small-change part'''
    if num_type == "base_height":
        new_para[num_type] = new_para[num_type] + 0.001
        new_para[num_type] = float('%.3f' % new_para[num_type])
        while True:
            if new_para[num_type] >= max_num:
                new_para[num_type] = best_num
                actions["in_or_de"] = 1
                actions["plus"] = True
                break
            elif (new_para in list_num):
                new_para[num_type] = new_para[num_type] + 0.001
                new_para[num_type] = float('%.3f' % new_para[num_type])
                continue
            else:
                list_num.append(copy.deepcopy(new_para))
                return new_para[num_type]
    else:
        new_para[num_type] = new_para[num_type] + 0.1
        new_para[num_type] = float('%.1f' % new_para[num_type])
        while True:
            if new_para[num_type] >= max_num:
                new_para[num_type] = best_num
                actions["in_or_de"] = 1
                actions["plus"] = True
                break
            elif (new_para in list_num):
                new_para[num_type] = new_para[num_type] + 0.1
                new_para[num_type] = float('%.1f' % new_para[num_type])
                continue
            elif ((num_type == "re_factor") or (
                    num_type == "re_height")) and (
                    new_para[num_type] >= compare):
                new_para[num_type] = best_num
                actions["in_or_de"] = 1
                actions["plus"] = True
                break
            else:
                list_num.append(copy.deepcopy(new_para))
                return new_para[num_type]
    return None


def small_change(max_num, num_type, compare, list_num, best_num, best_para):
    '''add or minus one unit for one parameter in the small-change part'''
    new_para = copy.deepcopy(best_para)
    actions = {"plus": False, "minus": False}
    step = 0
    if new_para[num_type] >= max_num:
        actions["in_or_de"] = 1
    elif new_para[num_type] <= 0:
        actions["in_or_de"] = 2
    else:
        actions["in_or_de"] = random.randint(0, 9)
    while True:
        step += 1
        if step >= 1000:
            return best_num
        if (actions["plus"] is True) and (actions["minus"] is True):
            new_para[num_type] = best_num
            return new_para[num_type]
        if actions["in_or_de"] % 2 == 0:
            tmp_para = plus_process(num_type, new_para, max_num, best_num,
                                    actions, list_num, compare)
        if actions["in_or_de"] % 2 == 1:
            tmp_para = minus_process(num_type, new_para, max_num, best_num,
                                     actions, list_num, compare)
        if tmp_para is not None:
            return tmp_para


def run_small_change_part(seeds, features, indexs, current_para,
                          best_para, list_num, max_num):
    '''it is for the small change'''
    while True:
        seeds["seed"] = random.randint(0, 6)
        if seeds["seed"] in seeds["pre_seed"]:
            if
len(seeds["pre_seed"]) == 7: indexs["switch"] += 1 features["pre_feature"] = features["feature"] break else: continue else: break if seeds["seed"] == 0: current_para["height"] = small_change( max_num["height"], "height", best_para["re_height"], list_num, best_para["height"], best_para) elif seeds["seed"] == 1: current_para["re_height"] = small_change( max_num["re_height"], "re_height", best_para["height"], list_num, best_para["re_height"], best_para) elif seeds["seed"] == 2: current_para["factor"] = small_change( max_num["factor"], "factor", best_para["re_factor"], list_num, best_para["factor"], best_para) elif seeds["seed"] == 3: current_para["re_factor"] = small_change( max_num["re_factor"], "re_factor", best_para["factor"], list_num, best_para["re_factor"], best_para) elif seeds["seed"] == 4: current_para["base_height"] = small_change( max_num["base_height"], "base_height", best_para["base_height"], list_num, best_para["base_height"], best_para) elif seeds["seed"] == 5: current_para["enrichment"] = small_change( max_num["enrichment"], "enrichment", best_para["enrichment"], list_num, best_para["enrichment"], best_para) elif seeds["seed"] == 6: current_para["processing"] = small_change( max_num["processing"], "processing", best_para["processing"], list_num, best_para["processing"], best_para) return current_para def gen_large_random(max_num, num_type, compare, list_num, origin_num, best_para, index_large, indexs): '''random change two parameters for large change''' new_para = copy.deepcopy(best_para) step = 0 while True: step += 1 if step >= 1000000: return best_para seed = random.randint(0, 6) if num_type == index_large[seed]: continue if num_type == "base_height": number = round(random.uniform(0.000, max_num[num_type]), 3) number = '%.3f' % number number = float(number) else: number = round(random.uniform(0.1, max_num[num_type]), 1) number = '%.1f' % number number = float(number) if index_large[seed] == "base_height": number_par = round(random.uniform(0.000, max_num[index_large[seed]]), 3) number_par = '%.3f' % number_par number_par = float(number_par) else: number_par = round(random.uniform(0.1, max_num[index_large[seed]]), 1) number_par = '%.1f' % number_par number_par = float(number_par) new_para[num_type] = number new_para[index_large[seed]] = number_par if new_para in list_num: continue else: if (new_para["height"] <= new_para["re_height"]) or \ (new_para["factor"] <= new_para["re_factor"]): continue else: list_num.append(copy.deepcopy(new_para)) return new_para def run_large_change_part(seeds, features, indexs, current_para, max_num, best_para, list_num): '''it is for the large change''' index_large = {0: "height", 1: "re_height", 2: "factor", 3: "re_factor", 4: "base_height", 5: "enrichment", 6: "processing"} while True: seeds["seed"] = random.randint(0, 6) if seeds["seed"] in seeds["pre_seed"]: if len(seeds["pre_seed"]) == 7: features["pre_feature"] = features["feature"] indexs["switch"] += 1 break else: continue else: break if seeds["seed"] == 0: current_para = gen_large_random( max_num, "height", best_para["re_height"], list_num, best_para["height"], best_para, index_large, indexs) elif seeds["seed"] == 1: current_para = gen_large_random( max_num, "re_height", best_para["height"], list_num, best_para["re_height"], best_para, index_large, indexs) elif seeds["seed"] == 2: current_para = gen_large_random( max_num, "factor", best_para["re_factor"], list_num, best_para["factor"], best_para, index_large, indexs) elif seeds["seed"] == 3: current_para = gen_large_random( max_num, 
"re_factor", best_para["factor"], list_num, best_para["re_factor"], best_para, index_large, indexs) elif seeds["seed"] == 4: current_para = gen_large_random( max_num, "base_height", best_para["base_height"], list_num, best_para["base_height"], best_para, index_large, indexs) elif seeds["seed"] == 5: current_para = gen_large_random( max_num, "enrichment", best_para["enrichment"], list_num, best_para["enrichment"], best_para, index_large, indexs) elif seeds["seed"] == 6: current_para = gen_large_random( max_num, "processing", best_para["processing"], list_num, best_para["processing"], best_para, index_large, indexs) return current_para def run_random_part(current_para, list_num, max_num, steps, indexs): '''it is for the random selection''' tmp_random_step = 0 while True: current_para["height"] = round(random.uniform( 0.1, max_num["height"]), 1) current_para["re_height"] = round(random.uniform( 0.1, max_num["re_height"]), 1) current_para["factor"] = round(random.uniform( 0.1, max_num["factor"]), 1) current_para["re_factor"] = round(random.uniform( 0.1, max_num["re_factor"]), 1) current_para["enrichment"] = round(random.uniform( 0.1, max_num["enrichment"]), 1) current_para["processing"] = round(random.uniform( 0.1, max_num["processing"]), 1) current_para["base_height"] = round(random.uniform( 0.000, max_num["base_height"]), 3) if (current_para["height"] > current_para["re_height"]) and ( current_para["factor"] > current_para["re_factor"]) and ( current_para not in list_num): list_num.append(copy.deepcopy(current_para)) break tmp_random_step += 1 if tmp_random_step >= steps: indexs["switch"] += 1 return None return current_para def optimization_process(indexs, current_para, list_num, max_num, best_para, out_path, stat_out, best, wig, fasta, gff, num_manual, new, args_ops, strain, manuals, length, log): '''main part of opimize TSSpredator''' features = {"pre_feature": "", "feature": ""} seeds = {"pre_seed": [], "seed": 0} tmp_step = 0 log.write("The optimization starts.\n") set_config = False run_tss = False while True: if indexs["exist"] is False: indexs["exist"] = True features["feature"] = "" elif (indexs["switch"] % 3 == 0): features["feature"] = "r" if new: start_data(current_para, list_num) new = False else: if features["feature"] != features["pre_feature"]: seeds["pre_seed "] = [] current_para = run_random_part(current_para, list_num, max_num, args_ops.steps, indexs) if current_para is None: tmp_step += 1 elif (indexs["switch"] % 3 == 1): features["feature"] = "l" if features["feature"] != features["pre_feature"]: seeds["pre_seed"] = [] current_para = run_large_change_part( seeds, features, indexs, current_para, max_num, best_para, list_num) else: features["feature"] = "s" if features["feature"] != features["pre_feature"]: seeds["pre_seed"] = [] current_para = run_small_change_part( seeds, features, indexs, current_para, best_para, list_num, max_num) if current_para is not None: diff_h = float(current_para["height"]) - float( current_para["re_height"]) diff_f = float(current_para["factor"]) - float( current_para["re_factor"]) datas, set_config, run_tss = run_tss_and_stat( indexs, list_num, seeds, diff_h, diff_f, out_path, stat_out, best_para, current_para, wig, fasta, gff, best, num_manual, args_ops, strain, manuals, length, log, set_config, run_tss) tmp_step = 0 else: indexs["step"] = indexs["step"] - 1 if tmp_step >= 2: print("The number of steps may be enough, it " "may not be able to find more parameters\n") log.write("The optimization stop because no more combination " "of parameters 
can be found.\n") sys.exit() best_para = datas[1] if datas[0]: break else: best = datas[2] indexs["length"] = len(list_num) features["pre_feature"] = features["feature"] indexs["step"] += 1 if indexs["step"] >= args_ops.steps: break def start_data(current_para, list_num): '''setup the start parameter as default one''' current_para["height"] = 0.3 current_para["re_height"] = 0.2 current_para["factor"] = 2.0 current_para["re_factor"] = 0.5 current_para["enrichment"] = 2.0 current_para["processing"] = 1.5 current_para["base_height"] = 0.000 list_num.append(copy.deepcopy(current_para)) return current_para def extend_data(out_path, best, best_para, step, strain): '''extend the data from previous run''' print("Current genome is {0}, Extending step from {1}".format(strain, step)) print("\t".join(["Best Parameters:height={0}", "height_reduction={1}", "factor={2}", "factor_reduction={3}", "base_height={4}", "enrichment_factor={5}", "processing_factor={6}"]).format( best_para["height"], best_para["re_height"], best_para["factor"], best_para["re_factor"], best_para["base_height"], best_para["enrichment"], best_para["processing"])) print("Best:TP={0}\tTP_rate={1}\tFP={2}\tFP_rate={3}" "\tFN={4}\tMissing_ratio={5}".format( best["tp"], best["tp_rate"], best["fp"], best["fp_rate"], best["fn"], best["missing_ratio"])) current_para = copy.deepcopy(best_para) return current_para def load_stat_csv(out_path, list_num, best, best_para, indexs, num_manual, stat_file): '''load the statistics from stat.csv''' f_h = open(stat_file, "r") first_line = True line_num = 0 for row in csv.reader(f_h, delimiter="\t"): if (line_num == 0) and (len(row) < 8): print("Error: {0} has something wrong, " "please check it or remove it!!!".format(stat_file)) sys.exit() line_num += 1 paras = row[1].split("_") if len(row) == 8: prev_stat = {"tp": int(row[2].split("=")[-1]), "tp_rate": float(row[3].split("=")[-1]), "fp": int(row[4].split("=")[-1]), "fp_rate": float(row[5].split("=")[-1])} list_num.append({"height": float(paras[1]), "re_height": float(paras[3]), "factor": float(paras[5]), "re_factor": float(paras[7]), "base_height": float(paras[9]), "enrichment": float(paras[11]), "processing": float(paras[13])}) if first_line: first_line = False indexs["change"] = True else: scoring_function(best, prev_stat, indexs, num_manual) if indexs["change"]: best_para = {"height": float(paras[1]), "re_height": float(paras[3]), "factor": float(paras[5]), "re_factor": float(paras[7]), "base_height": float(paras[9]), "enrichment": float(paras[11]), "processing": float(paras[13])} best["tp"] = float(row[2].split("=")[-1]) best["tp_rate"] = float(row[3].split("=")[-1]) best["fp"] = float(row[4].split("=")[-1]) best["fp_rate"] = float(row[5].split("=")[-1]) best["fn"] = float(row[6].split("=")[-1]) best["missing_ratio"] = float(row[7].split("=")[-1]) indexs["step"] = int(row[0]) + 1 f_h.close() return (line_num, best, best_para) def reload_data(out_path, list_num, best, best_para, indexs, num_manual, stat_file, log): '''if is based on previous run, it is for reload the previous results''' indexs["switch"] = 1 indexs["exist"] = True datas = load_stat_csv(out_path, list_num, best, best_para, indexs, num_manual, stat_file) line_num = datas[0] best = datas[1] best_para = datas[2] if len(list_num) > 0: indexs["extend"] = True else: print("Error: {0} has something wrong, " "please check it or remove it!!!".format(stat_file)) log.write(stat_file + " is brocken. 
Please check it or remove it.\n") sys.exit() new_line = 0 new_stat = open("tmp.csv", "w") with open(stat_file, "r") as fh: for line in fh: new_line += 1 line = line.strip() if new_line >= line_num: break else: new_stat.write(line + "\n") shutil.move("tmp.csv", stat_file) new_stat.close() return (best_para, best) def initiate(args_ops): '''setup the dict''' max_num = {"height": args_ops.height, "re_height": args_ops.height_reduction, "factor": args_ops.factor, "re_factor": args_ops.factor_reduction, "base_height": args_ops.base_height, "enrichment": args_ops.enrichment, "processing": args_ops.processing} best_para = {"height": 0, "re_height": 0, "factor": 0, "re_factor": 0, "base_height": 0, "enrichment": 0, "processing": 0} current_para = {"height": 0, "re_height": 0, "factor": 0, "re_factor": 0, "base_height": 0, "enrichment": 0, "processing": 0} indexs = {"switch": 0, "extend": False, "exist": False, "step": 0, "first": True, "num": 0, "length": 0, "change": False, "count": 0} return max_num, best_para, current_para, indexs def check_empty(stat_file): empty_file = True with open(stat_file) as fh: for line in fh: if len(line) != 0: empty_file = False break return empty_file def check_output_id(gff, output_id): g = open(gff, "r") for row in csv.reader(g, delimiter='\t'): if len(row) != 0: if (not row[0].startswith("#")): tags = row[-1].split(";") detect = False for tag in tags: if tag.startswith(output_id): detect = True if (not detect) and (row[2] == "gene"): print("Warning: --output_id does not exist in " "all genes of annotation gff files.") def optimization(wig, fasta, gff, args_ops, manual, length, strain, log): '''opimize TSSpredator''' best = {} new = True max_num, best_para, current_para, indexs = initiate(args_ops) out_path = os.path.join(args_ops.output_folder, "optimized_TSSpredator") files = os.listdir(args_ops.output_folder) stat_file = os.path.join(out_path, "stat_" + strain + ".csv") num_manual, manuals = read_predict_manual_gff(manual, length) log.write(manual + " is loaded successfully.\n") if len(os.listdir(out_path)) == 1: list_num = [] stat_out = open(stat_file, "w") else: if (("stat_" + strain + ".csv") in os.listdir(out_path)): empty_file = check_empty(stat_file) if empty_file: os.remove(stat_file) list_num = [] stat_out = open(stat_file, "w") else: list_num = [] new = False log.write("Checking the previous results.\n") datas = reload_data(out_path, list_num, best, best_para, indexs, num_manual, stat_file, log) log.write("The intermediate results of the previous " "optimization is loaded. The optimization will start " "from {0}\n".format(indexs["step"])) best_para = datas[0] best = datas[1] current_para = extend_data(out_path, best, best_para, indexs["step"], strain) stat_out = open(stat_file, "a") indexs["first"] = False else: list_num = [] stat_out = open(stat_file, "w") check_output_id(gff, args_ops.output_id) optimization_process(indexs, current_para, list_num, max_num, best_para, out_path, stat_out, best, wig, fasta, gff, num_manual, new, args_ops, strain, manuals, length, log) log.write("The optimization is done. The following files are generated:\n") for file_ in os.listdir(out_path): if not file_.startswith("Master") and not file_.startswith("config"): log.write("\t" + file_ + "\n") stat_out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/optimize_TSSpredator.py
optimize_TSSpredator.py
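The optimizer above rotates three move types based on indexs["switch"] % 3: a full random redraw ("r"), a large two-parameter jump ("l"), and a one-unit small change ("s"), keeping a candidate only when the comparison against manually curated TSSs scores it better. A minimal, self-contained sketch of that rotation follows; the bounds in MAX_NUM, the toy_score() stand-in for an actual TSSpredator run, and the collapsing of the random and large moves into a single redraw are illustrative assumptions, not the module's exact behavior.

import copy
import random

# Hypothetical upper bounds; the real ones come from the command-line
# arguments (args_ops.height, args_ops.factor, ...).
MAX_NUM = {"height": 1.0, "re_height": 0.9, "factor": 10.0, "re_factor": 9.9,
           "base_height": 0.06, "enrichment": 6.0, "processing": 6.0}

def toy_score(para):
    # Stand-in for running TSSpredator and scoring against manual TSSs.
    return -abs(para["height"] - 0.3) - abs(para["factor"] - 2.0)

def random_para():
    # Mirrors run_random_part(): redraw until the sanity constraints hold.
    while True:
        para = {k: round(random.uniform(0.1, v), 1) for k, v in MAX_NUM.items()}
        para["base_height"] = round(random.uniform(0.0, MAX_NUM["base_height"]), 3)
        if para["height"] > para["re_height"] and para["factor"] > para["re_factor"]:
            return para

def small_move(best):
    # Mirrors small_change(): nudge one parameter by one unit, clamped to bounds.
    para = copy.deepcopy(best)
    key = random.choice(sorted(para))
    step = 0.001 if key == "base_height" else 0.1
    para[key] = max(0.0, min(MAX_NUM[key],
                             round(para[key] + random.choice((-step, step)), 3)))
    return para

best = random_para()
for switch in range(30):
    feature = "rls"[switch % 3]      # the same r -> l -> s rotation as above
    cand = random_para() if feature in ("r", "l") else small_move(best)
    if toy_score(cand) > toy_score(best):
        best = cand
print(best)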
import csv from annogesiclib.gff3 import Gff3Parser def plus_num(nums, strain, feature): nums[strain][feature] += 1 nums["total"][feature] += 1 def print_percent(out, total, fract, name): if total != 0: out.write("\t\t(percentage of total {0}terminators = {1})\n".format( name, (fract / total))) else: out.write("\t\t(percentage of total {0}terminators = 0.0)\n".format( name)) def print_express(out, total, fract, name): if total != 0: out.write("\t\t(percentage of total {0}terminators which have " "gene expression = {1})\n".format(name, (fract / total))) else: out.write("\t\t(percentage of total {0}terminators which have " "gene expression = 0.0)\n".format(name)) def print_decrease(out, total, fract, name): if total != 0: out.write("\t\t(percentage of total {0}terminators " "which have dramatic coverage decreasing = {1})\n".format( name, (fract / total))) else: out.write("\t\t(percentage of total {0}terminators " "which have dramatic coverage decreasing = 0.0)\n".format( name)) def print_method(nums, method_name, method, express, detect, only, out): out.write(method_name + "\n") out.write("\tTotal {0} terminators = {1}\n".format( method_name, nums[method] + nums["frhp"])) print_percent(out, float(nums["total"]), float(nums[method] + nums["frhp"]), "") out.write("\tTotal terminators which only can be " "detected in {0} = {1}\n".format(method_name, nums[method])) print_percent(out, float(nums["total"]), float(nums[method]), "") print_percent(out, float(nums[method] + nums["frhp"]), float(nums[method]), method_name + " ") out.write("\tTotal {0} terminators which located in gene " "expression region = {1}\n".format(method_name, nums[express])) print_percent(out, float(nums["total"]), float(nums[express]), "") print_percent(out, float(nums[method] + nums["frhp"]), float(nums[express]), method_name + " ") out.write("\tTotal {0} terminators which have dramatic coverage " "decreasing = {1}\n".format(method_name, nums[detect])) print_percent(out, float(nums["total"]), float(nums[detect]), "") print_percent(out, float(nums[method] + nums["frhp"]), float(nums[detect]), method_name + " ") print_express(out, float(nums["total_ex"]), float(nums[detect]), "") print_express(out, float(nums[express]), float(nums[detect]), method_name + " ") out.write("\tTotal terminators which have dramatic coverage decreasing" "(unique in {0}) = {1}\n".format(method_name, nums[only])) print_decrease(out, float(nums["total_de"]), float(nums[only]), "") print_decrease(out, float(nums[detect]), float(nums[only]), method_name + " ") out.write("\n") def print_intersection_number(out, nums, type_): print_percent(out, float(nums["total"]), float(nums[type_]), "") print_percent(out, float(nums["fr"]), float(nums[type_]), "method_1 ") print_percent(out, float(nums["hp"]), float(nums[type_]), "method_2 ") def print_intersection_express(out, nums, type_): print_express(out, float(nums["total_ex"]), float(nums[type_]), "") print_express(out, float(nums["ex_fr"]), float(nums[type_]), "method_1 ") print_express(out, float(nums["ex_hp"]), float(nums[type_]), "method_2 ") def print_file(nums, out, strain): out.write(strain + ":\n") out.write("Combine two methods:\n") out.write("\tTotal terminators = {0}\n".format(nums["total"])) out.write("\tTotal terminators which located in gene expression " "region = {0}\n".format(nums["total_ex"])) print_percent(out, float(nums["total"]), float(nums["total_ex"]), "") out.write("\tTotal terminators which have dramatic coverage " "decreasing = {0}\n".format(nums["total_de"])) print_percent(out, 
float(nums["total"]), float(nums["total_de"]), "") print_express(out, float(nums["total_ex"]), float(nums["total_de"]), "") out.write("\n") print_method(nums, "method_1", "fr", "ex_fr", "de_fr", "only_de_fr", out) print_method(nums, "method_2", "hp", "ex_hp", "de_hp", "only_de_hp", out) out.write("intersection two methods:\n") out.write("\tTotal terminators which overlap with " "two methods = {0}\n".format(nums["frhp"])) print_intersection_number(out, nums, "frhp") out.write("\tTotal overlaped terminators which located in " "gene expression region = {0}\n".format(nums["ex_frhp"])) print_intersection_number(out, nums, "ex_frhp") print_intersection_express(out, nums, "ex_frhp") out.write("\tTotal overlaped terminators which have dramatic " "coverage decreasing = {0}\n".format(nums["de_frhp"])) print_intersection_number(out, nums, "de_frhp") print_intersection_express(out, nums, "de_frhp") print_express(out, float(nums["total_de"]), float(nums["de_frhp"]), "") print_express(out, float(nums["de_fr"]), float(nums["de_frhp"]), "method_1 ") print_express(out, float(nums["de_hp"]), float(nums["de_frhp"]), "method_2 ") def classify_terms(terms, nums, out_d, out_e, out_n, pre_strain): for term in terms: if term.seq_id != pre_strain: pre_strain = term.seq_id strain = term.seq_id nums[strain] = { "fr": 0, "hp": 0, "frhp": 0, "ex_fr": 0, "ex_hp": 0, "ex_frhp": 0, "de_fr": 0, "de_hp": 0, "de_frhp": 0, "total": 0, "total_de": 0, "total_ex": 0, "only_de_fr": 0, "only_de_hp": 0, "only_ex_fr": 0, "only_ex_hp": 0, "de_frhp": 0, "ex_frhp": 0} if term.attributes["coverage_decrease"] == "True": out_d.write(term.info + "\n") if term.attributes["express"] == "True": out_e.write(term.info + "\n") if term.attributes["express"] != "True": out_n.write(term.info + "\n") if term.attributes["method"] == "gene_converged": plus_num(nums, strain, "total") plus_num(nums, strain, "fr") if term.attributes["coverage_decrease"] == "True": plus_num(nums, strain, "de_fr") plus_num(nums, strain, "only_de_fr") plus_num(nums, strain, "total_de") if term.attributes["express"] == "True": plus_num(nums, strain, "ex_fr") plus_num(nums, strain, "only_ex_fr") plus_num(nums, strain, "total_ex") elif term.attributes["method"] == "TransTermHP": plus_num(nums, strain, "total") plus_num(nums, strain, "hp") if term.attributes["coverage_decrease"] == "True": plus_num(nums, strain, "de_hp") plus_num(nums, strain, "only_de_hp") plus_num(nums, strain, "total_de") if term.attributes["express"] == "True": plus_num(nums, strain, "ex_hp") plus_num(nums, strain, "only_ex_hp") plus_num(nums, strain, "total_ex") elif term.attributes["method"] == "gene_converged,TransTermHP": plus_num(nums, strain, "total") plus_num(nums, strain, "frhp") if term.attributes["coverage_decrease"] == "True": plus_num(nums, strain, "de_frhp") plus_num(nums, strain, "de_fr") plus_num(nums, strain, "de_hp") plus_num(nums, strain, "total_de") if term.attributes["express"] == "True": plus_num(nums, strain, "ex_frhp") plus_num(nums, strain, "ex_fr") plus_num(nums, strain, "ex_hp") plus_num(nums, strain, "total_ex") def check_repeat(checks, strain, start, end, strand): detect = False try: term = {"strain": strain, "start": int(start), "strand": strand, "end": int(end)} if len(checks) == 0: checks.append(term) detect = True else: if term not in checks: detect = True checks.append(term) return detect except ValueError: return detect def stat_term(term_gff, term_table, stat, output_decrease, output_expression, output_non): terms = [] nums = {} nums["total"] = { "fr": 0, "hp": 0, "frhp": 
0, "ex_fr": 0, "ex_hp": 0, "ex_frhp": 0, "de_fr": 0, "de_hp": 0, "de_frhp": 0, "total": 0, "total_de": 0, "total_ex": 0, "only_de_fr": 0, "only_de_hp": 0, "only_ex_fr": 0, "only_ex_hp": 0, "de_frhp": 0, "ex_frhp": 0} pre_strain = "" out_te = open(output_expression + ".csv", "w") out_td = open(output_decrease + ".csv", "w") out_tn = open(output_non + ".csv", "w") fh = open(term_table, "r") out_tn.write("\t".join(["Genome", "Name", "Start", "End", "Strand", "Method", "Associated_gene", "Associated_transcript", "Coverage_decrease", "Coverage_detail"]) + "\n") gh = open(term_gff) checks = [] for entry in Gff3Parser().entries(gh): detect = check_repeat(checks, entry.seq_id, entry.start, entry.end, entry.strand) if detect: terms.append(entry) checks = [] for row in csv.reader(fh, delimiter="\t"): detect = check_repeat(checks, row[0], row[2], row[3], row[4]) if detect: if (row[-1] != "NA") and (row[-1] != "No_coverage_decreasing"): out_td.write("\t".join(row) + "\n") out_te.write("\t".join(row) + "\n") if (row[-1] == "No_coverage_decreasing"): out_te.write("\t".join(row) + "\n") if (row[-1] == "NA"): out_tn.write("\t".join(row) + "\n") out = open(stat, "w") out_e = open(output_expression + ".gff", "w") out_d = open(output_decrease + ".gff", "w") out_n = open(output_non + ".gff", "w") out_e.write("##gff-version 3\n") out_d.write("##gff-version 3\n") out_n.write("##gff-version 3\n") classify_terms(terms, nums, out_d, out_e, out_n, pre_strain) out.write("method_1 is searching the gene converged region.\n") out.write("method_2 is TransTermHP.\n") if len(nums) > 2: print_file(nums["total"], out, "All genome") else: for strain, datas in nums.items(): if strain != "total": print_file(datas, out, strain) out_te.close() out_td.close() out_tn.close() out.close() out_e.close() out_d.close() out_n.close() fh.close() gh.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/stat_term.py
stat_term.py
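classify_terms() above buckets each terminator by its "method" attribute: gene converged regions (method_1, counted as "fr"), TransTermHP (method_2, "hp"), or the intersection of both ("frhp"), while also tallying the expression and coverage-decrease flags. A minimal sketch of that tallying, flattening the per-genome nesting and using hypothetical dict records in place of Gff3Parser entries:

from collections import Counter

# Hypothetical terminator records; real ones come from the merged GFF file.
records = [
    {"method": "gene_converged", "express": "True", "coverage_decrease": "False"},
    {"method": "TransTermHP", "express": "False", "coverage_decrease": "False"},
    {"method": "gene_converged,TransTermHP", "express": "True",
     "coverage_decrease": "True"},
]

nums = Counter()
for term in records:
    nums["total"] += 1
    # Same three buckets as classify_terms().
    key = {"gene_converged": "fr", "TransTermHP": "hp",
           "gene_converged,TransTermHP": "frhp"}[term["method"]]
    nums[key] += 1
    if term["express"] == "True":
        nums["total_ex"] += 1
    if term["coverage_decrease"] == "True":
        nums["total_de"] += 1

print(dict(nums))  # e.g. {'total': 3, 'fr': 1, 'hp': 1, 'frhp': 1, 'total_ex': 2, 'total_de': 1}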
import math import copy from annogesiclib.helper import Helper def remove_primary(tss, tss_entry): final_types = [] final_utrs = [] final_genes = [] tss_dict = tss_entry[1] types = tss_dict["type"].split("&") utrs = tss_dict["UTR_length"].split("&") genes = tss_dict["associated_gene"].split("&") index = 0 for type_ in types: if type_ != "Primary": final_types.append(type_) final_utrs.append(utrs[index]) final_genes.append(genes[index]) index += 1 strand = Helper().get_strand_name(tss.strand) tss_dict = {"Name": "_".join(["TSS:" + str(tss.start), strand]), "type": "&".join(final_types), "UTR_length": "&".join(final_utrs), "associated_gene": "&".join(final_genes)} tss_string = ";".join(["=".join(["UTR_length", tss_dict["UTR_length"]]), "=".join(["associated_gene", tss_dict["associated_gene"]]), "=".join(["type", tss_dict["type"]]), "=".join(["Name", tss_dict["Name"]])]) return (tss_string, tss_dict) def import_to_tss(tss_type, cds_pos, tss, locus_tag, tss_entry): if cds_pos == "NA": utr = "_".join([tss_type, "NA"]) else: utr = "_".join([tss_type, str(int(math.fabs(cds_pos - tss.start)))]) if len(tss_entry) != 0: tss_dict = tss_entry[1] tss_dict_types = tss_dict["type"].split("&") tss_dict_utrs = tss_dict["UTR_length"].split("&") tss_dict_tags = tss_dict["associated_gene"].split("&") if tss_type == "Primary" and ("Primary" in tss_dict["type"]): index = 0 for tss_dict_type in tss_dict_types: if "Primary" in tss_dict_type: utr_length = tss_dict_utrs[index].split("_") if math.fabs(cds_pos - tss.start) < int(utr_length[1]): tss_dict_utrs[index] = utr tss_dict_tags[index] = locus_tag index += 1 else: tss_dict_types.append(tss_type) tss_dict_utrs.append(utr) tss_dict_tags.append(locus_tag) strand = Helper().get_strand_name(tss.strand) tss_dict = {"Name": "_".join(["TSS:" + str(tss.start), strand]), "type": "&".join(tss_dict_types), "UTR_length": "&".join(tss_dict_utrs), "associated_gene": "&".join(tss_dict_tags)} else: strand = Helper().get_strand_name(tss.strand) tss_dict = {"Name": "_".join(["TSS:" + str(tss.start), strand]), "type": tss_type, "UTR_length": utr, "associated_gene": locus_tag} tss_string = ";".join(["=".join(["UTR_length", tss_dict["UTR_length"]]), "=".join(["associated_gene", tss_dict["associated_gene"]]), "=".join(["type", tss_dict["type"]]), "=".join(["Name", tss_dict["Name"]])]) return (tss_string, tss_dict) def is_primary(cds_start, cds_end, tss_pos, strand): if strand == "+": if (is_utr(cds_start, tss_pos, 300) and (cds_start >= tss_pos)): return True else: if (is_utr(tss_pos, cds_end, 300) and (cds_end <= tss_pos)): return True def is_internal(cds_start, cds_end, tss_pos, strand): if ((cds_start < tss_pos) and (cds_end > tss_pos)) or ( (strand == "+") and (tss_pos == cds_end)) or ( (strand == "-") and (tss_pos == cds_start)): return True def is_antisense(cds_start, cds_end, tss_pos, strand): if ((is_utr(cds_start, tss_pos, 100)) and (cds_start >= tss_pos)) or ( (is_utr(tss_pos, cds_end, 100)) and (cds_end <= tss_pos)) or ( is_internal(cds_start, cds_end, tss_pos, strand)): return True def is_utr(pos1, pos2, length): if (pos1 - pos2 <= length): return True def same_strand_tss_gene(gene, tss, anti_ends, gene_ends, checks, tss_entry): '''deal with the the gene and TSS which located at the same strands''' if is_primary(gene.start, gene.end, tss.start, tss.strand): ori_entry = copy.deepcopy(tss_entry) if "locus_tag" in gene.attributes.keys(): locus_tag = gene.attributes["locus_tag"] else: locus_tag = "".join([gene.feature, ":", str(gene.start), "-", str(gene.end), "_", 
gene.strand]) if tss.strand == "+": if ((anti_ends["reverse"] != -1) and ( anti_ends["reverse"] - gene.start) > 0) or ( anti_ends["reverse"] == -1): tss_entry = import_to_tss("Primary", gene.start, tss, locus_tag, ori_entry) checks["orphan"] = False gene_ends["forward"] = gene.start elif (anti_ends["reverse"] != -1) and ( (anti_ends["reverse"] - gene.start) < 0): if (checks["int_anti"]) or ( (tss.start - anti_ends["reverse"]) > 0): tss_entry = import_to_tss("Primary", gene.start, tss, locus_tag, ori_entry) checks["orphan"] = False gene_ends["forward"] = gene.start else: if ((anti_ends["forward"] != -1) and ( gene.end - anti_ends["forward"]) > 0) or ( anti_ends["forward"] == -1): tss_entry = import_to_tss("Primary", gene.end, tss, locus_tag, ori_entry) checks["orphan"] = False gene_ends["reverse"] = gene.end if is_internal(gene.start, gene.end, tss.start, tss.strand): ori_entry = copy.deepcopy(tss_entry) if "locus_tag" in gene.attributes.keys(): locus_tag = gene.attributes["locus_tag"] else: locus_tag = "".join([gene.feature, ":", str(gene.start), "-", str(gene.end), "_", gene.strand]) tss_entry = import_to_tss("Internal", "NA", tss, locus_tag, ori_entry) checks["orphan"] = False return tss_entry def diff_strand_tss_gene(gene, tss, anti_ends, gene_ends, checks, tss_entry): '''deal with the the gene and TSS which located at different strands''' if is_antisense(gene.start, gene.end, tss.start, tss.strand): checks["int_anti"] = False if tss.strand == "-": anti_ends["forward"] = gene.start if (gene_ends["reverse"] != -1) and ( gene.start - gene_ends["reverse"]) > 0: if is_internal(gene.start, gene.end, tss.start, tss.strand): pass else: anti_ends["reverse"] = gene.end if is_internal(gene.start, gene.end, tss.start, tss.strand): checks["int_anti"] = True if "locus_tag" in gene.attributes.keys(): locus_tag = gene.attributes["locus_tag"] else: locus_tag = "".join([gene.feature, ":", str(gene.start), "-", str(gene.end), "_", gene.strand]) ori_entry = copy.deepcopy(tss_entry) tss_entry = import_to_tss("Antisense", "NA", tss, locus_tag, ori_entry) checks["orphan"] = False return tss_entry def compare_tss_cds(tss, cdss, genes): '''compare TSS and CDS to classify the TSSs''' tss_entry = [] gene_ends = {"forward": -1, "reverse": -1} anti_ends = {"forward": -1, "reverse": -1} checks = {"orphan": True, "int_anti": None} if (len(genes) == 0): datas = copy.deepcopy(cdss) else: datas = copy.deepcopy(genes) for data in datas: ori_entry = copy.deepcopy(tss_entry) if data.strand == tss.strand: tss_entry = same_strand_tss_gene(data, tss, anti_ends, gene_ends, checks, ori_entry) else: tss_entry = diff_strand_tss_gene(data, tss, anti_ends, gene_ends, checks, ori_entry) if checks["orphan"]: ori_entry = copy.deepcopy(tss_entry) tss_entry = import_to_tss("Orphan", "NA", tss, "NA", ori_entry) return tss_entry def fix_attributes(tss, tss_entry): '''change the primary TSS to secondary TSS''' index = 0 genes = tss.attributes["associated_gene"].split("&") utrs = tss.attributes["UTR_length"].split("&") types = tss.attributes["type"].split("&") for gene in genes: if gene == tss_entry["locus"]: utrs[index] = utrs[index].replace("Primary", "Secondary") types[index] = types[index].replace("Primary", "Secondary") index += 1 tss.attributes["UTR_length"] = "&".join(utrs) tss.attributes["type"] = "&".join(types) def detect_coverage(wigs, tss, ref): tss_cover = -1 ref_cover = -1 for strain, conds in wigs.items(): if strain == tss.seq_id: tss_cover = 0 ref_cover = 0 for cond, tracks in conds.items(): for lib_name, covers in 
tracks.items(): if ((tss.start + 1) <= len(covers)) and ( (ref.start + 1) <= len(covers)): if tss.strand == "+": diff_t = (covers[tss.start - 1] - covers[tss.start - 2]) diff_r = (covers[ref.start - 1] - covers[ref.start - 2]) else: diff_t = (covers[tss.start - 1] - covers[tss.start]) diff_r = (covers[ref.start - 1] - covers[ref.start]) tss_cover = tss_cover + diff_t ref_cover = ref_cover + diff_r return (tss_cover, ref_cover) def del_repeat(tsss): '''delete the repeat TSSs''' for tss in tsss: types = tss.attributes["type"].split("&") utrs = tss.attributes["UTR_length"].split("&") genes = tss.attributes["associated_gene"].split("&") detect_pri = False detect_sec = False index = 0 final_types = [] final_utrs = [] final_genes = [] for type_ in types: if (type_ == "Primary") and (not detect_pri): detect_pri = True pri_utr = int(utrs[index].split("_")[1]) real_index = index elif (type_ == "Primary") and (detect_pri): compare_utr = int(utrs[index].split("_")[1]) if compare_utr < pri_utr: pri_utr = compare_utr real_index = index elif (type_ == "Secondary") and (not detect_sec): detect_sec = True sec_utr = int(utrs[index].split("_")[1]) real_index2 = index elif (type_ == "Secondary") and (detect_sec): compare_utr = int(utrs[index].split("_")[1]) if compare_utr < sec_utr: sec_utr = compare_utr real_index2 = index elif (type_ == "Antisense") or \ (type_ == "Internal") or \ (type_ == "Orphan"): final_types.append(types[index]) final_utrs.append(utrs[index]) final_genes.append(genes[index]) index += 1 if detect_pri: final_types.append(types[real_index]) final_utrs.append(utrs[real_index]) final_genes.append(genes[real_index]) else: if detect_sec: final_types.append(types[real_index2]) final_utrs.append(utrs[real_index2]) final_genes.append(genes[real_index2]) tss.attributes["type"] = "&".join(final_types) tss.attributes["UTR_length"] = "&".join(final_utrs) tss.attributes["associated_gene"] = "&".join(final_genes) def get_primary_locus_tag(tss): tsss = [] tss_types = tss.attributes["type"].split("&") tss_locus_tags = tss.attributes["associated_gene"].split("&") tss_utr_lengths = tss.attributes["UTR_length"].split("&") index = 0 for tss_type in tss_types: if "Primary" in tss_type: tsss.append({"locus": tss_locus_tags[index], "utr": int(tss_utr_lengths[index].split("_")[1]), "type": tss_type, "pos": tss.start}) index += 1 return tsss def fix_primary_type(tsss, wigs_f, wigs_r): '''If one gene is associated with multiple primary TSSs, it will assing the low expressed one to be secondary TSS''' for tss in tsss: if ("Primary" in tss.attributes["type"]): tss_entrys = get_primary_locus_tag(tss) for ref in tsss: if (ref.seq_id == tss.seq_id) and ( ref.strand == tss.strand) and ( ref.start == tss.start): pass else: if ("Primary" in ref.attributes["type"]): ref_entrys = get_primary_locus_tag(ref) for tss_entry in tss_entrys: for ref_entry in ref_entrys: if (tss_entry["locus"] == ref_entry["locus"]) and ( tss_entry["type"] == "Primary") and ( ref_entry["type"] == "Primary") and ( tss.seq_id == ref.seq_id): if wigs_f is not None: if tss.strand == "+": covers = detect_coverage( wigs_f, tss, ref) else: covers = detect_coverage( wigs_r, tss, ref) tss_cover = covers[0] ref_cover = covers[1] if tss_cover < ref_cover: fix_attributes(tss, tss_entry) elif tss_cover > ref_cover: fix_attributes(ref, ref_entry) elif tss_cover == ref_cover: if (tss_entry["utr"] < ref_entry["utr"]): fix_attributes(ref, ref_entry) elif (tss_entry["utr"] > ref_entry["utr"]): fix_attributes(tss, tss_entry) else: if (tss_entry["utr"] < 
ref_entry["utr"]): fix_attributes(ref, ref_entry) elif (tss_entry["utr"] > ref_entry["utr"]): fix_attributes(tss, tss_entry) del_repeat(tsss) return tsss
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gen_TSS_type.py
gen_TSS_type.py
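The predicates above encode fixed windows: a Primary TSS sits at most 300 nt upstream of a gene start on the same strand, an Internal TSS falls inside the gene body, and an Antisense TSS lies inside or within 100 nt of a gene on the opposite strand. A condensed sketch for a forward-strand TSS only; classify_forward() is an illustrative simplification that returns the first matching class rather than accumulating types as the module does.

def classify_forward(gene_start, gene_end, gene_strand, tss_pos):
    # TSS assumed on the "+" strand; same strand -> Primary/Internal,
    # opposite strand -> Antisense, otherwise Orphan.
    if gene_strand == "+":
        if 0 <= gene_start - tss_pos <= 300:
            return "Primary"       # at most 300 nt upstream of the gene start
        if gene_start < tss_pos <= gene_end:
            return "Internal"      # inside the gene body
    else:
        if (gene_start <= tss_pos <= gene_end or
                0 <= gene_start - tss_pos <= 100 or
                0 <= tss_pos - gene_end <= 100):
            return "Antisense"     # inside, or within 100 nt of either end
    return "Orphan"

assert classify_forward(500, 900, "+", 350) == "Primary"
assert classify_forward(500, 900, "+", 700) == "Internal"
assert classify_forward(500, 900, "-", 450) == "Antisense"
assert classify_forward(500, 900, "+", 100) == "Orphan"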
import os
import csv
import sys
import shutil
from glob import glob
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def del_attributes(entry, features):
    attributes = {}
    for key, value in entry.attributes.items():
        if (key not in features):
            attributes[key] = value
    entry.attributes = attributes


def read_gffs(gff_files, feature):
    gffs = {}
    if feature == "transcript":
        gffs["transcript"] = []
        gff_f = open(gff_files, "r")
        for entry in Gff3Parser().entries(gff_f):
            gffs["transcript"].append(entry)
        gff_f.close()
        gffs["transcript"] = sorted(
            gffs["transcript"],
            key=lambda x: (x.seq_id, x.start, x.end, x.strand))
    else:
        num = 0
        for files in gff_files:
            for gff_file in glob(files):
                gffs[num] = []
                gff_f = open(gff_file, "r")
                for entry in Gff3Parser().entries(gff_f):
                    parent = None
                    if (entry.feature != "gene") and (
                            entry.feature != "transcript") and (
                            entry.feature != "source") and (
                            entry.feature != "region") and (
                            entry.feature != "remark"):
                        if "Parent" in entry.attributes.keys():
                            parent = entry.attributes["Parent"]
                        del_attributes(entry, ["associated_tran",
                                               "parent_tran", "Parent"])
                        if parent is not None:
                            entry.attributes["Parent"] = parent
                    entry.attributes["print"] = False
                    gffs[num].append(entry)
                gff_f.close()
                gffs[num] = sorted(
                    gffs[num],
                    key=lambda x: (x.seq_id, x.start, x.end, x.strand))
                num += 1
    return gffs


def assign_parent(other, tran):
    '''assign the parent transcript to all features'''
    if "Parent" not in other.attributes.keys():
        other.attributes["Parent"] = tran.attributes["ID"]
    else:
        if tran.attributes["ID"] not in other.attributes["Parent"].split(","):
            other.attributes["Parent"] = ",".join([
                other.attributes["Parent"], tran.attributes["ID"]])


def compare_tran(tran_gffs, other_gffs, fuzzy_tss, fuzzy_term):
    for tran in tran_gffs["transcript"]:
        for num, others in other_gffs.items():
            for other in others:
                if (tran.seq_id == other.seq_id) and (
                        tran.strand == other.strand) and (
                        other.feature != "source") and (
                        other.feature != "region") and (
                        other.feature != "remark"):
                    if other.feature == "TSS":
                        if (other.start >= tran.start - fuzzy_tss) and (
                                other.end <= tran.end + fuzzy_tss):
                            assign_parent(other, tran)
                    elif other.feature.lower() == "terminator":
                        if tran.strand == "+":
                            if ((tran.end >= other.start) and (
                                    tran.end <= other.end)) or (
                                    (tran.end <= other.start) and (
                                    (other.start - tran.end) <= fuzzy_term)) or (
                                    (tran.end >= other.end) and (
                                    (tran.end - other.end) <= fuzzy_term)) or (
                                    (tran.start <= other.start) and (
                                    tran.end >= other.end)):
                                assign_parent(other, tran)
                        else:
                            if ((tran.start >= other.start) and (
                                    tran.start <= other.end)) or (
                                    (tran.start <= other.start) and (
                                    (other.start - tran.start) <= fuzzy_term)) or (
                                    (tran.start >= other.end) and (
                                    (tran.start - other.end) <= fuzzy_term)) or (
                                    (tran.start <= other.start) and (
                                    tran.end >= other.end)):
                                assign_parent(other, tran)
                    else:
                        if ((tran.start <= other.start) and (
                                tran.end >= other.end)) or (
                                (tran.start >= other.start) and (
                                tran.end <= other.end)) or (
                                (tran.start <= other.start) and (
                                tran.end >= other.start) and (
                                tran.end <= other.end)) or (
                                (tran.start >= other.start) and (
                                tran.start <= other.end) and (
                                tran.end >= other.end)):
                            assign_parent(other, tran)


def combine_gffs(tran_gffs, other_gffs):
    gffs = []
    o_gffs = []
    s_gffs = []
    if tran_gffs is not None:
        for tran in tran_gffs["transcript"]:
            gffs.append(tran)
            for num, others in other_gffs.items():
                for other in others:
                    if not other.attributes["print"]:
                        if tran.seq_id == other.seq_id:
                            if "Parent" in other.attributes.keys():
                                attributes = {}
                                for key, value in other.attributes.items():
                                    if key != "print":
                                        attributes[key] = value
                                if (tran.attributes["ID"] in
                                        other.attributes["Parent"].split(",")):
                                    other.attribute_string = ";".join(
                                        ["=".join(items) for items in
                                         attributes.items()])
                                    other.attributes["print"] = True
                                    gffs.append(other)
    for num, others in other_gffs.items():
        for other in others:
            if (other.feature == "source") or (
                    other.feature == "region") or (
                    other.feature == "remark"):
                s_gffs.append(other)
            if not other.attributes["print"]:
                attributes = {}
                for key, value in other.attributes.items():
                    if key != "print":
                        attributes[key] = value
                other.attribute_string = ";".join(
                    ["=".join(items) for items in attributes.items()])
                o_gffs.append(other)
    return gffs, o_gffs, s_gffs


def print_gff(gffs, o_gffs, s_gffs, output):
    sort_others = sorted(o_gffs, key=lambda x: (x.seq_id, x.start,
                                                x.end, x.strand))
    out = open(output, "w")
    out.write("##gff-version 3\n")
    if len(gffs) != 0:
        pre_strain = None
        for gff in gffs:
            # When the genome changes, flush the remaining unprinted
            # features of the previous genome first.
            if (pre_strain is not None) and (pre_strain != gff.seq_id):
                for other in sort_others:
                    if other.seq_id == pre_strain:
                        if (not other.attributes["print"]):
                            out.write("\t".join([
                                other.info_without_attributes,
                                other.attribute_string]) + "\n")
                            other.attributes["print"] = True
            for source in s_gffs:
                if (source.seq_id == gff.seq_id) and (
                        not source.attributes["print"]):
                    out.write("\t".join([source.info_without_attributes,
                                         source.attribute_string]) + "\n")
                    source.attributes["print"] = True
            out.write("\t".join([gff.info_without_attributes,
                                 gff.attribute_string]) + "\n")
            pre_strain = gff.seq_id
        # Flush the unprinted features of the last genome.
        for other in sort_others:
            if other.seq_id == gff.seq_id:
                if (not other.attributes["print"]):
                    out.write("\t".join([other.info_without_attributes,
                                         other.attribute_string]) + "\n")
                    other.attributes["print"] = True
    else:
        for other in sort_others:
            out.write("\t".join([other.info_without_attributes,
                                 other.attribute_string]) + "\n")
    out.close()


def run_merge(out_folder, tran, others, fuzzy_term, fuzzy_tss, strain, log):
    '''merge all features to be one gff file'''
    output = "_".join([strain, "merge_features.gff"])
    if tran is None and others is None:
        log.write("No input files are found.\n")
        print("Error: There is no input file...")
        sys.exit()
    elif (tran is not None) and (others is None):
        shutil.copy(tran, os.path.join(out_folder, output))
    elif others is not None:
        tran_gffs = None
        if (tran is not None):
            tran_gffs = read_gffs(tran, "transcript")
            other_gffs = read_gffs(others, "others")
            log.write("Comparing transcripts and other features to get "
                      "parental transcripts.\n")
            compare_tran(tran_gffs, other_gffs, fuzzy_tss, fuzzy_term)
        else:
            other_gffs = read_gffs(others, "others")
        log.write("Combining all the gff files and merging the features.\n")
        gffs, o_gffs, s_gffs = combine_gffs(tran_gffs, other_gffs)
        print_gff(gffs, o_gffs, s_gffs, output)
        log.write("\t" + output + " is generated.\n")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/merge_feature.py
merge_feature.py
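compare_tran() above assigns each feature the ID of any transcript whose span contains it, widening the span by fuzzy_tss for TSS features (and, for terminators, tolerating fuzzy_term around the transcript end). A minimal sketch of that parent-assignment idea, using plain dicts instead of Gff3Parser entries and a list-valued Parent instead of a comma-joined string; assign_parents() and fuzzy_tss=5 are illustrative assumptions.

def assign_parents(transcripts, features, fuzzy_tss=5):
    # A feature inherits the transcript's ID as Parent when it falls
    # inside the (possibly fuzz-widened) transcript span.
    for feat in features:
        for tran in transcripts:
            if (feat["seq_id"] != tran["seq_id"] or
                    feat["strand"] != tran["strand"]):
                continue
            low, high = tran["start"], tran["end"]
            if feat["feature"] == "TSS":
                low, high = low - fuzzy_tss, high + fuzzy_tss
            if low <= feat["start"] and feat["end"] <= high:
                parents = feat.setdefault("Parent", [])
                if tran["ID"] not in parents:
                    parents.append(tran["ID"])

trans = [{"seq_id": "chr", "strand": "+", "start": 100, "end": 400,
          "ID": "tran0"}]
feats = [{"seq_id": "chr", "strand": "+", "feature": "TSS",
          "start": 97, "end": 97}]
assign_parents(trans, feats)
assert feats[0]["Parent"] == ["tran0"]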
from annogesiclib.gff3 import Gff3Parser


def gen_promoter_table(input_file, output_file, tss_file, type_):
    '''generate the table of promoter based on MEME'''
    tsss = []
    gff_f = open(tss_file, "r")
    for entry in Gff3Parser().entries(gff_f):
        tsss.append(entry)
    out = open(output_file, "w")
    out.write("\t".join(["Genome", "TSS_position",
                         "TSS_strand", "Motif"]) + "\n")
    detect = False
    num = 1
    with open(input_file) as fh:
        for line in fh:
            line = line.strip()
            if type_ == "meme":
                if line.startswith("MOTIF"):
                    motif = line.split("MEME")[0].strip()
                    datas = motif.split(" ")
                    motif = datas[0] + "_" + datas[-1]
                    detect = False
                elif (line.startswith("Sequence name")) and (
                        line.endswith("Site")):
                    detect = True
                elif (len(line) == 0):
                    detect = False
                elif (detect) and (not line.startswith("---")):
                    tag = line.split(" ")[0]
                    datas = tag.split("_")
                    for tss in tsss:
                        if ("_".join(datas[2:]) in tss.seq_id) and (
                                datas[0] == str(tss.start)) and (
                                datas[1] == tss.strand):
                            out.write("\t".join([tss.seq_id, datas[0],
                                                 datas[1], motif]) + "\n")
            elif type_ == "glam2":
                if line.startswith("*"):
                    detect = True
                    motif = "MOTIF_" + str(num)
                    num += 1
                elif len(line) == 0:
                    detect = False
                elif detect:
                    datas = line.split(" ")[0].split("_")
                    for tss in tsss:
                        if ("_".join(datas[2:]) in tss.seq_id) and (
                                datas[0] == str(tss.start)) and (
                                datas[1] == tss.strand):
                            out.write("\t".join([tss.seq_id, datas[0],
                                                 datas[1], motif]) + "\n")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gen_promoter_table.py
gen_promoter_table.py
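The site lines parsed above carry sequence names of the form "<TSS start>_<strand>_<genome id>"; because genome IDs may themselves contain underscores, the code rejoins everything after the second field before matching. A small sketch of that matching, with a hypothetical tag and motif name and a plain dict in place of a Gff3Parser entry:

# Hypothetical MEME sequence tag: start 1024, "+" strand, genome NC_000915.1.
tag = "1024_+_NC_000915.1"
datas = tag.split("_")           # ['1024', '+', 'NC', '000915.1']
start, strand = datas[0], datas[1]
genome = "_".join(datas[2:])     # restores "NC_000915.1"

tss = {"seq_id": "NC_000915.1", "start": 1024, "strand": "+"}
if (genome in tss["seq_id"] and start == str(tss["start"])
        and strand == tss["strand"]):
    # One table row: Genome, TSS_position, TSS_strand, Motif.
    print("\t".join([tss["seq_id"], start, strand, "MOTIF_1_20"]))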
import os, gc import math import numpy as np from annogesiclib.gff3 import Gff3Parser from annogesiclib.lib_reader import read_wig, read_libs from annogesiclib.coverage_detection import coverage_comparison, get_repmatch from annogesiclib.coverage_detection import replicate_comparison from annogesiclib.args_container import ArgsContainer def import_data(strand, strain, pos, utr, type_, name, srna_cover, pro): if type_ == "TSS": data = {"strand": strand, "strain": strain, "start": pos["start"], "end": pos["end"], "utr": utr, "start_tss": name, "end_cleavage": pro, "start_cleavage": "NA", "datas": srna_cover} elif type_ == "cleavage": data = {"strand": strand, "strain": strain, "start": pos["start"], "end": pos["end"], "utr": utr, "start_tss": "NA", "start_cleavage": name, "datas": srna_cover, "end_cleavage": pro} else: data = {"strand": strand, "strain": strain, "start": pos["start"], "end": pos["end"], "utr": utr, "start_tss": "NA", "start_cleavage": "NA", "datas": srna_cover, "end_cleavage": pro} del(srna_cover) return data def import_inter(strand, strain, start, end, length): return {"strand": strand, "strain": strain, "start": start, "end": end, "len_CDS": length} def get_terminal(cdss, inters, seq, type_): first_p = True first_m = True pre_strain = "" if type_ == "start": for cds in cdss: if cds.seq_id != pre_strain: pre_strain = cds.seq_id first_p = True first_m = True if (cds.strand == "+") and (first_p): first_p = False inters.append(import_inter(cds.strand, cds.seq_id, 1, cds.start, 0)) elif (cds.strand == "-") and (first_m): first_m = False inters.append(import_inter(cds.strand, cds.seq_id, 1, cds.start, 0)) elif type_ == "end": for cds in reversed(cdss): if cds.seq_id != pre_strain: pre_strain = cds.seq_id first_p = True first_m = True if (cds.strand == "+") and (first_p): first_p = False inters.append(import_inter( cds.strand, cds.seq_id, cds.end, len(seq[cds.seq_id]), cds.end - cds.start)) elif (cds.strand == "-") and (first_m): first_m = False inters.append(import_inter( cds.strand, cds.seq_id, cds.start, len(seq[cds.seq_id]), cds.end - cds.start)) def check_pos(cover, check_point, checks, cover_pos): if (cover_pos >= min(check_point["utr_start"], check_point["utr_end"])) and ( cover_pos <= max(check_point["utr_start"], check_point["utr_end"])): checks["utr"] = True if (cover_pos >= min(check_point["srna_start"], check_point["srna_end"])) and ( cover_pos <= max(check_point["srna_start"], check_point["srna_end"])): checks["srna"] = True def check_start_and_end(start, end, covers, strand, fuzzy): if strand == "-": if (start - 2 - fuzzy) < 0: c_start = 0 else: c_start = start - 2 - fuzzy if (end + 1) > len(covers): c_end = len(covers) else: c_end = end + 1 else: if (start - 2) < 0: c_start = 0 else: c_start = start - 2 if (end + 1 + fuzzy) > len(covers): c_end = len(covers) else: c_end = end + 1 + fuzzy return c_start, c_end def set_cover_and_point(cover_results, inter, covers, pos, fuzzy_end): check_point = {"srna_start": 0, "srna_end": 0, "utr_start": 0, "utr_end": 0} if pos["ori_start"] - 2 < 0: ori_start = 2 else: ori_start = pos["ori_start"] if inter["strand"] == "-": c_start, c_end = check_start_and_end(ori_start, pos["ori_end"], covers, "-", fuzzy_end) covers = covers[c_start: c_end] covers = covers[::-1] check_point["srna_start"] = pos["end"] + 1 check_point["srna_end"] = pos["start"] - 2 - fuzzy_end check_point["utr_start"] = pos["ori_end"] check_point["utr_end"] = ori_start start = c_start end = c_end elif inter["strand"] == "+": c_start, c_end = 
check_start_and_end(ori_start, pos["ori_end"], covers, "+", fuzzy_end) covers = covers[c_start: c_end] check_point["srna_start"] = pos["start"] - 2 check_point["srna_end"] = pos["end"] + 1 + fuzzy_end check_point["utr_start"] = ori_start check_point["utr_end"] = pos["ori_end"] start = c_start end = c_end cover_results["check_point"] = check_point cover_results["covers"] = covers return start, end def detect_cover_utr_srna(cover_results, pos, inter, cond, track, args_srna, lib_type, start, end, strand): datas = {"num": 0, "cover_tmp": {"5utr": 0, "total": 0, "ori_total": 0}, "checks": {"first": True, "detect_decrease": False, "srna": False, "utr": False}, "final_poss": {"start": pos["start"], "end": pos["end"]}} index_pos = 0 for cover in cover_results["covers"]: if strand == "+": cover_pos = start + index_pos else: cover_pos = end - index_pos datas["checks"]["srna"] = False datas["checks"]["utr"] = False check_pos(cover, cover_results["check_point"], datas["checks"], cover_pos) if datas["checks"]["utr"]: datas["cover_tmp"]["ori_total"] = \ datas["cover_tmp"]["ori_total"] + cover if datas["checks"]["srna"]: datas["cover_tmp"]["total"] = \ datas["cover_tmp"]["total"] + cover datas["checks"]["first"] = coverage_comparison( cover, cover_results["cover_sets"], cover_results["pos"], datas["checks"]["first"], inter["strand"], cover_pos) if (datas["checks"]["first"] is not True) and ( cover_results["cover_sets"]["high"] > 0): if (cover_results["type"] == "5utr") or ( cover_results["type"] == "3utr") or ( (cover_results["type"] == "interCDS") and ( cover_results["intercds"] == "TSS")): if ((cover_results["cover_sets"]["low"] / cover_results["cover_sets"]["high"]) < args_srna.decrease_utr) and ( cover_results["cover_sets"]["low"] > -1): datas["checks"]["detect_decrease"] = True datas["cover_tmp"]["5utr"] = cover if datas["checks"]["detect_decrease"]: go_out = get_cover_5utr(datas, cover_results["cover_sets"], cover, inter, args_srna, cover_pos) if go_out is True: break index_pos += 1 if (datas["checks"]["first"] is not True) and ( cover_results["cover_sets"]["high"] > 0): check_import_srna_covers(datas, cover_results, inter, cond, track, cover, pos, args_srna, lib_type) def get_coverage(wigs, inter, pos, type_, intercds_type, args_srna): cover_results = {"srna_covers": {}, "utr_covers": {}, "cover_sets": {"high": 0, "low": 0, "best": -1}, "pos": {"low": 0, "high": 0}, "type": type_, "intercds": intercds_type} for wig_strain, conds in wigs.items(): if wig_strain == inter["strain"]: for cond, tracks in conds.items(): cover_results["srna_covers"][cond] = [] cover_results["utr_covers"][cond] = [] for lib_name, covers in tracks.items(): track = lib_name.split("|")[-3] lib_strand = lib_name.split("|")[-2] lib_type = lib_name.split("|")[-1] start, end = set_cover_and_point( cover_results, inter, covers, pos, args_srna.fuzzy_utr) detect_cover_utr_srna(cover_results, pos, inter, cond, track, args_srna, lib_type, start, end, lib_strand) return cover_results["srna_covers"], cover_results["utr_covers"] def get_cover_5utr(datas, cover_sets, cover, inter, args_srna, cover_pos): go_out = False if (datas["num"] == args_srna.fuzzy_utr) or ( datas["cover_tmp"]["5utr"] == 0) or ( (cover > datas["cover_tmp"]["5utr"]) and ( cover / datas["cover_tmp"]["5utr"]) > ( 1 + args_srna.decrease_utr)): if inter["strand"] == "+": datas["final_poss"]["end"] = cover_pos elif inter["strand"] == "-": datas["final_poss"]["start"] = cover_pos go_out = True elif (cover <= datas["cover_tmp"]["5utr"]): if (cover / 
datas["cover_tmp"]["5utr"]) >= ( args_srna.decrease_utr / 2): datas["num"] += 1 else: datas["num"] = 0 datas["cover_tmp"]["5utr"] = cover cover_sets["low"] = cover elif (cover > datas["cover_tmp"]["5utr"]) and ( (cover / datas["cover_tmp"]["5utr"]) <= ( 1 + args_srna.decrease_utr)): datas["num"] += 1 return go_out def import_cover(inter, covers, track, cover_sets, avgs, cover, final_poss, pos, lib_type): if final_poss["start"] < final_poss["end"]: if (inter["strand"] == "+") and (final_poss["end"] > pos["end"]): final_poss["end"] = pos["end"] elif (inter["strand"] == "-") and (final_poss["start"] < pos["start"]): final_poss["start"] = pos["start"] covers.append({"track": track, "high": cover_sets["high"], "low": cover_sets["low"], "avg": avgs["avg"], "type": lib_type, "ori_avg": avgs["ori_avg"], "final_start": final_poss["start"], "final_end": final_poss["end"]}) def check_import_srna_covers(datas, cover_results, inter, cond, track, cover, pos, args_srna, lib_type): avgs = {"avg": datas["cover_tmp"]["total"] / float( pos["end"] - pos["start"] + 1), "ori_avg": datas["cover_tmp"]["ori_total"] / float( pos["ori_end"] - pos["ori_start"] + 1)} if ((cover_results["type"] == "5utr") and ( (cover_results["intercds"] == "tsspro") or ( datas["checks"]["detect_decrease"]))) or ( (cover_results["type"] == "3utr") and ( cover_results["intercds"] == "two_pro")) or ( (cover_results["type"] == "3utr") and ( cover_results["intercds"] != "two_pro") and ( datas["checks"]["detect_decrease"])) or ( (cover_results["type"] == "3utr") and ( cover_results["intercds"] != "two_pro") and ( (pos["end"] - pos["start"]) >= args_srna.min_len) and ( (pos["end"] - pos["start"]) <= args_srna.max_len)) or ( (cover_results["type"] == "interCDS") and ( cover_results["intercds"] == "TSS") and ( datas["checks"]["detect_decrease"])) or ( (cover_results["type"] == "interCDS") and ( (cover_results["intercds"] == "tss_pro") or ( cover_results["intercds"] == "two_pro"))): import_cover(inter, cover_results["srna_covers"][cond], track, cover_results["cover_sets"], avgs, cover, datas["final_poss"], pos, lib_type) import_cover(inter, cover_results["utr_covers"][cond], track, cover_results["cover_sets"], avgs, cover, datas["final_poss"], pos, lib_type) else: import_cover(inter, cover_results["utr_covers"][cond], track, cover_results["cover_sets"], avgs, cover, datas["final_poss"], pos, lib_type) def detect_3utr_pro(inter, pos, wigs, utr_type, args_srna): '''3UTR start with processing site''' for pro in args_srna.pros: if (pro.seq_id == inter["strain"]) and ( pro.strand == inter["strand"]): if (pro.start >= pos["start"]) and (pro.start <= pos["end"]): if pro.strand == "+": if ((pos["end"] - pro.start) >= args_srna.min_len) and ( (pos["end"] - pro.start) <= args_srna.max_len): n_pos = import_position( pro.start, pos["end"], pos["ori_start"], pos["ori_end"]) srna_covers, utr_covers = get_coverage( wigs, inter, n_pos, utr_type, "pro", args_srna) args_srna.utrs.append(import_data( inter["strand"], inter["strain"], n_pos, utr_type, "NA", "NA", utr_covers, "NA")) args_srna.srnas.append(import_data( inter["strand"], inter["strain"], n_pos, "3utr", "cleavage", "Cleavage:" + "_".join([ str(pro.start), pro.strand]), srna_covers, "NA")) elif (pos["end"] - pro.start) > args_srna.max_len: detect_twopro(inter, pos, wigs, utr_type, "3utr", args_srna) else: if ((pro.start - pos["start"]) >= args_srna.min_len) and ( (pro.start - pos["start"]) <= args_srna.max_len): n_pos = import_position( pos["start"], pro.start, pos["ori_start"], pos["ori_end"]) 
                    srna_covers, utr_covers = get_coverage(
                        wigs, inter, n_pos, utr_type, "pro", args_srna)
                    args_srna.utrs.append(import_data(
                        inter["strand"], inter["strain"], n_pos, utr_type,
                        "NA", "NA", utr_covers, "NA"))
                    args_srna.srnas.append(import_data(
                        inter["strand"], inter["strain"], n_pos, "3utr",
                        "cleavage", "Cleavage:" + "_".join([
                            str(pro.start), pro.strand]), srna_covers, "NA"))
                elif (pro.start - pos["start"]) > args_srna.max_len:
                    detect_twopro(inter, pos, wigs, utr_type,
                                  "3utr", args_srna)
        if (pro.start > pos["end"] + args_srna.fuzzy_tsss[utr_type]):
            break


def detect_twopro(inter, pos, wigs, utr_type, import_type, args_srna):
    '''the sRNA is associated with two processing sites'''
    pros = []
    for pro in args_srna.pros:
        if (pro.seq_id == inter["strain"]) and (
                pro.strand == inter["strand"]):
            if (pro.start >= pos["start"]) and (pro.start <= pos["end"]):
                pros.append(pro)
    first = True
    pre_pro = None
    for pro in pros:
        if first:
            first = False
        else:
            if ((pro.start - pre_pro.start) >= args_srna.min_len) and (
                    (pro.start - pre_pro.start) <= args_srna.max_len):
                n_pos = import_position(pre_pro.start, pro.start,
                                        pos["ori_start"], pos["ori_end"])
                srna_covers, utr_covers = get_coverage(
                    wigs, inter, n_pos, utr_type, "two_pro", args_srna)
                args_srna.utrs.append(import_data(
                    inter["strand"], inter["strain"], n_pos, utr_type,
                    "NA", "NA", utr_covers, "Cleavage:" + "_".join(
                        [str(pro.start), pro.strand])))
                args_srna.srnas.append(import_data(
                    inter["strand"], inter["strain"], n_pos, import_type,
                    "cleavage", "Cleavage:" + "_".join(
                        [str(pre_pro.start), pro.strand]), srna_covers,
                    "Cleavage:" + "_".join(
                        [str(pro.start), pro.strand])))
        pre_pro = pro


def decrease_pos(covers, pos, strand):
    longer = -1
    for cond, datas in covers.items():
        for data in datas:
            for key, value in data.items():
                if key == pos:
                    if longer == -1:
                        longer = value
                    elif (strand == "+") and (value > longer) and (
                            longer != -1):
                        longer = value
                    elif (strand == "-") and (value < longer) and (
                            longer != -1):
                        longer = value
    return longer


def get_decrease(inter, wigs, tss, pos, utr_type, args_srna):
    '''check the coverage decrease'''
    if inter["strand"] == "+":
        n_pos = import_position(tss.start, pos["end"],
                                pos["ori_start"], pos["ori_end"])
        srna_covers, utr_covers = get_coverage(
            wigs, inter, n_pos, utr_type, "TSS", args_srna)
        utr_pos = decrease_pos(utr_covers, "final_end", "+")
        n_pos = import_position(tss.start, utr_pos,
                                pos["ori_start"], pos["ori_end"])
        args_srna.utrs.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type,
            "NA", "NA", utr_covers, "NA"))
        if len(srna_covers) != 0:
            srna_pos = decrease_pos(srna_covers, "final_end", "+")
            n_pos = import_position(tss.start, srna_pos,
                                    pos["ori_start"], pos["ori_end"])
            args_srna.srnas.append(import_data(
                inter["strand"], inter["strain"], n_pos, utr_type, "TSS",
                "TSS:" + "_".join([str(tss.start), tss.strand]),
                srna_covers, "NA"))
    else:
        n_pos = import_position(pos["start"], tss.start,
                                pos["ori_start"], pos["ori_end"])
        srna_covers, utr_covers = get_coverage(
            wigs, inter, n_pos, utr_type, "TSS", args_srna)
        utr_pos = decrease_pos(utr_covers, "final_start", "-")
        n_pos = import_position(utr_pos, tss.start,
                                pos["ori_start"], pos["ori_end"])
        args_srna.utrs.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type,
            "NA", "NA", utr_covers, "NA"))
        if len(srna_covers) != 0:
            srna_pos = decrease_pos(srna_covers, "final_start", "-")
            n_pos = import_position(srna_pos, tss.start,
                                    pos["ori_start"], pos["ori_end"])
            args_srna.srnas.append(import_data(
                inter["strand"], inter["strain"], n_pos, utr_type, "TSS",
                "TSS:" + "_".join([str(tss.start), tss.strand]),
                srna_covers, "NA"))


def import_append_normal(inter, tss, pro, pos, wigs, utr_type, args_srna):
    if inter["strand"] == "+":
        n_pos = import_position(tss.start, pro.start,
                                pos["ori_start"], pos["ori_end"])
        srna_covers, utr_covers = get_coverage(
            wigs, inter, n_pos, utr_type, "tsspro", args_srna)
        args_srna.utrs.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type,
            "NA", "NA", utr_covers,
            "Cleavage:" + "_".join([str(pro.start), pro.strand])))
        args_srna.srnas.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type, "TSS",
            "TSS:" + "_".join([str(tss.start), tss.strand]), srna_covers,
            "Cleavage:" + "_".join([str(pro.start), pro.strand])))
    else:
        n_pos = import_position(pro.start, tss.start,
                                pos["ori_start"], pos["ori_end"])
        srna_covers, utr_covers = get_coverage(
            wigs, inter, n_pos, utr_type, "tsspro", args_srna)
        args_srna.utrs.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type,
            "NA", "NA", utr_covers,
            "Cleavage:" + "_".join([str(pro.start), pro.strand])))
        args_srna.srnas.append(import_data(
            inter["strand"], inter["strain"], n_pos, utr_type, "TSS",
            "TSS:" + "_".join([str(tss.start), tss.strand]), srna_covers,
            "Cleavage:" + "_".join([str(pro.start), pro.strand])))


def detect_normal(diff, wigs, inter, pos, utr_type, tss, args_srna):
    '''normal case, UTR-derived sRNA with TSS'''
    if (diff >= args_srna.min_len) and (
            diff <= args_srna.max_len):
        srna_covers, utr_covers = get_coverage(
            wigs, inter, pos, utr_type, "TSS", args_srna)
        args_srna.utrs.append(import_data(
            inter["strand"], inter["strain"], pos, utr_type,
            "NA", "NA", utr_covers, "NA"))
        args_srna.srnas.append(import_data(
            inter["strand"], inter["strain"], pos, utr_type, "TSS",
            "TSS:" + "_".join([str(tss.start), tss.strand]),
            srna_covers, "NA"))
    elif (diff > args_srna.max_len) and (len(args_srna.pros) != 0):
        detect = False
        for pro in args_srna.pros:
            if (pro.seq_id == inter["strain"]) and (
                    pro.strand == inter["strand"]):
                if (pro.start >= pos["start"]) and (
                        pro.start <= pos["end"]):
                    if ((pro.start - pos["start"]) >=
                            args_srna.min_len) and (
                            (pro.start - pos["start"]) <=
                            args_srna.max_len):
                        detect = True
                        import_append_normal(inter, tss, pro, pos,
                                             wigs, utr_type, args_srna)
        if not detect:
            get_decrease(inter, wigs, tss, pos, utr_type, args_srna)


def import_position(start, end, ori_start, ori_end):
    return {"start": start, "end": end,
            "ori_start": ori_start, "ori_end": ori_end}


def detect_utr(start, end, inter, utr_type, wigs, args_srna):
    ori_fuzzy = args_srna.fuzzy_tsss[utr_type]
    if "p_" in args_srna.fuzzy_tsss[utr_type]:
        per = float(args_srna.fuzzy_tsss[utr_type].split("_")[-1])
        args_srna.fuzzy_tsss[utr_type] = inter["len_CDS"]*per
    elif "n_" in args_srna.fuzzy_tsss[utr_type]:
        args_srna.fuzzy_tsss[utr_type] = float(
            args_srna.fuzzy_tsss[utr_type].split("_")[-1])
    for tss in args_srna.tsss:
        if (tss.seq_id == inter["strain"]) and (
                tss.strand == inter["strand"]):
            if tss.strand == "+":
                start_fuzzy = start - args_srna.fuzzy_tsss[utr_type]
                if (tss.start >= start_fuzzy) and (tss.start <= end):
                    n_pos = import_position(tss.start, end, start, end)
                    detect_normal(end - tss.start, wigs, inter, n_pos,
                                  utr_type, tss, args_srna)
                elif tss.start > end:
                    break
            else:
                end_fuzzy = end + args_srna.fuzzy_tsss[utr_type]
                if (tss.start >= start) and (tss.start <= end_fuzzy):
                    n_pos = import_position(start, tss.start, start, end)
                    detect_normal(tss.start - start, wigs, inter, n_pos,
                                  utr_type, tss, args_srna)
                if tss.start > end_fuzzy:
                    break
    if (utr_type == "3utr") and (len(args_srna.pros) != 0):
        pos = import_position(start, end, start, end)
        detect_3utr_pro(inter, pos, wigs, utr_type, args_srna)
    if (utr_type == "interCDS") and (len(args_srna.pros) != 0):
        pos = import_position(start, end, start, end)
        detect_twopro(inter, pos, wigs, utr_type, "interCDS", args_srna)
    args_srna.fuzzy_tsss[utr_type] = ori_fuzzy


def run_utr_detection(wigs, inter, start, end, utr_type, args_srna):
    if end - start >= 0:
        if (utr_type == "3utr") or (
                utr_type == "5utr") or (
                utr_type == "interCDS"):
            detect_utr(start, end, inter, utr_type, wigs, args_srna)
        else:
            pos = import_position(start, end, start, end)
            srna_covers, utr_covers = get_coverage(
                wigs, inter, pos, utr_type, "NA", args_srna)
            args_srna.utrs.append(import_data(
                inter["strand"], inter["strain"], pos, utr_type,
                "NA", "NA", utr_covers, "NA"))


def class_utr(inter, ta, args_srna, wig_fs, wig_rs):
    '''classify the UTR-derived sRNA'''
    if inter["strand"] == "+":
        if (inter["start"] <= ta.end) and (
                inter["end"] >= ta.end) and (
                ta.start <= inter["start"]):
            run_utr_detection(wig_fs, inter, inter["start"] + 1,
                              ta.end, "3utr", args_srna)
        elif (inter["start"] <= ta.start) and (
                inter["end"] >= ta.start) and (
                ta.end >= inter["end"]):
            run_utr_detection(wig_fs, inter, ta.start,
                              inter["end"] - 1, "5utr", args_srna)
        elif (inter["start"] >= ta.start) and (inter["end"] <= ta.end):
            run_utr_detection(wig_fs, inter, inter["start"] + 1,
                              inter["end"] - 1, "interCDS", args_srna)
    else:
        if (inter["start"] <= ta.end) and (
                inter["end"] >= ta.end) and (
                ta.start <= inter["start"]):
            run_utr_detection(wig_rs, inter, inter["start"] + 1,
                              ta.end, "5utr", args_srna)
        elif (inter["start"] <= ta.start) and (
                inter["end"] >= ta.start) and (
                ta.end >= inter["end"]):
            run_utr_detection(wig_rs, inter, ta.start,
                              inter["end"] - 1, "3utr", args_srna)
        elif (inter["start"] >= ta.start) and (inter["end"] <= ta.end):
            run_utr_detection(wig_rs, inter, inter["start"] + 1,
                              inter["end"] - 1, "interCDS", args_srna)


def median_score(lst, per):
    '''if the cutoff is assigned as a percentage, get the
    corresponding value'''
    sortedLst = sorted(lst)
    lstLen = len(lst)
    index = int((lstLen - 1) * per)
    if lstLen != 0:
        return sortedLst[index]
    else:
        return 0


def print_file(num, srna, start, end, srna_datas, args_srna):
    name = '%0*d' % (5, num)
    args_srna.out_t.write(
        "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t".format(
            srna["strain"], name, start, end, srna["strand"],
            ";".join(srna_datas["conds"].keys()),
            ";".join(srna_datas["conds"].values()),
            srna_datas["best"], srna_datas["high"], srna_datas["low"]))
    attribute_string = ";".join(
        ["=".join(items) for items in [
            ["ID", srna["strain"] + "_srna_utr" + str(num)],
            ["Name", "UTR_sRNA_" + name],
            ["sRNA_type", srna["utr"]],
            ["best_avg_coverage", str(srna_datas["best"])],
            ["best_high_coverage", str(srna_datas["high"])],
            ["best_low_coverage", str(srna_datas["low"])],
            ["with_TSS", srna["start_tss"]],
            ["start_cleavage", srna["start_cleavage"]],
            ["end_cleavage", srna["end_cleavage"]]]])
    args_srna.out.write("\t".join([str(field) for field in [
        srna["strain"], "ANNOgesic", "ncRNA", str(start),
        str(end), ".", srna["strand"], ".", attribute_string]]) + "\n")
    first = True
    for data in srna_datas["detail"]:
        if first:
            args_srna.out_t.write("{0}(avg={1};high={2};low={3})".format(
                data["track"], data["avg"], data["high"], data["low"]))
            first = False
        else:
            args_srna.out_t.write(";{0}(avg={1};high={2};low={3})".format(
                data["track"], data["avg"], data["high"], data["low"]))
    args_srna.out_t.write("\n")


def detect_srna(median, args_srna):
    '''check the sRNA candidates and print them out'''
    num = 0
    if len(args_srna.srnas) != 0:
        for srna in args_srna.srnas:
            if srna["strain"] in median.keys():
                srna_datas = replicate_comparison(
                    args_srna, srna["datas"], srna["strand"],
                    "sRNA_utr_derived",
                    median[srna["strain"]][srna["utr"]],
                    args_srna.coverages, srna["utr"], None, None,
                    args_srna.texs)
                if srna_datas["best"] != 0:
                    if (srna["utr"] == "5utr") or (
                            srna["utr"] == "interCDS"):
                        start = srna_datas["start"]
                        end = srna_datas["end"]
                    elif srna["utr"] == "3utr":
                        start = srna["start"]
                        end = srna["end"]
                    if (math.fabs(start - end) >= args_srna.min_len) and (
                            math.fabs(start - end) <= args_srna.max_len):
                        print_file(num, srna, start, end,
                                   srna_datas, args_srna)
                        num += 1


def read_data(args_srna):
    cdss = []
    tas = []
    tsss = []
    pros = []
    seq = {}
    gff_parser = Gff3Parser()
    fh = open(args_srna.gff_file, "r")
    for entry in gff_parser.entries(fh):
        if (entry.feature == "CDS") or (
                entry.feature == "tRNA") or (
                entry.feature == "rRNA"):
            if ("product" in entry.attributes.keys()) and (args_srna.hypo):
                if "hypothetical protein" not in \
                        entry.attributes["product"]:
                    cdss.append(entry)
            else:
                cdss.append(entry)
    fh.close()
    fh = open(args_srna.ta_file, "r")
    for entry in gff_parser.entries(fh):
        tas.append(entry)
    fh.close()
    fh = open(args_srna.tss_file, "r")
    for entry in gff_parser.entries(fh):
        tsss.append(entry)
    fh.close()
    if args_srna.pro_file is not None:
        fh = open(args_srna.pro_file, "r")
        for entry in gff_parser.entries(fh):
            pros.append(entry)
        fh.close()
    with open(args_srna.seq_file, "r") as s_f:
        for line in s_f:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                seq[strain] = ""
            else:
                seq[strain] = seq[strain] + line
    cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    if len(pros) != 0:
        pros = sorted(pros, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
    return cdss, tas, tsss, pros, seq


def get_utr_coverage(utrs):
    covers = {}
    for utr in utrs:
        if utr["strain"] not in covers.keys():
            covers[utr["strain"]] = {"3utr": {}, "5utr": {},
                                     "interCDS": {}}
        for cond, utr_covers in utr["datas"].items():
            for cover in utr_covers:
                if (cover["track"] not in
                        covers[utr["strain"]][utr["utr"]].keys()):
                    covers[utr["strain"]][utr["utr"]][cover["track"]] = []
                covers[utr["strain"]][utr["utr"]][cover["track"]].append(
                    cover["ori_avg"])
    return covers


def get_inter(cdss, inters):
    '''get the intergenic region'''
    for cds1 in cdss:
        for cds2 in cdss:
            if (cds1.seq_id == cds2.seq_id) and \
                    (cds1.strand == cds2.strand):
                if (cds2.start > cds1.start):
                    if cds2.start - cds1.end > 1:
                        if cds1.strand == "+":
                            length = cds1.end - cds1.start
                        else:
                            length = cds2.end - cds2.start
                        inters.append(import_inter(
                            cds1.strand, cds1.seq_id,
                            cds1.end, cds2.start, length))
                    break


def mean_score(lst):
    total = 0
    for li in lst:
        total = total + li
    if len(lst) != 0:
        return (total / len(lst))
    else:
        return 0


def get_utr_cutoff(coverage, mediandict, avgs, strain, utr, track):
    if "n_" in coverage:
        cutoff = float(coverage.split("_")[-1])
        mediandict[strain][utr][track] = {"median": cutoff,
                                          "mean": mean_score(avgs)}
    elif "p_" in coverage:
        cutoff = float(coverage.split("_")[-1])
        mediandict[strain][utr][track] = {
            "median": median_score(avgs, cutoff),
            "mean": mean_score(avgs)}


def set_cutoff(covers, args_srna):
    '''set the cutoff based on the types of sRNA'''
    mediandict = {}
    for strain, utrs in covers.items():
        mediandict[strain] = {"3utr": {}, "5utr": {}, "interCDS": {}}
        for utr, tracks in utrs.items():
            if (utr == "3utr") or (utr == "5utr") or (utr == "interCDS"):
                for track, avgs in tracks.items():
                    if track not in mediandict[strain][utr].keys():
                        mediandict[strain][utr][track] = {}
                    if args_srna.cover_notex is not None:
                        for keys in args_srna.texs.keys():
                            tracks = keys.split("@AND@")
                            if tracks[0] == track:
                                get_utr_cutoff(args_srna.coverages[utr],
                                               mediandict, avgs,
                                               strain, utr, track)
                                break
                            elif tracks[1] == track:
                                get_utr_cutoff(args_srna.cover_notex[utr],
                                               mediandict, avgs,
                                               strain, utr, track)
                                break
                    else:
                        get_utr_cutoff(args_srna.coverages[utr],
                                       mediandict, avgs,
                                       strain, utr, track)
    return mediandict


def print_median(out_folder, mediandict):
    '''print the cutoff based on the types of sRNA'''
    out = open(os.path.join(out_folder, "tmp_median"), "a")
    for strain, utrs in mediandict.items():
        for utr, tracks in utrs.items():
            for track, value in tracks.items():
                out.write("\t".join([strain, utr, track,
                                     str(value["median"])]) + "\n")
    out.close()


def free_memory(paras):
    for data in (paras):
        del(data)
    gc.collect()


def utr_derived_srna(args_srna, libs, texs, wig_fs, wig_rs):
    inters = []
    cdss, tas, tsss, pros, seq = read_data(args_srna)
    out = open(args_srna.output_file, "w")
    out.write("##gff-version 3\n")
    out_t = open(args_srna.output_table, "w")
    get_terminal(cdss, inters, seq, "start")
    get_inter(cdss, inters)
    get_terminal(cdss, inters, seq, "end")
    inters = sorted(inters, key=lambda k: (k["strain"], k["start"],
                                           k["end"], k["strand"]))
    args_srna = ArgsContainer().extend_utr_container(
        args_srna, cdss, tsss, pros, out, out_t, texs)
    for inter in inters:
        for ta in tas:
            if (inter["strain"] == ta.seq_id) and (
                    inter["strand"] == ta.strand):
                class_utr(inter, ta, args_srna, wig_fs, wig_rs)
    covers = get_utr_coverage(args_srna.utrs)
    mediandict = set_cutoff(covers, args_srna)
    print_median(args_srna.out_folder, mediandict)
    detect_srna(mediandict, args_srna)
    args_srna.out.close()
    args_srna.out_t.close()
    paras = [args_srna.srnas, args_srna.utrs, seq, inters, tas,
             cdss, tas, tsss, pros, covers]
    free_memory(paras)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sRNA_utr_derived.py
sRNA_utr_derived.py
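A minimal sketch of how the "p_"-style percentage cutoffs above behave; the coverage values are invented, and the import path assumes the installed package layout shown in this record.

from annogesiclib.sRNA_utr_derived import median_score, mean_score

avgs = [2, 10, 4, 8, 6]         # hypothetical per-track "ori_avg" coverages
print(median_score(avgs, 0.5))  # sorted -> [2, 4, 6, 8, 10], index int(4 * 0.5) = 2 -> 6
print(median_score(avgs, 0.9))  # index int(4 * 0.9) = 3 -> 8
print(mean_score(avgs))         # 6.0, used when the cutoff type is "mean"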
import os
import sys
import shutil
from annogesiclib.helper import Helper
from annogesiclib.multiparser import Multiparser
from annogesiclib.converter import Converter
from annogesiclib.combine_frag_tex import combine
from annogesiclib.stat_TA_comparison import stat_ta_tss, stat_ta_gff
from annogesiclib.transcript_detection import detect_transcript
from annogesiclib.fill_gap import fill_gap, longer_ta
from annogesiclib.gen_table_tran import gen_table_transcript
from annogesiclib.compare_tran_term import compare_term_tran
from annogesiclib.plot_tran import plot_tran
from annogesiclib.reorganize_table import reorganize_table


class TranscriptDetection(object):
    '''Transcript detection'''

    def __init__(self, args_tran):
        self.multiparser = Multiparser()
        self.helper = Helper()
        self.converter = Converter()
        self.gff_outfolder = os.path.join(args_tran.out_folder, "gffs")
        self.tran_path = os.path.join(self.gff_outfolder, "tmp")
        self.stat_path = os.path.join(args_tran.out_folder, "statistics")
        self.tmps = {"gff": "tmp.gff", "merge": "tmp_merge",
                     "tran": os.path.join(args_tran.out_folder, "tmp_tran"),
                     "tss_ta": os.path.join(self.gff_outfolder,
                                            "tmp_tss_ta"),
                     "ta_tss": os.path.join(self.gff_outfolder,
                                            "tmp_ta_tss"),
                     "ta_gff": os.path.join(self.gff_outfolder,
                                            "tmp_ta_gff"),
                     "gff_ta": os.path.join(self.gff_outfolder,
                                            "tmp_gff_ta"),
                     "uni": os.path.join(self.gff_outfolder, "tmp_uni"),
                     "overlap": os.path.join(
                         self.gff_outfolder, "tmp_overlap")}
        self.frag = "transcript_fragment.gff"
        self.tex = "transcript_tex_notex.gff"
        self.endfix_tran = "transcript.gff"

    def _compute_transcript(self, wig_f, wig_r, wig_folder, wig_type,
                            strain, libs, args_tran):
        print("Computing transcripts for {0}".format(strain))
        out = os.path.join(args_tran.out_folder,
                           "_".join([strain, wig_type]))
        detect_transcript(wig_f, wig_r, wig_folder, libs, out,
                          wig_type, args_tran)

    def _compute(self, wig_type, wigs, libs, args_tran):
        strains = []
        wig_folder = os.path.join(wigs, "tmp")
        for wig in os.listdir(wig_folder):
            if wig.endswith("_forward.wig"):
                strains.append(wig.replace("_forward.wig", ""))
        for strain in strains:
            f_file = os.path.join(wig_folder, "_".join(
                [strain, "forward.wig"]))
            r_file = os.path.join(wig_folder, "_".join(
                [strain, "reverse.wig"]))
            self._compute_transcript(f_file, r_file, wigs, wig_type,
                                     strain, libs, args_tran)
        return strains

    def _compare_tss(self, tas, args_tran, log):
        self.multiparser.parser_gff(args_tran.compare_tss, "TSS")
        self.multiparser.combine_gff(
            self.gff_outfolder,
            os.path.join(args_tran.compare_tss, "tmp"),
            "transcript", "TSS")
        print("Comparing transcripts and TSSs")
        log.write("Running stat_TA_comparison.py to compare transcripts "
                  "with TSSs.\n")
        tss_folder = os.path.join(args_tran.compare_tss, "tmp")
        for ta in tas:
            ta_file = os.path.join(self.gff_outfolder,
                                   "_".join([ta, self.endfix_tran]))
            stat_tss_out = os.path.join(
                self.stat_path, "".join([
                    "stat_compare_transcript_TSS_", ta, ".csv"]))
            for tss in os.listdir(tss_folder):
                filename = tss.split("_TSS")
                if (filename[0] == ta) and (tss.endswith(".gff")):
                    stat_ta_tss(ta_file, os.path.join(tss_folder, tss),
                                stat_tss_out, self.tmps["ta_tss"],
                                self.tmps["tss_ta"], args_tran.fuzzy)
                    os.remove(ta_file)
                    os.remove(os.path.join(tss_folder, tss))
                    self.helper.sort_gff(self.tmps["ta_tss"], ta_file)
                    self.helper.sort_gff(
                        self.tmps["tss_ta"], os.path.join(
                            args_tran.compare_tss, tss))
                    os.remove(self.tmps["tss_ta"])
                    os.remove(self.tmps["ta_tss"])
            log.write("\t" + stat_tss_out + "\n")

    def _compare_cds(self, tas, args_tran, log):
        self.multiparser.parser_gff(args_tran.gffs, None)
        self.multiparser.combine_gff(
            self.gff_outfolder, os.path.join(args_tran.gffs, "tmp"),
            "transcript", None)
        print("Comparing transcripts and genome annotations")
        cds_folder = os.path.join(args_tran.gffs, "tmp")
        log.write("Running stat_TA_comparison.py to compare transcripts "
                  "with genome annotations.\n")
        for ta in tas:
            ta_file = os.path.join(self.gff_outfolder,
                                   "_".join([ta, self.endfix_tran]))
            stat_gff_out = os.path.join(self.stat_path, "".join([
                "stat_compare_transcript_genome_", ta, ".csv"]))
            for gff in os.listdir(cds_folder):
                if (gff[:-4] == ta) and (gff.endswith(".gff")):
                    cds_file = os.path.join(cds_folder, gff)
                    stat_ta_gff(ta_file, cds_file, stat_gff_out,
                                self.tmps["ta_gff"], self.tmps["gff_ta"],
                                args_tran.c_feature)
                    os.remove(ta_file)
                    os.remove(os.path.join(args_tran.gffs, gff))
                    self.helper.sort_gff(self.tmps["ta_gff"], ta_file)
                    self.helper.sort_gff(self.tmps["gff_ta"], os.path.join(
                        args_tran.gffs, gff))
                    os.remove(self.tmps["ta_gff"])
                    os.remove(self.tmps["gff_ta"])
            log.write("\t" + stat_gff_out + ".\n")

    def _compare_tss_cds(self, tas, args_tran, log):
        '''compare transcript with CDS and TSS'''
        if (args_tran.compare_tss is not None) and (
                args_tran.c_feature is not None):
            self.multiparser.parser_gff(self.gff_outfolder, "transcript")
            self._compare_cds(tas, args_tran, log)
            self._compare_tss(tas, args_tran, log)
        elif (args_tran.c_feature is not None) and (
                args_tran.compare_tss is None):
            self.multiparser.parser_gff(self.gff_outfolder, "transcript")
            self._compare_cds(tas, args_tran, log)
        elif (args_tran.c_feature is None) and (
                args_tran.compare_tss is not None):
            self.multiparser.parser_gff(self.gff_outfolder, "transcript")
            self._compare_tss(tas, args_tran, log)

    def _for_one_wig(self, type_, args_tran):
        '''run transcript detection on one type of wig files'''
        if type_ == "tex_notex":
            libs = args_tran.tlibs
            wigs = args_tran.tex_wigs
        else:
            libs = args_tran.flibs
            wigs = args_tran.frag_wigs
        print("Importing {0} wig files".format(type_))
        strains = self._compute(type_, wigs, libs, args_tran)
        for strain in strains:
            out = os.path.join(self.gff_outfolder, "_".join([
                strain, "transcript", type_ + ".gff"]))
            self.helper.sort_gff(os.path.join(
                args_tran.out_folder, "_".join([strain, type_])), out)
            os.remove(os.path.join(args_tran.out_folder,
                                   "_".join([strain, type_])))
        return strains

    def _for_two_wigs(self, strains, args_tran, log):
        '''merge the results of fragmented and tex-treated libs'''
        if (args_tran.frag_wigs is not None) and (
                args_tran.tex_wigs is not None):
            log.write("Running combine_frag_tex.py to merge the results "
                      "from fragmented libs and dRNA-Seq libs.\n")
            print("Merging fragmented and tex-treated ones")
            for strain in strains:
                frag_gff = os.path.join(self.gff_outfolder,
                                        "_".join([strain, self.frag]))
                tex_gff = os.path.join(self.gff_outfolder,
                                       "_".join([strain, self.tex]))
                final_gff = os.path.join(
                    self.gff_outfolder,
                    "_".join([strain, self.endfix_tran]))
                for gff in os.listdir(self.gff_outfolder):
                    if "_transcript_" in gff:
                        filename = gff.split("_transcript_")
                        if (strain == filename[0]) and (
                                "tex_notex.gff" == filename[1]):
                            tex_file = gff
                        elif (strain == filename[0]) and (
                                "fragment.gff" == filename[1]):
                            frag_file = gff
                combine(os.path.join(self.gff_outfolder, frag_file),
                        os.path.join(self.gff_outfolder, tex_file),
                        args_tran.tolerance,
                        os.path.join(self.gff_outfolder,
                                     "_".join([strain,
                                               self.endfix_tran])))
                os.remove(frag_gff)
                os.remove(tex_gff)
                log.write("\t" + final_gff + " is generated.\n")
        else:
            if args_tran.frag_wigs is not None:
                for strain in strains:
                    frag_gff = os.path.join(
                        self.gff_outfolder, "_".join([strain, self.frag]))
                    final_gff = os.path.join(
                        self.gff_outfolder,
                        "_".join([strain, self.endfix_tran]))
                    shutil.move(frag_gff, final_gff)
                    log.write("\t" + final_gff + " is generated.\n")
            elif args_tran.tex_wigs is not None:
                for strain in strains:
                    tex_gff = os.path.join(
                        self.gff_outfolder, "_".join([strain, self.tex]))
                    final_gff = os.path.join(
                        self.gff_outfolder,
                        "_".join([strain, self.endfix_tran]))
                    shutil.move(tex_gff, final_gff)
                    log.write("\t" + final_gff + " is generated.\n")

    def _post_modify(self, tas, args_tran):
        '''modify the transcript by comparing with genome annotation'''
        for ta in tas:
            for gff in os.listdir(args_tran.gffs):
                if (".gff" in gff) and (gff[:-4] == ta):
                    break
            print("Modifying {0} by referring to {1}".format(ta, gff))
            fill_gap(os.path.join(args_tran.gffs, gff),
                     os.path.join(self.tran_path,
                                  "_".join([ta, self.endfix_tran])),
                     "overlap", self.tmps["overlap"], args_tran.modify)
            fill_gap(os.path.join(args_tran.gffs, gff),
                     os.path.join(self.tran_path,
                                  "_".join([ta, self.endfix_tran])),
                     "uni", self.tmps["uni"], args_tran.modify)
            tmp_merge = os.path.join(self.gff_outfolder,
                                     self.tmps["merge"])
            if self.tmps["merge"] in self.gff_outfolder:
                os.remove(tmp_merge)
            self.helper.merge_file(self.tmps["overlap"], tmp_merge)
            self.helper.merge_file(self.tmps["uni"], tmp_merge)
            tmp_out = os.path.join(self.gff_outfolder,
                                   "_".join(["tmp", ta]))
            self.helper.sort_gff(tmp_merge, tmp_out)
            os.remove(self.tmps["overlap"])
            os.remove(self.tmps["uni"])
            os.remove(tmp_merge)
            final_out = os.path.join(self.gff_outfolder,
                                     "_".join(["final", ta]))
            longer_ta(tmp_out, args_tran.length, final_out)
            shutil.move(final_out,
                        os.path.join(self.tmps["tran"],
                                     "_".join([ta, self.endfix_tran])))
            os.remove(tmp_out)
        shutil.rmtree(self.gff_outfolder)
        shutil.move(self.tmps["tran"], self.gff_outfolder)

    def _remove_file(self, args_tran):
        if "tmp_wig" in os.listdir(args_tran.out_folder):
            shutil.rmtree(os.path.join(args_tran.out_folder, "tmp_wig"))
        if "merge_wigs" in os.listdir(args_tran.out_folder):
            shutil.rmtree(os.path.join(args_tran.out_folder,
                                       "merge_wigs"))
        self.helper.remove_tmp_dir(args_tran.gffs)
        self.helper.remove_tmp_dir(args_tran.compare_tss)
        self.helper.remove_tmp_dir(args_tran.terms)
        self.helper.remove_tmp(os.path.join(args_tran.out_folder, "gffs"))
        self.helper.remove_tmp(self.gff_outfolder)

    def _compare_term_tran(self, args_tran, log):
        '''search the associated terminator of each transcript'''
        if args_tran.terms is not None:
            print("Comparing between terminators and transcripts")
            self.multiparser.parser_gff(args_tran.terms, "term")
            if args_tran.gffs is not None:
                self.multiparser.combine_gff(
                    args_tran.gffs,
                    os.path.join(args_tran.terms, "tmp"), None, "term")
            log.write("Running compare_tran_term.py to compare "
                      "transcripts with terminators.\n")
            compare_term_tran(self.gff_outfolder,
                              os.path.join(args_tran.terms, "tmp"),
                              args_tran.fuzzy_term, args_tran.fuzzy_term,
                              args_tran.out_folder, "transcript",
                              args_tran.terms, self.gff_outfolder)
            for file_ in os.listdir(os.path.join(args_tran.out_folder,
                                                 "statistics")):
                if file_.startswith(
                        "stat_compare_transcript_terminator_"):
                    log.write("\t" + file_ + " is generated.\n")

    def _re_table(self, args_tran, log):
        log.write("Running re_table.py to generate coverage "
                  "information.\n")
        log.write("The following files are updated:\n")
        for gff in os.listdir(self.gff_outfolder):
            if os.path.isfile(os.path.join(self.gff_outfolder, gff)):
                tran_table = os.path.join(args_tran.out_folder, "tables",
                                          gff.replace(".gff", ".csv"))
                reorganize_table(args_tran.libs, args_tran.merge_wigs,
                                 "Coverage_details", tran_table)
                log.write("\t" + tran_table + "\n")

    def _list_files(self, folder, log, end):
        log.write("The following files in {0} are "
                  "generated:\n".format(folder))
        for file_ in os.listdir(folder):
            if (end is not None) and (file_.endswith(end)):
                log.write("\t" + file_ + "\n")
            elif end is None:
                log.write("\t" + file_ + "\n")

    def run_transcript(self, args_tran, log):
        if (args_tran.frag_wigs is None) and (args_tran.tex_wigs is None):
            log.write("No wig file is assigned.\n")
            print("Error: There is no wiggle file!\n")
            sys.exit()
        if args_tran.frag_wigs is not None:
            log.write("Running transcript_detection.py for detecting "
                      "transcripts based on fragmented libs.\n")
            strains = self._for_one_wig("fragment", args_tran)
        if args_tran.tex_wigs is not None:
            log.write("Running transcript_detection.py for detecting "
                      "transcripts based on dRNA-Seq libs.\n")
            strains = self._for_one_wig("tex_notex", args_tran)
        self._for_two_wigs(strains, args_tran, log)
        tas = []
        if ("none" not in args_tran.modify) and (
                args_tran.gffs is not None):
            for gff in os.listdir(args_tran.gffs):
                if gff.endswith(".gff"):
                    self.helper.sort_gff(
                        os.path.join(args_tran.gffs, gff),
                        self.tmps["gff"])
                    shutil.move(self.tmps["gff"],
                                os.path.join(args_tran.gffs, gff))
            self.multiparser.combine_gff(args_tran.gffs, os.path.join(
                args_tran.gffs, "tmp"), None, None)
            self.multiparser.parser_gff(self.gff_outfolder, "transcript")
            self.multiparser.combine_gff(args_tran.gffs, self.tran_path,
                                         None, "transcript")
            self.helper.check_make_folder(self.tmps["tran"])
            for ta in os.listdir(self.tran_path):
                if ta.endswith(".gff"):
                    if os.path.getsize(os.path.join(self.tran_path,
                                                    ta)) != 0:
                        tas.append(ta.replace("_" + self.endfix_tran, ""))
            log.write("Running fill_gap.py to modify transcripts "
                      "based on genome annotations.\n")
            self._post_modify(tas, args_tran)
        self._compare_tss_cds(tas, args_tran, log)
        self._compare_term_tran(args_tran, log)
        print("Generating tables for the details")
        log.write("Running gen_table_tran.py to generate the table of "
                  "transcripts.\n")
        gen_table_transcript(self.gff_outfolder, args_tran)
        self._list_files(os.path.join(args_tran.out_folder, "tables"),
                         log, None)
        log.write("Running plot_tran to plot the distribution of the "
                  "length of the transcripts.\n")
        plot_tran(self.gff_outfolder, self.stat_path, args_tran.max_dist)
        self._list_files(self.stat_path, log, ".png")
        self._re_table(args_tran, log)
        self._remove_file(args_tran)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/transcript.py
transcript.py
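A small sketch of the strain-name convention _compute() relies on: every <strain>_forward.wig in the temporary wig folder names one strain, whose forward/reverse pair is then processed. The file names here are invented.

wig_files = ["NC_000915.1_forward.wig", "NC_000915.1_reverse.wig",
             "plasmid1_forward.wig", "plasmid1_reverse.wig"]
strains = [w.replace("_forward.wig", "")
           for w in wig_files if w.endswith("_forward.wig")]
print(strains)  # ['NC_000915.1', 'plasmid1']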
import copy


def coverage_comparison(cover, cover_sets, poss, first, strand, cover_pos):
    '''Search for the lowest and highest coverage'''
    if first:
        first = False
        cover_sets["high"] = cover
        cover_sets["low"] = cover
        poss["high"] = cover_pos
        poss["low"] = cover_pos
    else:
        if cover_sets["high"] < cover:
            cover_sets["high"] = cover
            poss["high"] = cover_pos
            poss["low"] = cover_pos
            cover_sets["low"] = cover
        if ((strand == "+") and (poss["low"] >= poss["high"])) or \
                ((strand == "-") and (poss["low"] <= poss["high"])):
            if cover_sets["low"] > cover:
                cover_sets["low"] = cover
                poss["low"] = cover_pos
        elif ((strand == "+") and (poss["low"] < poss["high"])) or \
                ((strand == "-") and (poss["low"] > poss["high"])):
            poss["low"] = cover_pos
            cover_sets["low"] = cover
    return first


def get_repmatch(replicates, cond):
    '''deal with the replicate match'''
    detect_all = False
    for rep in replicates:
        if "all" in rep:
            detect_all = True
            rep = int(rep.split("_")[-1])
            break
    if not detect_all:
        for match in replicates:
            if cond.split("_")[0] == match.split("_")[0]:
                rep = int(match.split("_")[-1])
    return rep


def define_cutoff(coverages, median, utr_type):
    '''get the cutoff'''
    cutoffs = {}
    if coverages[utr_type] == "mean":
        for track, values in median.items():
            cutoffs[track] = values["mean"]
    else:
        for track, values in median.items():
            cutoffs[track] = values["median"]
    return cutoffs


def check_notex(cover, texs, cutoff, notex):
    '''Check the cutoff of average coverage for TEX+ and TEX- libs'''
    if notex is not None:
        if len(texs) != 0:
            for keys in texs.keys():
                tracks = keys.split("@AND@")
                if cover["track"] == tracks[0]:
                    if cover["avg"] > cutoff:
                        return True
                elif cover["track"] == tracks[1]:
                    if cover["avg"] > notex:
                        return True
        else:
            if cover["avg"] > cutoff:
                return True
    else:
        if cover["avg"] > cutoff:
            return True


def run_tex(cover, texs, check_texs, tex_notex, type_, detect_num,
            poss, target_datas):
    '''Check the position of different libs'''
    if (cover["type"] == "tex") or (cover["type"] == "notex"):
        for key in texs.keys():
            if cover["track"] in key:
                texs[key] += 1
                check_texs[key].append(cover)
                if texs[key] >= tex_notex:
                    if type_ == "sRNA_utr_derived":
                        if detect_num == 0:
                            poss["start"] = cover["final_start"]
                            poss["end"] = cover["final_end"]
                        else:
                            exchange_start_end(poss, cover)
                    detect_num += 1
                    if cover not in target_datas:
                        target_datas.append(cover)
                    if tex_notex != 1:
                        if check_texs[key][0] not in target_datas:
                            target_datas.append(check_texs[key][0])
                            if type_ == "sRNA_utr_derived":
                                exchange_start_end(poss,
                                                   check_texs[key][0])
    elif cover["type"] == "frag":
        if type_ == "sRNA_utr_derived":
            if detect_num == 0:
                poss["start"] = cover["final_start"]
                poss["end"] = cover["final_end"]
            else:
                exchange_start_end(poss, cover)
        detect_num += 1
        target_datas.append(cover)
    return detect_num


def check_tex(template_texs, covers, target_datas, notex, type_, poss,
              median, coverages, utr_type, cutoff_coverage, tex_notex):
    '''Check the candidates for TEX+/- libs (should be detected in
    one or both of the libs)'''
    detect_num = 0
    check_texs = {}
    texs = copy.deepcopy(template_texs)
    for key, num in texs.items():
        check_texs[key] = []
    for cover in covers:
        run_check_tex = False
        if type_ == "sRNA_utr_derived":
            cutoffs = define_cutoff(coverages, median, utr_type)
            if cover["track"] in cutoffs.keys():
                if cover["avg"] > cutoffs[cover["track"]]:
                    run_check_tex = True
            else:
                run_check_tex = True
        elif type_ == "sORF":
            if cover["avg"] > coverages[cover["track"]]:
                run_check_tex = True
        elif (type_ == "terminator"):
            run_check_tex = True
        elif (type_ == "normal"):
            run_check_tex = check_notex(cover, texs, cutoff_coverage,
                                        notex)
        else:
            if cover["avg"] > cutoff_coverage:
                run_check_tex = True
        if run_check_tex:
            detect_num = run_tex(cover, texs, check_texs, tex_notex,
                                 type_, detect_num, poss, target_datas)
    return detect_num


def exchange_start_end(poss, cover):
    '''modify the start and end points; keep the widest extent'''
    if poss["start"] > cover["final_start"]:
        poss["start"] = cover["final_start"]
    if poss["end"] < cover["final_end"]:
        poss["end"] = cover["final_end"]


def replicate_comparison(args_srna, srna_covers, strand, type_, median,
                         coverages, utr_type, notex, cutoff_coverage,
                         texs):
    '''Check the number of replicates which fit the cutoff, in order to
    remove the candidates which can only be detected in a few
    replicates.'''
    srna_datas = {"best": 0, "high": 0, "low": 0, "start": -1,
                  "end": -1, "track": "", "detail": [], "conds": {}}
    tmp_poss = {"start": -1, "end": -1, "pos": -1,
                "all_start": [], "all_end": []}
    detect = False
    for cond, covers in srna_covers.items():
        detect_num = check_tex(
            texs, covers, srna_datas["detail"], notex, type_, tmp_poss,
            median, coverages, utr_type, cutoff_coverage,
            args_srna.tex_notex)
        if ("texnotex" in cond):
            tex_rep = get_repmatch(args_srna.replicates["tex"], cond)
            if detect_num >= tex_rep:
                detect = True
        elif ("frag" in cond):
            frag_rep = get_repmatch(args_srna.replicates["frag"], cond)
            if detect_num >= frag_rep:
                detect = True
        if detect:
            detect = False
            if type_ == "sRNA_utr_derived":
                tmp_poss["all_start"].append(tmp_poss["start"])
                tmp_poss["all_end"].append(tmp_poss["end"])
            else:
                if strand == "+":
                    sort_datas = sorted(srna_datas["detail"],
                                        key=lambda k: (k["pos"]))
                else:
                    sort_datas = sorted(srna_datas["detail"],
                                        key=lambda k: (k["pos"]),
                                        reverse=True)
                srna_datas["pos"] = sort_datas[-1]["pos"]
            sort_datas = sorted(srna_datas["detail"],
                                key=lambda k: (k["avg"]))
            avg = sort_datas[-1]["avg"]
            srna_datas["conds"][cond] = str(detect_num)
            if (avg > srna_datas["best"]):
                srna_datas["high"] = sort_datas[-1]["high"]
                srna_datas["low"] = sort_datas[-1]["low"]
                srna_datas["best"] = avg
                srna_datas["track"] = sort_datas[-1]["track"]
    if type_ == "sRNA_utr_derived":
        if len(tmp_poss["all_start"]) != 0:
            srna_datas["start"] = min(tmp_poss["all_start"])
            srna_datas["end"] = max(tmp_poss["all_end"])
        else:
            srna_datas["start"] = -1
            srna_datas["end"] = -1
    return srna_datas
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/coverage_detection.py
coverage_detection.py
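A hedged sketch of coverage_comparison() scanning a coverage vector on the forward strand; the coverage numbers are invented. The helper keeps the running maximum and the minimum observed after that maximum.

from annogesiclib.coverage_detection import coverage_comparison

cover_sets = {"high": 0, "low": 0}
poss = {"high": 0, "low": 0}
first = True
for cover_pos, cover in enumerate([3, 7, 5, 2, 6], start=1):
    first = coverage_comparison(cover, cover_sets, poss,
                                first, "+", cover_pos)
print(cover_sets)  # {'high': 7, 'low': 2}
print(poss)        # {'high': 2, 'low': 4}: the low point follows the peak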
import os
import shutil
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.lib_reader import read_wig, read_libs


def check_start_and_end(start, end, covers):
    if (start - 2) < 0:
        c_start = 0
    else:
        c_start = start - 2
    if (end + 2) > len(covers):
        c_end = len(covers)
    else:
        c_end = end + 2
    return c_start, c_end


def detect_coverage(wigs, tran, infos):
    for strain, conds in wigs.items():
        if strain == tran.seq_id:
            for cond, tracks in conds.items():
                for lib_name, covers in tracks.items():
                    track = lib_name.split("|")[-3]
                    lib_strand = lib_name.split("|")[-2]
                    lib_type = lib_name.split("|")[-1]
                    infos[track] = {"avg": -1, "high": -1, "low": -1}
                    total = 0
                    pos = 0
                    c_start, c_end = check_start_and_end(tran.start,
                                                         tran.end, covers)
                    for cover in covers[c_start: c_end]:
                        cover_pos = pos + c_start
                        if (cover_pos >= tran.start) and (
                                cover_pos <= tran.end):
                            total = cover + total
                            if cover > infos[track]["high"]:
                                infos[track]["high"] = cover
                            if (cover < infos[track]["low"]) or (
                                    infos[track]["low"] == -1):
                                infos[track]["low"] = cover
                        pos += 1
                    infos[track]["avg"] = (
                        float(total) / float(tran.end - tran.start + 1))


def print_associate(associate, tran, out):
    if associate in tran.attributes.keys():
        out.write("\t" + tran.attributes[associate])
    else:
        out.write("\tNA")


def compare_ta_genes(tran, genes, out):
    ass_genes = []
    if len(genes) != 0:
        for gene in genes:
            if (gene.seq_id == tran.seq_id) and (
                    gene.strand == tran.strand):
                if ((tran.start <= gene.start) and (
                        tran.end >= gene.end)) or (
                        (tran.start >= gene.start) and (
                        tran.end <= gene.end)) or (
                        (tran.start <= gene.start) and (
                        tran.end <= gene.end) and (
                        tran.end >= gene.start)) or (
                        (tran.start >= gene.start) and (
                        tran.start <= gene.end) and (
                        tran.end >= gene.end)):
                    if "gene" in gene.attributes.keys():
                        ass_genes.append(gene.attributes["gene"])
                    elif "locus_tag" in gene.attributes.keys():
                        ass_genes.append(gene.attributes["locus_tag"])
                    else:
                        ass_genes.append("".join([
                            "gene:", str(gene.start), "-",
                            str(gene.end), "_", gene.strand]))
    if len(ass_genes) != 0:
        out.write("\t" + ",".join(ass_genes))
    else:
        out.write("\tNA")


def print_coverage(trans, out, out_gff, wigs_f, wigs_r, gff_file):
    genes = []
    if gff_file is not None:
        gff_f = open(gff_file, "r")
        for entry in Gff3Parser().entries(gff_f):
            if (entry.feature == "gene"):
                genes.append(entry)
    for tran in trans:
        infos = {}
        tran.attributes["detect_lib"] = \
            tran.attributes["detect_lib"].replace("tex_notex", "TEX+/-")
        out.write("\t".join([tran.seq_id, tran.attributes["Name"],
                             str(tran.start), str(tran.end), tran.strand,
                             tran.attributes["detect_lib"]]))
        compare_ta_genes(tran, genes, out)
        print_associate("associated_tss", tran, out)
        print_associate("associated_term", tran, out)
        if tran.strand == "+":
            detect_coverage(wigs_f, tran, infos)
        else:
            detect_coverage(wigs_r, tran, infos)
        out.write("\t")
        best = -1
        best_track = ""
        best_cover = {}
        for track, cover in infos.items():
            if best != -1:
                out.write(";")
            out.write("{0}({1})".format(
                track, str(cover["avg"])))
            if cover["avg"] > best:
                best = cover["avg"]
                best_track = track
                best_cover = cover
        out.write("\n")
        new_attrs = {}
        for key, value in tran.attributes.items():
            if ("high_coverage" not in key) and (
                    "low_coverage" not in key):
                new_attrs[key] = value
        new_attrs["best_avg_coverage"] = str(best_cover["avg"])
        attribute_string = ";".join(
            ["=".join(items) for items in new_attrs.items()])
        out_gff.write("\t".join([tran.info_without_attributes,
                                 attribute_string]) + "\n")


def gen_table_transcript(gff_folder, args_tran):
    '''generate the detailed table of transcripts'''
    libs, texs = read_libs(args_tran.libs, args_tran.merge_wigs)
    for gff in os.listdir(gff_folder):
        if os.path.isfile(os.path.join(gff_folder, gff)):
            wigs_f = read_wig(os.path.join(args_tran.wig_path, "_".join([
                gff.replace("_transcript.gff", ""),
                "forward.wig"])), "+", libs)
            wigs_r = read_wig(os.path.join(args_tran.wig_path, "_".join([
                gff.replace("_transcript.gff", ""),
                "reverse.wig"])), "-", libs)
            th = open(os.path.join(gff_folder, gff), "r")
            trans = []
            out = open(os.path.join(args_tran.out_folder, "tables",
                                    gff.replace(".gff", ".csv")), "w")
            out_gff = open(os.path.join(args_tran.out_folder,
                                        "tmp_gff"), "w")
            out_gff.write("##gff-version 3\n")
            out.write("\t".join(["Genome", "Name", "Start", "End",
                                 "Strand", "Detect_lib_type",
                                 "Associated_gene", "Associated_tss",
                                 "Associated_term",
                                 "Coverage_details"]) + "\n")
            gff_parser = Gff3Parser()
            for entry in gff_parser.entries(th):
                trans.append(entry)
            if args_tran.gffs is not None:
                gff_file = os.path.join(
                    args_tran.gffs, gff.replace("_transcript", ""))
                if not os.path.isfile(gff_file):
                    gff_file = None
            else:
                gff_file = None
            print_coverage(trans, out, out_gff, wigs_f, wigs_r, gff_file)
            out.close()
            out_gff.close()
            shutil.move(os.path.join(args_tran.out_folder, "tmp_gff"),
                        os.path.join(gff_folder, gff))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gen_table_tran.py
gen_table_tran.py
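A sketch of the windowing helper above: check_start_and_end() pads the transcript by two positions on each side and clips at the ends of the coverage vector. The values are invented.

from annogesiclib.gen_table_tran import check_start_and_end

covers = [0.0] * 20                         # hypothetical per-position coverages
print(check_start_and_end(3, 10, covers))   # (1, 12)
print(check_start_and_end(1, 19, covers))   # (0, 20), clipped at both ends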
import shutil
import math
import csv
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def read_file(seq_file, srna_table):
    seq = ""
    with open(seq_file) as fh:
        for line in fh:
            if not line.startswith(">"):
                line = line.strip()
                seq = seq + line
    tabs = []
    sh = open(srna_table, "r")
    for row in csv.reader(sh, delimiter='\t'):
        tabs.append({"info": row, "seq_id": row[0],
                     "start": int(row[2]), "end": int(row[3]),
                     "strand": row[4]})
    return seq, tabs


def get_table_entry(tabs, srna):
    for tab in tabs:
        if (srna.seq_id == tab["seq_id"]) and (
                srna.start == tab["start"]) and (
                srna.end == tab["end"]) and (
                srna.strand == tab["strand"]):
            return tab


def backward_t(seq, start, end, strand, mut_u):
    no_ut = 0
    ext = 0
    bord = end - start
    while 1:
        if strand == "+":
            nt = Helper().extract_gene(seq, end - ext, end - ext, strand)
        else:
            nt = Helper().extract_gene(seq, start + ext, start + ext,
                                       strand)
        if (nt == "U") or (nt == "T"):
            pass
        else:
            no_ut += 1
            if no_ut > mut_u:
                break
        ext += 1
        if ext >= bord:
            break
    return ext


def forward_t(seq, start, end, strand, mut_u):
    no_ut = 0
    ext = 0
    bord = end - start
    while 1:
        if strand == "+":
            nt = Helper().extract_gene(seq, end + ext, end + ext, strand)
        else:
            nt = Helper().extract_gene(seq, start - ext, start - ext,
                                       strand)
        if (nt == "U") or (nt == "T"):
            pass
        else:
            no_ut += 1
            if no_ut > mut_u:
                break
        ext += 1
        if ext >= bord:
            break
    return ext


def iterate_seq(seq_u, args_srna):
    pos = 0
    first = True
    nts = {"ut": 0, "no_ut": 0}
    while 1:
        if (len(seq_u) - pos) < args_srna.num_u:
            break
        for nt in reversed(seq_u[:(len(seq_u) - pos)]):
            if first:
                first = False
                if (nt != "U") and (nt != "T"):
                    break
            if (nt == "U") or (nt == "T"):
                nts["ut"] += 1
            else:
                nts["no_ut"] += 1
                if nts["no_ut"] > args_srna.mut_u:
                    break
        if nts["ut"] < args_srna.num_u:
            nts = {"ut": 0, "no_ut": 0}
            first = True
        else:
            break
        pos += 1
    return pos


def search_t(seq, start, end, strand, ext_b, ext_f, args_srna):
    if strand == "+":
        seq_end = end + args_srna.len_u + ext_f + 1
        if seq_end > len(seq):
            seq_end = len(seq)
        seq_u = Helper().extract_gene(
            seq, end - ext_b - 1, seq_end, strand)
    else:
        seq_start = start - args_srna.len_u - ext_f - 1
        if (seq_start) < 1:
            seq_start = 1
        seq_u = Helper().extract_gene(seq, seq_start,
                                      start + ext_b + 1, strand)
    pos = iterate_seq(seq_u, args_srna)
    if strand == "+":
        final_end = (seq_end - pos)
        if (final_end - end) <= 0:
            final_end = end
        elif (final_end - start) >= args_srna.max_len:
            diff = final_end - start - args_srna.max_len
            pos = iterate_seq(seq_u[:(final_end - diff)], args_srna)
            final_end = (final_end - diff - pos)
            if (final_end - end) <= 0:
                final_end = end
        final_start = start
    else:
        final_start = (seq_start + pos)
        if (start - final_start) <= 0:
            final_start = start
        elif (end - final_start) >= args_srna.max_len:
            diff = end - final_start - args_srna.max_len
            pos = iterate_seq(seq_u[(final_start + diff):], args_srna)
            final_start = (final_start + diff + pos)
            if (start - final_start) <= 0:
                final_start = start
        final_end = end
    return final_start, final_end


def check_term(srna, tab, seq, len_u, out, out_t):
    if "with_term" in srna.attributes.keys():
        feature = srna.attributes["with_term"].split(":")[0]
        info = srna.attributes["with_term"].split(":")[-1]
        start = int(info.split("-")[0])
        end = int(info.split("-")[-1].split("_")[0])
        strand = info.split("_")[-1]


def get_srna_poly_u(srna_file, seq_file, srna_table, args_srna):
    seq, tabs = read_file(seq_file, srna_table)
    out = open(srna_file + "_ext_polyu", "w")
    out_t = open(srna_table + "_ext_polyu", "w")
    gff_f = open(srna_file, "r")
    for entry in Gff3Parser().entries(gff_f):
        tab = get_table_entry(tabs, entry)
        ext_b = backward_t(seq, entry.start, entry.end, entry.strand,
                           args_srna.mut_u)
        ext_f = forward_t(seq, entry.start - args_srna.len_u,
                          entry.end + args_srna.len_u, entry.strand,
                          args_srna.mut_u)
        final_start, final_end = search_t(seq, entry.start, entry.end,
                                          entry.strand, ext_b, ext_f,
                                          args_srna)
        tab["info"][2] = str(final_start)
        tab["info"][3] = str(final_end)
        info_without_attributes = "\t".join([str(field) for field in [
            entry.seq_id, entry.source, entry.feature, str(final_start),
            str(final_end), entry.score, entry.strand, entry.phase]])
        out.write("\t".join([info_without_attributes,
                             entry.attribute_string]) + "\n")
        out_t.write("\t".join(tab["info"]) + "\n")
    gff_f.close()
    out.close()
    out_t.close()
    shutil.move(srna_file + "_ext_polyu", srna_file)
    shutil.move(srna_table + "_ext_polyu", srna_table)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/get_srna_poly_u.py
get_srna_poly_u.py
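A sketch of iterate_seq(), which reports how many nucleotides to trim from the 3' end of the scanned window so it ends in a U/T stretch of at least num_u with at most mut_u interruptions; the cutoffs and sequences are invented.

from types import SimpleNamespace
from annogesiclib.get_srna_poly_u import iterate_seq

args = SimpleNamespace(num_u=3, mut_u=1)    # stand-in for args_srna
print(iterate_seq("AGCTTT", args))  # 0: already ends in TTT
print(iterate_seq("AGTTTA", args))  # 1: drop the trailing A to end in TTT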
from annogesiclib.helper import Helper
from annogesiclib.gff3 import Gff3Parser


def get_feature(gene, file_type):
    if "Name" in gene.attributes.keys():
        feature = gene.attributes["Name"]
    elif "locus_tag" in gene.attributes.keys():
        feature = gene.attributes["locus_tag"]
    else:
        if file_type != "tran":
            file_type = gene.feature
        strand = Helper().get_strand_name(gene.strand)
        feature = "".join([file_type, ":", str(gene.start), "-",
                           str(gene.end), "_", strand])
    pos = str(gene.start) + "-" + str(gene.end)
    return feature, pos


def import_data(f_1, f_2, start, end, file_type):
    if f_1 != "terminal":
        feature_1, pos_1 = get_feature(f_1, file_type)
        strain = f_1.seq_id
    else:
        feature_1 = "NA"
        pos_1 = "NA"
    if f_2 != "terminal":
        feature_2, pos_2 = get_feature(f_2, file_type)
        strain = f_2.seq_id
    else:
        feature_2 = "NA"
        pos_2 = "NA"
    return {"strain": strain, "start": start, "end": end,
            "parent_p": feature_1, "parent_m": feature_2,
            "print": False, "p_pos": pos_1, "m_pos": pos_2}


def get_terminal(genes, inters, gene_len, type_, file_type):
    '''deal with the intergenic regions located at the two ends'''
    if type_ == "start":
        for gene in genes:
            if (gene.strand == "-"):
                inters.append(import_data(gene.strand, gene.seq_id,
                                          1, gene.start, file_type))
                break
    elif type_ == "end":
        for gene in reversed(genes):
            if (gene.strand == "+"):
                inters.append(import_data(gene.strand, gene.seq_id,
                                          gene.end, gene_len, file_type))
                break


def get_inter(features, seq, file_type):
    '''compare features and get the intergenic regions'''
    inters = []
    first = True
    pre_strain = ""
    pre_feature1 = None
    sort_features = sorted(features, key=lambda x: (x.seq_id, x.start,
                                                    x.end, x.strand))
    for feature1 in sort_features:
        if pre_strain != feature1.seq_id:
            if feature1.strand == "-":
                inters.append(import_data("terminal", feature1, 1,
                                          feature1.start, file_type))
            if not first:
                if pre_feature1.strand == "+":
                    inters.append(import_data(
                        pre_feature1, "terminal", pre_feature1.start,
                        len(seq[pre_feature1.seq_id]), file_type))
        if first:
            first = False
        for feature2 in sort_features:
            if (feature1.seq_id == feature2.seq_id) and (
                    feature1.end < feature2.end) and (
                    feature1.end >= feature2.start) and (
                    feature1.strand == feature2.strand):
                break
            elif (feature1.seq_id == feature2.seq_id) and (
                    feature1.end <= feature2.start) and (
                    feature1.strand == "+"):
                if feature1.strand == feature2.strand:
                    break
                else:
                    inters.append(import_data(feature1, feature2,
                                              feature1.end,
                                              feature2.start, file_type))
                    break
        pre_feature1 = feature1
        pre_strain = feature1.seq_id
    return inters


def import_merge(id_, strain, start, end, parent_p, parent_m,
                 p_pos, m_pos):
    return {"ID": "_".join(["inter_" + str(id_)]), "strain": strain,
            "start": start, "end": end, "parent_p": parent_p,
            "parent_m": parent_m, "print": False,
            "p_pos": p_pos, "m_pos": m_pos}


def get_overlap_inters(inter1, inter2, merges, id_):
    if (inter1["end"] < inter2["end"]) and (
            inter1["end"] > inter2["start"]) and (
            inter1["start"] <= inter2["start"]):
        merges.append(import_merge(id_, inter1["strain"],
                                   inter2["start"], inter1["end"],
                                   inter2["parent_p"],
                                   inter1["parent_m"],
                                   inter2["p_pos"], inter1["m_pos"]))
        inter1["print"] = True
        inter2["print"] = True
        id_ += 1
    elif (inter1["start"] > inter2["start"]) and (
            inter1["start"] < inter2["end"]) and (
            inter1["end"] >= inter2["end"]):
        merges.append(import_merge(id_, inter1["strain"],
                                   inter1["start"], inter2["end"],
                                   inter1["parent_p"],
                                   inter2["parent_m"],
                                   inter1["p_pos"], inter2["m_pos"]))
        inter1["print"] = True
        inter2["print"] = True
        id_ += 1
    elif (inter1["end"] >= inter2["end"]) and (
            inter1["start"] <= inter2["start"]):
        merges.append(import_merge(id_, inter2["strain"],
                                   inter2["start"], inter2["end"],
                                   inter2["parent_p"],
                                   inter2["parent_m"],
                                   inter2["p_pos"], inter2["m_pos"]))
        inter1["print"] = True
        inter2["print"] = True
        id_ += 1
    elif (inter1["end"] <= inter2["end"]) and (
            inter1["start"] >= inter2["start"]):
        merges.append(import_merge(id_, inter1["strain"],
                                   inter1["start"], inter1["end"],
                                   inter1["parent_p"],
                                   inter1["parent_m"],
                                   inter1["p_pos"], inter1["m_pos"]))
        inter1["print"] = True
        inter2["print"] = True
        id_ += 1
    return id_


def merge_inter(inters1, inters2):
    '''check and merge the overlapping intergenic regions'''
    merges = []
    id_ = 0
    for inter1 in inters1:
        for inter2 in inters2:
            if (inter1["strain"] == inter2["strain"]):
                id_ = get_overlap_inters(inter1, inter2, merges, id_)
        if not inter1["print"]:
            merges.append(import_merge(id_, inter1["strain"],
                                       inter1["start"], inter1["end"],
                                       inter1["parent_p"],
                                       inter1["parent_m"],
                                       inter1["p_pos"],
                                       inter1["m_pos"]))
            inter1["print"] = True
            id_ += 1
    for inter2 in inters2:
        if not inter2["print"]:
            merges.append(import_merge(id_, inter2["strain"],
                                       inter2["start"], inter2["end"],
                                       inter2["parent_p"],
                                       inter2["parent_m"],
                                       inter2["p_pos"],
                                       inter2["m_pos"]))
            inter2["print"] = True
            id_ += 1
    sort_merges = sorted(merges, key=lambda x: (x["strain"],
                                                x["start"], x["end"]))
    return sort_merges


def detect_confliction(gc, genes, seq):
    '''check whether the intergenic region (extended for folding a
    proper secondary structure) overlaps a gene or not'''
    corr_merges = []
    overlap = False
    tmp_start = gc["start"]
    tmp_end = gc["end"]
    if "tran" in gc["parent_p"]:
        if gc["start"] > 80:
            for gene in genes:
                if ((gc["start"] - 80) > gene.start) and (
                        (gc["start"] - 80) < gene.end) and (
                        gene.strand == "+"):
                    tmp_start = gene.end - 30
                    overlap = True
            if not overlap:
                tmp_start = gc["start"] - 80
        else:
            tmp_start = 1
    else:
        if gc["start"] > 30:
            tmp_start = gc["start"] - 30
        else:
            tmp_start = 1
    corr_merges.append(import_merge(gc["ID"], gc["strain"], tmp_start,
                                    gc["end"], gc["parent_p"],
                                    gc["parent_m"], gc["p_pos"],
                                    gc["m_pos"]))
    corr_merges[-1]["strand"] = "+"
    if "tran" in gc["parent_m"]:
        if gc["end"] < (len(seq[gc["strain"]]) - 80):
            for gene in genes:
                if ((gc["end"] + 80) > gene.start) and (
                        (gc["end"] + 80) < gene.end) and (
                        gene.strand == "-"):
                    tmp_end = gene.start + 30
                    overlap = True
            if not overlap:
                tmp_end = gc["end"] + 80
        else:
            tmp_end = len(seq[gc["strain"]])
    else:
        if gc["end"] < len(seq[gc["strain"]]) - 30:
            tmp_end = gc["end"] + 30
        else:
            tmp_end = len(seq[gc["strain"]])
    corr_merges.append(import_merge(gc["ID"], gc["strain"], gc["start"],
                                    tmp_end, gc["parent_p"],
                                    gc["parent_m"], gc["p_pos"],
                                    gc["m_pos"]))
    corr_merges[-1]["strand"] = "-"
    return corr_merges


def read_file(seq_file, tran_file, gff_file):
    seq = {}
    tas = []
    genes = []
    merges = []
    with open(seq_file, "r") as f_h:
        for line in f_h:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                seq[strain] = ""
            else:
                seq[strain] = seq[strain] + line
    ta_fh = open(tran_file, "r")
    for entry in Gff3Parser().entries(ta_fh):
        tas.append(entry)
        merges.append(entry)
    for entry in Gff3Parser().entries(open(gff_file)):
        if (entry.feature == "gene"):
            genes.append(entry)
            merges.append(entry)
    tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    return seq, tas, merges, genes


def get_fasta(seq, merge, num, strand, args_term, out, out_i):
    if (merge["end"] - merge["start"]) > args_term.window:
        detect_out = False
        for start in range(merge["start"], merge["end"] + 1,
                           args_term.shift):
            if (merge["end"] - (start + args_term.window)) < \
                    args_term.shift:
                end = merge["end"]
                detect_out = True
            else:
                end = start + args_term.window
            inter_seq = Helper().extract_gene(
                seq[merge["strain"]], start, end, strand)
            out_i.write(">" + "|".join([
                "inter_" + str(num), str(start), str(end),
                merge["strain"], merge["parent_p"], merge["parent_m"],
                merge["p_pos"], merge["m_pos"], strand]) + "\n")
            out.write(">inter_" + str(num) + "\n")
            out.write(inter_seq + "\n")
            num += 1
            if detect_out:
                break
    else:
        inter_seq = Helper().extract_gene(
            seq[merge["strain"]], merge["start"], merge["end"], strand)
        out_i.write(">" + "|".join([
            "inter_" + str(num), str(merge["start"]), str(merge["end"]),
            merge["strain"], merge["parent_p"], merge["parent_m"],
            merge["p_pos"], merge["m_pos"], strand]) + "\n")
        out.write(">inter_" + str(num) + "\n")
        out.write(inter_seq + "\n")
        num += 1
    return num


def mod_inter_tas_gene(inter_tas, genes):
    for inter in inter_tas:
        for gene in genes:
            if (inter["strain"] == gene.seq_id):
                if (gene.end > inter["start"]) and (
                        gene.end < inter["end"]) and (
                        gene.strand == "+"):
                    inter["start"] = gene.end
                    inter["parent_p"], inter["p_pos"] = get_feature(
                        gene, "gene")
                elif (gene.start > inter["start"]) and (
                        gene.start < inter["end"]) and (
                        gene.strand == "-"):
                    inter["end"] = gene.start
                    inter["parent_m"], inter["m_pos"] = get_feature(
                        gene, "gene")
                    break


def intergenic_seq(seq_file, tran_file, gff_file, out_file,
                   index_file, args_term):
    '''get the intergenic sequences'''
    out = open(out_file, "w")
    out_i = open(index_file, "w")
    seq, tas, merges, genes = read_file(seq_file, tran_file, gff_file)
    inter_tas = get_inter(tas, seq, "tran")
    mod_inter_tas_gene(inter_tas, genes)
    inter_genes = get_inter(genes, seq, "gene")
    merges = merge_inter(inter_tas, inter_genes)
    num = 0
    for tmp_merge in merges:
        corr_merges = detect_confliction(tmp_merge, genes, seq)
        for merge in corr_merges:
            if merge["start"] < merge["end"]:
                if merge["strand"] == "+":
                    num = get_fasta(seq, merge, num, "+", args_term,
                                    out, out_i)
                else:
                    num = get_fasta(seq, merge, num, "-", args_term,
                                    out, out_i)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/get_inter_seq.py
get_inter_seq.py
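A sketch of get_overlap_inters() on two invented intergenic records: when the inputs overlap, the merge keeps the region that is intergenic in both (here 150-250).

from annogesiclib.get_inter_seq import get_overlap_inters

inter1 = {"strain": "chr", "start": 100, "end": 250,
          "parent_p": "gene:1-99_+", "parent_m": "gene:251-400_-",
          "p_pos": "1-99", "m_pos": "251-400", "print": False}
inter2 = dict(inter1, start=150, end=300)   # hypothetical second record
merges = []
get_overlap_inters(inter1, inter2, merges, 0)
print(merges[0]["start"], merges[0]["end"])  # 150 250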
import os
import csv
from Bio import SeqIO
from collections import defaultdict
from annogesiclib.gff3 import Gff3Parser, Gff3Entry
from annogesiclib.TSSpredator_parser import TSSPredatorReader
from annogesiclib.helper import Helper


class Converter(object):
    '''Converting from one format to another format'''

    def __init__(self):
        self.gff3parser = Gff3Parser()
        self.tssparser = TSSPredatorReader()

    def _check_locus_tag(self, entry, genes):
        gene_tag = "-"
        locus_tag = "-"
        if "locus_tag" in entry.attributes.keys():
            locus_tag = entry.attributes["locus_tag"]
        elif "Parent" in entry.attributes.keys():
            for gene in genes:
                if (gene.attributes["ID"] in
                        entry.attributes["Parent"].split(",")):
                    if "gene" in gene.attributes.keys():
                        gene_tag = gene.attributes["gene"]
                    if "locus_tag" in gene.attributes.keys():
                        locus_tag = gene.attributes["locus_tag"]
        if locus_tag == "-":
            locus_tag = "".join([
                entry.feature, ":", str(entry.start), "-",
                str(entry.end), "_", entry.strand])
        return locus_tag, gene_tag

    def _print_rntptt_file(self, out, entrys, genes):
        '''output to rnt and ptt file'''
        for entry in entrys:
            location = "..".join([str(entry.start), str(entry.end)])
            length = str(entry.end - entry.start + 1)
            if entry.feature == "CDS":
                if "protein_id" in entry.attributes.keys():
                    pid = entry.attributes["protein_id"]
                else:
                    pid = "-"
                gene_tag, locus_tag = self._check_locus_tag(entry, genes)
            else:
                pid = "-"
                gene_tag = "-"
                gene_tag, locus_tag = self._check_locus_tag(entry, genes)
            if "product" in entry.attributes.keys():
                product = entry.attributes["product"]
            else:
                product = "-"
            out.write("\t".join([location, entry.strand, length,
                                 pid, gene_tag, locus_tag, "-", "-",
                                 product]) + "\n")

    def _print_rntptt_title(self, out, num, seq_id, length):
        '''print the title of the rnt and ptt file'''
        out.write(seq_id + " - 1.." + length + "\n")
        out.write(num + " proteins\n")
        out.write("\t".join(["Location", "Strand", "Length", "PID",
                             "Gene", "Synonym", "Code", "COG",
                             "Product"]) + "\n")

    def _read_file(self, gff_file, fasta_file, rnas, cdss, genes, seq_id):
        num_cds = 0
        num_rna = 0
        seq = ""
        g_f = open(gff_file, "r")
        for entry in self.gff3parser.entries(g_f):
            if entry.seq_id == seq_id:
                if (entry.feature == "rRNA") or (entry.feature == "tRNA"):
                    num_rna += 1
                    rnas.append(entry)
                elif entry.feature == "CDS":
                    num_cds += 1
                    cdss.append(entry)
                elif entry.feature == "gene":
                    genes.append(entry)
        g_f.close()
        if fasta_file == "0":
            seq = "-1"
        else:
            detect = False
            with open(fasta_file, "r") as f_f:
                for line in f_f:
                    line = line.strip()
                    if len(line) != 0:
                        if line.startswith(">"):
                            if line[1:] == seq_id:
                                detect = True
                            else:
                                detect = False
                        else:
                            if detect:
                                seq = seq + line
        return (num_cds, num_rna, seq)

    def _srna2rntptt(self, srna_input_file, srna_output_file,
                     srnas, length):
        '''convert the sRNA gff file to rnt file'''
        num_srna = 0
        r_s = open(srna_input_file, "r")
        for entry in Gff3Parser().entries(r_s):
            num_srna += 1
            srnas.append(entry)
        srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start,
                                             k.end, k.strand))
        r_s.close()
        out_s = open(srna_output_file, "w")
        self._print_rntptt_title(out_s, str(num_srna),
                                 srnas[0].seq_id, str(length))
        num_srna = 0
        for srna in srnas:
            num_srna += 1
            name = '%0*d' % (5, num_srna)
            gene_tag = "-"
            locus_tag = "ncRNA_" + name
            pid = "ncRNA_" + name
            product = "sRNA"
            location = "..".join([str(srna.start), str(srna.end)])
            length = str(srna.end - srna.start + 1)
            out_s.write("\t".join([location, srna.strand, length,
                                   pid, gene_tag, locus_tag, "-", "-",
                                   product]) + "\n")
        out_s.close()

    def _deal_embl_join(self, info):
        '''deal with the embl file which contains join'''
        info = info.replace("(", "")
        info = info.replace(")", "")
        info = info.replace("join", "")
        joins = info.split(",")
        return joins

    def _multi_embl_pos(self, row):
        '''deal with the feature which has multiple positions'''
        poss = []
        if row[21:31] == "complement":
            comple = row[32:-1]
            if comple.find("join") != -1:
                joins = self._deal_embl_join(comple)
                for join in joins:
                    pos = join.split("..")
                    if len(pos) < 2:
                        return "Wrong"
                    poss.append({"start": pos[0], "end": pos[1]})
            else:
                pos = comple.split("..")
                if len(pos) < 2:
                    return "Wrong"
                poss.append({"start": pos[0], "end": pos[1]})
            strand = "-"
        else:
            if row[21:].find("join") != -1:
                joins = self._deal_embl_join(row[21:])
                for join in joins:
                    pos = join.split("..")
                    if len(pos) < 2:
                        return "Wrong"
                    poss.append({"start": pos[0], "end": pos[1]})
            else:
                pos = row[21:].split("..")
                if len(pos) < 2:
                    return "Wrong"
                poss.append({"start": pos[0], "end": pos[1]})
            strand = "+"
        source = row[5:21].rstrip()
        return {"pos": poss, "strand": strand, "source": source}

    def _parser_embl_data(self, embl_file, out):
        '''Parser of embl file for converting to other formats'''
        first = True
        line = ""
        note_name = ""
        info = "Wrong"
        with open(embl_file, "r") as f_h:
            for row in f_h:
                row = row.strip()
                if row[0:2] == "SQ":
                    break
                if row[0:2] == "ID":
                    name = row.split(";")
                    name[0] = name[0].replace("ID", "")
                    if "SV" in name[1]:
                        version = name[1].split(" ")[-1]
                        id_name = ".".join([name[0].strip(),
                                            version.strip()])
                    else:
                        id_name = name[0].strip()
                if (row[0:2] == "FT"):
                    if row[5] != " ":
                        note_name = row[5:9]
                    if row[5:11] == "source":
                        info = self._multi_embl_pos(row)
                    if (note_name != "misc") and (row[5] == " ") and (
                            row[21] == "/"):
                        if first:
                            first = False
                        else:
                            line = line + ";"
                        data = row[22:].replace(";", ",")
                        data = data.split("=")
                        try:
                            note = data[1].replace("\"", "")
                            line = line + data[0] + "=" + note
                        except:
                            line = line + data[0] + "=" + "True"
                    if (note_name != "misc") and (row[5] == " ") and (
                            row[21] != "/"):
                        note = row[21:].replace("\"", "")
                        note = note.replace(";", ",")
                        line = line + " " + note
                    if (note_name != "misc") and (row[5] != " ") and (
                            row[5:11] != "source"):
                        first = True
                        if info != "Wrong":
                            for pos in info["pos"]:
                                out.write(("{0}\tRefseq\t{1}\t{2}\t{3}"
                                           "\t.\t{4}\t.\t{5}\n").format(
                                               id_name, info["source"],
                                               pos["start"], pos["end"],
                                               info["strand"], line))
                        if (row[5:8] != "CDS") and (row[5:9] != "misc"):
                            info = self._multi_embl_pos(row)
                        elif (row[5:8] == "CDS"):
                            info = self._multi_embl_pos(row)
                        line = ""
        return (id_name, info, line)

    def _assign_tss_type(self, tss, utr_pri, utr_sec):
        '''Assigning the TSS types'''
        if tss.is_primary:
            tss_type = "Primary"
            utr_pri.append(int(tss.utr_length))
        elif tss.is_secondary:
            tss_type = "Secondary"
            utr_sec.append(int(tss.utr_length))
        elif tss.is_internal:
            tss_type = "Internal"
        elif tss.is_antisense:
            tss_type = "Antisense"
        else:
            tss_type = "Orphan"
        return tss_type

    def _multi_tss_class(self, tss, tss_index, tss_features, nums, utrs):
        '''deal with the TSS which has multiple TSS types'''
        tss_type = self._assign_tss_type(tss, utrs["pri"], utrs["sec"])
        if (tss_type not in tss_features["tss_types"]) or (
                tss.locus_tag not in tss_features["locus_tags"]):
            if (tss_type not in tss_features["tss_types"]):
                tss_index[tss_type] += 1
                nums["tss"] += 1
            if (nums["class"] == 1):
                tss_features["tss_types"].append(tss_type)
                tss_features["utr_lengths"].append(
                    tss_type + "_" + tss.utr_length)
                tss_features["locus_tags"].append(tss.locus_tag)
            else:
                if tss_type not in tss_features["tss_types"]:
                    tss_features["tss_types"].append(tss_type)
                    tss_features["utr_lengths"].append(
                        tss_type + "_" + tss.utr_length)
                    tss_features["locus_tags"].append(tss.locus_tag)
        nums["class"] += 1

    def _uni_tss_class(self, tss, utrs, tss_index, tss_features, nums):
        '''Handle a TSS which has only one type'''
        tss_type = self._assign_tss_type(tss, utrs["pri"], utrs["sec"])
        tss_index[tss_type] += 1
        tss_features["tss_types"].append(tss_type)
        tss_features["utr_lengths"].append(tss_type+"_"+tss.utr_length)
        tss_features["locus_tags"].append(tss.locus_tag)
        nums["tss"] += 1

    def _print_tssfile(self, nums, tss_features, tss, tss_pro,
                       strain, method, out, tss_libs):
        '''print the gff file of TSSs'''
        tss_merge_type = ",".join(tss_features["tss_types"])
        utr_length = ",".join(tss_features["utr_lengths"])
        merge_locus_tag = ",".join(tss_features["locus_tags"])
        libs = ",".join(tss_libs)
        strand = Helper().get_strand_name(tss.super_strand)
        attribute_string = ";".join(
            ["=".join(items) for items in (
                ["Name", "".join([tss_pro, ":",
                                  str(tss.super_pos), "_", strand])],
                ["ID", strain + "_" + tss_pro.lower() +
                 str(nums["tss_uni"])],
                ["type", tss_merge_type],
                ["utr_length", str(utr_length)],
                ["associated_gene", merge_locus_tag],
                ["libs", libs], ["method", "TSSpredator"])])
        out.write("\t".join([strain, method, tss_pro,
                             str(tss.super_pos), str(tss.super_pos),
                             ".", tss.super_strand, ".",
                             attribute_string]) + "\n")

    def convert_gff2rntptt(self, gff_file, seq_id, fasta_file, ptt_file,
                           rnt_file, srna_input_file, srna_output_file):
        '''Convert gff format to rnt and ptt format'''
        genes = []
        rnas = []
        cdss = []
        srnas = []
        datas = self._read_file(gff_file, fasta_file, rnas, cdss,
                                genes, seq_id)
        rnas = sorted(rnas, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
        cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
        genes = sorted(genes, key=lambda k: (k.seq_id, k.start,
                                             k.end, k.strand))
        num_cds = datas[0]
        num_rna = datas[1]
        seq = datas[2]
        out_p = open(ptt_file, "w")
        out_r = open(rnt_file, "w")
        if len(cdss) != 0:
            self._print_rntptt_title(out_p, str(num_cds),
                                     cdss[0].seq_id, str(len(seq)))
            self._print_rntptt_file(out_p, cdss, genes)
        if len(rnas) != 0:
            self._print_rntptt_title(out_r, str(num_rna),
                                     rnas[0].seq_id, str(len(seq)))
            self._print_rntptt_file(out_r, rnas, genes)
        out_p.close()
        out_r.close()
        if (srna_input_file is not None) and \
                (srna_output_file is not None):
            self._srna2rntptt(srna_input_file, srna_output_file,
                              srnas, str(len(seq)))
        elif (srna_input_file is None) and \
                (srna_output_file is None):
            pass
        else:
            print("Error: The sRNA input gff file or the name of the "
                  "sRNA output rnt file is missing\n")

    def convert_embl2gff(self, embl_file, gff_file):
        '''Convert embl format to gff format'''
        info = "Wrong"
        out = open(gff_file, "w")
        out.write("##gff-version 3\n")
        datas = self._parser_embl_data(embl_file, out)
        id_name = datas[0]
        info = datas[1]
        line = datas[2]
        if info != "Wrong":
            for pos in info["pos"]:
                out.write(("{0}\tRefseq\t{1}\t{2}\t{3}"
                           "\t.\t{4}\t.\t{5}\n").format(
                               id_name, info["source"], pos["start"],
                               pos["end"], info["strand"], line))
        out.close()

    def _get_libs(self, tss_file):
        '''Get the libraries which can detect this specific TSS'''
        tss_libs = {}
        tss_fh = open(tss_file, "r")
        for tss in self.tssparser.entries(tss_fh):
            key = "_".join([str(tss.super_pos), tss.super_strand])
            if key not in tss_libs.keys():
                tss_libs[key] = []
            if (tss.is_detected) and (
                    tss.genome not in tss_libs[key]):
                tss_libs[key].append(tss.genome)
        tss_fh.close()
        return tss_libs

    def convert_mastertable2gff(self, tss_file, method, tss_pro,
                                strain, out_gff):
        '''Convert MasterTable to gff format'''
        temps = {"tss": 0, "strand": "#"}
        nums = {"tss": 0, "tss_uni": 0, "class": 1}
        check_print = False
        utrs = {"total": [], "pri": [], "sec": []}
        tss_features = {"tss_types": [], "locus_tags": [],
                        "utr_lengths": []}
        tss_index = defaultdict(lambda: 0)
        tss_fh = open(tss_file, "r")
        out = open(out_gff, "w")
        out.write("##gff-version 3\n")
        tss_libs = self._get_libs(tss_file)
        detect_run = False
        for tss in self.tssparser.entries(tss_fh):
            detect_run = True
            key = "_".join([str(tss.super_pos), tss.super_strand])
            if ((tss.super_pos == temps["tss"])) and (
                    temps["strand"] == tss.super_strand) and (
                    tss.class_count == 1):
                pass
            else:
                if ((tss.super_pos != temps["tss"])) or (
                        temps["strand"] != tss.super_strand):
                    check_print = False
                    nums["class"] = 1
                    if tss.utr_length != "NA":
                        utrs["total"].append(int(tss.utr_length))
                temps["tss"] = tss.super_pos
                temps["strand"] = tss.super_strand
                if (tss.class_count != 1) and (
                        nums["class"] <= tss.class_count):
                    self._multi_tss_class(tss, tss_index, tss_features,
                                          nums, utrs)
                if (tss.class_count == 1) or (
                        nums["class"] > tss.class_count):
                    if (tss.class_count == 1):
                        self._uni_tss_class(tss, utrs, tss_index,
                                            tss_features, nums)
                    if (check_print is False):
                        self._print_tssfile(nums, tss_features, tss,
                                            tss_pro, strain, method,
                                            out, tss_libs[key])
                        check_print = True
                        nums["tss_uni"] += 1
                    tss_features = {"tss_types": [], "locus_tags": [],
                                    "utr_lengths": []}
        if (check_print is False) and detect_run:
            self._print_tssfile(nums, tss_features, tss, tss_pro,
                                strain, method, out, tss_libs[key])
        tss_fh.close()
        out.close()

    def convert_transtermhp2gff(self, transterm_file, gff_file):
        '''Convert the output of TransTermHP to gff format'''
        out = open(gff_file, "w")
        out.write("##gff-version 3\n")
        terms = []
        with open(transterm_file) as t_h:
            for line in t_h:
                row = line[:-1].split()
                if len(row) < 10:
                    continue
                if len(row) == 14:
                    start = row[1]
                    end = row[3]
                    strand = row[4]
                    gene = row[0]
                else:
                    start = row[0]
                    end = row[2]
                    strand = row[3]
                    gene = "missing"
                entry = Gff3Entry({
                    "seq_id": transterm_file.split("/")[-1][:-1 * len(
                        "_best_terminator_after_gene.bag")],
                    "source": "TransTermHP",
                    "feature": "terminator",
                    "start": start,
                    "end": end,
                    "score": ".",
                    "strand": strand,
                    "phase": ".",
                    "attributes": "associated_gene=%s" % (gene)
                })
                terms.append(entry)
        sort_terms = sorted(terms, key=lambda k: (k.seq_id, k.start,
                                                  k.end, k.strand))
        num = 0
        for term in sort_terms:
            out.write("\t".join([str(field) for field in [
                term.seq_id, term.source, term.feature, term.start,
                term.end, term.score, term.strand, term.phase,
                term.attribute_string]]))
            name = '%0*d' % (5, num)
            out.write(";ID={0}_terminator{1};Name=terminator_{2}\n".format(
                term.seq_id, num, name))
            num += 1
        out.close()

    def convert_circ2gff(self, circ_file, args_circ, out_all,
                         out_filter):
        '''Convert the circRNA output of segemehl to gff format'''
        circs = []
        out_a = open(out_all, "w")
        out_f = open(out_filter, "w")
        out_a.write("##gff-version 3\n")
        out_f.write("##gff-version 3\n")
        f_h = open(circ_file, "r")
        for row in csv.reader(f_h, delimiter='\t'):
            if row[0] != "Genome":
                circs.append({"strain": row[0], "strand": row[1],
                              "start": int(row[2]), "end": int(row[3]),
                              "conflict": row[4], "depth": int(row[5]),
                              "per_start": float(row[6]),
                              "per_end": float(row[7])})
        circs = sorted(circs, key=lambda k: (k["strain"], k["start"],
                                             k["end"], k["strand"]))
        id_ = 0
        for circ in circs:
            attribute_string = ";".join(["=".join(items) for items in [
                ("ID", circ["strain"] + "_circrna" + str(id_)),
                ("name", "circRNA_" + str(id_)),
                ("support_reads", str(circ["depth"])),
                ("read_at_start", str(circ["per_start"])),
                ("read_at_end", str(circ["per_end"])),
                ("conflict", circ["conflict"]),
                ("method", "segemehl")]])
            out_a.write("\t".join([str(field) for field in [
                circ["strain"], "ANNOgesic", "circRNA",
                str(circ["start"]), str(circ["end"]), ".",
                circ["strand"], ".", attribute_string]]) + "\n")
            if (circ["depth"] >= args_circ.support) and (
                    circ["conflict"] == "NA") and (
                    circ["per_start"] >= args_circ.start_ratio) and (
                    circ["per_end"] >= args_circ.end_ratio):
                out_f.write("\t".join([str(field) for field in [
                    circ["strain"], "ANNOgesic", "circRNA",
                    str(circ["start"]), str(circ["end"]), ".",
                    circ["strand"], ".", attribute_string]]) + "\n")
            id_ += 1
        f_h.close()
        out_a.close()
        out_f.close()

    def convert_gbk2embl(self, input_folder):
        """Convert gbk to embl."""
        print("Converting gbk files to embl files")
        for annotation_file in os.listdir(input_folder):
            if annotation_file[-3:] == "gbk":
                gbk_file = annotation_file
                embl_file = gbk_file[0:-3] + "embl"
                gbk_entry = SeqIO.parse(os.path.join(
                    input_folder, gbk_file), "genbank")
                count = SeqIO.write(gbk_entry, os.path.join(
                    input_folder, embl_file), "embl")
                print("Converted %i records" % count)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/converter.py
converter.py
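A minimal usage sketch for the converters above, assuming the class that wraps these methods is annogesiclib.converter.Converter (its definition sits in the part of the file not shown here); all file names are hypothetical placeholders.

from annogesiclib.converter import Converter

converter = Converter()
# GFF3 -> .ptt/.rnt tables; no sRNA files are passed in this example,
# so both sRNA arguments are None
converter.convert_gff2rntptt(
    "NC_000915.gff", "NC_000915.1", "NC_000915.fa",
    "NC_000915.ptt", "NC_000915.rnt", None, None)
# EMBL -> GFF3
converter.convert_embl2gff("NC_000915.embl", "NC_000915_embl.gff")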
import os
import math
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.parser_wig import WigParser
from annogesiclib.helper import Helper


def get_primary_locus_tag(tss):
    tsss = []
    tss_types = tss.attributes["type"].split(",")
    tss_locus_tags = tss.attributes["associated_gene"].split(",")
    tss_utr_lengths = tss.attributes["utr_length"].split(",")
    index = 0
    for tss_type in tss_types:
        if "Primary" in tss_type:
            tsss.append({"locus": tss_locus_tags[index],
                         "utr": int(tss_utr_lengths[index].split("_")[1]),
                         "type": tss_type})
        index += 1
    return tsss


def detect_coverage(wigs, tss, ref):
    tss_cover = 0
    ref_cover = 0
    for strain, tracks in wigs.items():
        if strain == tss.seq_id:
            for wig in tracks.values():
                if ((tss.start + 1) <= len(wig)) and (
                        (ref.start + 1) <= len(wig)):
                    if tss.strand == "+":
                        diff_t = (wig[tss.start - 1]["coverage"] -
                                  wig[tss.start - 2]["coverage"])
                        diff_r = (wig[ref.start - 1]["coverage"] -
                                  wig[ref.start - 2]["coverage"])
                    else:
                        diff_t = (wig[tss.start - 1]["coverage"] -
                                  wig[tss.start]["coverage"])
                        diff_r = (wig[ref.start - 1]["coverage"] -
                                  wig[ref.start]["coverage"])
                    tss_cover = tss_cover + diff_t
                    ref_cover = ref_cover + diff_r
    return tss_cover, ref_cover


def fix_attributes(tss, tss_entry):
    '''change the primary TSS to a secondary TSS'''
    index = 0
    genes = tss.attributes["associated_gene"].split(",")
    utrs = tss.attributes["utr_length"].split(",")
    types = tss.attributes["type"].split(",")
    for gene in genes:
        if gene == tss_entry["locus"]:
            utrs[index] = utrs[index].replace("Primary", "Secondary")
            types[index] = types[index].replace("Primary", "Secondary")
        index += 1
    tss.attributes["utr_length"] = ",".join(utrs)
    tss.attributes["type"] = ",".join(types)


def del_repeat(tsss):
    '''delete the repeated TSSs'''
    for tss in tsss:
        types = tss.attributes["type"].split(",")
        utrs = tss.attributes["utr_length"].split(",")
        genes = tss.attributes["associated_gene"].split(",")
        detect = {"pri": False, "sec": False}
        index = 0
        finals = {"types": [], "utrs": [], "genes": []}
        for type_ in types:
            if (type_ == "Primary") and (detect["pri"] is False):
                detect["pri"] = True
                pri_utr = int(utrs[index].split("_")[1])
                real_index = index
            elif (type_ == "Primary") and (detect["pri"] is True):
                compare_utr = int(utrs[index].split("_")[1])
                if compare_utr < pri_utr:
                    pri_utr = compare_utr
                    real_index = index
            elif (type_ == "Secondary") and (detect["sec"] is False):
                detect["sec"] = True
                sec_utr = int(utrs[index].split("_")[1])
                real_index2 = index
            elif (type_ == "Secondary") and (detect["sec"] is True):
                compare_utr = int(utrs[index].split("_")[1])
                if compare_utr < sec_utr:
                    sec_utr = compare_utr
                    real_index2 = index
            elif (type_ == "Antisense") or \
                 (type_ == "Internal") or \
                 (type_ == "Orphan"):
                finals["types"].append(types[index])
                finals["utrs"].append(utrs[index])
                finals["genes"].append(genes[index])
            index += 1
        if detect["pri"] is True:
            finals["types"].append(types[real_index])
            finals["utrs"].append(utrs[real_index])
            finals["genes"].append(genes[real_index])
        else:
            if detect["sec"] is True:
                finals["types"].append(types[real_index2])
                finals["utrs"].append(utrs[real_index2])
                finals["genes"].append(genes[real_index2])
        tss.attributes["type"] = ",".join(finals["types"])
        tss.attributes["utr_length"] = ",".join(finals["utrs"])
        tss.attributes["associated_gene"] = ",".join(finals["genes"])


def fix_primary_type(tsss, wigs_f, wigs_r):
    '''if one gene is associated with multiple primary TSSs,
    compare their coverage and re-assign the lowly expressed TSS
    as a secondary TSS'''
    for tss in tsss:
        if ("Primary" in tss.attributes["type"]):
            tss_entrys = get_primary_locus_tag(tss)
            for ref in tsss:
                if (ref.seq_id == tss.seq_id) and \
                   (ref.strand == tss.strand) and \
                   (ref.start == tss.start):
                    pass
                else:
                    if ("Primary" in ref.attributes["type"]):
                        ref_entrys = get_primary_locus_tag(ref)
                        for tss_entry in tss_entrys:
                            for ref_entry in ref_entrys:
                                if (tss_entry["locus"] ==
                                        ref_entry["locus"]) and (
                                        tss_entry["type"] ==
                                        "Primary") and (
                                        ref_entry["type"] ==
                                        "Primary") and (
                                        tss.seq_id == ref.seq_id):
                                    if tss.strand == "+":
                                        tss_cover, ref_cover = \
                                            detect_coverage(
                                                wigs_f, tss, ref)
                                    else:
                                        tss_cover, ref_cover = \
                                            detect_coverage(
                                                wigs_r, tss, ref)
                                    if tss_cover < ref_cover:
                                        fix_attributes(tss, tss_entry)
                                    elif tss_cover > ref_cover:
                                        fix_attributes(ref, ref_entry)
                                    elif tss_cover == ref_cover:
                                        if (tss_entry["utr"] <
                                                ref_entry["utr"]):
                                            fix_attributes(ref,
                                                           ref_entry)
                                        elif (tss_entry["utr"] >
                                                ref_entry["utr"]):
                                            fix_attributes(tss,
                                                           tss_entry)
    del_repeat(tsss)
    return tsss


def define_attributes(tss):
    string = []
    for key, value in tss.attributes.items():
        if key != "print":
            if key != "ID":
                string.append("=".join([key, value]))
            elif key == "Name":
                string.append("=".join([key,
                                        str(tss.start) + tss.strand]))
    return ";".join(string)


def remove_primary(tss, tss_entry):
    final_types = []
    final_utrs = []
    final_genes = []
    tss_dict = tss_entry[1]
    types = tss_dict["type"].split(",")
    utrs = tss_dict["utr_length"].split(",")
    genes = tss_dict["associated_gene"].split(",")
    index = 0
    for type_ in types:
        if type_ != "Primary":
            final_types.append(type_)
            final_utrs.append(utrs[index])
            final_genes.append(genes[index])
        index += 1
    tss_dict = {"Name": "TSS_" + str(tss.start) + tss.strand,
                "type": ",".join(final_types),
                "utr_length": ",".join(final_utrs),
                "associated_gene": ",".join(final_genes)}
    tss_string = ";".join(["=".join(["utr_length",
                                     tss_dict["utr_length"]]),
                           "=".join(["associated_gene",
                                     tss_dict["associated_gene"]]),
                           "=".join(["type", tss_dict["type"]]),
                           "=".join(["Name", tss_dict["Name"]])])
    return [tss_string, tss_dict]


def import_to_tss(tss_type, cds_pos, tss, locus_tag, tss_entry):
    if cds_pos == "NA":
        utr = "_".join([tss_type, "NA"])
    else:
        utr = "_".join([tss_type,
                        str(int(math.fabs(cds_pos - tss.start)))])
    if len(tss_entry) != 0:
        tss_dict = tss_entry[1]
        tss_dict_types = tss_dict["type"].split(",")
        tss_dict_utrs = tss_dict["utr_length"].split(",")
        tss_dict_tags = tss_dict["associated_gene"].split(",")
        if tss_type == "Primary" and ("Primary" in tss_dict["type"]):
            index = 0
            for tss_dict_type in tss_dict_types:
                if "Primary" in tss_dict_type:
                    utr_length = tss_dict_utrs[index].split("_")
                    if math.fabs(cds_pos - tss.start) < int(
                            utr_length[1]):
                        tss_dict_utrs[index] = utr
                        tss_dict_tags[index] = locus_tag
                index += 1
        else:
            tss_dict_types.append(tss_type)
            tss_dict_utrs.append(utr)
            tss_dict_tags.append(locus_tag)
        tss_dict = {"Name": "TSS_" + str(tss.start) + tss.strand,
                    "type": ",".join(tss_dict_types),
                    "utr_length": ",".join(tss_dict_utrs),
                    "associated_gene": ",".join(tss_dict_tags)}
    else:
        tss_dict = {"Name": "TSS_" + str(tss.start) + tss.strand,
                    "type": tss_type,
                    "utr_length": utr,
                    "associated_gene": locus_tag}
    tss_string = ";".join(["=".join(["utr_length",
                                     tss_dict["utr_length"]]),
                           "=".join(["associated_gene",
                                     tss_dict["associated_gene"]]),
                           "=".join(["type", tss_dict["type"]]),
                           "=".join(["Name", tss_dict["Name"]])])
    return (tss_string, tss_dict)


def same_strand_tss_gene(gene, tss, anti_ends, gene_ends, checks,
                         tss_entry):
    '''check a TSS and a gene which are on the same strand'''
    if is_primary(gene.start, gene.end, tss.start, tss.strand):
        locus_tag = gene.attributes["locus_tag"]
        if tss.strand == "+":
            if ((anti_ends["reverse"] != -1) and (
                    anti_ends["reverse"] - gene.start) > 0) or (
                    anti_ends["reverse"] == -1):
                tss_entry = import_to_tss("Primary", gene.start, tss,
                                          locus_tag, tss_entry)
                checks["orphan"] = False
                gene_ends["forward"] = gene.start
            elif (anti_ends["reverse"] != -1) and (
                    (anti_ends["reverse"] - gene.start) < 0):
                if (checks["int_anti"] is True) or (
                        (tss.start - anti_ends["reverse"]) > 0):
                    tss_entry = import_to_tss("Primary", gene.start,
                                              tss, locus_tag, tss_entry)
                    checks["orphan"] = False
                    gene_ends["forward"] = gene.start
        else:
            if ((anti_ends["forward"] != -1) and (
                    gene.end - anti_ends["forward"]) > 0) or (
                    anti_ends["forward"] == -1):
                tss_entry = import_to_tss("Primary", gene.end, tss,
                                          locus_tag, tss_entry)
                checks["orphan"] = False
                gene_ends["reverse"] = gene.end
    if is_internal(gene.start, gene.end, tss.start, tss.strand):
        locus_tag = gene.attributes["locus_tag"]
        tss_entry = import_to_tss("Internal", "NA", tss, locus_tag,
                                  tss_entry)
        checks["orphan"] = False
    return tss_entry


def diff_strand_tss_gene(gene, tss, anti_ends, gene_ends, checks,
                         tss_entry):
    '''check a TSS and a gene which are on different strands'''
    if is_antisense(gene.start, gene.end, tss.start, tss.strand):
        checks["int_anti"] = False
        if tss.strand == "-":
            anti_ends["forward"] = gene.start
            if (gene_ends["reverse"] != -1) and (
                    (gene.start - gene_ends["reverse"]) > 0):
                if is_internal(gene.start, gene.end, tss.start,
                               tss.strand):
                    pass
                else:
                    if (tss.start - gene.end) > 0:
                        tss_entry = remove_primary(tss, tss_entry)
        else:
            anti_ends["reverse"] = gene.end
            if is_internal(gene.start, gene.end, tss.start, tss.strand):
                checks["int_anti"] = True
            if (gene_ends["forward"] != -1) and (
                    (gene.start - gene_ends["forward"]) > 0):
                if (gene.start - tss.start) > 0:
                    tss_entry = remove_primary(tss, tss_entry)
        locus_tag = gene.attributes["locus_tag"]
        tss_entry = import_to_tss("Antisense", "NA", tss, locus_tag,
                                  tss_entry)
        checks["orphan"] = False
    return tss_entry


def compare_tss_gene(tss, genes):
    '''compare a TSS and the genes to find their relation'''
    tss_entry = []
    gene_ends = {"forward": -1, "reverse": -1}
    anti_ends = {"forward": -1, "reverse": -1}
    checks = {"orphan": True, "int_anti": None}
    for gene in genes:
        if gene.strand == tss.strand:
            tss_entry = same_strand_tss_gene(gene, tss, anti_ends,
                                             gene_ends, checks,
                                             tss_entry)
        else:
            tss_entry = diff_strand_tss_gene(gene, tss, anti_ends,
                                             gene_ends, checks,
                                             tss_entry)
    if checks["orphan"]:
        tss_entry = import_to_tss("Orphan", "NA", tss, "NA", tss_entry)
    return tss_entry


def is_primary(cds_start, cds_end, tss_pos, strand):
    if strand == "+":
        if (is_utr(cds_start, tss_pos, 300) and (cds_start >= tss_pos)):
            return True
    else:
        if (is_utr(tss_pos, cds_end, 300) and (cds_end <= tss_pos)):
            return True


def is_internal(cds_start, cds_end, tss_pos, strand):
    if ((cds_start < tss_pos) and (cds_end > tss_pos)) or (
            (strand == "+") and (tss_pos == cds_end)) or (
            (strand == "-") and (tss_pos == cds_start)):
        return True


def is_antisense(cds_start, cds_end, tss_pos, strand):
    if ((is_utr(cds_start, tss_pos, 100)) and (cds_start >= tss_pos)) or (
            (is_utr(tss_pos, cds_end, 100)) and (cds_end <= tss_pos)) or (
            is_internal(cds_start, cds_end, tss_pos, strand)):
        return True


def is_utr(pos1, pos2, length):
    if (pos1 - pos2 <= length):
        return True


def print_all_unique(out, overlap_num, nums):
    if ((nums["tss_p"] != 0) or (overlap_num != 0)) and (
            (nums["tss_m"] != 0) or (overlap_num != 0)):
        out.write("the number of overlap between "
                  "TSSpredator and manual = {0} ".format(overlap_num))
        out.write("(overlap of all TSSpredator = {0}, ".format(
            float(overlap_num) / (float(nums["tss_p"]) +
                                  float(overlap_num))))
        out.write("overlap of all manual = {0})\n".format(
            float(overlap_num) / (float(nums["tss_m"]) +
                                  float(overlap_num))))
        out.write("the number of unique in TSSpredator = "
                  "{0} ({1})\n".format(
                      nums["tss_p"],
                      float(nums["tss_p"]) / (float(nums["tss_p"]) +
                                              float(overlap_num))))
        out.write("the number of unique in manual = {0} ({1})\n".format(
            nums["tss_m"],
            float(nums["tss_m"]) / (float(nums["tss_m"]) +
                                    float(overlap_num))))
    else:
        out.write("No TSS candidates were predicted by TSSpredator.")


def print_stat(num_strain, stat_file, overlap_num, nums):
    if len(num_strain) != 0:
        out = open(stat_file, "w")
        if len(num_strain.keys()) == 1:
            print_all_unique(out, overlap_num, nums)
        else:
            out.write("All genomes: \n")
            print_all_unique(out, overlap_num, nums)
            for strain in num_strain.keys():
                if (num_strain[strain]["tsspredator"] == 0) and \
                   (num_strain[strain]["overlap"] == 0):
                    perc_tsspredator = "NA"
                    perc_tsspredator_uni = "NA"
                else:
                    perc_tsspredator = str(
                        float(num_strain[strain]["overlap"]) / (
                            float(num_strain[strain]["tsspredator"]) +
                            float(num_strain[strain]["overlap"])))
                    perc_tsspredator_uni = str(
                        float(num_strain[strain]["tsspredator"]) / (
                            float(num_strain[strain]["tsspredator"]) +
                            float(num_strain[strain]["overlap"])))
                if (num_strain[strain]["manual"] == 0) and (
                        num_strain[strain]["overlap"] == 0):
                    perc_manual = "NA"
                    perc_manual_uni = "NA"
                else:
                    perc_manual = str(
                        float(num_strain[strain]["overlap"]) / (
                            float(num_strain[strain]["manual"]) +
                            float(num_strain[strain]["overlap"])))
                    perc_manual_uni = str(
                        float(num_strain[strain]["manual"]) / (
                            float(num_strain[strain]["manual"]) +
                            float(num_strain[strain]["overlap"])))
                out.write(strain + ": \n")
                out.write("the number of overlap between "
                          "TSSpredator and manual = {0} ".format(
                              num_strain[strain]["overlap"]))
                out.write("(overlap of all TSSpredator = {0}, ".format(
                    perc_tsspredator))
                out.write("overlap of all manual = {0})\n".format(
                    perc_manual))
                out.write("the number of unique in "
                          "TSSpredator = {0} ({1})\n".format(
                              num_strain[strain]["tsspredator"],
                              perc_tsspredator_uni))
                out.write("the number of unique in manual = "
                          "{0} ({1})\n".format(
                              num_strain[strain]["manual"],
                              perc_manual_uni))


def read_wig(filename, strand):
    wigs = {}
    wig_parser = WigParser()
    if filename:
        wig_fh = open(filename)
        for entry in wig_parser.parser(wig_fh, strand):
            if entry.strain not in wigs.keys():
                strain = entry.strain
                wigs[strain] = {}
            if entry.track not in wigs[strain].keys():
                wigs[strain][entry.track] = []
            wigs[strain][entry.track].append({
                "pos": entry.pos, "coverage": entry.coverage,
                "strand": entry.strand})
        wig_fh.close()
    return wigs


def read_gff(tss_predict_file, tss_manual_file, gff_file, lengths):
    tsss = {"tsss_p": [], "tsss_m": [], "merge": []}
    cdss = []
    genes = []
    gff_parser = Gff3Parser()
    tssp_fh = open(tss_predict_file, "r")
    tssm_fh = open(tss_manual_file, "r")
    g_f = open(gff_file, "r")
    for entry in gff_parser.entries(tssp_fh):
        entry.attributes["print"] = False
        tsss["tsss_p"].append(entry)
    tssp_fh.close()
    tsss["tsss_p"] = sorted(tsss["tsss_p"], key=lambda k: (
        k.seq_id, k.start, k.end, k.strand))
    for entry in gff_parser.entries(tssm_fh):
        if (entry.seq_id in lengths.keys()) or ("all" in lengths.keys()):
            entry.attributes["print"] = False
            entry.attributes["libs"] = "manual"
            entry.attributes["method"] = "manual"
            tsss["tsss_m"].append(entry)
    tssm_fh.close()
    tsss["tsss_m"] = sorted(tsss["tsss_m"], key=lambda k: (
        k.seq_id, k.start, k.end, k.strand))
    for entry in gff_parser.entries(g_f):
        if (Helper().feature_without_notgene(entry)):
            cdss.append(entry)
        if entry.feature == "gene":
            genes.append(entry)
    g_f.close()
    cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start,
                                       k.end, k.strand))
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    return tsss, cdss, genes


def merge_libs(input_libs, wig_folder, program):
    if "merge_forward.wig" in os.listdir(os.getcwd()):
        os.remove("merge_forward.wig")
    if "merge_reverse.wig" in os.listdir(os.getcwd()):
        os.remove("merge_reverse.wig")
    if program == "TSS":
        type_ = "tex"
    elif program == "processing":
        type_ = "notex"
    for lib in input_libs:
        datas = lib.split(":")
        if (datas[1] == type_) and (datas[4] == "+"):
            Helper().merge_file(os.path.join(wig_folder, datas[0]),
                                os.path.join(os.getcwd(),
                                             "merge_forward.wig"))
        elif (datas[1] == type_) and (datas[4] == "-"):
            Helper().merge_file(os.path.join(wig_folder, datas[0]),
                                os.path.join(os.getcwd(),
                                             "merge_reverse.wig"))


def check_overlap(overlap, pre_tss, nums, length, num_strain,
                  overlap_num, tss_m, tss_p, tsss, pre_pos, cdss, genes):
    '''find the TSSs which are detected by both manual detection
    and TSSpredator'''
    if overlap:
        if pre_tss:
            pre_tss.attributes["print"] = True
            tss = pre_tss
        else:
            tss = tss_p
        tss.attribute_string = define_attributes(tss)
        tss.attributes["method"] = "TSSpredator,manual"
        if (not length) or \
           (tss.start <= int(length)):
            num_strain[tss.seq_id]["overlap"] += 1
            if (pre_pos != -1):
                if (tss.start - pre_pos != 0):
                    tsss["merge"].append(tss)
                    nums["tss"] += 1
                    overlap_num += 1
                else:
                    overlap_num += 1
            else:
                tsss["merge"].append(tss)
                nums["tss"] += 1
                overlap_num += 1
        overlap = False
        pre_pos = tss.start
    else:
        tss_entry = compare_tss_gene(tss_m, genes)
        tss_m.attributes = tss_entry[1]
        tss_m.attribute_string = tss_entry[0]
        tss_m.attributes["method"] = "manual"
        tsss["merge"].append(tss_m)
        if (not length) or \
           (tss_m.start <= int(length)):
            num_strain[tss_m.seq_id]["manual"] += 1
            nums["tss_m"] += 1
            nums["tss"] += 1
    return (overlap, pre_pos, overlap_num)


def intersection(tsss, cluster, nums, lengths, cdss, genes, seqs):
    '''compare the predicted TSSs and the manually detected TSSs'''
    num_strain = {}
    overlap = False
    overlap_num = 0
    pre_pos = -1
    start = False
    length = None
    for tss_m in tsss["tsss_m"]:
        pre_tss = None
        start = False
        if "all" in lengths.keys():
            length = seqs[tss_m.seq_id]
        else:
            if lengths[tss_m.seq_id] == "all":
                length = seqs[tss_m.seq_id]
            else:
                length = lengths[tss_m.seq_id]
        for tss_p in tsss["tsss_p"]:
            start = True
            if (tss_p.strand == tss_m.strand) and \
               (tss_p.seq_id == tss_m.seq_id):
                if (tss_p.start == tss_m.start):
                    tss_p.attributes["print"] = True
                    overlap = True
                    pre_tss = None
                    break
                elif (math.fabs(tss_p.start - tss_m.start) <= cluster):
                    overlap = True
                    pre_tss = tss_p
        if (start) or (not overlap):
            if tss_m.seq_id not in num_strain.keys():
                num_strain[tss_m.seq_id] = {"overlap": 0,
                                            "tsspredator": 0,
                                            "manual": 0}
            if pre_tss is None:
                if tss_p.seq_id not in num_strain.keys():
                    num_strain[tss_p.seq_id] = {"overlap": 0,
                                                "tsspredator": 0,
                                                "manual": 0}
            else:
                if pre_tss.seq_id not in num_strain.keys():
                    num_strain[pre_tss.seq_id] = {"overlap": 0,
                                                  "tsspredator": 0,
                                                  "manual": 0}
            datas = check_overlap(overlap, pre_tss, nums, length,
                                  num_strain, overlap_num, tss_m,
                                  tss_p, tsss, pre_pos, cdss, genes)
            overlap = datas[0]
            pre_pos = datas[1]
            overlap_num = datas[2]
    if (start) or (len(tsss["tsss_m"]) == 0):
        for tss_p in tsss["tsss_p"]:
            run = False
            if not tss_p.attributes["print"]:
                tss_p.attribute_string = define_attributes(tss_p)
                tsss["merge"].append(tss_p)
                if (length is None):
                    run = True
                else:
                    if (tss_p.start <= int(length)):
                        run = True
                if run and (tss_p.seq_id in num_strain):
                    num_strain[tss_p.seq_id]["tsspredator"] += 1
                    nums["tss"] += 1
                    nums["tss_p"] += 1
    return overlap_num, num_strain


def print_file(final_tsss, program, out_gff):
    num_final = 0
    out = open(out_gff, "w")
    out.write("##gff-version 3\n")
    for tss in final_tsss:
        if "print" in tss.attributes.keys():
            del tss.attributes["print"]
        tss.attributes["ID"] = "_".join([
            tss.seq_id, program.lower() + str(num_final)])
        num_final += 1
        if program == "TSS":
            strand = Helper().get_strand_name(tss.strand)
            tss.attributes["Name"] = "TSS:" + "_".join(
                [str(tss.start), strand])
        else:
            strand = Helper().get_strand_name(tss.strand)
            tss.attributes["Name"] = "processing:" + "_".join(
                [str(tss.start), strand])
        tss.attribute_string = ";".join(
            ["=".join(items) for items in tss.attributes.items()])
        out.write("\t".join([str(field) for field in [
            tss.seq_id, "ANNOgesic", tss.feature, tss.start,
            tss.end, tss.score, tss.strand, tss.phase,
            tss.attribute_string]]) + "\n")


def read_seq(seq_file):
    seqs = {}
    with open(seq_file) as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                seqs[strain] = 0
            else:
                seqs[strain] = seqs[strain] + len(line)
    return seqs


def merge_manual_predict_tss(tss_predict_file, stat_file, out_gff,
                             gff_file, args_tss, manual, seq_file):
    '''merge the manually detected TSSs and the
    TSSpredator-predicted TSSs'''
    nums = {"tss_p": 0, "tss_m": 0, "tss": 0}
    merge_libs(args_tss.libs, args_tss.wig_folder, args_tss.program)
    wigs_f = read_wig("merge_forward.wig", "+")
    wigs_r = read_wig("merge_reverse.wig", "-")
    seqs = read_seq(seq_file)
    tsss, cdss, genes = read_gff(tss_predict_file, manual,
                                 gff_file, args_tss.strain_lengths)
    overlap_num, num_strain = intersection(tsss, args_tss.cluster,
                                           nums,
                                           args_tss.strain_lengths,
                                           cdss, genes, seqs)
    sort_tsss = sorted(tsss["merge"], key=lambda k: (k.seq_id, k.start,
                                                     k.end, k.strand))
    final_tsss = fix_primary_type(sort_tsss, wigs_f, wigs_r)
    print_file(final_tsss, args_tss.program, out_gff)
    print_stat(num_strain, stat_file, overlap_num, nums)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/merge_manual.py
merge_manual.py
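A sketch of how merge_manual_predict_tss can be driven; args_tss is normally built by ANNOgesic's argument wrapper, so the SimpleNamespace below only mimics the attributes this module actually reads, and every path and library string is a hypothetical placeholder.

from types import SimpleNamespace
from annogesiclib.merge_manual import merge_manual_predict_tss

# library strings follow the name:type:condition:replicate:strand
# convention used by merge_libs above
args_tss = SimpleNamespace(
    libs=["TSS_forward.wig:tex:1:a:+", "TSS_reverse.wig:tex:1:a:-"],
    wig_folder="wigs", program="TSS", cluster=3,
    strain_lengths={"all": "all"})
merge_manual_predict_tss(
    "predicted_TSS.gff", "stat_TSS.csv", "merged_TSS.gff",
    "annotation.gff", args_tss, "manual_TSS.gff", "genome.fa")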
import math
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.lib_reader import read_libs, read_wig


def read_gff(input_file):
    datas = []
    gff_parser = Gff3Parser()
    f_h = open(input_file, "r")
    for entry in gff_parser.entries(f_h):
        entry.attributes["print"] = False
        datas.append(entry)
    datas = sorted(datas, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    return datas


def get_coverage(tar, wigs):
    '''get the maximal coverage at the position of the candidate'''
    coverage = 0
    for strain, conds in wigs.items():
        if tar.seq_id == strain:
            for tracks in conds.values():
                # iterate over the per-track coverage lists
                for covers in tracks.values():
                    if coverage < covers[tar.start - 1]["coverage"]:
                        coverage = covers[tar.start - 1]["coverage"]
    return coverage


def compare_wig(tars, wig_fs, wig_rs):
    '''get the coverage of each TSS for the comparison'''
    for tar in tars:
        if tar.strand == "+":
            tar.attributes["coverage"] = get_coverage(tar, wig_fs)
        elif tar.strand == "-":
            tar.attributes["coverage"] = get_coverage(tar, wig_rs)


def stat(tars, refs, cutoff, gene_length, cluster):
    '''compute the statistics for the current cutoff'''
    stats = {"tp": 0, "fp": 0, "miss": 0, "fp_rate": 0,
             "tp_rate": 0, "miss_rate": 0}
    num_ref = 0
    for ref in refs:
        num_ref += 1
        detect = False
        for tar in tars:
            if (ref.seq_id == tar.seq_id) and (
                    ref.strand == tar.strand) and (
                    float(tar.attributes["coverage"]) >= cutoff) and (
                    tar.start <= int(gene_length)):
                if math.fabs(ref.start - tar.start) <= cluster:
                    stats["tp"] += 1
                    tar.attributes["print"] = True
                    detect = True
        if not detect:
            stats["miss"] += 1
    for tar in tars:
        if (not tar.attributes["print"]) and (
                float(tar.attributes["coverage"]) >= cutoff) and (
                tar.start <= int(gene_length)):
            stats["fp"] += 1
    stats["fp_rate"] = float(stats["fp"]) / float(int(gene_length) -
                                                  num_ref)
    stats["tp_rate"] = float(stats["tp"]) / float(num_ref)
    stats["miss_rate"] = float(stats["miss"]) / float(num_ref)
    return stats, num_ref


def print_file(tars, cutoff, out_file):
    out = open(out_file, "w")
    for tar in tars:
        if tar.attributes["coverage"] >= cutoff:
            out.write(tar.info + "\n")


def change_best(num_ref, best, stat_value):
    '''scoring function to evaluate the change of the TSS candidates'''
    change = False
    if num_ref > 100:
        if best["tp_rate"] - stat_value["tp_rate"] >= 0.1:
            change = False
        else:
            if (best["tp_rate"] <= stat_value["tp_rate"]) and (
                    best["fp_rate"] >= stat_value["fp_rate"]):
                best = stat_value.copy()
                change = True
            elif (stat_value["tp_rate"] - best["tp_rate"] >= 0.01) and (
                    stat_value["fp_rate"] - best["fp_rate"] <= 0.00005):
                best = stat_value.copy()
                change = True
            elif (best["tp_rate"] - stat_value["tp_rate"] <= 0.01) and (
                    best["fp_rate"] - stat_value["fp_rate"] >= 0.00005):
                best = stat_value.copy()
                change = True
    else:
        if best["tp"] - stat_value["tp"] >= 5:
            change = False
        else:
            if (best["tp"] <= stat_value["tp"]) and (
                    best["fp"] >= stat_value["fp"]):
                best = stat_value.copy()
                change = True
            tp_diff = float(best["tp"] - stat_value["tp"])
            if tp_diff > 0:
                if float(best["fp"] - stat_value["fp"]) >= 5 * tp_diff:
                    best = stat_value.copy()
                    change = True
            elif tp_diff < 0:
                tp_diff = tp_diff * -1
                if float(stat_value["fp"] - best["fp"]) <= 5 * tp_diff:
                    best = stat_value.copy()
                    change = True
    return best, change


def filter_low_expression(gff_file, args_tss, wig_f_file, wig_r_file,
                          out_file):
    '''filter out the lowly expressed TSS candidates'''
    tars = read_gff(gff_file)
    refs = read_gff(args_tss.manual_file)
    libs, texs = read_libs(args_tss.input_lib, args_tss.wig_folder)
    wig_fs = read_wig(wig_f_file, "+", args_tss.libs)
    wig_rs = read_wig(wig_r_file, "-", args_tss.libs)
    compare_wig(tars, wig_fs, wig_rs)
    cutoff = 1
    first = True
    while True:
        stat_value, num_ref = stat(tars, refs, cutoff,
                                   args_tss.gene_length,
                                   args_tss.cluster)
        if first:
            first = False
            best = stat_value.copy()
            continue
        else:
            best, change = change_best(num_ref, best, stat_value)
            if not change:
                break
        cutoff = cutoff + 0.1
    print_file(tars, cutoff, out_file)
    return cutoff
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/filter_low_expression.py
filter_low_expression.py
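A hedged example of the iterative cutoff search above; the wig and gff paths are placeholders, and args_tss again only mimics the attributes read by filter_low_expression (the library strings follow the same colon-separated convention as in the other modules).

from types import SimpleNamespace
from annogesiclib.filter_low_expression import filter_low_expression

args_tss = SimpleNamespace(
    manual_file="manual_TSS.gff",
    input_lib=["TSS_forward.wig:tex:1:a:+", "TSS_reverse.wig:tex:1:a:-"],
    libs=["TSS_forward.wig:tex:1:a:+", "TSS_reverse.wig:tex:1:a:-"],
    wig_folder="wigs", gene_length=2000000, cluster=3)
cutoff = filter_low_expression(
    "predicted_TSS.gff", args_tss, "forward.wig", "reverse.wig",
    "filtered_TSS.gff")
print("final coverage cutoff:", cutoff)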
import os
import shutil
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def assign_tss(tss, tran):
    if "ID" in tran.attributes.keys():
        tran_id = tran.attributes["ID"]
    else:
        strand = Helper().get_strand_name(tran.strand)
        tran_id = "".join([tran.feature, ":", str(tran.start),
                           "-", str(tran.end), "_", strand])
    if "Parent" not in tss.attributes.keys():
        tss.attributes["Parent"] = tran_id
    else:
        tss.attributes["Parent"] = \
            ",".join([tss.attributes["Parent"], tran_id])
    if "Name" in tss.attributes.keys():
        tss_name = tss.attributes["Name"]
    else:
        strand = Helper().get_strand_name(tss.strand)
        tss_name = "".join(["TSS:", str(tss.start), "_", strand])
    if "associated_tss" not in tran.attributes.keys():
        tran.attributes["associated_tss"] = tss_name
    else:
        tran.attributes["associated_tss"] = \
            ",".join([tran.attributes["associated_tss"], tss_name])


def del_attributes(entry, features):
    attributes = {}
    for key, value in entry.attributes.items():
        if (key not in features):
            attributes[key] = value
    return attributes


def compare_tran_tss(trans, tsss, fuzzy, stat, out):
    num_tran = 0
    for tran in trans:
        tran.attributes["ID"] = (tran.seq_id + "_transcript" +
                                 str(num_tran))
        detect = False
        check = [0, 0, 0]
        for tss in tsss:
            if (tss.strand == tran.strand) and (
                    tss.seq_id == tran.seq_id):
                if tss.strand == "+":
                    if (tss.start + int(fuzzy) >= tran.start) and (
                            tss.start <= tran.end):
                        if check[0] != 1:
                            stat["with_TSS"] += 1
                        assign_tss(tss, tran)
                        check[0] = 1
                        detect = True
                        tss.attributes["detect"] = True
                else:
                    if (tss.end - int(fuzzy) <= tran.end) and (
                            tss.end >= tran.start):
                        if check[0] != 1:
                            stat["with_TSS"] += 1
                        assign_tss(tss, tran)
                        check[0] = 1
                        detect = True
                        tss.attributes["detect"] = True
        if not detect:
            stat["no_TSS"] += 1
            tran.attributes["associated_tss"] = "NA"
            check[1] = 1
        else:
            detect = False
        tran.attributes = del_attributes(tran, ["TSS_note", "detect"])
        tran.attribute_string = ";".join(
            ["=".join(items) for items in tran.attributes.items()])
        if out is not None:
            out.write("\t".join([str(field) for field in [
                tran.seq_id, tran.source, tran.feature, tran.start,
                tran.end, tran.score, tran.strand, tran.phase,
                tran.attribute_string + "\n"]]))
        num_tran += 1


def detect_tas_region(tsss, trans, out, out_tss, fuzzy):
    stat = {"with_TSS": 0, "no_TSS": 0, "TSS_no_tran": 0,
            "TSS_with_tran": 0}
    compare_tran_tss(trans, tsss, fuzzy, stat, out)
    for tss in tsss:
        if "Parent" not in tss.attributes.keys():
            tss.attributes["Parent"] = "NA"
        if ("detect" in tss.attributes.keys()):
            tss.attributes = del_attributes(tss, ["detect"])
            tss.attribute_string = ";".join(
                ["=".join(items) for items in tss.attributes.items()])
            stat["TSS_with_tran"] += 1
            if out_tss is not None:
                out_tss.write("\t".join([str(field) for field in [
                    tss.seq_id, tss.source, tss.feature, tss.start,
                    tss.end, tss.score, tss.strand, tss.phase,
                    tss.attribute_string]]) + "\n")
        else:
            stat["TSS_no_tran"] += 1
            if out_tss is not None:
                out_tss.write("\t".join([str(field) for field in [
                    tss.seq_id, tss.source, tss.feature, tss.start,
                    tss.end, tss.score, tss.strand, tss.phase,
                    tss.attribute_string]]) + "\n")
    return stat


def print_tas_stat(stat, out):
    total_tran = stat["with_TSS"] + stat["no_TSS"]
    total_TSS = stat["TSS_no_tran"] + stat["TSS_with_tran"]
    out.write("\tTranscript starts or overlaps with TSS:{0} "
              "({1})\n".format(
                  stat["with_TSS"],
                  float(stat["with_TSS"]) / float(total_tran)))
    out.write("\tTranscript has no relationship with TSS:{0} "
              "({1})\n".format(
                  stat["no_TSS"],
                  float(stat["no_TSS"]) / float(total_tran)))
    out.write("\tTSS starts or overlaps with transcript:{0} "
              "({1})\n".format(
                  stat["TSS_with_tran"],
                  float(stat["TSS_with_tran"]) / float(total_TSS)))
    out.write("\tTSS has no relationship with transcript:{0} "
              "({1})\n".format(
                  stat["TSS_no_tran"],
                  float(stat["TSS_no_tran"]) / float(total_TSS)))


def read_tas_file(tss_file, ta_file):
    tsss_uni = {}
    tas_uni = {}
    tsss = []
    tas = []
    tss_f = open(tss_file, "r")
    ta_f = open(ta_file, "r")
    pre_seq_id = ""
    for entry in Gff3Parser().entries(tss_f):
        entry.attributes = del_attributes(entry, ["Parent", "tran_note"])
        if pre_seq_id != entry.seq_id:
            pre_seq_id = entry.seq_id
            tsss_uni[entry.seq_id] = []
        tsss_uni[entry.seq_id].append(entry)
        tsss.append(entry)
    tss_f.close()
    pre_seq_id = ""
    for entry in Gff3Parser().entries(ta_f):
        entry.attributes = del_attributes(entry, ["associated_tss"])
        if pre_seq_id != entry.seq_id:
            pre_seq_id = entry.seq_id
            tas_uni[entry.seq_id] = []
        tas_uni[entry.seq_id].append(entry)
        tas.append(entry)
    ta_f.close()
    tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    return tsss_uni, tsss, tas_uni, tas


def stat_ta_tss(ta_file, tss_file, stat_file, out_ta_file,
                out_tss_file, fuzzy):
    '''statistics for the comparison of transcripts and TSSs'''
    tsss_uni, tsss, tas_uni, tas = read_tas_file(tss_file, ta_file)
    out_stat = open(stat_file, "w")
    out_stat.write("All genomes:\n")
    out_ta = open(out_ta_file, "w")
    out_tss = open(out_tss_file, "w")
    out_ta.write("##gff-version 3\n")
    out_tss.write("##gff-version 3\n")
    stats = detect_tas_region(tsss, tas, out_ta, out_tss, fuzzy)
    print_tas_stat(stats, out_stat)
    if (len(tsss_uni) > 1) and (len(tas_uni) > 1):
        for strain_tss in tsss_uni.keys():
            for strain_ta in tas_uni.keys():
                if strain_tss == strain_ta:
                    out_stat.write(strain_tss + ":\n")
                    sort_tas = sorted(tas_uni[strain_ta],
                                      key=lambda k: (k.seq_id, k.start,
                                                     k.end, k.strand))
                    stats = detect_tas_region(tsss_uni[strain_tss],
                                              sort_tas, None, None,
                                              fuzzy)
                    print_tas_stat(stats, out_stat)
    out_stat.close()
    out_ta.close()
    out_tss.close()


def assign_parent(gff, tran, feature):
    if "Parent" not in gff.attributes.keys():
        gff.attributes["Parent"] = tran.attributes["ID"]
    else:
        gff.attributes["Parent"] = (
            ",".join([gff.attributes["Parent"], tran.attributes["ID"]]))
    if "_".join(["associated", feature]) not in tran.attributes.keys():
        if "locus_tag" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                gff.attributes["locus_tag"])
        elif "protein_id" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                gff.attributes["protein_id"])
        elif "Name" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                gff.attributes["Name"])
        else:
            strand = Helper().get_strand_name(gff.strand)
            tran.attributes["_".join(["associated", feature])] = (
                "".join([gff.feature, ":", str(gff.start), "-",
                         str(gff.end), "_", strand]))
    else:
        if "locus_tag" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                ",".join([tran.attributes["_".join(
                    ["associated", feature])],
                    gff.attributes["locus_tag"]]))
        elif "protein_id" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                ",".join([tran.attributes["_".join(
                    ["associated", feature])],
                    gff.attributes["protein_id"]]))
        elif "Name" in gff.attributes.keys():
            tran.attributes["_".join(["associated", feature])] = (
                ",".join([tran.attributes["_".join(
                    ["associated", feature])],
                    gff.attributes["Name"]]))
        else:
            strand = Helper().get_strand_name(gff.strand)
            tran.attributes["_".join(["associated", feature])] = (
                ",".join([tran.attributes["_".join(
                    ["associated", feature])], "".join(
                    [gff.feature, ":", str(gff.start), "-",
                     str(gff.end), "_", strand])]))


def compare_ta_gff(gffs, tran, check, tran_type, detect, stats,
                   c_feature):
    for gff in gffs:
        if (gff.feature == c_feature):
            if (gff.strand == tran.strand) and (
                    gff.seq_id == tran.seq_id):
                if (gff.start < tran.start) and (
                        gff.end > tran.end):
                    if check[0] != 1:
                        stats[tran.seq_id]["bsae"] += 1
                        stats["All"]["bsae"] += 1
                        tran_type.append("within")
                    assign_parent(gff, tran, c_feature)
                    detect = True
                    check[0] = 1
                elif (gff.start >= tran.start) and (
                        gff.end <= tran.end):
                    if check[3] != 1:
                        stats[tran.seq_id]["asbe"] += 1
                        stats["All"]["asbe"] += 1
                        tran_type.append("cover")
                    assign_parent(gff, tran, c_feature)
                    check[3] = 1
                    detect = True
                elif (gff.start >= tran.start) and (
                        gff.end > tran.end) and (
                        gff.start < tran.end):
                    if check[1] != 1:
                        stats[tran.seq_id]["asae"] += 1
                        stats["All"]["asae"] += 1
                        tran_type.append("left_shift")
                    assign_parent(gff, tran, c_feature)
                    check[1] = 1
                    detect = True
                elif (gff.start < tran.start) and (
                        gff.end <= tran.end) and (
                        gff.end > tran.start):
                    if check[2] != 1:
                        stats[tran.seq_id]["bsbe"] += 1
                        stats["All"]["bsbe"] += 1
                        tran_type.append("right_shift")
                    assign_parent(gff, tran, c_feature)
                    check[2] = 1
                    detect = True
    return detect


def detect_tag_region(gffs, trans, stats, out_t, out_g, c_feature,
                      region):
    detect = False
    for tran in trans:
        check = [0, 0, 0, 0, 0]
        tran_type = []
        tran_type_string = ""
        detect = compare_ta_gff(gffs, tran, check, tran_type,
                                detect, stats, c_feature)
        if not detect:
            stats[tran.seq_id]["other"] += 1
            stats["All"]["other"] += 1
            check[4] = 1
            tran_type.append("not_related")
        else:
            detect = False
        tran_type_string = ",".join(tran_type)
        attribute_string = ";".join(
            ["=".join(items) for items in tran.attributes.items()])
        out_t.write("\t".join([tran.info_without_attributes,
                               attribute_string]) + ";compare_" +
                    c_feature + "=" + tran_type_string + "\n")
    if region is not None:
        out_g.write(region.info + "\n")
    for gff in gffs:
        attribute_string = ";".join(
            ["=".join(items) for items in gff.attributes.items()])
        out_g.write(gff.info_without_attributes + "\t" +
                    attribute_string + "\n")


def detect_express_gene(gffs, c_feature, strain):
    express_gene = 0
    for gff in gffs:
        if (gff.feature == c_feature) and (
                (strain == "all") or (
                gff.seq_id == strain)) and (
                "Parent" in gff.attributes.keys()):
            if "tran" in gff.attributes["Parent"].lower():
                express_gene += 1
    return express_gene


def print_tag_stat(stats, out, express_gene, c_feature):
    total = (stats["bsae"] + stats["bsbe"] + stats["asae"] +
             stats["asbe"] + stats["other"])
    if stats["gene"] != 0:
        out.write("\t\tTranscript starts before and "
                  "ends after {0}:{1} ({2})\n".format(
                      c_feature, str(stats["asbe"]),
                      str(float(stats["asbe"]) / float(total))))
        out.write("\t\tTranscript starts after and "
                  "ends before {0}:{1} ({2})\n".format(
                      c_feature, str(stats["bsae"]),
                      str(float(stats["bsae"]) / float(total))))
        out.write("\t\tTranscript starts before and "
                  "ends within {0}:{1} ({2})\n".format(
                      c_feature, str(stats["asae"]),
                      str(float(stats["asae"]) / float(total))))
        out.write("\t\tTranscript starts within and "
                  "ends after {0}:{1} ({2})\n".format(
                      c_feature, str(stats["bsbe"]),
                      str(float(stats["bsbe"]) / float(total))))
        out.write("\t\tTranscript has no overlap of {0}:{1} "
                  "({2})\n".format(
                      c_feature, str(stats["other"]),
                      str(float(stats["other"]) / float(total))))
        out.write("\t\tTotal {0}s which have expression:{1} "
                  "({2})\n".format(
                      c_feature, str(express_gene),
                      str(float(express_gene) / float(stats["gene"]))))
    else:
        out.write("\t\tNo {0} is detected in the genome "
                  "annotation file\n".format(c_feature))


def read_tag_file(gff_file, ta_file, c_feature):
    region = None
    gffs = []
    tas = []
    stats = {}
    stats["All"] = {"bsae": 0, "bsbe": 0, "asae": 0, "asbe": 0,
                    "other": 0, "gene": 0}
    pre_seq_id = ""
    ta_f = open(ta_file, "r")
    for entry in Gff3Parser().entries(ta_f):
        if entry.seq_id != pre_seq_id:
            pre_seq_id = entry.seq_id
            stats[entry.seq_id] = {"bsae": 0, "bsbe": 0, "asae": 0,
                                   "asbe": 0, "other": 0, "gene": 0}
        entry.attributes = del_attributes(entry, [
            "_".join(["associated", c_feature]),
            "_".join(["compare", c_feature])])
        tas.append(entry)
    ta_f.close()
    g_f = open(gff_file, "r")
    for entry in Gff3Parser().entries(g_f):
        if (entry.feature == c_feature):
            ori_parents = []
            if "Parent" in entry.attributes.keys():
                parents = entry.attributes["Parent"].split(",")
                for parent in parents:
                    if "gene" in parent:
                        ori_parents.append(parent)
                if len(ori_parents) == 0:
                    entry.attributes = del_attributes(entry, ["Parent"])
                else:
                    entry.attributes["Parent"] = ",".join(ori_parents)
            if entry.seq_id in stats.keys():
                stats[entry.seq_id]["gene"] += 1
                stats["All"]["gene"] += 1
        if (entry.feature.lower() != "region") and (
                entry.feature.lower() != "source") and (
                entry.feature.lower() != "remark"):
            gffs.append(entry)
        else:
            region = entry
    g_f.close()
    tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    return gffs, tas, stats, region


def stat_ta_gff(ta_file, gff_file, stat_file, out_ta_file, out_gff_file,
                c_feature):
    '''statistics for the comparison of transcripts and the
    genome annotation'''
    tmp_gff_file = gff_file + "tmp"
    tmp_ta_file = ta_file + "tmp"
    shutil.copy(gff_file, tmp_gff_file)
    shutil.copy(ta_file, tmp_ta_file)
    out_stat = open(stat_file, "w")
    for feature in c_feature:
        og_f = open(out_gff_file, "w")
        o_f = open(out_ta_file, "w")
        o_f.write("##gff-version 3\n")
        og_f.write("##gff-version 3\n")
        gffs, tas, stats, region = read_tag_file(
            tmp_gff_file, tmp_ta_file, feature)
        detect_tag_region(gffs, tas, stats, o_f, og_f, feature, region)
        express_gene = detect_express_gene(gffs, feature, "all")
        out_stat.write("For {0}:\n".format(feature))
        out_stat.write("\tAll genomes:\n")
        out_stat.write("\tThe transcript information "
                       "compares with {0}:\n".format(feature))
        print_tag_stat(stats["All"], out_stat, express_gene, feature)
        if len(stats) > 2:
            for strain in stats.keys():
                if strain != "All":
                    express_gene = detect_express_gene(gffs, feature,
                                                       strain)
                    out_stat.write("\t" + strain + ":\n")
                    out_stat.write("\tThe transcript information "
                                   "compares with {0}:\n".format(
                                       feature))
                    print_tag_stat(stats[strain], out_stat,
                                   express_gene, feature)
        og_f.close()
        o_f.close()
        shutil.copy(out_gff_file, tmp_gff_file)
        shutil.copy(out_ta_file, tmp_ta_file)
    out_stat.close()
    os.remove(tmp_gff_file)
    os.remove(tmp_ta_file)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/stat_TA_comparison.py
stat_TA_comparison.py
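The two public entry points above compare transcripts against TSSs and against annotation features; a short hedged sketch of both calls, with all paths hypothetical:

from annogesiclib.stat_TA_comparison import stat_ta_tss, stat_ta_gff

# transcript vs. TSS, allowing 5 nt of fuzziness at the transcript start
stat_ta_tss("transcripts.gff", "TSS.gff", "stat_ta_tss.txt",
            "transcripts_out.gff", "TSS_out.gff", 5)
# transcript vs. annotation, comparing against CDS and gene features
stat_ta_gff("transcripts.gff", "annotation.gff", "stat_ta_gff.txt",
            "transcripts_out.gff", "annotation_out.gff",
            ["CDS", "gene"])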
import csv
import numpy as np
import os
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.coverage_detection import (coverage_comparison,
                                             check_tex, get_repmatch)
from annogesiclib.lib_reader import read_libs, read_wig


def import_data(row):
    return {"method": "gene_converged", "strain": row[0],
            "start": int(row[1]), "end": int(row[2]), "name": row[3],
            "miss": int(row[4]), "loop": int(row[5]), "diff": [],
            "length": int(row[6]), "r_stem": int(row[7]),
            "strand": row[8], "l_stem": int(row[9]),
            "parent_p": row[10], "parent_m": row[11],
            "ut": int(row[12]), "print": False, "detect_p": False,
            "detect_m": False, "express": "False"}


def compare_ta(terms, tas, fuzzy):
    '''Compare transcripts and terminators to find the
    expressed terminators'''
    for term in terms:
        for ta in tas:
            start = ta.start - fuzzy
            end = ta.end + fuzzy
            if ((ta.seq_id == term["strain"]) and (
                    ta.strand == term["strand"])):
                if ((start <= term["start"]) and (
                        end >= term["start"]) and (
                        end <= term["end"])) or (
                        (start >= term["start"]) and (
                        end <= term["end"])) or (
                        (start >= term["start"]) and (
                        start <= term["end"]) and (
                        end >= term["end"])) or (
                        (start <= term["start"]) and (
                        end >= term["end"])):
                    term["express"] = "True"


def compare_transtermhp(hps, fr_terms):
    '''compare the terminators of the two methods,
    gene converged and TransTermHP'''
    terms = []
    for term in fr_terms:
        detect = False
        for hp in hps:
            if (hp.seq_id == term["strain"]) and (
                    hp.strand == term["strand"]):
                if ((hp.start <= term["start"]) and (
                        hp.end >= term["start"]) and (
                        hp.end <= term["end"])) or (
                        (hp.start >= term["start"]) and (
                        hp.end <= term["end"])) or (
                        (hp.start >= term["start"]) and (
                        hp.start <= term["end"]) and (
                        hp.end >= term["end"])) or (
                        (hp.start <= term["start"]) and (
                        hp.end >= term["end"])):
                    if hp.start < term["start"]:
                        term["start"] = hp.start
                    if hp.end > term["end"]:
                        term["end"] = hp.end
                    hp.attributes["print"] = True
                    detect = True
        if detect:
            term["method"] = ",".join([term["method"], "TransTermHP"])
        terms.append(term)
    for hp in hps:
        need_print = False
        if "print" not in hp.attributes.keys():
            need_print = True
        else:
            if not hp.attributes["print"]:
                need_print = True
        if need_print:
            if hp.strand == "+":
                terms.append({"method": "TransTermHP",
                              "strain": hp.seq_id,
                              "start": hp.start, "end": hp.end,
                              "strand": hp.strand,
                              "name": hp.attributes["ID"],
                              "parent_p": hp.attributes[
                                  "associated_gene"],
                              "print": False, "detect_p": False,
                              "detect_m": False, "express": "False",
                              "diff": []})
            else:
                terms.append({"method": "TransTermHP",
                              "strain": hp.seq_id,
                              "start": hp.start, "end": hp.end,
                              "strand": hp.strand,
                              "name": hp.attributes["ID"],
                              "parent_m": hp.attributes[
                                  "associated_gene"],
                              "print": False, "detect_p": False,
                              "detect_m": False, "express": "False",
                              "diff": []})
    terms = sorted(terms, key=lambda x: (x["strain"], x["start"],
                                         x["end"], x["strand"]))
    return terms


def compare_replicates(term_covers, template_texs, cond, args_term):
    '''check the cutoff of the replicate match'''
    detect_num = 0
    term_datas = []
    diff_cover = -1
    diff = []
    detect = False
    detect_num = check_tex(template_texs, term_covers, term_datas,
                           None, "terminator", None, None, None,
                           None, 0, args_term.tex_notex)
    if ("texnotex" in cond):
        tex_rep = get_repmatch(args_term.replicates["tex"], cond)
        if detect_num >= tex_rep:
            detect = True
    elif ("frag" in cond):
        frag_rep = get_repmatch(args_term.replicates["frag"], cond)
        if detect_num >= frag_rep:
            detect = True
    if detect:
        detect = False
        for term in term_datas:
            if (len(diff) == 0) or (diff_cover < term["diff"]):
                diff_cover = term["diff"]
                diff = term
    return diff_cover, diff, term_datas, detect_num


def coverage2term(covers, term, hl_covers, hl_poss, strand,
                  term_covers, track, args_term, start_plus,
                  end_minus, lib_type):
    '''get the highest and the lowest coverage'''
    first = True
    pos = 0
    for cover in covers:
        if strand == "+":
            cover_pos = start_plus + pos
        else:
            cover_pos = end_minus - pos
        if (term["start"] <= cover_pos + args_term.fuzzy) and (
                term["end"] >= cover_pos - args_term.fuzzy):
            first = coverage_comparison(cover, hl_covers, hl_poss,
                                        first, strand, cover_pos)
        else:
            if (strand == "+") and (
                    cover_pos > term["end"] + args_term.fuzzy):
                break
            elif (strand == "-") and (
                    cover_pos < term["start"] - args_term.fuzzy):
                break
        if (first is not True) and (hl_covers["high"] > 0):
            if ((hl_covers["low"] / hl_covers["high"]) <
                    args_term.decrease) and (hl_covers["low"] > -1):
                term_covers.append({
                    "track": track, "high": hl_covers["high"],
                    "low": hl_covers["low"], "detect": "True",
                    "diff": (hl_covers["high"] - hl_covers["low"]),
                    "type": lib_type})
                break
        pos += 1


def check_start_and_end(term, args_term, covers):
    if (term["start"] - args_term.fuzzy - 2) < 0:
        start = 0
    else:
        start = term["start"] - args_term.fuzzy - 2
    if (term["end"] + args_term.fuzzy + 1) > len(covers):
        end = len(covers)
    else:
        end = term["end"] + args_term.fuzzy + 1
    return start, end


def get_coverage(term, wigs, strand, template_texs, args_term):
    '''get the proper coverage to check the coverage decrease'''
    hl_poss = {"high": 0, "low": 0}
    hl_covers = {"high": 0, "low": 0}
    term_datas = {}
    detect_nums = {}
    diff_cover = -1
    diff = []
    for wig_strain, conds in wigs.items():
        if wig_strain == term["strain"]:
            for cond, tracks in conds.items():
                term_covers = []
                term_datas[cond] = []
                detect_nums[cond] = 0
                for lib_name, covers in tracks.items():
                    track = lib_name.split("|")[-3]
                    lib_strand = lib_name.split("|")[-2]
                    lib_type = lib_name.split("|")[-1]
                    c_start, c_end = check_start_and_end(term,
                                                         args_term,
                                                         covers)
                    covers = covers[c_start: c_end]
                    if strand == "-":
                        covers = covers[::-1]
                    coverage2term(covers, term, hl_covers, hl_poss,
                                  strand, term_covers, track,
                                  args_term, c_start, c_end, lib_type)
                if len(term_covers) != 0:
                    tmp_cov, tmp_diff, term_datas[cond], \
                        detect_nums[cond] = compare_replicates(
                            term_covers, template_texs, cond,
                            args_term)
                    if (diff_cover == -1) or (diff_cover < tmp_cov):
                        diff_cover = tmp_cov
                        diff = tmp_diff
    detect = False
    for cond, num in detect_nums.items():
        if ("texnotex" in cond):
            tex_rep = get_repmatch(args_term.replicates["tex"], cond)
            if num >= tex_rep:
                detect = True
        elif ("frag" in cond):
            frag_rep = get_repmatch(args_term.replicates["frag"], cond)
            if num >= frag_rep:
                detect = True
    if detect:
        detect = False
        if strand == "+":
            term["detect_p"] = True
        else:
            term["detect_m"] = True
    return diff_cover, diff, term_datas, detect_nums


def compare_term(term, terms):
    '''For the terminators associated with the same gene, try to
    find the best one based on the secondary structure'''
    if len(terms) != 0:
        for tmp in terms:
            if term["miss"] < tmp["miss"]:
                terms = []
                terms.append(term)
                break
            elif term["miss"] == tmp["miss"]:
                if ("diff_cover" in term.keys()) and \
                   ("diff_cover" in tmp.keys()):
                    if (term["diff_cover"] > tmp["diff_cover"]):
                        terms = []
                        terms.append(term)
                    elif (term["diff_cover"] == tmp["diff_cover"]):
                        if term["ut"] > tmp["ut"]:
                            terms = []
                            terms.append(term)
                        elif term["ut"] == tmp["ut"]:
                            terms.append(term)
                break
            elif term["miss"] > tmp["miss"]:
                break
    else:
        terms.append(term)
    return terms


def first_term(strand, term, detect_terms, detect):
    '''for the first and the last terminator, we only need to check
    the parent gene on one side of the terminator and also need to
    take care of the terminal end'''
    if (strand == "+"):
        if (term["detect_p"]):
            detect_terms["detect"].append(term)
            detect = True
        else:
            detect_terms["undetect"].append(term)
    elif (strand == "-"):
        if (term["detect_m"]):
            detect_terms["detect"].append(term)
            detect = True
        else:
            detect_terms["undetect"].append(term)
    return detect


def get_attribute_string(num, name, parent, diff, term, coverage,
                         method):
    attribute_string = ";".join(
        ["=".join(items) for items in [
            ("ID", term["strain"] + "_terminator" + str(num)),
            ("Name", name),
            ("associated_gene", parent),
            ("coverage_decrease", coverage),
            ("diff_coverage", diff),
            ("express", term["express"]),
            ("method", method)]])
    return attribute_string


def print_table(term, out_t, args_term):
    '''Print to table'''
    first = True
    if (term["express"] == "True") and \
       (term["diff_cover"] != -1):
        if term["diff"]["high"] >= args_term.cutoff_coverage:
            out_t.write("\tTrue\t")
            # the branch condition below is reconstructed; it assumes
            # the standard table_best switch (print all tracks vs.
            # only the best one) that this argument container carries
            if not args_term.table_best:
                for datas in term["datas"].values():
                    for data in datas:
                        if first:
                            out_t.write(
                                "{0}(diff={1};high={2};low={3})".format(
                                    data["track"], data["diff"],
                                    data["high"], data["low"]))
                            first = False
                        else:
                            out_t.write(
                                ";{0}(diff={1};high={2};low={3})".format(
                                    data["track"], data["diff"],
                                    data["high"], data["low"]))
            else:
                out_t.write("{0}(diff={1};high={2};low={3})".format(
                    term["diff"]["track"], term["diff_cover"],
                    term["diff"]["high"], term["diff"]["low"]))
        else:
            out_t.write("\tFalse\t")
            out_t.write("No_coverage_decreasing")
    elif (term["express"] == "True") and \
         (term["diff_cover"] == -1):
        out_t.write("\tFalse\t")
        out_t.write("No_coverage_decreasing")
    elif term["express"] == "False":
        out_t.write("\tFalse\t")
        out_t.write("NA")


def print2file(num, term, coverage, parent, out, out_t, method,
               args_term):
    '''Print to gff file and table'''
    name = 'terminator_%0*d' % (5, num)
    if ("detect_num" in term.keys()) and \
       (term["diff_cover"] != -1):
        out_t.write("\t".join([term["strain"], name,
                               str(term["start"]), str(term["end"]),
                               term["strand"],
                               term["method"].replace(",", ";"),
                               parent]))
    else:
        out_t.write("\t".join([term["strain"], name,
                               str(term["start"]), str(term["end"]),
                               term["strand"],
                               term["method"].replace(",", ";"),
                               parent]))
    if (term["express"] == "True") and (term["diff_cover"] != -1):
        if (term["diff"]["high"] >= args_term.cutoff_coverage):
            diff = ("{0}(high:{1},low:{2})".format(
                term["diff"]["track"], term["diff"]["high"],
                term["diff"]["low"]))
            attribute_string = get_attribute_string(
                num, name, parent, diff, term, coverage, method)
        elif (term["diff"]["high"] < args_term.cutoff_coverage):
            attribute_string = get_attribute_string(
                num, name, parent, "NA", term,
                "No_coverage_decreasing", method)
    elif (term["express"] == "True") and (term["diff_cover"] == -1):
        attribute_string = get_attribute_string(
            num, name, parent, "NA", term,
            "No_coverage_decreasing", method)
    elif (term["express"] == "False"):
        attribute_string = get_attribute_string(
            num, name, parent, "NA", term, "NA", method)
    out.write("\t".join([str(field) for field in [
        term["strain"], "ANNOgesic", "terminator",
        str(term["start"]), str(term["end"]), ".",
        term["strand"], ".", attribute_string]]) + "\n")
    print_table(term, out_t, args_term)
    out_t.write("\n")


def print_detect_undetect(term, num, out, out_t, detect, args_term):
    '''control the output for the two strands of terminators'''
    if term["strand"] == "+":
        print2file(num, term, detect, term["parent_p"], out, out_t,
                   term["method"], args_term)
        num += 1
    else:
        print2file(num, term, detect, term["parent_m"], out, out_t,
                   term["method"], args_term)
        num += 1
    return num


def term_validation(pre_term, term, detect, detect_terms, out, out_t,
                    num, args_term, final_terms):
    '''Classification of the terminators'''
    if (pre_term["name"] != term["name"]) or (args_term.keep_multi):
        if detect:
            final_terms["detect"] = (final_terms["detect"] +
                                     detect_terms["detect"])
            detect = False
        else:
            final_terms["undetect"] = (final_terms["undetect"] +
                                       detect_terms["undetect"])
        detect_terms["detect"] = []
        detect_terms["undetect"] = []
        detect = first_term(term["strand"], term, detect_terms, detect)
    else:
        if term["strand"] == "+":
            if (term["detect_p"]):
                detect = True
                detect_terms["detect"] = compare_term(
                    term, detect_terms["detect"])
            else:
                if not detect:
                    detect_terms["undetect"] = compare_term(
                        term, detect_terms["undetect"])
        else:
            if (term["detect_m"]):
                detect = True
                detect_terms["detect"] = compare_term(
                    term, detect_terms["detect"])
            else:
                if not detect:
                    detect_terms["undetect"] = compare_term(
                        term, detect_terms["undetect"])
    return num, detect


def print_term(terms, out, out_t, args_term):
    first = True
    detect = False
    detect_terms = {"detect": [], "undetect": []}
    num = 0
    final_terms = {"detect": [], "undetect": []}
    for term in terms:
        if first:
            first = False
            pre_term = term
            detect = first_term(term["strand"], term, detect_terms,
                                detect)
        else:
            num, detect = term_validation(pre_term, term, detect,
                                          detect_terms, out, out_t,
                                          num, args_term, final_terms)
            pre_term = term
    if detect:
        final_terms["detect"] = (final_terms["detect"] +
                                 detect_terms["detect"])
    else:
        final_terms["undetect"] = (final_terms["undetect"] +
                                   detect_terms["undetect"])
    remove_repeat(final_terms["detect"], num, out, out_t, "True",
                  args_term)
    remove_repeat(final_terms["undetect"], num, out, out_t, "False",
                  args_term)


def del_repeat_term(terms):
    '''delete the repeated terminators'''
    first = True
    new_terms = []
    for term in terms:
        detect = False
        if first:
            first = False
            pre_term = term
        else:
            if (term["strain"] == pre_term["strain"]) and (
                    term["strand"] == pre_term["strand"]) and (
                    term["parent_p"] == pre_term["parent_p"]) and (
                    term["parent_m"] == pre_term["parent_m"]):
                if (term["start"] <= pre_term["start"]) and (
                        term["end"] >= pre_term["start"]) and (
                        term["end"] <= pre_term["end"]):
                    detect = True
                    pre_term["start"] = term["start"]
                elif (term["start"] <= pre_term["start"]) and (
                        term["end"] >= pre_term["end"]):
                    detect = True
                    pre_term["start"] = term["start"]
                    pre_term["end"] = term["end"]
                elif (term["start"] >= pre_term["start"]) and (
                        term["end"] <= pre_term["end"]):
                    detect = True
                elif (term["start"] >= pre_term["start"]) and (
                        term["start"] <= pre_term["end"]) and (
                        term["end"] >= pre_term["end"]):
                    detect = True
                    pre_term["end"] = term["end"]
                if detect:
                    if term["miss"] < pre_term["miss"]:
                        pre_term["miss"] = term["miss"]
                else:
                    new_terms.append(pre_term)
                    pre_term = term
            else:
                new_terms.append(pre_term)
                pre_term = term
    if len(terms) != 0:
        new_terms.append(term)
    return new_terms


def read_data(gff_file, tran_file, tranterm_file, seq_file,
              term_table):
    gff_parser = Gff3Parser()
    gffs = []
    tas = []
    hps = []
    fr_terms = []
    seq = {}
    new_terms = []
    for entry in gff_parser.entries(open(gff_file)):
        if (entry.feature == "gene"):
            gffs.append(entry)
    if os.path.exists(tran_file):
        for entry in gff_parser.entries(open(tran_file)):
            tas.append(entry)
    if os.path.exists(tranterm_file):
        for entry in gff_parser.entries(open(tranterm_file)):
            hps.append(entry)
    with open(seq_file, "r") as s_f:
        for line in s_f:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                seq[strain] = ""
            else:
                seq[strain] = seq[strain] + line
    if os.path.exists(term_table):
        term_f = open(term_table, "r")
        for row in csv.reader(term_f, delimiter="\t"):
            fr_terms.append(import_data(row))
        new_terms = del_repeat_term(fr_terms)
    tas = sorted(tas, key=lambda x: (x.seq_id, x.start, x.end,
                                     x.strand))
    gffs = sorted(gffs, key=lambda x: (x.seq_id, x.start, x.end,
                                       x.strand))
    hps = sorted(hps, key=lambda x: (x.seq_id, x.start, x.end,
                                     x.strand))
    return gffs, tas, hps, new_terms, seq


def compute_wig(wig_file, libs, terms, strand, texs, args_term):
    '''Get the coverage information for a specific terminator'''
    wigs = {}
    wigs = read_wig(wig_file, strand, libs)
    for term in terms:
        if (term["strand"] == strand) and \
           (term["express"] == "True"):
            term["diff_cover"], term["diff"], term["datas"], \
                detect_nums = get_coverage(term, wigs, strand,
                                           texs, args_term)
            term["detect_num"] = {}
            for cond, num in detect_nums.items():
                term["detect_num"][cond] = str(num)


def remove_repeat(terms, num, out, out_t, detect, args_term):
    finals = []
    for term1 in terms:
        if args_term.keep_multi:
            num = print_detect_undetect(term1, num, out, out_t,
                                        detect, args_term)
        else:
            tmp_term = term1
            for term2 in terms:
                if (term1["strain"] == term2["strain"]) and (
                        term1["strand"] == term2["strand"]):
                    if term1["strand"] == "+":
                        parents1 = set(tmp_term["parent_p"].split(","))
                        parents2 = set(term2["parent_p"].split(","))
                        if (parents1.issubset(parents2)):
                            if (parents2.issubset(parents1)):
                                if tmp_term["end"] < term2["end"]:
                                    tmp_term = term2
                                elif (tmp_term["end"] ==
                                        term2["end"]) and (
                                        tmp_term["start"] >
                                        term2["start"]):
                                    tmp_term = term2
                            else:
                                tmp_term = term2
                    else:
                        parents1 = set(tmp_term["parent_m"].split(","))
                        parents2 = set(term2["parent_m"].split(","))
                        if (parents1.issubset(parents2)):
                            if (parents2.issubset(parents1)):
                                if tmp_term["start"] > term2["start"]:
                                    tmp_term = term2
                                elif (tmp_term["start"] ==
                                        term2["start"]) and (
                                        term1["end"] < term2["end"]):
                                    tmp_term = term2
                            else:
                                tmp_term = term2
            if tmp_term not in finals:
                num = print_detect_undetect(tmp_term, num, out, out_t,
                                            detect, args_term)
                finals.append(tmp_term)
    return num


def detect_coverage(term_table, gff_file, tran_file, seq_file,
                    wig_f_file, wig_r_file, tranterm_file, wig_folder,
                    output_file, output_table, args_term):
    '''detect the coverage of the terminators'''
    gffs, tas, hps, fr_terms, seq = read_data(gff_file, tran_file,
                                              tranterm_file, seq_file,
                                              term_table)
    terms = compare_transtermhp(hps, fr_terms)
    compare_ta(terms, tas, args_term.fuzzy)
    libs, texs = read_libs(args_term.libs, wig_folder)
    compute_wig(wig_f_file, libs, terms, "+", texs, args_term)
    compute_wig(wig_r_file, libs, terms, "-", texs, args_term)
    out = open(output_file, "w")
    out_t = open(output_table, "w")
    print_term(terms, out, out_t, args_term)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/detect_coverage_term.py
detect_coverage_term.py
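A minimal sketch of how del_repeat_term() above collapses overlapping terminator candidates. The input dicts are hand-built stand-ins for rows parsed by import_data(), and only the keys the function actually reads are filled in; it assumes the ANNOgesic package is importable:

from annogesiclib.detect_coverage_term import del_repeat_term

# Two candidates on the same strain/strand with identical parent genes,
# overlapping by 30 nt; the lower "miss" count survives the merge.
terms = [
    {"strain": "NC_000915.1", "strand": "+", "parent_p": "gene_0",
     "parent_m": "NA", "start": 100, "end": 150, "miss": 2},
    {"strain": "NC_000915.1", "strand": "+", "parent_p": "gene_0",
     "parent_m": "NA", "start": 120, "end": 180, "miss": 1},
]
for term in del_repeat_term(terms):
    print(term["start"], term["end"], term["miss"])  # one record remains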
import os import sys from annogesiclib.gff3 import Gff3Parser from annogesiclib.helper import Helper from annogesiclib.parser_wig import WigParser from annogesiclib.gen_TSS_type import compare_tss_cds, fix_primary_type from annogesiclib.lib_reader import read_libs, read_wig def get_upstream(seq, tss, out, name, nt_before): if tss.strand == "+": if (tss.start - nt_before + 1) <= 0: start = 1 else: start = tss.start - nt_before + 1 fasta = Helper().extract_gene(seq, start, tss.start, tss.strand) else: if (tss.start + nt_before - 1) > len(seq): end = len(seq) else: end = tss.start + nt_before - 1 fasta = Helper().extract_gene(seq, tss.start, end, tss.strand) if len(fasta) >= nt_before: out.write("{0}\n{1}\n".format(name, fasta)) def print_fasta(seq, tss, files, name, nt_before): for key in seq.keys(): if tss.seq_id == key: if "Primary" in tss.attributes["type"]: get_upstream(seq[key], tss, files["pri"], name, nt_before) if "Secondary" in tss.attributes["type"]: get_upstream(seq[key], tss, files["sec"], name, nt_before) if "Internal" in tss.attributes["type"]: get_upstream(seq[key], tss, files["inter"], name, nt_before) if "Antisense" in tss.attributes["type"]: get_upstream(seq[key], tss, files["anti"], name, nt_before) if "Orphan" in tss.attributes["type"]: get_upstream(seq[key], tss, files["orph"], name, nt_before) def read_data(tss_file, fasta_file): seq = {} tsss = [] t_f = open(tss_file, "r") for entry in Gff3Parser().entries(t_f): tsss.append(entry) if fasta_file is not None: with open(fasta_file, "r") as f_h: for line in f_h: line = line.strip() if len(line) != 0: if line[0] == ">": seq[line[1:]] = "" seq_id = line[1:] else: seq[seq_id] = seq[seq_id] + line tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return tsss, seq def read_gff(gff_file): cdss = [] genes = [] g_f = open(gff_file, "r") for entry in Gff3Parser().entries(g_f): if (Helper().feature_without_notgene(entry)): cdss.append(entry) if entry.feature == "gene": genes.append(entry) cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) genes = sorted(genes, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return cdss, genes def upstream(tss_file, fasta_file, gff_file, out_class, args_pro, prefix): '''get the upstream sequence of each TSS''' if fasta_file is not None: files = {"pri": open("tmp/primary.fa", "w"), "sec": open("tmp/secondary.fa", "w"), "inter": open("tmp/internal.fa", "w"), "anti": open("tmp/antisense.fa", "w"), "orph": open("tmp/orphan.fa", "w")} tsss, seq = read_data(tss_file, fasta_file) num_tss = 0 if not args_pro.source: out = open(out_class, "w") out.write("##gff-version 3\n") cdss, genes = read_gff(gff_file) for tss in tsss: if ("type" not in tss.attributes.keys()) and (args_pro.source): print("Error: The TSS gff file may not be generated from ANNOgesic." 
"Please run with --tss_source!") sys.exit() if args_pro.source: name = ">" + "_".join([str(tss.start), tss.strand, tss.seq_id]) print_fasta(seq, tss, files, name, args_pro.nt_before) else: tss_type = compare_tss_cds(tss, cdss, genes) tss.attributes = tss_type[1] tss.attributes["ID"] = tss.seq_id + "_tss" + str(num_tss) tss.attribute_string = "".join([ tss_type[0], ";ID=", tss.seq_id, "_tss", str(num_tss)]) num_tss += 1 if not args_pro.source: if args_pro.tex_wigs is not None: libs, texs = read_libs(args_pro.input_libs, args_pro.tex_wigs) wigs_f = read_wig(os.path.join( args_pro.wig_path, prefix + "_forward.wig"), "+", libs) wigs_r = read_wig(os.path.join( args_pro.wig_path, prefix + "_reverse.wig"), "-", libs) else: wigs_f = None wigs_r = None sort_tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) final_tsss = fix_primary_type(sort_tsss, wigs_f, wigs_r) for tss in final_tsss: name = ">" + "_".join([str(tss.start), tss.strand, tss.seq_id]) tss.attribute_string = ";".join( ["=".join(items) for items in tss.attributes.items()]) out.write("\t".join([str(field) for field in [ tss.seq_id, tss.source, tss.feature, tss.start, tss.end, tss.score, tss.strand, tss.phase, tss.attribute_string]]) + "\n") if fasta_file is not None: print_fasta(seq, tss, files, name, args_pro.nt_before) def del_repeat_fasta(input_file, out_file): data = {} check_same = False first_file = True out = open(out_file, "w") with open(input_file, "r") as f_h: for line in f_h: line = line.strip() if line[0] == ">": if check_same: check_same = False if first_file: seq_id = line[1:] first_file = False data[seq_id] = "" else: if line[1:] in data.keys(): check_same = True else: seq_id = line[1:] data[seq_id] = "" else: if check_same: pass else: data[seq_id] = data[seq_id] + line for strain, fasta in data.items(): out.write(">" + strain + "\n") out.write(fasta + "\n")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/TSS_upstream.py
TSS_upstream.py
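A small self-contained sketch of del_repeat_fasta() above, which keeps only the first record for each duplicated FASTA header; the file names are hypothetical:

from annogesiclib.TSS_upstream import del_repeat_fasta

# Build a demo FASTA with a duplicated header, then deduplicate it.
with open("demo.fa", "w") as fh:
    fh.write(">seqA\nATGC\n>seqA\nGGGG\n>seqB\nTTTT\n")
del_repeat_fasta("demo.fa", "demo_uniq.fa")
with open("demo_uniq.fa") as fh:
    print(fh.read())  # seqA kept once (ATGC), seqB kept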
import os import shutil from annogesiclib.gff3 import Gff3Parser def get_overlap(anno, source, finals, overlaps, detect, out): if (anno.source in source) and ( anno not in overlaps): finals.append(anno) detect = True return detect def deal_overlap(out_folder, source): gffs = {} num = 0 for gff_file in os.listdir(out_folder): if gff_file.endswith(".gff"): gff_f = open(os.path.join(out_folder, gff_file), "r") for entry in Gff3Parser().entries(gff_f): if entry.feature not in gffs.keys(): gffs[entry.feature] = [] gffs[entry.feature].append(entry) gff_f.close() out = open(os.path.join(out_folder, gff_file + "tmp"), "w") finals = [] overlaps = [] for feature, annos in gffs.items(): for anno1 in annos: detect = False for anno2 in annos: if (anno1.seq_id == anno2.seq_id) and ( anno1.strand == anno2.strand) and ( anno1 != anno2) and ( anno1.feature == anno2.feature) and ( anno1.source != anno2.source): if ((anno1.start <= anno2.start) and ( anno1.end >= anno2.end)) or ( (anno1.start >= anno2.start) and ( anno1.end <= anno2.end)) or ( (anno1.start <= anno2.start) and ( anno1.end <= anno2.end) and ( anno1.end >= anno2.start)) or ( (anno1.start >= anno2.start) and ( anno1.start <= anno2.end) and ( anno1.end >= anno2.end)): detect = get_overlap(anno1, source, finals, overlaps, detect, out) detect = get_overlap(anno2, source, finals, overlaps, detect, out) if detect: overlaps.append(anno1) overlaps.append(anno2) if (not detect) and (anno1 not in overlaps): finals.append(anno1) finals = sorted(finals, key=lambda x: (x.seq_id, x.start, x.end, x.strand)) for final in finals: if (final.feature == "region") or ( final.feature == "source") or ( final.feature == "remark"): out.write(final.info + "\n") break for final in finals: if (final.feature != "region") and ( final.feature != "source"): out.write(final.info + "\n") out.close() shutil.move(os.path.join(out_folder, gff_file + "tmp"), os.path.join(out_folder, gff_file))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/overlap.py
overlap.py
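A hedged usage sketch for deal_overlap() above: it rewrites every .gff file in the folder in place, and where two entries of the same feature from different sources overlap, it keeps the one whose source is listed in the second argument. The folder and source names here are placeholders:

from annogesiclib.overlap import deal_overlap

# Keep RefSeq entries wherever they collide with another annotation source.
deal_overlap("merged_annotations", ["RefSeq"])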
import os import sys import copy from annogesiclib.gff3 import Gff3Parser from annogesiclib.lib_reader import read_wig, read_libs from annogesiclib.plot_coverage_table import plot_table def read_data(gff, features): gffs = {} stats = {} outs = {} for entry in Gff3Parser().entries(open(gff)): for feature in features: if feature not in gffs.keys(): gffs[feature] = [] stats[feature] = {} outs[feature] = {"all": [], "least_one": [], "none": []} if entry.feature == feature: gffs[feature].append(entry) for feature in gffs.keys(): gffs[feature] = sorted(gffs[feature], key=lambda k: ( k.seq_id, k.start, k.end, k.strand)) return gffs, stats, outs def set_cutoff(cond, percent_tex, percent_frag, detects, gff): if ("tex" in cond) or ("notex" in cond): cutoffs = percent_tex.split("_") elif ("frag" in cond): cutoffs = percent_frag.split("_") if cutoffs[0] == "p": cutoff_percent = float(cutoffs[1]) diff = detects["express"] / (gff.end - gff.start + 1) elif cutoffs[0] == "n": cutoff_percent = float(cutoffs[1]) diff = detects["express"] elif cutoffs[0] == "all": cutoff_percent = 0 diff = detects["express"] else: print("Error: Please assign the valid cutoff_overlap!!") return diff, cutoff_percent def detect_express(wigs, gff, cutoff_coverage, detects, percent_tex, percent_frag, texs, cond, tex_notex, track, plots, cover_type, name): total = 0 high = 0 for wig in wigs[(gff.start - 1): gff.end]: total = wig["coverage"] + total if wig["coverage"] > high: high = wig["coverage"] if wig["coverage"] >= cutoff_coverage: detects["express"] += 1 if cover_type == "average": plots[cond][name] = float(total) / float(gff.end - gff.start + 1) elif cover_type == "high": plots[cond][name] = high else: print("Error: The coverage_type is not correct!!!") sys.exit() diff, cutoff_percent = set_cutoff(cond, percent_tex, percent_frag, detects, gff) if (diff >= float(cutoff_percent)) and (diff != 0): if ("tex" in cond) or ("notex" in cond): for key, num in texs.items(): if track in key: texs[key] += 1 if texs[key] == tex_notex: detects["track"] += 1 elif "frag" in cond: detects["track"] += 1 def compare_wigs(wigs, gff, tex_notex, template_texs, replicates, stats, outs, plots, cover_type, cutoff_coverage, percent_tex, percent_frag): detects = {"cond": 0, "track": 0, "import": False, "express": 0} texs = copy.deepcopy(template_texs) for strain, conds in wigs.items(): if gff.seq_id == strain: detects["cond"] = 0 num_conds = 0 for cond, tracks in conds.items(): num_conds += 1 plots[cond] = {} if cond not in stats[strain].keys(): stats[strain][cond] = 0 if cond not in stats["total"].keys(): stats["total"][cond] = 0 if cond not in outs.keys(): outs[cond] = [] detects["track"] = 0 for track, wigs in tracks.items(): name = track plots[cond][name] = 0 detects["express"] = 0 detect_express(wigs, gff, cutoff_coverage, detects, percent_tex, percent_frag, texs, cond, tex_notex, track, plots, cover_type, name) if ("tex" in cond) or ("notex" in cond): if detects["track"] >= replicates["tex"]: detects["import"] = True elif ("frag" in cond): if detects["track"] >= replicates["frag"]: detects["import"] = True if detects["import"]: detects["import"] = False stats["total"][cond] += 1 stats[strain][cond] += 1 outs[cond].append(gff) detects["cond"] += 1 if detects["cond"] == 0: stats["total"]["none"] += 1 stats[strain]["none"] += 1 outs["none"].append(gff) if detects["cond"] == num_conds: stats["total"]["all"] += 1 stats[strain]["all"] += 1 outs["all"].append(gff) if (detects["cond"] <= num_conds) and ( detects["cond"] > 0): stats["total"]["least_one"] 
+= 1 stats[strain]["least_one"] += 1 outs["least_one"].append(gff) def print_stat(out, stats): out.write("\t".join(["total input:", str(stats["total"])]) + "\n") for cond, num in stats.items(): if cond == "least_one": tag = "expression at lease one condition:" elif cond == "all": tag = "expression at all conditions:" elif cond == "none": tag = "no expression:" else: tag = "condition " + cond + ":" if cond != "total": per = "(" + str(float(num) / float(stats["total"])) + ")" out.write("\t".join([tag, " ".join([str(num), per])]) + "\n") def output_stat(stats, stat_folder, prefix): for feature, strains in stats.items(): out = open(os.path.join(stat_folder, "_".join([prefix, feature + ".csv"])), "w") if len(strains.keys()) > 2: out.write("All strain:\n") print_stat(out, strains["total"]) for strain, stat in strains.items(): if strain != "total": out.write(strain + ":\n") print_stat(out, stat) out.close() def output_gff(outs, out_gff_folder, prefix): for feature, conds in outs.items(): for cond, gffs in conds.items(): if cond == "least_one": out = open(os.path.join( out_gff_folder, "_".join([ prefix, feature, "at_least_one_lib.gff"])), "w") elif cond == "all": out = open(os.path.join( out_gff_folder, "_".join([ prefix, feature, "all_libs.gff"])), "w") elif cond == "none": out = open(os.path.join( out_gff_folder, "_".join([ prefix, feature, "no_express.gff"])), "w") else: out = open(os.path.join( out_gff_folder, "_".join([ prefix, feature, cond + ".gff"])), "w") out.write("##gff-version 3\n") for gff in gffs: out.write(gff.info + "\n") out.close() def deal_repeat_tag(gff, plots, feature, repeat, tag, tags): if (gff.attributes[tag] in tags) and ( gff.attributes[tag] not in repeat.keys()): plots[feature].append({gff.attributes[tag] + "_2": {}}) repeat[gff.attributes[tag]] = 2 name = gff.attributes[tag] + "_2" elif (gff.attributes[tag] in tags) and ( gff.attributes[tag] in repeat.keys()): plots[feature].append({"_".join([gff.attributes[tag], str(repeat[gff.attributes[tag]] + 1)]): {}}) name = "_".join([gff.attributes[tag], str(repeat[gff.attributes[tag]] + 1)]) repeat[gff.attributes[tag]] += 1 else: plots[feature].append({gff.attributes[tag]: {}}) name = gff.attributes[tag] return name def get_name(plots, gff, feature, repeat, tags): name = "".join([gff.feature, ":", str(gff.start), "-", str(gff.end), "_", gff.strand]) if feature == "gene": if "locus_tag" in gff.attributes.keys(): name = deal_repeat_tag(gff, plots, feature, repeat, "locus_tag", tags) else: plots[feature].append({name: {}}) elif feature == "CDS": if "locus_tag" in gff.attributes.keys(): name = deal_repeat_tag(gff, plots, feature, repeat, "locus_tag", tags) elif "protein_id" in gff.attributes.keys(): name = deal_repeat_tag(gff, plots, feature, repeat, "protein_id", tags) else: plots[feature].append({name: {}}) else: plots[feature].append({name: {}}) tags.append(name) return name def plot(plots, stat_folder, max_color, min_color, cover_type): for feature in plots: plot_table(plots[feature], max_color, min_color, os.path.join(stat_folder, "_".join([ feature, cover_type, "express_analysis.png"]))) def gene_expression(input_libs, gff_folder, percent_tex, percent_frag, wig_f_file, wig_r_file, features, wigs, cutoff_coverage, tex_notex, replicates, stat_folder, out_gff_folder, cover_type, max_color, min_color): print("Loading wiggle file...") libs, texs = read_libs(input_libs, wigs) wig_fs = read_wig(wig_f_file, "+", libs) wig_rs = read_wig(wig_r_file, "-", libs) plots = {} repeat = {} for gff in os.listdir(gff_folder): if 
gff.endswith(".gff"): prefix = gff.replace(".gff", "") print("Computing " + prefix) gff_list, stats, outs = read_data(os.path.join(gff_folder, gff), features) for feature, gffs in gff_list.items(): plots[feature] = [] repeat[feature] = {} tags = [] stats[feature]["total"] = {"total": 0, "least_one": 0, "all": 0, "none": 0} num = 0 for gff in gffs: if gff.seq_id not in stats[feature].keys(): stats[feature][gff.seq_id] = { "total": 0, "least_one": 0, "all": 0, "none": 0} stats[feature]["total"]["total"] += 1 stats[feature][gff.seq_id]["total"] += 1 name = get_name(plots, gff, feature, repeat[feature], tags) if gff.strand == "+": compare_wigs( wig_fs, gff, tex_notex, texs, replicates, stats[feature], outs[feature], plots[feature][num][name], cover_type, cutoff_coverage, percent_tex, percent_frag) elif gff.strand == "-": compare_wigs( wig_rs, gff, tex_notex, texs, replicates, stats[feature], outs[feature], plots[feature][num][name], cover_type, cutoff_coverage, percent_tex, percent_frag) num += 1 output_stat(stats, stat_folder, prefix) output_gff(outs, out_gff_folder, prefix) plot(plots, stat_folder, max_color, min_color, cover_type)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gene_express_analysis.py
gene_express_analysis.py
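A minimal sketch of the cutoff string convention that set_cutoff() above parses: "p_<fraction>" compares the expressed fraction of the feature, "n_<count>" compares the raw number of expressed positions, and "all" accepts everything. The Dummy class is a hypothetical stand-in for a parsed gff entry, and the sketch assumes ANNOgesic and its plotting dependencies are installed so the module imports cleanly:

from annogesiclib.gene_express_analysis import set_cutoff

class Dummy:
    start, end = 1, 100  # a 100 nt feature

detects = {"express": 60}  # 60 positions met the coverage cutoff
diff, cutoff = set_cutoff("frag_1", "p_0.5", "p_0.5", detects, Dummy())
print(diff, cutoff)  # 0.6 0.5 -> the feature passes the 50% threshold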
import math from annogesiclib.gff3 import Gff3Parser from annogesiclib.helper import Helper def import_to_operon(start, end, strand): return {"start": start, "end": end, "strand": strand} def get_gene_info(cds): if "locus_tag" in cds.attributes.keys(): feature = cds.attributes["locus_tag"] else: strand = Helper().get_strand_name(cds.strand) feature = "".join([cds.feature, ":", str(cds.start), "-", str(cds.end), "_", strand]) return feature def get_term_feature(ta, data, term_fuzzy, features, datas, ta_check_point, data_check_start, data_check_end): '''verify and get proper terminator to operon''' jump = False if (ta.strand == data.strand) and ( ta.seq_id == data.seq_id) and ( (math.fabs(data.start - ta_check_point) <= term_fuzzy) or ( math.fabs(data.end - ta_check_point) <= term_fuzzy) or ( (ta_check_point >= data.start) and ( ta_check_point <= data.end))): features["detect"] = True if (ta.strand == data.strand) and ( ta.seq_id == data.seq_id): if (ta.start <= data_check_start) and ( ta.end >= data_check_end): features["num"] += 1 datas.append(data) elif (ta_check_point >= data.start) and ( ta_check_point <= data.end): features["num"] += 1 datas.append(data) if (ta.seq_id == data.seq_id) and ( data.start - term_fuzzy > ta.end): jump = True return jump def get_tss_feature(ta, data, features, tss_fuzzy, datas, ta_check_point, data_check_start, data_check_end): '''verify and get the proper TSS for operon''' jump = False if (ta.strand == data.strand) and ( ta.seq_id == data.seq_id) and ( math.fabs(ta_check_point - data.start) <= tss_fuzzy): features["detect"] = True if (ta.strand == data.strand) and ( ta.seq_id == data.seq_id) and ( ta.start <= data_check_start) and ( ta.end >= data_check_end): features["num"] += 1 datas.append(data) if (ta.seq_id == data.seq_id) and ( data_check_end > ta.end): jump = True return jump def detect_features(ta, inputs, feature, term_fuzzy, tss_fuzzy): '''Detect the feature which should group as a operon''' features = {"num": 0, "detect": False} datas = [] for data in inputs: if (feature == "term"): if ta.strand == "+": jump_term = get_term_feature(ta, data, term_fuzzy, features, datas, ta.end, data.start, data.start - term_fuzzy) elif ta.strand == "-": jump_term = get_term_feature(ta, data, term_fuzzy, features, datas, ta.start, data.end + term_fuzzy, data.end) if jump_term: break elif (feature == "tss"): if ta.strand == "+": jump_tss = get_tss_feature(ta, data, features, tss_fuzzy, datas, ta.start, data.start + tss_fuzzy, data.end) elif ta.strand == "-": jump_tss = get_tss_feature(ta, data, features, tss_fuzzy, datas, ta.end, data.end, data.start - tss_fuzzy) if jump_tss: break else: if feature == "gene": if (ta.strand == data.strand) and ( ta.seq_id == data.seq_id) and ( data.feature == "gene"): if ((ta.start <= data.start) and ( ta.end >= data.end)) or ( (ta.start >= data.start) and ( ta.end <= data.end)) or ( (ta.start >= data.start) and ( ta.start <= data.end) and ( ta.end >= data.end)) or ( (ta.start <= data.start) and ( ta.end <= data.end) and ( ta.end >= data.start)): features["num"] += 1 features["detect"] = True datas.append(data) return {"data_list": datas, "num_feature": features["num"], "with_feature": features["detect"]} def check_conflict(genes, pos, strand): '''check TSS which is not primary or secondary TSS''' conflict = False for gene in genes["data_list"]: if (gene.strand == strand): if (gene.start < pos) and ( gene.end >= pos): conflict = True break return conflict def check_gene(tsss, genes, strand, ta_pos, first, min_length, end, operons, 
operon_pos): '''Check TSS and annotated feature. It can group the feature and TSS to be operon or sub-operon''' no_count_tsss = [] for tss in tsss: if tss not in no_count_tsss: end_points = [ta_pos] for pos in tsss: if (pos not in no_count_tsss) and ( tss.start != pos.start): end_points.append(pos.start) end_points.append(end) if tss.strand == "+": end_points.sort() else: end_points.sort(reverse=True) for point in end_points: detect_pos = False if tss.strand == "+": for gene in genes["data_list"]: if (gene.seq_id == tss.seq_id) and ( gene.strand == tss.strand): if (gene.start >= tss.start) and ( gene.end <= point): detect_pos = True break else: for gene in genes["data_list"]: if (gene.seq_id == tss.seq_id) and ( gene.strand == tss.strand): if (gene.start >= point) and ( gene.end <= tss.start): detect_pos = True break if not detect_pos: no_count_tsss.append(tss) else: operon_pos, first = compute_sub_operon( strand, point, ta_pos, first, min_length, end, operons, operon_pos) break def sub_operon_gene_conflict(tsss, strand, genes, ta_pos, first, min_length, end, operons, operon_pos): '''remove the TSS which is not primary or secondary TSS of gene This TSS can not form sub-operon''' new_tsss = [] for tss in tsss["data_list"]: conflict = check_conflict(genes, tss.start, strand) if not conflict: new_tsss.append(tss) check_gene(new_tsss, genes, strand, ta_pos, first, min_length, end, operons, operon_pos) def sub_operon(strand, tsss, ta_pos, end, genes, min_length): '''verify the sub-operon''' first = True operons = [] operon_pos = ta_pos if tsss["with_feature"]: if tsss["num_feature"] == 1: pass else: sub_operon_gene_conflict( tsss, strand, genes, ta_pos, first, min_length, end, operons, operon_pos) else: sub_operon_gene_conflict( tsss, strand, genes, ta_pos, first, min_length, end, operons, operon_pos) return operons def compute_sub_operon(strand, point, ta_pos, first, min_length, end, operons, operon_pos): '''For computting and import the sub-operon''' if first: operon_pos = ta_pos first = False if math.fabs(point - operon_pos) > min_length: if strand == "+": operons.append(import_to_operon(operon_pos, point - 1, strand)) operon_pos = point else: operons.append(import_to_operon(point + 1, operon_pos, strand)) operon_pos = point return operon_pos, first def read_gff(ta_file, gff_file, tss_file, terminator_file): tas = [] gffs = [] tss_gffs = [] term_gffs = [] gff_parser = Gff3Parser() for ta in gff_parser.entries(open(ta_file)): tas.append(ta) for entry in gff_parser.entries(open(gff_file)): gffs.append(entry) if tss_file is not False: for entry in gff_parser.entries(open(tss_file)): tss_gffs.append(entry) tss_gffs = sorted(tss_gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) if terminator_file is not False: for entry in gff_parser.entries(open(terminator_file)): term_gffs.append(entry) term_gffs = sorted(term_gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return tas, gffs, tss_gffs, term_gffs def print_file(ta, operons, out, operon_id, whole_operon, tsss, terms, genes, whole_gene, out_g): attribute_string = ";".join( ["=".join(items) for items in [ ("ID", "_".join([ta.seq_id, operon_id.replace("_", "")])), ("Name", operon_id), ("associated_gene", ",".join(whole_gene))]]) out_g.write("{0}\tANNOgesic\toperon\t{1}" "\t{2}\t.\t{3}\t.\t{4}\n".format( ta.seq_id, str(whole_operon.start), str(whole_operon.end), whole_operon.strand, 
attribute_string)) if len(operons) <= 1: out.write("\t".join([operon_id, ta.seq_id, "-".join([str(whole_operon.start), str(whole_operon.end)]), whole_operon.strand, "0", "NA", str(tsss["with_feature"]), str(tsss["num_feature"]), str(terms["with_feature"]), str(terms["num_feature"]), "NA", str(genes["num_feature"]), "NA", ", ".join(whole_gene)]) + "\n") else: for sub in operons: sub_gene = [] num_sub_gene = 0 for gene in genes["data_list"]: if (sub["strand"] == gene.strand) and ( sub["start"] <= gene.start) and ( sub["end"] >= gene.end): if "locus_tag" in gene.attributes.keys(): sub_gene.append(gene.attributes["locus_tag"]) else: sub_gene.append("".join([gene.feature, ":", str(gene.start), "-", str(gene.end), "_", gene.strand])) num_sub_gene += 1 if num_sub_gene == 0: sub_gene.append("NA") out.write("\t".join([operon_id, ta.seq_id, "-".join([str(whole_operon.start), str(whole_operon.end)]), whole_operon.strand, str(len(operons)), "-".join([str(sub["start"]), str(sub["end"])]), str(tsss["with_feature"]), str(tsss["num_feature"]), str(terms["with_feature"]), str(terms["num_feature"]), str(num_sub_gene), str(genes["num_feature"]), ", ".join(sub_gene), ", ".join(whole_gene)]) + "\n") def operon(ta_file, tss_file, gff_file, terminator_file, tss_fuzzy, term_fuzzy, min_length, out_file, out_gff): '''main part for detection of operon''' out = open(out_file, "w") out_g = open(out_gff, "w") out_g.write("##gff-version 3\n") out.write("Operon_ID\tGenome\tOperon_position\tStrand\t") out.write("Number_of_suboperon\tPosition_of_suboperon\tStart_with_TSS\t") out.write("Number_of_TSS\tTerminated_with_terminator\t") out.write("Number_of_terminator\tNumber_of_gene_associated_suboperon\t") out.write("Number_of_gene_associated_operon\t") out.write("Associated_genes_with_suboperon\t") out.write("Associated_genes_with_whole_operon\n") num_operon = 0 tas, gffs, tss_gffs, term_gffs = read_gff(ta_file, gff_file, tss_file, terminator_file) for ta in tas: whole_gene = [] check_operon = False if (math.fabs(ta.start - ta.end) >= min_length): whole_operon = ta check_operon = True genes = detect_features(ta, gffs, "gene", term_fuzzy, tss_fuzzy) if len(tss_gffs) != 0: tsss = detect_features(ta, tss_gffs, "tss", term_fuzzy, tss_fuzzy) else: tsss = {"with_feature": "NA", "num_feature": "NA"} if terminator_file is None: terms = {"with_feature": "NA", "num_feature": "NA"} else: terms = detect_features(ta, term_gffs, "term", term_fuzzy, tss_fuzzy) if len(tss_gffs) != 0: if ta.strand == "+": operons = sub_operon(ta.strand, tsss, ta.start, ta.end, genes, min_length) else: operons = sub_operon(ta.strand, tsss, ta.end, ta.start, genes, min_length) else: operons = [{"start": ta.start, "end": ta.end, "strand": ta.strand}] if genes["num_feature"] != 0: for gene in genes["data_list"]: whole_gene.append(get_gene_info(gene)) else: whole_gene.append("NA") if check_operon: if whole_gene != ["NA"]: operon_id = "Operon" + str(num_operon) num_operon += 1 if len(tss_gffs) != 0: print_file(ta, operons, out, operon_id, whole_operon, tsss, terms, genes, whole_gene, out_g) else: print_file(ta, operons, out, operon_id, ta, tsss, terms, genes, whole_gene, out_g) out.close() out_g.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/detect_operon.py
detect_operon.py
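A hedged driver sketch for operon() above; all file names are placeholders. The fuzzy parameters are the allowed distances (in nt) between a transcript edge and a TSS or terminator, and min_length is the shortest transcript considered for an operon:

from annogesiclib.detect_operon import operon

operon("transcripts.gff", "TSSs.gff", "annotation.gff", "terminators.gff",
       tss_fuzzy=5, term_fuzzy=30, min_length=20,
       out_file="operons.csv", out_gff="operons.gff")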
import os import sys from annogesiclib.gen_screenshots import gen_screenshot from annogesiclib.helper import Helper class Screen(object): '''generation of screenshot''' def __init__(self, args_sc, out_folder): self.helper = Helper() args_sc.output_folder = out_folder filename = args_sc.fasta.split("/")[-1] self.strain = ".".join(filename.split(".")[0:-1]) self.helper.check_make_folder(os.path.join(args_sc.output_folder, self.strain)) self.forward_file = os.path.join(args_sc.output_folder, self.strain, "forward") self.reverse_file = os.path.join(args_sc.output_folder, self.strain, "reverse") os.mkdir(self.forward_file) os.mkdir(self.reverse_file) def _import_libs(self, texs, strand, lib_dict): if strand == "+": tex = "ft" notex = "fn" else: tex = "rt" notex = "rn" for flib in texs: if (flib[1] == "tex"): lib_dict[tex].append(flib[0]) for nlib in texs: if (nlib[1] == "notex") and \ (flib[2] == nlib[2]) and \ (flib[3] == nlib[3]): lib_dict[notex].append(nlib[0]) def screenshot(self, args_sc, log): lib_dict = {"ft": [], "fn": [], "rt": [], "rn": [], "ff": [], "rf": []} f_texs = [] r_texs = [] if args_sc.tlibs is not None: for lib in args_sc.tlibs: lib_datas = lib.split(":") if not lib_datas[0].endswith(".wig"): log.write("Wiggle files should end with .wig.\n") print("Error: Wiggle files should end with .wig!") sys.exit() else: if lib_datas[-1] == "+": f_texs.append(lib_datas) else: r_texs.append(lib_datas) f_texs = sorted(f_texs, key=lambda x: (x[1], x[2], x[3])) r_texs = sorted(r_texs, key=lambda x: (x[1], x[2], x[3])) self._import_libs(f_texs, "+", lib_dict) self._import_libs(r_texs, "-", lib_dict) if args_sc.flibs is not None: for lib in args_sc.flibs: lib_datas = lib.split(":") if not lib_datas[0].endswith(".wig"): log.write("Wiggle files should end with .wig.\n") print("Error: Wiggle files should end with .wig!") sys.exit() else: if lib_datas[-1] == "+": lib_dict["ff"].append(lib_datas[0]) else: lib_dict["rf"].append(lib_datas[0]) log.write("Running gen_screenshots.py to generate IGV batch script.\n") gen_screenshot(args_sc, lib_dict, self.forward_file + ".txt", self.reverse_file + ".txt", self.strain) log.write("\t" + self.forward_file + ".txt is generated.\n") log.write("\t" + self.reverse_file + ".txt is generated.\n") if (args_sc.tlibs is None) and (args_sc.flibs is None): log.write("No wig files can be found.\n") print("Error: There is no wig file assigned!") sys.exit()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/screen.py
screen.py
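A sketch of the library string convention that Screen.screenshot() above expects, "<file>.wig:<tex|notex>:<condition>:<replicate>:<strand>", as inferred from the parsing code; for illustration it calls the private _import_libs() helper directly (the method does not touch self, so None is passed):

from annogesiclib.screen import Screen

lib_dict = {"ft": [], "fn": [], "rt": [], "rn": [], "ff": [], "rf": []}
f_texs = [lib.split(":") for lib in
          ["wt_tex.wig:tex:1:a:+", "wt_notex.wig:notex:1:a:+"]]
f_texs = sorted(f_texs, key=lambda x: (x[1], x[2], x[3]))
Screen._import_libs(None, f_texs, "+", lib_dict)
print(lib_dict["ft"], lib_dict["fn"])  # ['wt_tex.wig'] ['wt_notex.wig']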
import os import sys import csv import shutil from subprocess import call from annogesiclib.multiparser import Multiparser from annogesiclib.helper import Helper from annogesiclib.format_fixer import FormatFixer from annogesiclib.extract_psortb import extract_psortb from annogesiclib.stat_sublocal import stat_sublocal from annogesiclib.gff3 import Gff3Parser class SubLocal(object): '''detection of subcellular localization''' def __init__(self, args_sub): self.multiparser = Multiparser() self.helper = Helper() self.fixer = FormatFixer() self.gff_path = os.path.join(args_sub.gffs, "tmp") self.fasta_path = os.path.join(args_sub.fastas, "tmp") if args_sub.trans is not None: self.tran_path = os.path.join(args_sub.trans, "tmp") else: self.tran_path = None self.out_all = os.path.join(args_sub.out_folder, "all_CDSs") self.out_express = os.path.join(args_sub.out_folder, "expressed_CDSs") self.all_tmp_path = os.path.join(self.out_all, "tmp") self.express_tmp_path = os.path.join(self.out_express, "tmp") self.all_stat_path = os.path.join(self.out_all, "statistics") self.express_stat_path = os.path.join(self.out_express, "statistics") self.all_tmp_result = os.path.join(self.out_all, "tmp_results") self.express_tmp_result = os.path.join(self.out_express, "tmp_results") self.all_result = os.path.join(self.out_all, "psortb_results") self.express_result = os.path.join(self.out_express, "psortb_results") self.endfix_table = "table.csv" self.endfix_raw = "raw.txt" self._make_folder() def _make_folder(self): self.helper.check_make_folder(self.out_all) self.helper.check_make_folder(self.out_express) self.helper.check_make_folder(self.all_stat_path) self.helper.check_make_folder(self.express_stat_path) self.helper.check_make_folder(self.all_result) self.helper.check_make_folder(self.express_result) def _compare_cds_tran(self, gff_file, tran_file, log): '''compare CDS and transcript to find the expressed CDS''' log.write("Comparing transcripts and CDSs to get expressed CDSs.\n") out = open(os.path.join(self.out_all, "tmp_cds.gff"), "w") cdss = [] fh = open(gff_file) th = open(tran_file) for entry in Gff3Parser().entries(fh): if entry.feature == "CDS": cdss.append(entry) trans = [] for entry in Gff3Parser().entries(th): trans.append(entry) for cds in cdss: for ta in trans: if (cds.strand == ta.strand) and ( cds.seq_id == ta.seq_id): if ((cds.end < ta.end) and ( cds.end > ta.start) and ( cds.start <= ta.start)) or ( (cds.start > ta.start) and ( cds.start < ta.end) and ( cds.end >= ta.end)) or ( (cds.end >= ta.end) and ( cds.start <= ta.start)) or ( (cds.end <= ta.end) and ( cds.start >= ta.start)): out.write(cds.info + "\n") break fh.close() th.close() out.close() log.write("\t" + os.path.join(self.out_all, "tmp_cds.gff") + " is " "temporary generated.\n") def _get_protein_seq(self, gff, tmp_path, tran_path, args_sub, log): prefix = gff.replace(".gff", "") fasta = self.helper.get_correct_file(self.fasta_path, ".fa", prefix, None, None) dna_seq_file = os.path.join(tmp_path, "_".join([prefix, "dna.fa"])) print("Generating CDS fasta files of {0}".format(prefix)) if tran_path is not None: tran_file = os.path.join(tran_path, "_".join([ prefix, "transcript.gff"])) if (os.path.exists(tran_file)): log.write("Predicting subcellular localization for expressed " "CDSs for {0}.\n".format(prefix)) self._compare_cds_tran(os.path.join(self.gff_path, gff), tran_file, log) log.write("Running helper.py to extract sequences for CDSs.\n") self.helper.get_cds_seq(os.path.join(self.out_all, "tmp_cds.gff"), fasta, dna_seq_file) 
os.remove(os.path.join(self.out_all, "tmp_cds.gff")) else: log.write("Predicting subcellular localization for all CDSs for " "{0}.\n".format(prefix)) log.write("Running helper.py to extract sequences for CDSs.\n") self.helper.get_cds_seq(os.path.join(self.gff_path, gff), fasta, dna_seq_file) log.write("\t" + dna_seq_file + " is generated.\n") print("Transfering DNA sequences to protein sequence of {0}".format( prefix)) log.write("Running helper.py to translate DNA sequences to Protein " "sequences.\n") tmp_file = os.path.join(args_sub.out_folder, "tmp") if os.path.exists(dna_seq_file): self.helper.translation(dna_seq_file, tmp_file) prot_seq_file = os.path.join( tmp_path, "_".join([prefix, "protein.fa"])) self.fixer.fix_emboss(tmp_file, prot_seq_file) log.write(prot_seq_file + " is generated.\n") os.remove(tmp_file) return prefix def _psortb(self, psortb_path, strain_type, prot_seq_file, out_raw, out_err, log): log.write(" ".join([psortb_path, strain_type, prot_seq_file]) + "\n") call([psortb_path, strain_type, prot_seq_file], stdout=out_raw, stderr=out_err) def _run_psortb(self, args_sub, prefix, out_folder, tmp_path, tmp_result, log): print("Running psortb of {0}".format(prefix)) log.write("Running Psortb for predict subcellular localization for " "{0}.\n".format(prefix)) out_err = open(os.path.join(out_folder, "tmp_log"), "w") out_raw = open(os.path.join(tmp_result, "_".join([prefix, self.endfix_raw])), "w") prot_seq_file = os.path.join(tmp_path, "_".join([prefix, "protein.fa"])) if args_sub.gram == "positive": self._psortb(args_sub.psortb_path, "-p", prot_seq_file, out_raw, out_err, log) elif args_sub.gram == "negative": self._psortb(args_sub.psortb_path, "-n", prot_seq_file, out_raw, out_err, log) else: log.write("Please assign \"positive\" or \"negative\" to " "--bacteria_type.\n") print("Error: {0} is not a proper bacteria type! 
" "Please assign positive or negative.".format( args_sub.gram)) sys.exit() log.write("\t" + os.path.join(tmp_result, "_".join([ prefix, self.endfix_raw])) + " is temporary generated.\n") out_err.close() out_raw.close() def _extract_result(self, args_sub, tmp_psortb_path, prefix, gff_file, log): '''extract the result of psortb''' log.write("Running extract_psortb.py to extract the information of " "localization.\n") extract_psortb(os.path.join( tmp_psortb_path, "_".join([prefix, self.endfix_raw])), os.path.join(tmp_psortb_path, "_".join([ prefix, self.endfix_table])), None, None, args_sub.fuzzy) log.write("\t" + os.path.join(tmp_psortb_path, "_".join([ prefix, self.endfix_table])) + " is tempoaray generated.\n") def _remove_header(self, out_all): out = open(out_all + "_tmp", "w") fh = open(out_all, "r") out.write("\t".join(["#Genome", "Protein", "Strand", "Start", "End", "Location", "Score"]) + "\n") for row in csv.reader(fh, delimiter='\t'): if row[0] != "#Genome": out.write("\t".join(row) + "\n") out.close() fh.close() shutil.move(out_all + "_tmp", out_all) def _merge_and_stat(self, gffs, tmp_psortb_path, stat_path, psortb_result, log): for folder in os.listdir(gffs): if folder.endswith(".gff_folder"): prefix = folder.replace(".gff_folder", "") self.helper.check_make_folder( os.path.join(psortb_result, prefix)) merge_table = os.path.join( psortb_result, prefix, "_".join([prefix, self.endfix_table])) for gff in os.listdir(os.path.join(gffs, folder)): result = self.helper.get_correct_file( tmp_psortb_path, "_" + self.endfix_raw, gff.replace(".gff", ""), None, None) shutil.copy(result, os.path.join(psortb_result, prefix)) result = self.helper.get_correct_file( tmp_psortb_path, "_" + self.endfix_table, gff.replace(".gff", ""), None, None) self.helper.merge_file(result, merge_table) log.write("\t" + merge_table + "\n") self._remove_header(merge_table) self.helper.check_make_folder(os.path.join(stat_path, prefix)) stat_folder = os.path.join(stat_path, prefix) stat_file = os.path.join(stat_folder, "_".join([ "stat", prefix, "sublocal.csv"])) stat_sublocal(merge_table, os.path.join(stat_folder, prefix), stat_file) for file_ in os.listdir(stat_folder): log.write("\t" + os.path.join(stat_folder, file_) + "\n") def _remove_tmps(self, args_sub): self.helper.remove_tmp_dir(args_sub.fastas) self.helper.remove_tmp_dir(args_sub.gffs) self.helper.remove_all_content(args_sub.out_folder, "tmp", "dir") self.helper.remove_all_content(self.out_all, "tmp", "dir") self.helper.remove_all_content(self.out_express, "tmp", "dir") os.remove(os.path.join(self.out_all, "tmp_log")) if args_sub.trans is not None: os.remove(os.path.join(self.out_express, "tmp_log")) self.helper.remove_tmp_dir(args_sub.trans) def run_sub_local(self, args_sub, log): for gff in os.listdir(args_sub.gffs): if gff.endswith(".gff"): self.helper.check_uni_attributes(os.path.join( args_sub.gffs, gff)) self.multiparser.parser_gff(args_sub.gffs, None) self.multiparser.parser_fasta(args_sub.fastas) if args_sub.trans is not None: self.multiparser.parser_gff(args_sub.trans, "transcript") self.helper.check_make_folder(self.express_tmp_path) self.helper.check_make_folder(self.express_tmp_result) self.helper.check_make_folder(self.all_tmp_path) self.helper.check_make_folder(self.all_tmp_result) for gff in os.listdir(self.gff_path): if args_sub.trans is not None: print("Running expressed genes now") prefix = self._get_protein_seq(gff, self.express_tmp_path, self.tran_path, args_sub, log) self._run_psortb(args_sub, prefix, self.out_express, 
self.express_tmp_path, self.express_tmp_result, log) self._extract_result(args_sub, self.express_tmp_result, prefix, os.path.join(self.gff_path, gff), log) print("Running all genes now") prefix = self._get_protein_seq(gff, self.all_tmp_path, None, args_sub, log) self._run_psortb(args_sub, prefix, self.out_all, self.all_tmp_path, self.all_tmp_result, log) self._extract_result(args_sub, self.all_tmp_result, prefix, os.path.join(self.gff_path, gff), log) log.write("Running stat_sublocal.py to do statistics, generate " "merged tables, and plot figures.\n") log.write("The following files are generated:\n") self._merge_and_stat(args_sub.gffs, self.all_tmp_result, self.all_stat_path, self.all_result, log) if args_sub.trans is not None: self._merge_and_stat(args_sub.gffs, self.express_tmp_result, self.express_stat_path, self.express_result, log) self._remove_tmps(args_sub)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sublocal.py
sublocal.py
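The Psortb invocation that _run_psortb() above issues boils down to the call below; the binary path and file names are placeholders, and gram "positive" selects -p while "negative" selects -n:

from subprocess import call

with open("demo_raw.txt", "w") as out_raw, open("psortb.log", "w") as out_err:
    # Mirrors the call() in _psortb: raw prediction to stdout, errors to a log.
    call(["/usr/local/bin/psort", "-p", "demo_protein.fa"],
         stdout=out_raw, stderr=out_err)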
import os import sys import copy import shutil import csv import re from Bio.Seq import Seq from annogesiclib.gff3 import Gff3Parser class Helper(object): '''For some small and regular modules for ANNOgesic''' def __init__(self): self.gff3parser = Gff3Parser() def feature_without_notgene(self, entry): if (entry.feature != "gene") and ( entry.feature != "exon") and ( entry.feature != "source") and ( entry.feature != "region") and ( entry.feature != "repeat_region") and ( entry.feature != "transcript") and ( entry.feature != "STS") and ( entry.feature != "remark"): utr_markers = r'[^\'\ \-35]' for sub_f in entry.feature.lower().split("utr"): match = (re.search(utr_markers, sub_f)) if match is None: pass else: return True return False def get_strand_name(self, strand): '''change the strand name to f/r''' name = "" if strand == "+": name = "f" else: name = "r" return name def _fix_break_line(self, tar, prefix): '''fix the break line at the end of file''' tmp_out = open("tmp_file", "w") first = True with open(tar) as fh: for line in fh: line = line.strip() if (prefix == ">"): if (prefix in line) and (first): first = False elif (prefix in line) and (not first) and ( not line.startswith(prefix)): line = line.replace(prefix, "\n" + prefix) else: row = line.split("\t") if (len(row) > 9): for strain in prefix: if strain in line: line = line.replace(strain, "\n" + strain) break tmp_out.write(line + "\n") tmp_out.close() os.remove(tar) shutil.move("tmp_file", tar) def merge_file(self, ref, tar): '''merge two files''' os.system(" ".join(["cat", ref, ">>", tar])) if tar.endswith(".fa"): self._fix_break_line(tar, ">") elif tar.endswith(".gff"): strains = [] fh = open(ref, "r") for row in csv.reader(fh, delimiter='\t'): if row[0] not in strains: strains.append(row[0]) fh.close() self._fix_break_line(tar, strains) def merge_blast_out(self, ref, tar): tmp_out = tar + "_tmp" out = open(tmp_out, "w") file_num = 0 for file_ in (ref, tar): start = False finish = False with open(file_) as fh: for line in fh: check_line = line.strip() if (check_line.startswith("Query")): start = True if (not start) and (not finish) and (file_num == 0): out.write(line) if start and (not check_line.startswith("Database")): out.write(line) if (check_line.startswith("Database")) and start: if file_num == 1: out.write(line) else: start = False finish = True file_num += 1 os.remove(tar) os.remove(ref) shutil.move(tmp_out, tar) def remove_all_content(self, folder, feature, data_type): '''remove all files in one folder''' for file_ in os.listdir(folder): remove = False if feature is None: remove = True else: if feature in file_: remove = True if remove: target = os.path.join(folder, file_) if (data_type == "file") and os.path.isfile(target): os.remove(target) elif (data_type == "dir") and os.path.isdir(target): shutil.rmtree(target) def move_all_content(self, ref_folder, tar_folder, features): '''move all files form one folder to another one''' for file_ in os.listdir(ref_folder): move = False if (features is not None): move = True for feature in features: if (feature not in file_): move = False elif (features is None): move = True if move: shutil.move(os.path.join(ref_folder, file_), os.path.join(tar_folder, file_)) def remove_tmp(self, folder): '''remove the tmp folder''' if folder: if os.path.isdir(os.path.join(folder, "tmp")): shutil.rmtree(os.path.join(folder, "tmp")) self.remove_all_content(folder, "_folder", "dir") def remove_tmp_dir(self, folder): if folder is not None: if os.path.isdir(folder): shutil.rmtree(folder) def 
remove_tmp_dir_ori(self, ori_folder, folder, types): if folder is not None: if os.path.isdir(folder): for file_ in os.listdir(ori_folder): for type_ in types: ori_file = os.path.join(ori_folder, file_) if (file_.endswith(type_)) and ( os.path.isfile(ori_file)): shutil.move(ori_file, ori_file + "_old") for file_ in os.listdir(folder): if os.path.isfile(os.path.join(folder, file_)): shutil.move(os.path.join(folder, file_), ori_folder) shutil.rmtree(folder) def remove_wigs(self, wigs): '''remove the merge wig folder which is generated by ANNOgesic''' if wigs: folder = wigs.split("/") folder = "/".join(folder[:-1]) if os.path.isdir(os.path.join(folder, "merge_wigs")): shutil.rmtree(os.path.join(folder, "merge_wigs")) self.remove_tmp(wigs) def get_correct_file(self, datas, feature, prefix, for_wig_type, libs): '''get the correct file by comparing the strain name''' detect = False for data in os.listdir(datas): if os.path.isfile(os.path.join(datas, data)): if for_wig_type is None: if feature in data: file_ = data[:-1 * len(feature)] if prefix == file_: detect = True return os.path.join(datas, data) else: filename = data.split("_STRAIN_") if ("reverse" in data) and ("forward" in data): print("Error: Assign reverse or forward wigs!!!") sys.exit() elif (prefix == filename[-1][:-1 * len(feature)]): if (for_wig_type == "forward"): for lib in libs: if (filename[0] in lib) and (lib[-1] == "+"): return os.path.join(datas, data) if (for_wig_type == "reverse"): for lib in libs: if (filename[0] in lib) and (lib[-1] == "-"): return os.path.join(datas, data) if detect: detect = False else: print("Warning: No proper file - " + prefix + feature) return None def check_make_folder(self, folder): '''make new folder (if the folder exists, it will remove it and create new one)''' path = os.path.dirname(folder) gen_folder = os.path.basename(folder) if gen_folder in os.listdir(path): shutil.rmtree(os.path.join(path, gen_folder)) os.mkdir(os.path.join(path, gen_folder)) def sort_gff(self, gff_file, out_file): gffs = [] g_f = open(gff_file, "r") for entry in self.gff3parser.entries(g_f): gffs.append(entry) g_f.close() sort_gffs = sorted(gffs, key=lambda x: (x.seq_id, x.start, x.end, x.strand)) out = open(out_file, "w") out.write("##gff-version 3\n") for gff in sort_gffs: out.write("\t".join([str(field) for field in [ gff.seq_id, gff.source, gff.feature, gff.start, gff.end, gff.score, gff.strand, gff.phase, gff.attribute_string]]) + "\n") out.close() def extract_gene(self, seq, start, end, strand): '''extract gene seqence''' fasta = '' if strand == "+": return seq[(int(start)-1):int(end)] else: rev_seq = seq[(int(start)-1):int(end)] fasta = self._reverse_seq(rev_seq) return fasta def _reverse_seq(self, rev_seq): '''deal with the reverse strand''' fasta = "" for base in rev_seq[::-1]: if base.upper() == 'A': fasta = fasta + 'T' elif base.upper() == 'T': fasta = fasta + 'A' elif base.upper() == 'C': fasta = fasta + 'G' elif base.upper() == 'G': fasta = fasta + 'C' return fasta def _add_element(self, list_, type_, gff): if type_ in gff.attributes.keys(): list_.add(gff.attributes[type_]) def check_uni_attributes(self, gff_file): '''check the attributes of gff filee, ie ID has to be unique''' print("Checking gff file of {0}".format(gff_file.split("/")[-1])) gffs = [] fh = open(gff_file) lens = {} for entry in self.gff3parser.entries(fh): if (entry.feature == "source") or ( entry.feature == "region") or ( entry.feature == "remark"): if entry.seq_id not in lens.keys(): lens[entry.seq_id] = entry.end else: if entry.end > 
lens[entry.seq_id]: lens[entry.seq_id] = entry.end print("Warning: Detect repeated source/region/remark of {0}! " "The longest end point is used as the length of the genome.".format( entry.seq_id)) gffs.append(entry) gffs = sorted(gffs, key=lambda x: (x.seq_id, x.start, x.end, x.strand)) first = True ids = set() locus_tags = set() pre_gff = None for gff in gffs: if (gff.feature != "source") and ( gff.feature != "region") and ( gff.feature != "remark"): if gff.seq_id in lens.keys(): if (gff.end > lens[gff.seq_id]): name = "".join([gff.feature, ":", str(gff.start), "-", str(gff.end), "_", gff.strand]) print("Warning: The end point of " + name + " is longer than the length of whole genome.") if "ID" not in gff.attributes.keys(): gene_name = (gff.feature + ":" + str(gff.start) + "-" + str(gff.end) + "_" + gff.strand) if (gff.feature != "exon") or (gff.feature != "Exon"): print("Warning: {0} contains no ID information, " "it may cause error!".format(gene_name)) if first: first = False self._add_element(ids, "ID", gff) self._add_element(locus_tags, "locus_tag", gff) else: if gff.seq_id == pre_gff.seq_id: if "ID" in gff.attributes.keys(): if gff.attributes["ID"] in ids: print("Warning: Repeat ID {0} " "in gff file!!!".format( gff.attributes["ID"])) else: self._add_element(ids, "ID", gff) if "locus_tag" in gff.attributes.keys(): if gff.attributes["locus_tag"] in ids: print("Warning: Repeat locus_tag {0} " "in gff file!!!".format( gff.attributes["locus_tag"])) else: self._add_element(locus_tags, "locus_tag", gff) pre_gff = copy.copy(gff) fh.close() def _read_fasta(self, fasta_file): seq_ids = [] seqs = [] seq = "" with open(fasta_file, "r") as seq_f: for line in seq_f: line = line.strip() if line.startswith(">"): if seq != "": seqs.append(seq) seq_ids.append(line[1:]) seq = "" else: seq = seq + line seqs.append(seq) return seq_ids, seqs def get_seq(self, gff_file, fasta_file, out_file): gff_f = open(gff_file, "r") out = open(out_file, "w") seq_ids, seqs = self._read_fasta(fasta_file) num = 0 pre_id = "" for entry in self.gff3parser.entries(gff_f): if entry.seq_id != pre_id: pre_id = entry.seq_id id_num = 0 for seq_id in seq_ids: if seq_id == entry.seq_id: break else: id_num += 1 gene = self.extract_gene(seqs[id_num], entry.start, entry.end, entry.strand) if "ID" in entry.attributes.keys(): id_ = entry.attributes["ID"] else: id_ = entry.feature + str(num) out.write(">{0}|{1}|{2}|{3}|{4}\n{5}\n".format( id_, entry.seq_id, entry.start, entry.end, entry.strand, gene)) num += 1 gff_f.close() out.close() def get_cds_seq(self, gff_file, fasta_file, out_file): seq_ids, seqs = self._read_fasta(fasta_file) out = open(out_file, "w") cdss = [] gh = open(gff_file) for entry in self.gff3parser.entries(gh): if entry.feature == "CDS": cdss.append(entry) cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) for entry in cdss: for seq_id, seq in zip(seq_ids, seqs): if seq_id == entry.seq_id: cds = self.extract_gene(seq, entry.start, entry.end, entry.strand) if "protein_id" in entry.attributes.keys(): protein_id = entry.attributes["protein_id"] elif "locus_tag" in entry.attributes.keys(): protein_id = entry.attributes["locus_tag"] else: protein_id = entry.attributes["ID"] out.write("_".join([">" + entry.seq_id, "_" + protein_id, entry.strand, str(entry.start), str(entry.end)]) + "\n") out.write(cds + "\n") out.close() gh.close() def translation(self, dna_file, protein_file): '''translate the DNA to residues''' out = open(protein_file, "w") with open(dna_file) as d_h: for seq in d_h: seq = 
seq.strip() if seq.startswith(">"): out.write(seq + "\n") else: dna = Seq(seq) out.write(str(dna.translate()) + "\n") out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/helper.py
helper.py
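Two small checked examples for Helper.extract_gene() above: coordinates are 1-based and inclusive, and the minus strand returns the reverse complement:

from annogesiclib.helper import Helper

seq = "ATGCGT"
print(Helper().extract_gene(seq, 1, 3, "+"))  # ATG
print(Helper().extract_gene(seq, 1, 3, "-"))  # CAT (reverse complement)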
from annogesiclib.gff3 import Gff3Parser def print_stat(feature, num, out): if num["_".join(["all", feature])] != 0: out.write("The number of {0} which is start " "from TSS: {1} ({2})\n".format( feature, num[feature], float(num[feature]) / float( num["_".join(["all", feature])]))) else: out.write("The number of {0} which is start " "from TSS: {1} ({2})\n".format( feature, num[feature], "NA")) def read_gff(gff_file, tss_file): tsss = [] gffs = [] gff_parser = Gff3Parser() fh = open(gff_file) for gff in gff_parser.entries(fh): gffs.append(gff) gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) fh.close() tss_f = open(tss_file, "r") for tss in gff_parser.entries(tss_f): tsss.append(tss) tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) tss_f.close() return gffs, tsss def compare_tss(tsss, gff, utr_length, num_all, num_strain, program): detect = False for tss in tsss: length = utr_length if (gff.feature == "CDS") or ( gff.feature == "rRNA") or ( gff.feature == "tRNA"): if (gff.seq_id == tss.seq_id) and ( gff.start < tss.start) and ( gff.strand == "+") and (tss.strand == "+"): break elif (gff.seq_id == tss.seq_id) and ( gff.end < tss.start - utr_length) and ( gff.strand == "-") and (tss.strand == "-"): break if (gff.seq_id == tss.seq_id) and ( gff.strand == "+") and (tss.strand == "+") and ( gff.start - tss.start <= utr_length) and ( gff.start - tss.start >= 0): detect = True if (gff.start - tss.start) <= length: start = tss length = (gff.start - tss.start) elif (gff.seq_id == tss.seq_id) and ( gff.strand == "-") and (tss.strand == "-") and ( tss.start - gff.end <= utr_length) and ( tss.start - gff.end >= 0): detect = True if (tss.start - gff.end) <= length: start = tss length = (tss.start - gff.end) if program == "tss": type_ = "TSS" elif program == "processing": type_ = "Cleavage" if detect: detect = False gff.attributes["start_" + type_] = ( type_ + "_" + str(start.start) + start.strand) if gff.feature == "CDS": num_all["cds"] += 1 num_strain[gff.seq_id]["cds"] += 1 elif gff.feature == "tRNA": num_all["tRNA"] += 1 num_strain[gff.seq_id]["tRNA"] += 1 elif gff.feature == "rRNA": num_all["rRNA"] += 1 num_strain[gff.seq_id]["rRNA"] += 1 def print_file(gffs, out_cds_file, stat_file, num_all, num_strain): out_cds = open(out_cds_file, "w") out_cds.write("##gff-version 3\n") for gff in gffs: attribute_string = ";".join( ["=".join(items) for items in gff.attributes.items()]) out_cds.write("\t".join([str(field) for field in [ gff.seq_id, gff.source, gff.feature, gff.start, gff.end, gff.score, gff.strand, gff.phase, attribute_string]]) + "\n") out = open(stat_file, "w") out.write("All genomes:\n") print_stat("cds", num_all, out) print_stat("tRNA", num_all, out) print_stat("rRNA", num_all, out) if len(num_strain) > 1: for strain in num_strain.keys(): out.write(strain + ":\n") print_stat("cds", num_strain[strain], out) print_stat("tRNA", num_strain[strain], out) print_stat("rRNA", num_strain[strain], out) out_cds.close() out.close() def validate_gff(tss_file, gff_file, stat_file, out_cds_file, utr_length, program): num_all = {"all_cds": 0, "all_tRNA": 0, "all_rRNA": 0, "cds": 0, "tRNA": 0, "rRNA": 0} num_strain = {} pre_seq_id = "" gffs, tsss = read_gff(gff_file, tss_file) for gff in gffs: if gff.seq_id != pre_seq_id: num_strain[gff.seq_id] = {"all_cds": 0, "all_tRNA": 0, "all_rRNA": 0, "cds": 0, "tRNA": 0, "rRNA": 0} pre_seq_id = gff.seq_id if gff.feature == "CDS": num_all["all_cds"] += 1 num_strain[gff.seq_id]["all_cds"] += 1 elif gff.feature == "tRNA": 
num_all["all_tRNA"] += 1 num_strain[gff.seq_id]["all_tRNA"] += 1 elif gff.feature == "rRNA": num_all["all_rRNA"] += 1 num_strain[gff.seq_id]["all_rRNA"] += 1 compare_tss(tsss, gff, utr_length, num_all, num_strain, program) print_file(gffs, out_cds_file, stat_file, num_all, num_strain)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/validate_gene.py
validate_gene.py
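A hedged usage sketch for validate_gff() above (file names are placeholders): each CDS/tRNA/rRNA whose start lies within utr_length nt downstream of a TSS gets a start_TSS attribute, and a statistics file is written. Passing program="processing" labels the sites as cleavage sites instead:

from annogesiclib.validate_gene import validate_gff

validate_gff("TSSs.gff", "annotation.gff", "stat_TSS_validation.csv",
             "annotation_with_TSS.gff", utr_length=300, program="tss")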
import os import sys from annogesiclib.helper import Helper from annogesiclib.multiparser import Multiparser from annogesiclib.optimize_TSSpredator import optimization def get_length(fasta_file): length = 0 with open(fasta_file) as fh: for line in fh: line = line.strip() if not line.startswith(">"): length = length + len(line) return length def optimize_tss(args_ops, log): if len(os.listdir(args_ops.gffs)) == 0: print("Error: There is no gff file!") sys.exit() if len(os.listdir(args_ops.fastas)) == 0: print("Error: There is no fasta file!") sys.exit() if len(os.listdir(args_ops.wigs)) == 0: print("Error: There is no wiggle file!") sys.exit() Multiparser().parser_wig(args_ops.wigs) Multiparser().parser_gff(args_ops.gffs, None) Multiparser().parser_fasta(args_ops.fastas) Multiparser().parser_gff(args_ops.manuals, None) gff_path = os.path.join(args_ops.gffs, "tmp") wig_path = os.path.join(args_ops.wigs, "tmp") fasta_path = os.path.join(args_ops.fastas, "tmp") manual_path = os.path.join(args_ops.manuals, "tmp") if "all" not in args_ops.strain_lengths.keys(): for strain in args_ops.strain_lengths.keys(): detect = False for man in os.listdir(manual_path): if strain == man.replace(".gff", ""): detect = True log.write("The manually-curated set is found - " "{0}\n".format( os.path.join(manual_path, man))) if not detect: log.write("The manually-curated set of {0} is not found.\n" .format(strain)) print("Error: There are genomes in --genome_lengths " "which is not contained in manually-detected " "TSS gff files!") sys.exit() for man in os.listdir(manual_path): run = False prefix = man.replace(".gff", "") man_file = os.path.join(manual_path, man) if (prefix in args_ops.strain_lengths.keys()): length = args_ops.strain_lengths[prefix] run = True elif("all" in args_ops.strain_lengths.keys()): length = "all" run = True log.write("The comparing sequence region of {0} is ".format( prefix, length)) if run: gff_file = None for gff in os.listdir(gff_path): if (gff[:-4] == prefix) and (".gff" in gff): gff_file = os.path.join(gff_path, gff) break for fa in os.listdir(fasta_path): if (".".join(fa.split(".")[:-1]) == prefix) and ( ".fa" in fa): fasta_file = os.path.join(fasta_path, fa) break if length == "all": length = get_length(fasta_file) log.write(str(length) + "\n") if gff_file is None: print("Error: No corresponding genome annotation gff file " "of {0} can be found!".format(prefix)) sys.exit() Helper().check_uni_attributes(gff_file) log.write("Running optimize_TSSpredator.py for optimization.\n") optimization(wig_path, fasta_file, gff_file, args_ops, man_file, length, prefix, log) Helper().remove_all_content(os.path.join( args_ops.output_folder, "optimized_TSSpredator"), "config", "file") Helper().remove_all_content(os.path.join( args_ops.output_folder, "optimized_TSSpredator"), "Master", "dir") Helper().remove_tmp_dir(args_ops.wigs) Helper().remove_tmp_dir(args_ops.gffs) Helper().remove_tmp_dir(args_ops.fastas) Helper().remove_tmp_dir(args_ops.manuals)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/optimize.py
optimize.py
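A small sketch of the helper above; get_length simply sums the characters of all non-header lines, so it works on any FASTA file (the path is a placeholder):

from annogesiclib.optimize import get_length

genome_length = get_length("NC_test.fa")  # hypothetical FASTA path
print(genome_length)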
from annogesiclib.gff3 import Gff3Parser def modify_position(frag, norm): '''get proper position, we choose the long one''' if frag.end < norm.end: frag.end = norm.end if frag.start > norm.start: frag.start = norm.start norm.attributes["print"] = True frag.attributes["print"] = True def print_file(data, out, name, num): attributes = {} attributes["ID"] = data.seq_id + "_transcript" + str(num) attributes["Name"] = "transcript_" + name attributes["detect_lib"] = data.attributes["detect_lib"] attribute_string = ";".join(["=".join(items) for items in attributes.items()]) out.write("\t".join([str(field) for field in [ data.seq_id, data.source, data.feature, data.start, data.end, data.score, data.strand, data.phase, attribute_string]]) + "\n") def store(data, source, finals): data.attributes["detect_lib"] = source data.attributes["print"] = False finals.append(data) def compare(data1, data2, overlap, tolerance): '''search the sRNA which can be detected in frag and tex libs. Then, try to merge them to be a longer one''' if (data1.seq_id == data2.seq_id) and (data1.strand == data2.strand): if (data1.start <= (data2.end + tolerance)) and ( data1.start >= data2.start) and ( data1.end >= (data2.end + tolerance)): modify_position(data1, data2) overlap = True elif (data1.end >= (data2.start - tolerance)) and ( data1.end <= data2.end) and ( data1.start <= (data2.start - tolerance)): modify_position(data1, data2) overlap = True elif (data1.start <= data2.start) and ( data1.end >= data2.end): modify_position(data1, data2) overlap = True elif (data2.start <= data1.start) and ( data2.end >= data1.end): modify_position(data1, data2) overlap = True return overlap def combine(frag_file, tex_file, tolerance, output_file): '''merge the results of sRNA which detected by fragmented and dRNA''' frags = [] norms = [] finals = [] out = open(output_file, "w") out.write("##gff-version 3\n") f_h = open(frag_file, "r") for entry in Gff3Parser().entries(f_h): entry.attributes["print"] = False frags.append(entry) f_h.close() n_h = open(tex_file, "r") for entry in Gff3Parser().entries(n_h): entry.attributes["print"] = False norms.append(entry) n_h.close() sort_frags = sorted(frags, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) sort_norms = sorted(norms, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) for frag in sort_frags: overlap = False for norm in sort_norms: overlap = compare(frag, norm, overlap, tolerance) if overlap: store(frag, "fragmented,tex_notex", finals) else: store(frag, "fragmented", finals) for norm in sort_norms: if not norm.attributes["print"]: store(norm, "tex_notex", finals) sort_finals = sorted(finals, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) num = 0 for tar in sort_finals: if tar.attributes["print"]: continue overlap = False for ref in sort_finals: overlap = compare(tar, ref, overlap, tolerance) name = '%0*d' % (5, num) print_file(tar, out, name, num) num += 1 out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/combine_frag_tex.py
combine_frag_tex.py
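A hedged usage sketch of combine; the GFF paths are assumptions, and the tolerance of 10 nt is an illustrative value rather than a package default:

from annogesiclib.combine_frag_tex import combine

combine("frag_sRNA.gff",    # candidates detected in fragmented libraries
        "tex_sRNA.gff",     # candidates detected in dRNA-seq (TEX) libraries
        10,                 # merging tolerance in nt
        "merged_sRNA.gff")  # merged GFF3 written here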
import os import csv import shutil from annogesiclib.gff3 import Gff3Parser def del_attributes(entry, features): attributes = {} for key, value in entry.attributes.items(): if (key not in features): attributes[key] = value return attributes def comparing(ta, ter, fuzzy_down_ta, fuzzy_up_ta, stats): '''main part for comparing terminator and transcript''' if (ta.seq_id == ter.seq_id) and ( ta.strand == ter.strand): if ta.strand == "+": if ((ta.end >= ter.start) and ( ta.end <= ter.end)) or ( (ta.end <= ter.start) and ( (ter.start - ta.end) <= fuzzy_down_ta)) or ( (ta.end >= ter.end) and ( (ta.end - ter.end) <= fuzzy_up_ta)) or ( (ta.start <= ter.start) and ( ta.end >= ter.end)): if ter.attributes["Parent"] == "NA": stats[ta.seq_id]["overlap"] += 1 ta.attributes["associated_term"] = ( "terminator:" + str(ter.start) + "-" + str(ter.end) + "_" + ter.strand) if "ID" in ta.attributes.keys(): ter.attributes["Parent"] = ta.attributes["ID"] else: ter.attributes["Parent"] = ( "transcript:" + str(ta.start) + "-" + str(ta.end) + "_" + ta.strand) else: if ((ta.start >= ter.start) and ( ta.start <= ter.end)) or ( (ta.start <= ter.start) and ( (ter.start - ta.start) <= fuzzy_up_ta)) or ( (ta.start >= ter.end) and ( (ta.start - ter.end) <= fuzzy_down_ta)) or ( (ta.start <= ter.start) and ( ta.end >= ter.end)): if ter.attributes["Parent"] == "NA": stats[ta.seq_id]["overlap"] += 1 ta.attributes["associated_term"] = ( "terminator:" + str(ter.start) + "-" + str(ter.end) + "_" + ter.strand) if "ID" in ta.attributes.keys(): ter.attributes["Parent"] = ta.attributes["ID"] else: ter.attributes["Parent"] = ( "transcript:" + str(ta.start) + "-" + str(ta.end) + "_" + ta.strand) def output_term(ters, term_file, type_, term_outfolder): out = open(term_file + "tmp", "w") out.write("##gff-version 3\n") for ter in ters: attribute_string = ";".join( ["=".join(items) for items in ter.attributes.items()]) out.write("\t".join([ter.info_without_attributes, attribute_string]) + "\n") out.close() os.remove(term_file) filename = term_file.split("/")[-1] if filename in os.listdir(term_outfolder): os.remove(os.path.join(term_outfolder, filename)) shutil.copy(term_file + "tmp", term_file) shutil.move(term_file + "tmp", os.path.join(term_outfolder, filename)) if type_ == "terminator": table_file = term_file.replace("/gffs/", "/tables/") table_file = table_file.replace(".gff", ".csv") out_t = open(table_file + "tmp", "w") out_t.write("\t".join(["Genome", "Name", "Start", "End", "Strand", "Detect", "Associated_gene", "Associated_transcript", "Coverage_decrease", "Coverage_detail"]) + "\n") fh = open(table_file, "r") for row in csv.reader(fh, delimiter='\t'): if row[0] != "genome": for ter in ters: if (row[0] == ter.seq_id) and ( row[2] == str(ter.start)) and ( row[3] == str(ter.end)) and ( row[4] == ter.strand): out_t.write("\t".join([row[0], row[1], row[2], row[3], row[4], row[5], row[6], ter.attributes["Parent"], row[7], row[8]]) + "\n") break fh.close() out_t.close() os.remove(table_file) shutil.move(table_file + "tmp", table_file) def read_gff(filename, index): gf = open(filename, "r") gff_parser = Gff3Parser() datas = [] for entry in gff_parser.entries(gf): entry.attributes[index] = "NA" datas.append(entry) datas = sorted(datas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) gf.close() return datas def compare_term_tran(trans, terms, fuzzy_up_ta, fuzzy_down_ta, out_folder, type_, term_outfolder, tran_outfolder): '''Comparison of terminator and transcript. 
It can realise the relationship of terminator and transcript''' for tran in os.listdir(trans): if tran.endswith("_transcript.gff"): prefix = tran.replace("_transcript.gff", "") out_g = open(os.path.join(trans, tran) + "tmp", "w") out_g.write("##gff-version 3\n") tas = read_gff(os.path.join(trans, tran), "associated_term") ters = read_gff(os.path.join(terms, prefix + "_term.gff"), "Parent") stats = {} pre_seq = "" for ta in tas: if ta.seq_id != pre_seq: stats[ta.seq_id] = {"all_tran": 0, "all_term": 0, "overlap": 0} pre_seq = ta.seq_id new_term = True stats[ta.seq_id]["all_tran"] += 1 for ter in ters: if new_term: stats[ta.seq_id]["all_term"] += 1 comparing(ta, ter, fuzzy_down_ta, fuzzy_up_ta, stats) if new_term: new_term = False attribute_string = ";".join( ["=".join(items) for items in ta.attributes.items()]) out_g.write("\t".join([ta.info_without_attributes, attribute_string]) + "\n") os.remove(os.path.join(trans, tran)) if tran in os.listdir(tran_outfolder): os.remove(os.path.join(tran_outfolder, tran)) shutil.copy(os.path.join(trans, tran) + "tmp", os.path.join(tran_outfolder, tran)) shutil.move(os.path.join(trans, tran) + "tmp", os.path.join(trans, tran)) output_term(ters, os.path.join(terms, prefix + "_term.gff"), type_, term_outfolder) out = open(os.path.join(out_folder, "statistics/stat_compare_transcript_terminator_" + prefix + ".csv"), "w") for strain, stat in stats.items(): out.write(strain + ":\n") out.write("\tThe overlap between transcripts " "and terminators are {0}\n".format( stat["overlap"])) out.write("\tThe overlap percentage of transcripts are {0}\n".format( float(stat["overlap"])/float(stat["all_tran"]))) out.write("\tThe overlap percentage of terminators are {0}\n".format( float(stat["overlap"])/float(stat["all_term"]))) out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/compare_tran_term.py
compare_tran_term.py
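A sketch of how compare_term_tran might be called; every path below is hypothetical. The function expects trans to contain *_transcript.gff files and terms to contain the matching *_term.gff files, and the terminator case additionally reads the corresponding CSV tables next to the gffs:

from annogesiclib.compare_tran_term import compare_term_tran

compare_term_tran(
    "transcripts/gffs",        # folder with *_transcript.gff
    "terminators/gffs",        # folder with *_term.gff
    10, 30,                    # fuzzy_up_ta, fuzzy_down_ta (nt)
    "term_output",             # statistics go to term_output/statistics/
    "terminator",              # type_ switch used by output_term
    "terminators/final_gffs",  # term_outfolder
    "transcripts/final_gffs")  # tran_outfolder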
import os import shutil import sys import time from subprocess import Popen, call from annogesiclib.multiparser import Multiparser from annogesiclib.helper import Helper from annogesiclib.potential_target import potential_target from annogesiclib.format_fixer import FormatFixer from annogesiclib.merge_rnaplex_rnaup import merge_srna_target from annogesiclib.gff3 import Gff3Parser class sRNATargetPrediction(object): '''detection of sRNA-target interaction''' def __init__(self, args_tar): self.multiparser = Multiparser() self.helper = Helper() self.fixer = FormatFixer() self.gff_parser = Gff3Parser() self.target_seq_path = os.path.join(args_tar.out_folder, "target_seqs") self.srna_seq_path = os.path.join(args_tar.out_folder, "sRNA_seqs") self.rnaplex_path = os.path.join(args_tar.out_folder, "RNAplex_results") self.rnaup_path = os.path.join(args_tar.out_folder, "RNAup_results") self.intarna_path = os.path.join(args_tar.out_folder, "IntaRNA_results") self.merge_path = os.path.join(args_tar.out_folder, "merged_results") self.srna_path = os.path.join(args_tar.srnas, "tmp") self.fasta_path = os.path.join(args_tar.fastas, "tmp") self.gff_path = os.path.join(args_tar.gffs, "tmp") self.tmps = {"tmp": "tmp_srna_target", "rnaup": "tmp_rnaup", "log": "tmp_log", "all_fa": "tmp*.fa", "all_txt": "tmp*.txt"} def _check_gff(self, gffs): for gff in os.listdir(gffs): if gff.endswith(".gff"): self.helper.check_uni_attributes(os.path.join(gffs, gff)) def _check_long_id(self, seq_file, long_ids, type_): out_file = seq_file + "_tmp.fa" out = open(out_file, "w") with open(seq_file) as f_h: for line in f_h: line = line.strip() if line.startswith(">"): if len(line) > 40: long_ids[type_].append(line[1:]) out.write(">TMP" + type_ + "_" + str(len(long_ids[type_])) + "\n") else: out.write(line + "\n") else: out.write(line + "\n") out.close() return out_file def _run_rnaplfold(self, rnaplfold_path, file_type, win_size, span, unstr_region, long_ids, seq_path, prefix, out_path, log): current = os.getcwd() os.chdir(out_path) command = " ".join([rnaplfold_path, "-W", str(win_size), "-L", str(span), "-u", str(unstr_region), "-O"]) if file_type == "sRNA": srna_seq_file = os.path.join(current, seq_path, "_".join([self.tmps["tmp"], prefix, file_type + ".fa"])) out_file = self._check_long_id(srna_seq_file, long_ids, "srna") log.write("<".join([command, out_file]) + "\n") os.system("<".join([command, out_file])) else: tar_seq_file = os.path.join(current, seq_path, "_".join([prefix, file_type + ".fa"])) for tar_seq_file in os.listdir(os.path.join(current, seq_path)): if (prefix + "_" + file_type + "_") in tar_seq_file: out_file = self._check_long_id(os.path.join( current, seq_path, tar_seq_file), long_ids, "tar") log.write("<".join([command, out_file]) + "\n") os.system("<".join([command, out_file])) os.chdir(current) def _wait_process(self, processes): for p in processes: p.wait() if p.stdout: p.stdout.close() if p.stdin: p.stdin.close() if p.stderr: p.stderr.close() try: p.kill() except OSError: pass time.sleep(5) def _sort_srna_fasta(self, fasta, prefix, path): out = open(os.path.join(path, "_".join([self.tmps["tmp"], prefix, "sRNA.fa"])), "w") srnas = [] with open(fasta) as f_h: for line in f_h: line = line.strip() if line.startswith(">"): name = line[1:] else: srnas.append({"name": name, "seq": line, "len": len(line)}) srnas = sorted(srnas, key=lambda x: (x["len"])) for srna in srnas: out.write(">" + srna["name"].split("|")[0] + "\n") out.write(srna["seq"] + "\n") out.close() def _read_fasta(self, fasta_file): seq = "" with 
open(fasta_file, "r") as seq_f: for line in seq_f: line = line.strip() if line.startswith(">"): continue else: seq = seq + line return seq def _get_specific_seq(self, srna_file, seq_file, srna_out, querys, prefix, check_q): for query in querys: if query not in check_q.keys(): check_q[query] = False srna_datas = query.split(":") srna = {"seq_id": srna_datas[0], "strand": srna_datas[3], "start": int(srna_datas[1]), "end": int(srna_datas[2])} gff_f = open(srna_file, "r") out = open(srna_out, "a") seq = self._read_fasta(seq_file) num = 0 detect = False if srna["seq_id"] == prefix: for entry in self.gff_parser.entries(gff_f): if (entry.seq_id == srna["seq_id"]) and ( entry.strand == srna["strand"]) and ( entry.start == srna["start"]) and ( entry.end == srna["end"]): detect = True check_q[query] = True if "ID" in entry.attributes.keys(): id_ = entry.attributes["ID"] else: id_ = entry.feature + str(num) gene = self.helper.extract_gene(seq, entry.start, entry.end, entry.strand) out.write(">{0}|{1}|{2}|{3}|{4}\n{5}\n".format( id_, entry.seq_id, entry.start, entry.end, entry.strand, gene)) num += 1 # if not detect: # print("Error: {} do not exist!".format(query)) # sys.exit() gff_f.close() out.close() def _gen_seq(self, prefixs, target_prefixs, args_tar): print("Generating sRNA fasta files") check_q = {} for gff in os.listdir(self.gff_path): if gff.endswith(".gff"): prefix = gff.replace(".gff", "") target_prefixs.append(prefix) detect = False for gff in os.listdir(self.gff_path): if gff.endswith(".gff"): prefix = gff.replace(".gff", "") potential_target(os.path.join(self.gff_path, gff), os.path.join(self.fasta_path, prefix + ".fa"), os.path.join(self.target_seq_path), args_tar, target_prefixs) file_num = 1 num = 0 sub_prefix = os.path.join(self.target_seq_path, "_".join([prefix, "target"])) if os.path.exists(sub_prefix + ".fa"): sub_out = open("_".join([sub_prefix, str(file_num) + ".fa"]), "w") with open((sub_prefix + ".fa"), "r") as t_f: for line in t_f: line = line.strip() if line.startswith(">"): # line = line.replace("|", "_") num += 1 if (num == 100): num = 0 file_num += 1 sub_out.close() sub_out = open("_".join([sub_prefix, str(file_num) + ".fa"]), "w") detect = True sub_out.write(line + "\n") sub_out.close() else: open(sub_prefix + ".fa", "w").close() if not detect: print("No assigned features can be found. " "Please check your genome annotation. 
" "And assign correct features to --target_feature.") sys.exit() print("Generating sRNA fasta files") for srna in os.listdir(self.srna_path): if srna.endswith("_sRNA.gff"): prefix = srna.replace("_sRNA.gff", "") prefixs.append(prefix) srna_out = os.path.join(self.srna_seq_path, "_".join([prefix, "sRNA.fa"])) if "all" in args_tar.query: self.helper.get_seq( os.path.join(self.srna_path, srna), os.path.join(self.fasta_path, prefix + ".fa"), srna_out) else: if "_".join([prefix, "sRNA.fa"]) in os.listdir( self.srna_seq_path): os.remove(srna_out) self._get_specific_seq( os.path.join(self.srna_path, srna), os.path.join(self.fasta_path, prefix + ".fa"), srna_out, args_tar.query, prefix, check_q) self._sort_srna_fasta(srna_out, prefix, self.srna_seq_path) for key, value in check_q.items(): if not value: print("Error: {} does not exist.".format(key)) sys.exit() def _run_rnaplex(self, prefix, rnaplfold_folder, args_tar, log): print("Running RNAplex of {0}".format(prefix)) num_process = 0 processes = [] for seq in os.listdir(self.target_seq_path): if ("_target_" in seq) and (".fa_tmp.fa" in seq): print("Running RNAplex with {0}".format(seq.replace(".fa_tmp.fa", ""))) out_rnaplex = open(os.path.join( self.rnaplex_path, prefix, "_".join([ prefix, "RNAplex", str(num_process) + ".txt"])), "w") num_process += 1 log.write(" ".join([args_tar.rnaplex_path, "-q", os.path.join( self.srna_seq_path, "_".join([ self.tmps["tmp"], prefix, "sRNA.fa_tmp.fa"])), "-t", os.path.join(self.target_seq_path, seq), "-l", str(args_tar.inter_length), "-e", str(args_tar.energy), "-z", str(args_tar.duplex_dist), "-a", rnaplfold_folder]) + "\n") p = Popen([args_tar.rnaplex_path, "-q", os.path.join( self.srna_seq_path, "_".join([ self.tmps["tmp"], prefix, "sRNA.fa_tmp.fa"])), "-t", os.path.join(self.target_seq_path, seq), "-l", str(args_tar.inter_length), "-e", str(args_tar.energy), "-z", str(args_tar.duplex_dist), "-a", rnaplfold_folder], stdout=out_rnaplex) processes.append(p) if num_process % args_tar.core_plex == 0: self._wait_process(processes) self._wait_process(processes) log.write("The prediction for {0} is done.\n".format(prefix)) log.write("The following temporary files for storing results of {0} are " "generated:\n".format(prefix)) for file_ in os.listdir(os.path.join(self.rnaplex_path, prefix)): log.write("\t" + os.path.join(self.rnaplex_path, prefix, file_) + "\n") return num_process def _restore_long_ids(self, rnaplex_file, long_ids): out = open(rnaplex_file + "tmp", "w") with open(rnaplex_file, "r") as t_f: for line in t_f: line = line.strip() if (line.startswith(">")): if (line.startswith(">TMPtar_")): header = long_ids["tar"][int(line.split("_")[1]) - 1] elif (line.startswith(">TMPsrna_")): header = long_ids["srna"][int(line.split("_")[1]) - 1] else: header = line[1:] out.write(">" + header + "\n") else: out.write(line + "\n") out.close() shutil.move(rnaplex_file + "tmp", rnaplex_file) def _rna_plex(self, prefixs, target_prefixs, args_tar, log): log.write("Using RNAplex and RNAplfold to predict sRNA targets.\n") log.write("Please make sure the version of Vienna RNA package is " "at least 2.3.2.\n") tmp_rnaplfold_folder = os.path.join(self.rnaplex_path, "tmp_RNAplfold") if os.path.exists(tmp_rnaplfold_folder): shutil.rmtree(tmp_rnaplfold_folder) os.mkdir(tmp_rnaplfold_folder) long_ids = {"tar": [], "srna": []} for prefix in target_prefixs: self._run_rnaplfold( args_tar.rnaplfold_path, "target", args_tar.win_size_t, args_tar.span_t, args_tar.unstr_region_rnaplex_t, long_ids, self.target_seq_path, prefix, 
tmp_rnaplfold_folder, log) for prefix in prefixs: print("Running RNAplfold of {0}".format(prefix)) self.helper.check_make_folder( os.path.join(self.rnaplex_path, prefix)) rnaplfold_folder = os.path.join(self.rnaplex_path, prefix, "RNAplfold") shutil.copytree(tmp_rnaplfold_folder, rnaplfold_folder) self._run_rnaplfold( args_tar.rnaplfold_path, "sRNA", args_tar.win_size_s, args_tar.span_s, args_tar.unstr_region_rnaplex_s, long_ids, self.srna_seq_path, prefix, rnaplfold_folder, log) num_process = self._run_rnaplex(prefix, rnaplfold_folder, args_tar, log) rnaplex_file = os.path.join(self.rnaplex_path, prefix, "_".join([prefix, "RNAplex.txt"])) if ("_".join([prefix, "RNAplex.txt"]) in os.listdir(os.path.join(self.rnaplex_path, prefix))): os.remove(rnaplex_file) for index in range(0, num_process): log.write("Using helper.py to merge the temporary files.\n") self.helper.merge_file(os.path.join( self.rnaplex_path, prefix, "_".join([ prefix, "RNAplex", str(index) + ".txt"])), rnaplex_file) if (len(long_ids["tar"]) != 0) or (len(long_ids["srna"]) != 0): self._restore_long_ids(rnaplex_file, long_ids) log.write("\t" + rnaplex_file + " is generated.\n") self.helper.remove_all_content(os.path.join( self.rnaplex_path, prefix), "_RNAplex_", "file") self.fixer.fix_rnaplex(rnaplex_file, self.tmps["tmp"]) shutil.move(self.tmps["tmp"], rnaplex_file) shutil.rmtree(rnaplfold_folder) def _run_rnaup(self, num_up, processes, prefix, out_rnaup, out_log, args_tar, log): for index in range(1, num_up + 1): out_tmp_up = open(os.path.join( args_tar.out_folder, "".join([self.tmps["rnaup"], str(index), ".txt"])), "w") out_err = open(os.path.join( args_tar.out_folder, "".join([self.tmps["log"], str(index), ".txt"])), "w") in_up = open(os.path.join( args_tar.out_folder, "".join([self.tmps["tmp"], str(index), ".fa"])), "r") log.write(" ".join([args_tar.rnaup_path, "-u", str(args_tar.unstr_region_rnaup), "-o", "--interaction_first"]) + "\n") p = Popen([args_tar.rnaup_path, "-u", str(args_tar.unstr_region_rnaup), "-o", "--interaction_first"], stdin=in_up, stdout=out_tmp_up, stderr=out_err) processes.append(p) if len(processes) != 0: time.sleep(5) self._wait_process(processes) log.write("The following temporary files for storing results of {0} are " "generated:\n".format(prefix)) for file_ in os.listdir(os.path.join(args_tar.out_folder)): log.write("\t" + os.path.join(args_tar.out_folder, file_) + "\n") os.system("rm " + os.path.join(args_tar.out_folder, self.tmps["all_fa"])) self._merge_txt(num_up, out_rnaup, out_log, args_tar.out_folder) os.system("rm " + os.path.join(args_tar.out_folder, self.tmps["all_txt"])) def _merge_txt(self, num_up, out_rnaup, out_log, out_folder): for index in range(1, num_up + 1): self.helper.merge_file( os.path.join(out_folder, "".join([self.tmps["rnaup"], str(index), ".txt"])), out_rnaup) self.helper.merge_file( os.path.join(out_folder, "".join([self.tmps["log"], str(index), ".txt"])), out_log) def _get_continue(self, out_rnaup): '''For RNAup, it can continue running RNAup based on previous run''' srnas = [] matchs = {} out = open("tmp.txt", "w") with open(out_rnaup) as f_h: for line in f_h: line = line.strip() if ">srna" in line: srna = line[1:] srnas.append(srna) matchs[srna] = [] else: matchs[srna].append(line) srnas = srnas[:-1] for srna in srnas: out.write(">" + srna + "\n") for target in matchs[srna]: out.write(target + "\n") out.close() os.remove(out_rnaup) shutil.move("tmp.txt", out_rnaup) return srnas def _rnaup(self, prefixs, target_prefixs, args_tar, log): log.write("Using RNAup to 
predict sRNA targets.\n") log.write("Please make sure the version of Vienna RNA package is " "at least 2.3.2.\n") for prefix in prefixs: srnas = [] print("Running RNAup of {0}".format(prefix)) if not os.path.exists(os.path.join(self.rnaup_path, prefix)): os.mkdir(os.path.join(self.rnaup_path, prefix)) num_up = 0 processes = [] out_rnaup = os.path.join(self.rnaup_path, prefix, "_".join([prefix + "_RNAup.txt"])) out_log = os.path.join(self.rnaup_path, prefix, "_".join([prefix + "_RNAup.log"])) if "_".join([prefix, "RNAup.txt"]) in \ os.listdir(os.path.join(self.rnaup_path, prefix)): if not args_tar.continue_rnaup: os.remove(out_rnaup) os.remove(out_log) else: log.write("The data from the previous run is found.\n") srnas = self._get_continue(out_rnaup) log.write("The previous data is loaded.\n") with open(os.path.join(self.srna_seq_path, "_".join([ self.tmps["tmp"], prefix, "sRNA.fa"])), "r") as s_f: for line in s_f: line = line.strip() if line.startswith(">"): if line[1:] in srnas: start = False continue start = True print("Running RNAup with {0}".format(line[1:])) num_up += 1 out_up = open(os.path.join(args_tar.out_folder, "".join([self.tmps["tmp"], str(num_up), ".fa"])), "w") out_up.write(line + "\n") else: if start: out_up.write(line + "\n") out_up.close() for prefix in target_prefixs: self.helper.merge_file(os.path.join( self.target_seq_path, "_".join([prefix, "target.fa"])), os.path.join(args_tar.out_folder, "".join([self.tmps["tmp"], str(num_up), ".fa"]))) if num_up == args_tar.core_up: self._run_rnaup(num_up, processes, prefix, out_rnaup, out_log, args_tar, log) processes = [] num_up = 0 self._run_rnaup(num_up, processes, prefix, out_rnaup, out_log, args_tar, log) log.write("The prediction for {0} is done.\n".format(prefix)) log.write("\t" + out_rnaup + " is complete generated and updated.\n") def _intarna(self, prefixs, target_prefixs, args_tar, log): log.write("Using IntaRNA to predict sRNA targets.\n") log.write("Please make sure the version of IntaRNA is at least 2.0.4.\n") all_target = os.path.join(self.target_seq_path, "all_target.fa") if os.path.exists(all_target): os.remove(all_target) for prefix in target_prefixs: self.helper.merge_file(os.path.join(self.target_seq_path, prefix + "_target.fa"), all_target) for prefix in prefixs: print("Running IntaRNA of {0}".format(prefix)) intarna_file = os.path.join(self.intarna_path, prefix, prefix + "_IntaRNA.txt") self.helper.check_make_folder( os.path.join(self.intarna_path, prefix)) call([args_tar.intarna_path, "-q", os.path.join( self.srna_seq_path, "_".join([ self.tmps["tmp"], prefix, "sRNA.fa"])), "-t", all_target, "--qAccW", str(args_tar.slide_win_srna), "--qAccL", str(args_tar.max_loop_srna), "--tAccW", str(args_tar.slide_win_target), "--tAccL", str(args_tar.max_loop_target), "--outMode", "C", "-m", args_tar.mode_intarna, "--threads", str(args_tar.core_inta), "--out", intarna_file]) log.write("The prediction for {0} is done.\n".format(prefix)) log.write("\t" + intarna_file + " is generated.\n") def _merge_rnaplex_rnaup(self, prefixs, target_prefixs, args_tar, log): '''merge the result of IntaRNA, RNAup and RNAplex''' log.write("Running merge_rnaplex_rnaup.py to merge the results from " "RNAplex, RNAup, and IntaRNA for generating finanl output.\n") log.write("The following files are generated:\n") all_gff = os.path.join(self.gff_path, "all.gff") if os.path.exists(all_gff): os.remove(all_gff) for prefix in target_prefixs: self.helper.merge_file(os.path.join(self.gff_path, prefix + ".gff"), all_gff) for prefix in prefixs: 
rnaplex_file = None rnaup_file = None out_rnaplex = None out_rnaup = None intarna_file = None out_intarna = None self.helper.check_make_folder(os.path.join( self.merge_path, prefix)) print("Ranking {0} now".format(prefix)) if ("RNAplex" in args_tar.program): rnaplex_file = os.path.join(self.rnaplex_path, prefix, "_".join([prefix, "RNAplex.txt"])) out_rnaplex = os.path.join( self.rnaplex_path, prefix, "_".join([prefix, "RNAplex_rank.csv"])) self._remove_repeat(rnaplex_file, "RNAplex") if ("RNAup" in args_tar.program): rnaup_file = os.path.join(self.rnaup_path, prefix, "_".join([prefix, "RNAup.txt"])) out_rnaup = os.path.join(self.rnaup_path, prefix, "_".join([prefix, "RNAup_rank.csv"])) self._remove_repeat(rnaup_file, "RNAup") if ("IntaRNA" in args_tar.program): intarna_file = os.path.join(self.intarna_path, prefix, "_".join([prefix, "IntaRNA.txt"])) out_intarna = os.path.join(self.intarna_path, prefix, "_".join([prefix, "IntaRNA_rank.csv"])) self._remove_repeat(intarna_file, "IntaRNA") overlap_file = os.path.join(self.merge_path, prefix, "_".join([prefix, "overlap.csv"])) merge_file = os.path.join(self.merge_path, prefix, "_".join([prefix, "merge.csv"])) merge_srna_target(rnaplex_file, rnaup_file, intarna_file, args_tar, out_rnaplex, out_rnaup, out_intarna, os.path.join(self.fasta_path, prefix + ".fa"), merge_file, overlap_file, os.path.join(self.srna_path, "_".join([prefix, "sRNA.gff"])), all_gff, target_prefixs) if ("RNAplex" in args_tar.program): log.write("\t" + out_rnaplex + "\n") if ("RNAup" in args_tar.program): log.write("\t" + out_rnaup + "\n") if ("IntaRNA" in args_tar.program): log.write("\t" + out_intarna + "\n") if (os.path.exists(merge_file)): log.write("\t" + merge_file + "\n") if (os.path.exists(overlap_file)): log.write("\t" + overlap_file + "\n") def _remove_rnaplex(self, line, num, pre_num, pre, checks, out_tmp, print_): if (line.startswith(">")): if (num % 2 == 1): print_ = False pre = line if (line not in checks): checks[line] = [] print_ = True elif (num % 2 == 0) and (line not in checks[pre]): checks[pre].append(line) print_ = True num = num + 1 else: if (print_): if (num != pre_num): out_tmp.write(pre + "\n") out_tmp.write(checks[pre][-1] + "\n") out_tmp.write(line + "\n") pre_num = num return num, pre_num, print_, pre, def _remove_rnaup(self, line, pre, num, pre_num, srna_info, checks, out_tmp, print_, tar): if (line.startswith(">")): print_ = False tar = False if (pre.startswith(">")): if (pre not in checks): checks[pre] = [line] srna_info = pre print_ = True else: if (line not in checks[pre]): checks[pre].append(line) print_ = True else: if (num != 1): if (line not in checks[srna_info]): checks[srna_info].append(line) print_ = True else: if (print_): if (pre_num != len(checks)): out_tmp.write(srna_info + "\n") out_tmp.write(checks[srna_info][-1] + "\n") out_tmp.write(line + "\n") else: if (not tar): out_tmp.write(checks[srna_info][-1] + "\n") out_tmp.write(line + "\n") pre_num = len(checks) tar = True pre = line num = num + 1 return num, pre_num, print_, pre, tar, srna_info def _remove_intarna(self, line, checks, tar, srna_info, seq, out_tmp): if (line.startswith(".")) or ( line.startswith("(")) or ( line.startswith(")")): seq = line.split(";")[0] if (seq not in checks[tar][srna_info]): checks[tar][srna_info].append(seq) out_tmp.write(line + "\n") else: if (len(line.split(";")) >= 8): tar = line.split(";")[0] srna_info = line.split(";")[3] seq = line.split(";")[7] if (tar not in checks): checks[tar] = {} checks[tar][srna_info] = [seq] out_tmp.write(line + "\n") 
else: if (srna_info not in checks[tar]): checks[tar][srna_info] = [seq] out_tmp.write(line + "\n") return tar, srna_info, seq def _remove_repeat(self, interact_file, type_): checks = {} seq = "" pre = "" srna_info = "" num = 1 tar = False pre_num = 0 print_ = False out_tmp = open(interact_file + "tmp", "w") with open(interact_file) as fh: for line in fh: line = line.strip() if (type_ == "RNAplex"): num, pre_num, print_, pre = self._remove_rnaplex( line, num, pre_num, pre, checks, out_tmp, print_) elif (type_ == "RNAup"): num, pre_num, print_, pre, tar, srna_info = ( self._remove_rnaup( line, pre, num, pre_num, srna_info, checks, out_tmp, print_, tar)) elif (type_ == "IntaRNA"): tar, srna_info, seq = self._remove_intarna( line, checks, tar, srna_info, seq, out_tmp) out_tmp.close() shutil.move(interact_file + "tmp", interact_file) def run_srna_target_prediction(self, args_tar, log): self._check_gff(args_tar.gffs) self._check_gff(args_tar.srnas) self.multiparser.parser_gff(args_tar.gffs, None) self.multiparser.parser_fasta(args_tar.fastas) self.multiparser.parser_gff(args_tar.srnas, "sRNA") prefixs = [] target_prefixs = [] self._gen_seq(prefixs, target_prefixs, args_tar) if ("RNAplex" in args_tar.program): self._rna_plex(prefixs, target_prefixs, args_tar, log) self.helper.remove_all_content(self.target_seq_path, "_target_", "file") if os.path.exists(os.path.join(self.rnaplex_path, "tmp_RNAplfold")): shutil.rmtree(os.path.join(self.rnaplex_path, "tmp_RNAplfold")) log.write("The temporary files for running RNAplex are deleted.\n") if ("RNAup" in args_tar.program): self._rnaup(prefixs, target_prefixs, args_tar, log) if ("IntaRNA" in args_tar.program): self._intarna(prefixs, target_prefixs, args_tar, log) self._merge_rnaplex_rnaup(prefixs, target_prefixs, args_tar, log) self.helper.remove_all_content(args_tar.out_folder, self.tmps["tmp"], "dir") self.helper.remove_all_content(args_tar.out_folder, self.tmps["tmp"], "file") self.helper.remove_tmp_dir(args_tar.gffs) self.helper.remove_tmp_dir(args_tar.srnas) self.helper.remove_tmp_dir(args_tar.fastas) self.helper.remove_all_content(self.srna_seq_path, "tmp_", "file") if os.path.exists(os.path.join(self.target_seq_path, "all_target.fa")): os.remove(os.path.join(self.target_seq_path, "all_target.fa"))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/srna_target.py
srna_target.py
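Constructing the predictor only touches the four folder attributes read in __init__, so a plain namespace is enough for a minimal sketch; all names below are assumptions, and the real object is built by the ANNOgesic CLI with many more options (program, query, core_plex, rnaplex_path, ...):

from types import SimpleNamespace
from annogesiclib.srna_target import sRNATargetPrediction

args_tar = SimpleNamespace(out_folder="target_prediction",
                           srnas="srna_gffs",
                           fastas="fastas",
                           gffs="annotation_gffs")
predictor = sRNATargetPrediction(args_tar)
# predictor.run_srna_target_prediction(args_tar, log) would then drive the
# RNAplex/RNAup/IntaRNA pipeline, given a fully populated args object and log.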
from annogesiclib.gff3 import Gff3Parser def detect_energy(line, srna): duplex = line.split(" ")[0] if ("(" in duplex) or (")" in duplex): energy = line.split(":")[1].split("(")[1].split("=")[0] energy = float(energy.strip()) if energy < srna["energy"]: srna["energy"] = energy def get_locus(gene): if gene is not None: if ("locus_tag" in gene.attributes.keys()): locus = gene.attributes["locus_tag"] else: locus = "NA" else: locus = "NA" return locus def check_parent_gene(cds, genes): target_gene = None if target_gene is None: for gene in genes: if (gene.seq_id == cds.seq_id) and ( gene.strand == cds.strand): if ((cds.start <= gene.start) and ( cds.end >= gene.end)) or ( (cds.start >= gene.start) and ( cds.end <= gene.end)) or ( (cds.start <= gene.start) and ( cds.end <= gene.end) and ( cds.end >= gene.start)) or ( (cds.start >= gene.start) and ( cds.start <= gene.end) and ( cds.end >= gene.end)): target_gene = gene if (cds.start == gene.start) and ( cds.end == gene.end): target_gene = gene break return target_gene def mod_srna_tar_pos(gff, pos, type_, pre_target, suf_target, length): if "NA" not in pos: start = int(pos.split(",")[0]) end = int(pos.split(",")[-1]) if (gff.strand == "+"): if type_ == "srna": g_start = gff.start + start - 1 g_end = gff.start + end - 1 else: if (gff.start - pre_target) <= 0: g_start = start g_end = end else: g_start = gff.start - pre_target + start - 1 g_end = gff.start - pre_target + end - 1 if (gff.start - pre_target + end - 1) > length: g_end = length else: if type_ == "srna": g_end = gff.end - start + 1 g_start = gff.end - end + 1 else: if (gff.end + pre_target) > length: g_start = length - end + 1 g_end = length - start + 1 else: g_start = gff.end + pre_target - end + 1 g_end = gff.end + pre_target - start + 1 if (gff.end + pre_target - end + 1) <= 0: g_start = 1 else: g_start = "NA" g_end = "NA" return g_start, g_end def print_rank_one(srnas, out, feature, gffs, srna_gffs, args_tar, length): out.write("\t".join(["sRNA", "sRNA_genome", "sRNA_position", "sRNA_interacted_position_" + feature, "sRNA_strand", "Target_genome", "Target_gene_ID", "Target_ID", "Target_locus_tag", "Target_position", "Target_interacted_position_" + feature, "Target_strand", "Energy_" + feature, "Rank_" + feature]) + "\n") for method, srna_datas in srnas.items(): for srna_id, targets in srna_datas.items(): rank = 0 sort_targets = sorted(targets, key=lambda k: (k["energy"])) for target in sort_targets: if (target["tar_pos"] != "NA"): if target["energy"] < 0: rank += 1 target["rank"] = rank if (rank <= args_tar.top) and (method == feature): srna_infos = get_srna_name(srna_gffs, srna_id) name = srna_infos[0] srna_info = srna_infos[1] target_info = get_target_info(gffs, target) s_start, s_end = mod_srna_tar_pos( srna_info, target["srna_pos"], "srna", args_tar.tar_start, args_tar.tar_end, length) t_start, t_end = mod_srna_tar_pos( target_info, target["tar_pos"], "tar", args_tar.tar_start, args_tar.tar_end, length) if srna_info is not None: out.write("\t".join([ name, str(srna_info.seq_id), "-".join([str(srna_info.start), str(srna_info.end)]), "-".join([str(s_start), str(s_end)]), srna_info.strand, target["target_genome"], target["gene_id"], target["target_id"], target["target_locus"], "-".join([str(target_info.start), str(target_info.end)]), "-".join([str(t_start), str(t_end)]), target_info.strand, str(target["energy"]), str(target["rank"])]) + "\n") def extract_pos(line, srnas, method): if (line.startswith("(")) or ( line.startswith(")")) or ( line.startswith(".")): tar_pos = line.split(" 
: ")[0].strip().split(" ")[-1] srna_pos = line.split(" : ")[-1].strip().split(" ")[0] srnas["tar_pos"] = tar_pos srnas["srna_pos"] = srna_pos def read_rnaplex(rnaplex, genes, genomes, features, srnas, target_prefixs): start = False count_seq = 0 with open(rnaplex, "r") as p_h: for line in p_h: line = line.strip() if line.startswith(">"): start = True count_seq += 1 if count_seq == 1: tags = line[1:].split("_") target_locus = "_".join(tags[:-3]) target_id = tags[-3] detail = "_".join(tags[-2:]) target_genome = target_prefixs[int(tags[0])] gene_id, tar, target_id, target_locus = get_gene_id( detail, genes, genomes, features) elif count_seq == 2: srna = line[1:] if srna not in srnas["RNAplex"].keys(): srnas["RNAplex"][srna] = [] srnas["RNAplex"][srna].append({ "target_id": target_id, "gene_id": gene_id, "target_locus": target_locus, "target_genome": target_genome, "detail": detail, "energy": 0}) count_seq = 0 else: if start: detect_energy(line, srnas["RNAplex"][srna][-1]) extract_pos(line, srnas["RNAplex"][srna][-1], "RNAplex") def read_rnaup(rnaup, srna_names, srnas, genes, genomes, features, target_prefixs): with open(rnaup, "r") as u_h: for line in u_h: line = line.strip() if line.startswith(">"): if line[1:] in srna_names: srna = line[1:] else: tags = line[1:].split("_") detail = "_".join(tags[-2:]) gene_id, tar, target_id, target_locus = get_gene_id( detail, genes, genomes, features) target_genome = target_prefixs[int(tags[0])] if srna in srnas["RNAup"].keys(): srnas["RNAup"][srna].append({ "target_id": target_id, "target_locus": target_locus, "detail": detail, "energy": 0, "target_genome": target_genome, "gene_id": gene_id}) else: srnas["RNAup"][srna] = [] srnas["RNAup"][srna].append({ "target_id": target_id, "target_locus": target_locus, "detail": detail, "energy": 0, "target_genome": target_genome, "gene_id": gene_id}) else: detect_energy(line, srnas["RNAup"][srna][-1]) extract_pos(line, srnas["RNAup"][srna][-1], "RNAplex") def read_intarna(intarna, srnas, genes, genomes, features, target_prefixs): with open(intarna, "r") as i_h: for line in i_h: inter = line.strip().split(";") if inter[0] != "id1": if len(inter) == 9: srna = inter[3] tags = inter[0].split("_") if (len(tags) >= 5): try: if(int(inter[1]) and int(inter[2]) and int(inter[4]) and int(inter[5])): detail = "_".join(tags[-2:]) if (len(tags[0])) != 0: gene_id, tar, target_id, target_locus = get_gene_id( detail, genes, genomes, features) target_genome = target_prefixs[int(tags[0])] if tar is not None: if (srna not in srnas["IntaRNA"].keys()): srnas["IntaRNA"][srna] = [] srnas["IntaRNA"][srna].append({ "target_id": target_id, "target_locus": target_locus, "detail": detail, "energy": float(inter[-1]), "gene_id": gene_id, "target_genome": target_genome, "tar_pos": ",".join(inter[1:3]), "srna_pos": ",".join(inter[4:6])}) except: pass def read_table(gffs, rnaplex, rnaup, intarna, genes, genomes, features, target_prefixs): srnas = {"RNAup": {}, "RNAplex": {}, "IntaRNA": {}} srna_names = set() for gff in gffs: if gff.attributes["ID"] not in srna_names: srna_names.add(gff.attributes["ID"]) if rnaplex is not None: read_rnaplex(rnaplex, genes, genomes, features, srnas, target_prefixs) if rnaup is not None: read_rnaup(rnaup, srna_names, srnas, genes, genomes, features, target_prefixs) if intarna is not None: read_intarna(intarna, srnas, genes, genomes, features, target_prefixs) return srnas def get_gene_id(detail, genes, gffs, features): tar = None for gff in gffs: if gff.feature in features: strand = detail.split("_")[-1] start = 
int(detail.split("-")[0]) end = int(detail.split("-")[1].split("_")[0]) if (gff.start == start) and (gff.end == end) and ( gff.strand == strand): tar = gff break gene_id = "NA" target_id = "NA" target_locus = "NA" if tar is not None: if "ID" in tar.attributes.keys(): target_id = tar.attributes["ID"] if "locus_tag" in tar.attributes.keys(): target_locus = tar.attributes["locus_tag"] for gene in genes: if "Parent" in tar.attributes.keys(): if "ID" in gene.attributes.keys(): if (gene.attributes["ID"] in tar.attributes["Parent"].split(",")): gene_id = gene.attributes["ID"] return gene_id, tar, target_id, target_locus if gene_id == "NA": if (gene.seq_id == tar.seq_id) and ( gene.strand == tar.strand) and ( (tar.start == gene.start) and ( tar.end == gene.end)): if "ID" in gene.attributes.keys(): gene_id = gene.attributes["ID"] return gene_id, tar, target_id, target_locus if gene_id == "NA": for gene in genes: if (gene.seq_id == tar.seq_id) and ( gene.strand == tar.strand): if ((tar.start <= gene.start) and ( tar.end >= gene.end)) or ( (tar.start >= gene.start) and ( tar.end <= gene.end)) or ( (tar.start <= gene.start) and ( tar.end <= gene.end) and ( tar.end >= gene.start)) or ( (tar.start >= gene.start) and ( tar.start <= gene.end) and ( tar.end >= gene.end)): if "ID" in gene.attributes.keys(): gene_id = gene.attributes["ID"] return gene_id, tar, target_id, target_locus return gene_id, tar, target_id, target_locus def append_merge_three_methods(name, srna_info, ps_pos, pt_pos, u_data, i_data, srna_m1, target_info, merges): merges.append([name, srna_info.seq_id, "-".join([str(srna_info.start), str(srna_info.end)]), ps_pos, u_data["s_pos"], i_data["s_pos"], srna_info.strand, srna_m1["target_genome"], srna_m1["gene_id"], srna_m1["target_id"], srna_m1["target_locus"], "-".join([str(target_info.start), str(target_info.end)]), pt_pos, u_data["t_pos"], i_data["t_pos"], target_info.strand, str(srna_m1["energy"]), str(srna_m1["rank"]), str(u_data["energy"]), str(u_data["rank"]), str(i_data["energy"]), str(i_data["rank"])]) def import_merge(merges, name, srna_info, srna_m1, srna_m2, srna_m3, target_info, pre_target, suf_target, length, num_method): ps_start, ps_end = mod_srna_tar_pos( srna_info, srna_m1["srna_pos"], "srna", pre_target, suf_target, length) pt_start, pt_end = mod_srna_tar_pos( target_info, srna_m1["tar_pos"], "tar", pre_target, suf_target, length) ps_pos = "-".join([str(ps_start), str(ps_end)]) pt_pos = "-".join([str(pt_start), str(pt_end)]) u_data = {"s_pos": "NA", "t_pos": "NA", "energy": 1000, "rank": "NA"} if (srna_m1["detail"] == srna_m2["detail"]): us_start, us_end = mod_srna_tar_pos( srna_info, srna_m2["srna_pos"], "srna", pre_target, suf_target, length) ut_start, ut_end = mod_srna_tar_pos( target_info, srna_m2["tar_pos"], "tar", pre_target, suf_target, length) u_data["s_pos"] = "-".join([str(us_start), str(us_end)]) u_data["t_pos"] = "-".join([str(ut_start), str(ut_end)]) u_data["energy"] = srna_m2["energy"] u_data["rank"] = srna_m2["rank"] i_data = {"s_pos": "NA", "t_pos": "NA", "energy": 1000, "rank": "NA"} if srna_m3 is not None: if (srna_m1["detail"] == srna_m3["detail"]): is_start, is_end = mod_srna_tar_pos( srna_info, srna_m3["srna_pos"], "srna", pre_target, suf_target, length) it_start, it_end = mod_srna_tar_pos( target_info, srna_m3["tar_pos"], "tar", pre_target, suf_target, length) i_data["s_pos"] = "-".join([str(is_start), str(is_end)]) i_data["t_pos"] = "-".join([str(it_start), str(it_end)]) i_data["energy"] = srna_m3["energy"] i_data["rank"] = srna_m3["rank"] 
append_merge_three_methods(name, srna_info, ps_pos, pt_pos, u_data, i_data, srna_m1, target_info, merges) else: if num_method == 2: merges.append([name, srna_info.seq_id, "-".join([str(srna_info.start), str(srna_info.end)]), ps_pos, u_data["s_pos"], srna_info.strand, srna_m1["target_genome"], srna_m1["gene_id"], srna_m1["target_id"], srna_m1["target_locus"], "-".join([str(target_info.start), str(target_info.end)]), pt_pos, u_data["t_pos"], target_info.strand, str(srna_m1["energy"]), str(srna_m1["rank"]), str(u_data["energy"]), str(u_data["rank"])]) elif num_method == 3: append_merge_three_methods(name, srna_info, ps_pos, pt_pos, u_data, i_data, srna_m1, target_info, merges) def get_srna_name(gffs, srna): detect_name = False srna_info = None for gff in gffs: if (gff.attributes["ID"] == srna) and \ ("Name" in gff.attributes.keys()): name = gff.attributes["Name"] detect_name = True srna_info = gff break elif (gff.attributes["ID"] == srna): srna_info = gff break if not detect_name: name = srna return (name, srna_info) def get_target_info(gffs, target): tmp_gff = None for gff in gffs: if (str(gff.start) + "-" + str(gff.end) + "_" + gff.strand) == target["detail"]: if gff.feature == "gene": target_info = gff if ("locus_tag" in gff.attributes.keys()) and ( "gene" in gff.attributes.keys()): if (gff.attributes["gene"] not in target["target_locus"]): target["target_locus"] = "|".join([ target["target_locus"], gff.attributes["gene"]]) elif ("locus_tag" in gff.attributes.keys()) and ( "Name" in gff.attributes.keys()): if (gff.attributes["Name"] not in target["target_locus"]): target["target_locus"] = "|".join([ target["target_locus"], gff.attributes["Name"]]) return target_info else: tmp_gff = gff if tmp_gff is not None: return tmp_gff def remove_no_rank(merges, index): new_merges = [] for merge in merges: if merge[index] != "NA": new_merges.append(merge) return new_merges def print_file(merges, out, num_method): if num_method == 2: merges = remove_no_rank(merges, 15) merges = sorted(merges, key=lambda k: (k[0], int(k[15]))) for merge in merges: if float(merge[14]) == 1000: merge[14] = "NA" if float(merge[16]) == 1000: merge[16] = "NA" out.write("\t".join(merge) + "\n") elif num_method == 3: merges = remove_no_rank(merges, 17) merges = sorted(merges, key=lambda k: (k[0], int(k[17]))) for merge in merges: if float(merge[16]) == 1000: merge[16] = "NA" if float(merge[18]) == 1000: merge[18] = "NA" if float(merge[20]) == 1000: merge[20] = "NA" out.write("\t".join(merge) + "\n") def read_gff(filename): gffs = [] genes = [] for entry in Gff3Parser().entries(open(filename)): if entry.feature == "gene": genes.append(entry) gffs.append(entry) gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) if len(genes) != 0: genes = sorted(genes, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return gffs, genes def print_title(out, methods): if len(methods) == 2: out.write("\t".join(["sRNA", "sRNA_genome", "sRNA_position", "sRNA_interacted_position_" + methods[0], "sRNA_interacted_position_" + methods[1], "sRNA_strand", "Target_genome", "Target_gene_ID", "Target_ID", "Target_locus_tag", "Target_position", "Target_interacted_position_" + methods[0], "Target_interacted_position_" + methods[1], "Target_strand", "Energy_" + methods[0], "Rank_" + methods[0], "Energy_" + methods[1], "Rank_" + methods[1]]) + "\n") if len(methods) == 3: out.write("\t".join(["sRNA", "sRNA_genome", "sRNA_position", "sRNA_interacted_position_" + methods[0], "sRNA_interacted_position_" + methods[1], 
"sRNA_interacted_position_" + methods[2], "sRNA_strand", "Target_genome", "Target_gene_ID", "Target_ID", "Target_locus_tag", "Target_position", "Target_interacted_position_" + methods[0], "Target_interacted_position_" + methods[1], "Target_interacted_position_" + methods[2], "Target_strand", "Energy_" + methods[0], "Rank_" + methods[0], "Energy_" + methods[1], "Rank_" + methods[1], "Energy_" + methods[2], "Rank_" + methods[2]]) + "\n") def merge_three(srnas, method3, srna, detect, srna_m1, top): srna_m3 = None if method3 is not None: for srna_m3 in srnas[method3][srna]: if (srna_m1["detail"] == srna_m3["detail"]) and ( "rank" in srna_m3.keys()): if (srna_m3["rank"] != "NA"): if (srna_m3["rank"] <= top) and ( "print" not in srna_m3.keys()) and ( srna_m3["tar_pos"] != "NA"): srna_m3["print"] = True detect["3"] = True return srna_m3 else: return srna_m3 def check_method3(srna_m3, srna_m1, method3, srna, srnas): if (srna_m3 is not None): if "rank" not in srna_m3.keys(): srna_m3["rank"] = "NA" srna_m3["energy"] = 1000 return srna_m3 else: if method3 is not None: for srna_m3 in srnas[method3][srna]: if (srna_m1["detail"] == srna_m3["detail"]): return srna_m3 def check_non_overlap(detect, methods, srna_m1, srna_m2, srna_m3, gffs, merges, name, srna_info, args_tar, length, method3, srna, srnas): if (not detect["2"] and len(methods) == 2) or ( ((not detect["2"]) or (not detect["3"])) and len(methods) == 3): if "rank" not in srna_m2.keys(): srna_m2["rank"] = "NA" srna_m2["energy"] = 1000 srna_m3 = check_method3(srna_m3, srna_m1, method3, srna, srnas) target_info = get_target_info( gffs, srna_m1) import_merge( merges, name, srna_info, srna_m1, srna_m2, srna_m3, target_info, args_tar.tar_start, args_tar.tar_end, length, len(methods)) srna_m1["print"] = True def merge_result(srnas, srna_gffs, args_tar, gffs, merges, length, methods): '''merge the results based on the ranking of RNAplex''' overlaps = [] method1 = methods[0] method2 = methods[1] method3 = None if len(methods) == 3: method3 = methods[2] for srna, srna_m1s in srnas[method1].items(): srna_datas = get_srna_name(srna_gffs, srna) name = srna_datas[0] srna_info = srna_datas[1] for srna_m1 in srna_m1s: if ("rank" in srna_m1.keys()): if (srna_m1["rank"] != "NA"): if ("print" not in srna_m1.keys()) and ( srna_m1["rank"] <= args_tar.top) and ( srna_m1["tar_pos"] != "NA"): detect = {"2": False, "3": False} srna_m3 = None if srna in srnas[method2].keys(): for srna_m2 in srnas[method2][srna]: if (srna_m1["detail"] == srna_m2["detail"]): if ("rank" in srna_m2.keys()): if (srna_m2["rank"] != "NA"): if (srna_m2["rank"] <= args_tar.top) and ( "print" not in srna_m2.keys()) and ( srna_m2["tar_pos"] != "NA"): detect["2"] = True srna_m3 = merge_three(srnas, method3, srna, detect, srna_m1, args_tar.top) if (len(methods) == 2) or ( (len(methods) == 3) and detect["3"]): target_info = get_target_info( gffs, srna_m1) import_merge( overlaps, name, srna_info, srna_m1, srna_m2, srna_m3, target_info, args_tar.tar_start, args_tar.tar_end, length, len(methods)) srna_m1["print"] = True srna_m2["print"] = True import_merge( merges, name, srna_info, srna_m1, srna_m2, srna_m3, target_info, args_tar.tar_start, args_tar.tar_end, length, len(methods)) break check_non_overlap(detect, methods, srna_m1, srna_m2, srna_m3, gffs, merges, name, srna_info, args_tar, length, method3, srna, srnas) return overlaps def compare_rest(srnas, rest, srna_last, srna): srna_m3 = None for srna_m3 in srnas[rest][srna]: if (srna_last["detail"] == srna_m3["detail"]) and ( "print" not in 
srna_m3.keys()): if ("rank" not in srna_m3.keys()): srna_m3["rank"] = "NA" srna_m3["energy"] = 1000 return srna_m3 return srna_m3 def merge_last(srnas, srna_gffs, args_tar, gffs, merges, length, method, ref, num_method, rest, switch): '''merge the results based on the ranking of RNAup''' for srna, srnas_last in srnas[method].items(): srna_datas = get_srna_name(srna_gffs, srna) name = srna_datas[0] srna_info = srna_datas[1] for srna_last in srnas_last: if "rank" in srna_last.keys(): if srna_last["rank"] != "NA": if ("print" not in srna_last.keys()) and ( srna_last["rank"] <= args_tar.top) and ( srna in srnas[ref].keys()) and ( srna_last["tar_pos"] != "NA"): for srna_ref in srnas[ref][srna]: if (srna_ref["detail"] == srna_last["detail"]) and ( "print" not in srna_ref.keys()): if ("rank" not in srna_ref.keys()): srna_ref["rank"] = "NA" srna_ref["energy"] = 1000 else: target_info = get_target_info( gffs, srna_last) if num_method == 2: import_merge( merges, name, srna_info, srna_ref, srna_last, None, target_info, args_tar.tar_start, args_tar.tar_end, length, num_method) elif num_method == 3: srna_m3 = compare_rest( srnas, rest, srna_last, srna) if switch: import_merge( merges, name, srna_info, srna_ref, srna_m3, srna_last, target_info, args_tar.tar_start, args_tar.tar_end, length, num_method) else: import_merge( merges, name, srna_info, srna_ref, srna_last, srna_m3, target_info, args_tar.tar_start, args_tar.tar_end, length, num_method) if srna_m3 is not None: srna_m3["print"] = True srna_last["print"] = True srna_ref["print"] = True def read_fasta(seq_file): length = 0 with open(seq_file) as fh: for line in fh: line = line.strip() if not line.startswith(">"): length = length + len(line) return length def merge_srna_target(rnaplex, rnaup, intarna, args_tar, out_rnaplex, out_rnaup, out_intarna, seq_file, output, out_overlap, srna_gff_file, annotation_gff, target_prefixs): '''merge the results of RNAup and RNAplex''' length = read_fasta(seq_file) merges = [] methods = [] srna_gffs, NA = read_gff(srna_gff_file) gffs, genes = read_gff(annotation_gff) srnas = read_table(srna_gffs, rnaplex, rnaup, intarna, genes, gffs, args_tar.features, target_prefixs) if out_rnaplex is not None: print("Ranking for RNAplex") methods.append("RNAplex") out_p = open(out_rnaplex, "w") print_rank_one(srnas, out_p, "RNAplex", gffs, srna_gffs, args_tar, length) if out_rnaup is not None: print("Ranking for RNAup") methods.append("RNAup") out_u = open(out_rnaup, "w") print_rank_one(srnas, out_u, "RNAup", gffs, srna_gffs, args_tar, length) if out_intarna is not None: print("Ranking for IntaRNA") methods.append("IntaRNA") out_i = open(out_intarna, "w") print_rank_one(srnas, out_i, "IntaRNA", gffs, srna_gffs, args_tar, length) if (len(args_tar.program) >= 2): out_m = open(output, "w") out_o = open(out_overlap, "w") print_title(out_m, methods) print_title(out_o, methods) print("Merging now...") overlaps = merge_result(srnas, srna_gffs, args_tar, gffs, merges, length, methods) if len(methods) == 2: merge_last(srnas, srna_gffs, args_tar, gffs, merges, length, methods[1], methods[0], 2, None, False) elif len(methods) == 3: merge_last(srnas, srna_gffs, args_tar, gffs, merges, length, methods[1], methods[0], 3, methods[2], False) merge_last(srnas, srna_gffs, args_tar, gffs, merges, length, methods[2], methods[0], 3, methods[1], True) print_file(merges, out_m, len(methods)) print_file(overlaps, out_o, len(methods))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/merge_rnaplex_rnaup.py
merge_rnaplex_rnaup.py
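detect_energy keeps the lowest interaction energy seen so far for a candidate; a small check with a made-up RNAplex-style duplex line (the bracket string and the numbers are illustrative only):

from annogesiclib.merge_rnaplex_rnaup import detect_energy

srna = {"energy": 0}
line = "((((&)))) 1,4 : 10,13 (-8.50 = -9.20 + 0.70)"
detect_energy(line, srna)
print(srna["energy"])  # -8.5, stored because it is below the previous value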
import csv def import_data(row): return{"strain": row[1], "strand": row[2], "associate": row[3], "start_seq": int(row[4]), "end_seq": int(row[5]), "rfam": row[6], "e": row[7], "score": row[8], "start_align": int(row[9]), "end_align": int(row[10]), "info": "|".join(row[0:6]), "ID": row[0]} def read_file(ribo_table, rfam_table): ribos = [] rfams = [] f_h = open(ribo_table, "r") for row in csv.reader(f_h, delimiter="\t"): if not row[0].startswith("#"): ribos.append(import_data(row)) r_h = open(rfam_table, "r") for row in csv.reader(r_h, delimiter="\t"): rfams.append({"ID": row[0].strip(), "class": row[1].strip()}) ribos = sorted(ribos, key=lambda x: (x["strain"], x["start_seq"])) f_h.close() r_h.close() return ribos, rfams def get_overlap(pre_ribo, ribo, overlap, overlaps): if (pre_ribo["strain"] == ribo["strain"]) and \ (pre_ribo["strand"] == ribo["strand"]) and \ (pre_ribo["ID"] == ribo["ID"]): overlap = True if overlap: detect = False for over in overlaps[ribo["strain"]]: if pre_ribo["info"] in over: over = over + ";" + ribo["info"] detect = True if not detect: overlaps[ribo["strain"]].append( pre_ribo["info"] + ";" + ribo["info"]) def print_gff(num, ribo, out, stats, strain, feature): attribute = ";".join(["=".join(items) for items in [ ("ID", "_".join([strain, feature.lower() + str(num)])), ("Name", ribo["rfam_name"]), ("rfam_id", ribo["rfam"]), ("e_value", ribo["e"]), ("score", ribo["score"]), ("method", "infernal_to_Rfam")]]) out.write("\t".join([str(field) for field in [ ribo["strain"], "ANNOgesic", feature, str(ribo["start_seq"]), str(ribo["end_seq"]), ".", ribo["strand"], ".", attribute]]) + "\n") stats["total"]["total"] += 1 stats[strain]["total"] += 1 def import_stat(rfams, ribo, stats, strain): for rfam in rfams: if ribo["rfam"] == rfam["ID"]: ribo["rfam_name"] = rfam["class"] if rfam["class"] not in stats["total"].keys(): stats["total"][rfam["class"]] = 1 else: stats["total"][rfam["class"]] += 1 if rfam["class"] not in stats[strain].keys(): stats[strain][rfam["class"]] = 1 else: stats[strain][rfam["class"]] += 1 def print_number(stats, repeat, out, strain, feature): out.write("Total number of potential {0} are {1}\n".format( feature.replace("_", " "), stats[strain]["total"])) out.write("The number of potential {0} which " "have overlap region with others are {1}\n".format( feature.replace("_", " "), repeat,)) out.write(feature + "_name\tnumbers\n") for type_, num in stats[strain].items(): if type_ != "total": out.write("{0}\t{1}\n".format(type_, num)) def print_stat(stats, out_stat, overlaps, feature): out = open(out_stat, "w") print_file = False repeat = 0 if len(stats) > 2: out.write("All genomes:\n") print_file = True for strain, overs in overlaps.items(): for over in overs: datas = over.split(";") repeat = repeat + len(datas) print_number(stats, repeat, out, "total", feature) for strain, datas in stats.items(): repeat = 0 if strain != "total": print_file = True out.write("{0}:\n".format(strain)) for over in overlaps[strain]: datas = over.split(";") repeat = repeat + len(datas) print_number(stats, repeat, out, strain, feature) print_strain = strain if print_file: count = 1 if len(stats) > 2: for strain, overs in overlaps.items(): for over in overs: datas = over.split(";") out.write("\noverlap candidates set {0}:\n".format(count)) count += 1 for data in datas: out.write("\t{0}\n".format(data)) else: for over in overlaps[print_strain]: datas = over.split(";") out.write("\noverlap candidates set {0}:\n".format(count)) count += 1 for data in datas: 
out.write("\t{0}\n".format(data)) out.close() def stat_and_covert2gff(ribo_table, rfam_table, gff_file, fuzzy, out_stat, feature): '''do statistics and print gff file of riboswitch''' stats = {} overlaps = {} pre_strain = "" stats["total"] = {"total": 0} num = 0 strain = None ribos, rfams = read_file(ribo_table, rfam_table) out = open(gff_file, "w") out.write("##gff-version 3\n") pre_gff = None for ribo in ribos: overlap = False if ribo["strain"] != pre_strain: overlaps[ribo["strain"]] = [] first = True strain = ribo["strain"] pre_strain = ribo["strain"] stats[strain] = {"total": 0} if first: first = False pre_ribo = ribo else: get_overlap(pre_ribo, ribo, overlap, overlaps) pre_ribo = ribo if ribo["start_align"] > fuzzy: ribo["start_seq"] = ribo["start_seq"] + ribo["start_align"] - fuzzy if (ribo["end_seq"] - (ribo["start_seq"] + ribo["end_align"])) > fuzzy: ribo["end_seq"] = ribo["start_seq"] + ribo["end_align"] + fuzzy import_stat(rfams, ribo, stats, strain) if pre_gff is not None: if (pre_gff["strain"] == ribo["strain"]) and ( pre_gff["strand"] == ribo["strand"]) and ( pre_gff["start_seq"] == ribo["start_seq"]) and ( pre_gff["end_seq"] == ribo["end_seq"]): pre_gff["rfam_name"] = "/".join( [pre_gff["rfam_name"], ribo["rfam_name"]]) pre_gff["rfam"] = ",".join([pre_gff["rfam"], ribo["rfam"]]) pre_gff["e"] = ",".join([pre_gff["e"], ribo["e"]]) pre_gff["score"] = ",".join([pre_gff["score"], ribo["score"]]) else: print_gff(num, pre_gff, out, stats, strain, feature) num += 1 pre_gff = ribo else: pre_gff = ribo if strain is not None: print_gff(num, pre_gff, out, stats, strain, feature) print_stat(stats, out_stat, overlaps, feature) else: out_s = open(out_stat, "w") out_s.write("Nothing can be detected.") out_s.close() out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/ribo_gff.py
ribo_gff.py
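A minimal driver sketch for the module above. The file names are hypothetical placeholders (an Infernal-vs-Rfam scan table and a two-column Rfam ID/class mapping), not files shipped with ANNOgesic:

    from annogesiclib.ribo_gff import stat_and_covert2gff

    # fuzzy=10 allows 10 nt of slack between sequence and alignment coordinates
    stat_and_covert2gff("scan_rfam.csv", "rfam_classes.csv",
                        "riboswitches.gff", 10,
                        "stat_riboswitch.csv", "riboswitch")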
import os
import sys
import shutil
from annogesiclib.multiparser import Multiparser
from annogesiclib.helper import Helper
from annogesiclib.gene_express_analysis import gene_expression


class Expression(object):

    def __init__(self, gffs):
        self.multiparser = Multiparser()
        self.helper = Helper()
        self.out_folder = os.path.join(gffs, "for_libs")
        if os.path.exists(self.out_folder):
            shutil.rmtree(self.out_folder)
        os.mkdir(self.out_folder)
        self.stat = os.path.join(self.out_folder, "statistics")
        os.mkdir(self.stat)
        self.gff_folder = os.path.join(self.out_folder, "gffs")
        os.mkdir(self.gff_folder)
        self.merge_wigs = os.path.join(gffs, "merge_wigs")
        if os.path.exists(self.merge_wigs):
            shutil.rmtree(self.merge_wigs)

    def _get_replicates(self, replicates_tex, replicates_frag):
        if (replicates_tex is not None) and (
                replicates_frag is not None):
            replicates = {"tex": int(replicates_tex),
                          "frag": int(replicates_frag)}
        elif replicates_tex is not None:
            replicates = {"tex": int(replicates_tex), "frag": -1}
        elif replicates_frag is not None:
            replicates = {"tex": -1, "frag": int(replicates_frag)}
        else:
            print("Error: No replicate number was assigned!")
            sys.exit()
        return replicates

    def expression(self, tex_libs, frag_libs, tex_notex, replicates_tex,
                   replicates_frag, tex_wigs, frag_wigs, percent_tex,
                   percent_frag, cutoff_coverage, gffs, features,
                   cover_type, max_color, min_color):
        replicates = self._get_replicates(replicates_tex, replicates_frag)
        if (tex_libs is not None) and (frag_libs is not None):
            input_libs = tex_libs + frag_libs
        elif tex_libs is not None:
            input_libs = tex_libs
        elif frag_libs is not None:
            input_libs = frag_libs
        else:
            print("Error: Please assign the libraries!\n")
            sys.exit()
        if (tex_wigs is not None) and (frag_wigs is not None):
            merge_wigs = self.merge_wigs
            os.mkdir(merge_wigs)
            for wig in os.listdir(tex_wigs):
                if os.path.isfile(os.path.join(tex_wigs, wig)):
                    shutil.copy(os.path.join(tex_wigs, wig), merge_wigs)
            for wig in os.listdir(frag_wigs):
                if os.path.isfile(os.path.join(frag_wigs, wig)):
                    shutil.copy(os.path.join(frag_wigs, wig), merge_wigs)
        elif tex_wigs is not None:
            merge_wigs = tex_wigs
        elif frag_wigs is not None:
            merge_wigs = frag_wigs
        else:
            print("Error: Please assign the wiggle files!\n")
            sys.exit()
        wig_f_file = os.path.join(merge_wigs, "whole_forward.wig")
        wig_r_file = os.path.join(merge_wigs, "whole_reverse.wig")
        for wig in os.listdir(merge_wigs):
            for lib in input_libs:
                if (wig in lib) and (lib[-1] == "+"):
                    self.helper.merge_file(os.path.join(merge_wigs, wig),
                                           wig_f_file)
                elif (wig in lib) and (lib[-1] == "-"):
                    self.helper.merge_file(os.path.join(merge_wigs, wig),
                                           wig_r_file)
        print("Computing expression analysis...")
        gene_expression(input_libs, gffs, percent_tex, percent_frag,
                        wig_f_file, wig_r_file, features, merge_wigs,
                        cutoff_coverage, tex_notex, replicates,
                        self.stat, self.gff_folder, cover_type,
                        max_color, min_color)
        os.remove(wig_f_file)
        os.remove(wig_r_file)
        if os.path.exists(self.merge_wigs):
            shutil.rmtree(self.merge_wigs)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/expression.py
expression.py
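A hedged usage sketch for the class above; every path, library string, and threshold below is an assumed placeholder (the library string format is guessed from the trailing strand check in the wiggle-merging loop):

    from annogesiclib.expression import Expression

    exp = Expression("input/gffs")
    exp.expression(tex_libs=["lib_TEX_forward.wig:tex:1:a:+",
                             "lib_TEX_reverse.wig:tex:1:a:-"],
                   frag_libs=None, tex_notex=2,
                   replicates_tex="1", replicates_frag=None,
                   tex_wigs="input/tex_wigs", frag_wigs=None,
                   percent_tex=0.5, percent_frag=0.5,
                   cutoff_coverage=5, gffs="input/gffs",
                   features=["CDS"], cover_type="average",
                   max_color=5, min_color=0)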
import os
import csv
import shutil
from subprocess import call
from annogesiclib.seq_editer import SeqEditer


def wget(input_folder, ftp, files_type, log):
    log.write("\t" + " ".join(["wget", "-cP", input_folder,
                               ftp + "/*" + files_type]) + "\n")
    os.system(" ".join(["wget", "-cP", input_folder,
                        ftp + "/*" + files_type]))
    log.write("Done!\n")


def deal_detect(input_file, file_path, change, input_folder):
    '''deal with the header of fasta file and put the files to
    corresponding folders'''
    if change:
        shutil.move(input_file, file_path)
        change = False
    SeqEditer().modify_header(file_path)
    with open(os.path.join(file_path)) as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(">"):
                seq_name = line[1:]
    shutil.move(file_path, os.path.join(input_folder, seq_name + ".fa"))
    return change, seq_name


def get_file(ftp, input_folder, files_type, log):
    checks = {"detect": False, "change": None}
    filename = None
    files = []
    wget(input_folder, ftp, files_type, log)
    for file_ in os.listdir(input_folder):
        input_file = os.path.join(input_folder, file_)
        if (file_[-3:] == "fna"):
            filename = file_[0:-3] + "fa"
            checks = {"detect": True, "change": True}
        elif (file_[-5:] == "fasta"):
            filename = file_[0:-5] + "fa"
            checks = {"detect": True, "change": True}
        elif (file_[-2:] == "fa"):
            filename = file_[0:-2] + "fa"
            checks = {"detect": True, "change": True}
        elif (file_[-6:] == "fna.gz") and ("_genomic" in file_):
            if ("_cds_from_genomic" in file_) or (
                    "_rna_from_genomic" in file_):
                os.remove(input_file)
            else:
                filename = file_[0:-6] + "fa"
                checks = {"detect": True, "change": True}
                log.write("\tgunzip " + input_file + "\n")
                call(["gunzip", input_file])
                input_file = input_file[:-3]
        elif (file_[-6:] == "gff.gz") or (file_[-3:] == "gff"):
            if ("_genomic" in file_) and (file_[-6:] == "gff.gz"):
                log.write("\tgunzip " + input_file + "\n")
                call(["gunzip", input_file])
                input_file = input_file[:-3]
            fh = open(input_file, "r")
            for row in csv.reader(fh, delimiter='\t'):
                if not row[0].startswith("#"):
                    gff_name = row[0]
                    break
            shutil.move(input_file, os.path.join(input_folder,
                                                 gff_name + ".gff"))
            fh.close()
        elif (file_[-3:] == "gbk") or (file_[-7:] == "gbff.gz") or (
                file_[-4:] == "gbff"):
            if (file_[-7:] == "gbff.gz") and ("_genomic" in file_):
                log.write("\tgunzip " + input_file + "\n")
                call(["gunzip", input_file])
                input_file = input_file[:-3]
            with open(input_file, "r") as g_f:
                for line in g_f:
                    line = line.strip()
                    if line.startswith("VERSION"):
                        for data in line.split(" "):
                            if (len(data) != 0) and (data != "VERSION"):
                                break
                        break
            print(os.path.join(input_folder, data + ".gbk"))
            shutil.move(input_file, os.path.join(input_folder,
                                                 data + ".gbk"))
        if checks["detect"]:
            checks["detect"] = False
            checks["change"], seq_name = deal_detect(
                input_file, filename, checks["change"], input_folder)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/get_input.py
get_input.py
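A short sketch of fetching reference files with the helpers above; the FTP URL and folders are placeholders, and `log` is simply an open file handle the functions write progress into:

    from annogesiclib.get_input import get_file

    with open("download.log", "w") as log:
        # fetches ftp://.../*.fna.gz, unzips it, and renames it by sequence ID
        get_file("ftp://ftp.example.org/genomes/assembly_1",
                 "input/references", "fna.gz", log)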
import os
import csv
from annogesiclib.gff3 import Gff3Parser
import numpy as np
import copy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')


def import_uniprot_data(entry, name_list, feature):
    '''import uniprot to dict'''
    ref_name = entry.attributes[feature]
    if ref_name not in name_list:
        name_list.add(ref_name)


def compare_cds_tran(gffs, trans):
    '''compare CDS with transcript to get the expressed CDS'''
    new_gffs = []
    for gff in gffs:
        for ta in trans:
            if (gff.seq_id == ta.seq_id) and (gff.strand == ta.strand):
                if ((gff.end < ta.end) and (
                        gff.end > ta.start) and (
                        gff.start <= ta.start)) or (
                        (gff.start > ta.start) and (
                        gff.start < ta.end) and (
                        gff.end >= ta.end)) or (
                        (gff.end >= ta.end) and (
                        gff.start <= ta.start)) or (
                        (gff.end <= ta.end) and (
                        gff.start >= ta.start)):
                    new_gffs.append(gff)
                    break
    return new_gffs


def get_go_id(gffs, id_, uni_lines, gos):
    '''get the GO id of CDS'''
    detect = False
    for gff in gffs:
        if ("Name" in gff.attributes.keys()):
            if (id_ == gff.attributes["Name"]):
                detect = True
        if ("protein_id" in gff.attributes.keys()):
            if (id_ == gff.attributes["protein_id"]):
                detect = True
        if detect:
            detect = False
            gos.append({"strain": gff.seq_id, "strand": gff.strand,
                        "start": gff.start, "end": gff.end,
                        "protein_id": id_, "go": uni_lines[6]})
            gff.attributes["print"] = True


def print_go(gos, out):
    gos = sorted(gos, key=lambda x: (x["strain"], x["start"],
                                     x["end"], x["strand"]))
    pre_go = None
    for go in gos:
        if (go != pre_go) and (pre_go is not None):
            if (go["strain"] == pre_go["strain"]) and (
                    go["strand"] == pre_go["strand"]) and (
                    go["start"] == pre_go["start"]) and (
                    go["end"] == pre_go["end"]) and (
                    go["protein_id"] == pre_go["protein_id"]):
                go_ids = []
                for go_id in go["go"].split("; "):
                    if go_id not in go_ids:
                        go_ids.append(go_id)
                for go_id in pre_go["go"].split("; "):
                    if go_id not in go_ids:
                        go_ids.append(go_id)
                pre_go["go"] = "; ".join(go_ids)
                out.write("\t".join([pre_go["strain"], pre_go["strand"],
                                     str(pre_go["start"]),
                                     str(pre_go["end"]),
                                     pre_go["protein_id"],
                                     pre_go["go"]]) + "\n")
                pre_go["print"] = True
                go["print"] = True
            else:
                if "print" not in pre_go.keys():
                    out.write("\t".join([pre_go["strain"], pre_go["strand"],
                                         str(pre_go["start"]),
                                         str(pre_go["end"]),
                                         pre_go["protein_id"],
                                         pre_go["go"]]) + "\n")
                    pre_go["print"] = True
        pre_go = copy.deepcopy(go)
    if "print" not in pre_go.keys():
        out.write("\t".join([pre_go["strain"], pre_go["strand"],
                             str(pre_go["start"]), str(pre_go["end"]),
                             pre_go["protein_id"], pre_go["go"]]) + "\n")


def retrieve_uniprot(database_file, gff_file, out_file, tran_file, type_):
    '''Retrieve the GO term from Uniprot'''
    name_list = set()
    gffs = []
    out = open(out_file, "w")
    out.write("\t".join(["Genome", "Strand", "Start", "End",
                         "Protein_id", "Go_term"]) + "\n")
    for entry in Gff3Parser().entries(open(gff_file)):
        if entry.feature == "CDS":
            if ("Name" in entry.attributes.keys()) and (
                    "protein_id" in entry.attributes.keys()):
                if entry.attributes["Name"] == entry.attributes["protein_id"]:
                    import_uniprot_data(entry, name_list, "Name")
                else:
                    import_uniprot_data(entry, name_list, "Name")
                    import_uniprot_data(entry, name_list, "protein_id")
            elif ("Name" in entry.attributes.keys()):
                import_uniprot_data(entry, name_list, "Name")
            elif ("protein_id" in entry.attributes.keys()):
                import_uniprot_data(entry, name_list, "protein_id")
            gffs.append(entry)
    if (type_ == "express") and (tran_file is not None):
        trans = []
        for entry in Gff3Parser().entries(open(tran_file)):
            trans.append(entry)
        new_gffs = compare_cds_tran(gffs, trans)
        gffs = new_gffs
    idmapping = open(database_file, "r")
    gos = []
    for uni_id in idmapping:
        uni_line = uni_id.rstrip("\n")
        uni_lines = uni_line.split("\t")
        uni_ids = uni_lines[3].split(";")
        for id_ in uni_ids:
            id_ = id_.strip()
            if id_ in name_list:
                get_go_id(gffs, id_, uni_lines, gos)
    for gff in gffs:
        if "print" not in gff.attributes.keys():
            gos.append({"strain": gff.seq_id, "strand": gff.strand,
                        "start": gff.start, "end": gff.end,
                        "protein_id": id_, "go": ""})
    print_go(gos, out)
    out.close()
    idmapping.close()


def plot(total_nums, strain, filename, total, out_folder):
    '''plot the distribution of GO term by GOslim'''
    sort_total_nums = sorted(total_nums.items(),
                             key=lambda x: (x[1]), reverse=True)
    classes = []
    nums = []
    width = 0.4
    plt.figure(figsize=(16, 12))
    for total_num in sort_total_nums:
        class_ = total_num[0]
        num = total_num[1]
        if class_ != "total":
            percent = (float(num) / float(total)) * 100
            classes.append(class_.replace("_", " "))
            nums.append(num)
    ind = np.arange(len(nums))
    plt.bar(ind, nums, width, color='#FF9999')
    if filename == "three_roots":
        title = "Distribution of GO term hits in the three root classes"
    elif (filename == "molecular_function") or (
            filename == "cellular_component") or (
            filename == "biological_process"):
        tag = filename.replace("_", " ")
        title = " -- ".join(["Distribution of GO term of the class", tag])
    plt.title(title, fontsize=22)
    plt.ylabel('Amount', fontsize=16)
    plt.xlim([0, len(nums) + 1])
    plt.yticks(fontsize=16)
    plt.xticks(ind + width, classes, rotation=45, fontsize=16, ha='right')
    plt.tight_layout(3, None, None, None)
    plt.savefig(os.path.join(out_folder,
                             "_".join([strain, filename + ".png"])))


def import_obo(filename):
    '''import the information of obo file to dict'''
    obos = []
    start = False
    with open(filename, "r") as o_h:
        for line in o_h:
            line = line.strip()
            if line == "[Term]":
                obo = {}
                start = True
            elif start:
                if len(line) == 0:
                    obos.append(obo.copy())
                    start = False
                else:
                    datas = line.split(": ")
                    if datas[0] == "is_a":
                        if "is_a" not in obo.keys():
                            obo["is_a"] = []
                        obo["is_a"].append(datas[1].strip())
                    else:
                        obo[datas[0]] = datas[1].strip()
    return obos


def import_class(slim_obo, classes, strain):
    if slim_obo["name"] not in classes[strain][slim_obo["namespace"]]:
        classes[strain][slim_obo["namespace"]][slim_obo["name"]] = 0
    classes[strain][slim_obo["namespace"]][slim_obo["name"]] += 1


def import_total(slim_obo, total_nums, strain):
    total_nums[strain][slim_obo["namespace"]] += 1
    total_nums[strain]["total"] += 1


def print_file(classes, total_nums, out_folder, stat):
    out_stat = open(stat, "w")
    printed = True
    for strain, datas in classes.items():
        if (strain == "All_genome") and len(classes) <= 2:
            printed = False
        if (printed) and (total_nums[strain]["total"] != 0):
            plot(total_nums[strain], strain, "three_roots",
                 total_nums[strain]["total"], out_folder)
            out_stat.write("{0}:\n".format(strain))
            for origin, types in datas.items():
                plot(types, strain, origin,
                     total_nums[strain][origin], out_folder)
                out_stat.write(
                    "\t{0}: {1}(percentage in total: {2})\n".format(
                        origin, total_nums[strain][origin],
                        float(total_nums[strain][origin]) /
                        float(total_nums[strain]["total"])))
                for type_, num in types.items():
                    out_stat.write("\t\t{0}: {1}(percentage "
                                   "in {2}: {3})\n".format(
                                       type_, num, origin,
                                       float(num) /
                                       float(total_nums[strain][origin])))
        else:
            printed = True
    out_stat.close()


def initiate_dict(classes, total_nums, index):
    classes[index] = {"biological_process": {},
                      "cellular_component": {},
                      "molecular_function": {}}
    total_nums[index] = {"biological_process": 0,
                         "cellular_component": 0,
                         "molecular_function": 0,
                         "total": 0}


def compare_go_slim(gos, term_obos, slim_obos, classes, total_nums):
    '''Compare GO term and GOslim database'''
    detect = False
    for strain, pros in gos.items():
        for pro, go_ids in pros.items():
            pro_list = []
            for go_id in go_ids:
                target_terms = [go_id]
                for target_term in target_terms:
                    for term_obo in term_obos:
                        if target_term == term_obo["id"]:
                            if "is_a" in term_obo.keys():
                                for is_a in term_obo["is_a"]:
                                    go_a = is_a.split(" ! ")
                                    if (go_a[1] != "biological_process") and (
                                            go_a[1] != "cellular_component") and (
                                            go_a[1] != "molecular_function"):
                                        target_terms.append(go_a[0])
                            elif ("is_obsolete" in term_obo.keys()):
                                if term_obo["is_obsolete"] == "true":
                                    break
                for slim_obo in slim_obos:
                    for target_term in target_terms:
                        if (target_term == slim_obo["id"]) and (
                                target_term not in pro_list):
                            detect = True
                            import_class(slim_obo, classes, strain)
                            import_class(slim_obo, classes, "All_genome")
                            import_total(slim_obo, total_nums, strain)
                            import_total(slim_obo, total_nums, "All_genome")
                            pro_list.append(target_term)
                            break
                    if detect:
                        detect = False
                        break


def map2goslim(slim_file, term_file, go_table, stat, out_folder):
    '''For mapping the GO to GOslim'''
    gos = {}
    classes = {}
    total_nums = {}
    initiate_dict(classes, total_nums, "All_genome")
    pre_strain = ""
    g_h = open(go_table, "r")
    print("Loading go table")
    for row in csv.reader(g_h, delimiter="\t"):
        if row[0] != "Genome":
            if row[0] != pre_strain:
                gos[row[0]] = {}
                initiate_dict(classes, total_nums, row[0])
            go_terms = row[-1].split("; ")
            gos[row[0]]["\t".join(row[1:4])] = go_terms
            pre_strain = row[0]
    print("Loading obo file")
    term_obos = import_obo(term_file)
    slim_obos = import_obo(slim_file)
    print("Starting mapping")
    compare_go_slim(gos, term_obos, slim_obos, classes, total_nums)
    print("Doing statistics and plotting")
    print_file(classes, total_nums, out_folder, stat)
    g_h.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gene_ontology.py
gene_ontology.py
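A sketch of the two entry points above, under assumed file names (a UniProt idmapping table plus go.obo and a GOslim .obo from the Gene Ontology site):

    from annogesiclib.gene_ontology import retrieve_uniprot, map2goslim

    retrieve_uniprot("idmapping_selected.tab", "genome.gff",
                     "uniprot_go.csv", "transcripts.gff", "express")
    map2goslim("goslim_generic.obo", "go.obo",
               "uniprot_go.csv", "stat_go.csv", "go_figs")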
from annogesiclib.gff3 import Gff3Parser


def create_dict(nums, strain, utr_detect):
    nums[strain] = {}
    if utr_detect:
        types = ["all", "5'UTR_derived", "3'UTR_derived",
                 "interCDS", "intergenic", "antisense"]
    else:
        types = ["all", "intergenic", "antisense"]
    for type_ in types:
        nums[strain][type_] = {}
        for feature in ["TSS", "sRNA", "all", "RBS", "TSS_RBS",
                        "TSS_sRNA_RBS", "TSS_sRNA", "RBS_sRNA"]:
            nums[strain][type_][feature] = 0
    return nums


def plus_data(nums, strain, sorf_types, features, utr_detect):
    for sorf_type in sorf_types:
        if ((not utr_detect) and (
                (sorf_type == "intergenic") or (
                sorf_type == "antisense") or (
                sorf_type == "all"))) or (
                utr_detect):
            for feature in features:
                nums[strain][sorf_type][feature] += 1


def print_num(out, num, nums, strain, type_):
    out.write("(for genome {0}; ".format(
        float(num) / float(nums[strain]["all"]["all"])))
    if nums[strain][type_]["all"] == 0:
        out.write("for {0} - {1})\n".format(type_, 0))
    else:
        out.write("for {0} - {1})\n".format(
            type_, float(num) / float(nums[strain][type_]["all"])))


def print_stat(nums, nums_best, strain, out, utr_detect):
    out.write(strain + ":\n")
    if utr_detect:
        out.write("\ttotal sORF in this genome are {0}\n".format(
            nums[strain]["all"]["all"]))
    for type_, features in nums[strain].items():
        out.write("\ttotal sORF of {0} sORF candidates are {1}".format(
            type_, nums[strain][type_]["all"]))
        out.write("(for this genome - {0})\n".format(
            float(nums[strain][type_]["all"]) /
            float(nums[strain]["all"]["all"])))
        for feature, num in features.items():
            if feature == "TSS":
                out.write("\t\ttotal sORF which start "
                          "from TSS are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "sRNA":
                out.write("\t\ttotal sORF without overlap with "
                          "sRNA candidates are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "RBS":
                out.write("\t\ttotal sORF which related with "
                          "ribosomal binding site are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "TSS_RBS":
                out.write("\t\ttotal sORF which start from TSS and related "
                          "with ribosomal binding site are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "TSS_sRNA":
                out.write("\t\ttotal sORF which start from TSS and without "
                          "overlap with sRNA candidates are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "RBS_sRNA":
                out.write("\t\ttotal sORF which related with "
                          "ribosomal binding site and ")
                out.write("without overlap with "
                          "sRNA candidates are {0}".format(num))
                print_num(out, num, nums, strain, type_)
            elif feature == "TSS_sRNA_RBS":
                # The original checked "TSS_RBS_sRNA", a key that is never
                # created by create_dict, so this branch was dead code.
                out.write("\t\ttotal sORF which start from TSS and "
                          "related with ribosomal binding site and ")
                out.write("without overlap with "
                          "sRNA candidates are {0}".format(num))
                print_num(out, num, nums, strain, type_)
        if strain in nums_best.keys():
            out.write("\t\tThe best sORF are {0}\n".format(
                nums_best[strain][type_]["all"]))
            out.write("\t\tThe best sORF which without overlap with "
                      "sRNA are {0}".format(
                          nums_best[strain][type_]["sRNA"]))
            print_num(out, nums_best[strain][type_]["sRNA"],
                      nums_best, strain, type_)
        else:
            out.write("\t\tThe best sORF are 0\n")
            out.write("\t\tThe best sORF which without overlap with "
                      "sRNA are 0\n")


def read_file(sorf_gff):
    sorfs = []
    fh = open(sorf_gff)
    for entry in Gff3Parser().entries(fh):
        sorfs.append(entry)
    sorfs = sorted(sorfs, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    fh.close()
    return sorfs


def get_stat_num(sorfs_all, utr_detect):
    strain = ""
    nums = {}
    create_dict(nums, "total", utr_detect)
    for sorf in sorfs_all:
        if strain != sorf.seq_id:
            create_dict(nums, sorf.seq_id, utr_detect)
            strain = sorf.seq_id
        if sorf.attributes["sORF_type"] == "intergenic":
            sorf_type = "intergenic"
        elif sorf.attributes["sORF_type"] == "antisense":
            sorf_type = "antisense"
        else:
            if "5utr" in sorf.attributes["sORF_type"]:
                sorf_type = "5'UTR_derived"
            elif "3utr" in sorf.attributes["sORF_type"]:
                sorf_type = "3'UTR_derived"
            elif "interCDS" in sorf.attributes["sORF_type"]:
                sorf_type = "interCDS"
        check_class(sorf, nums, sorf_type, utr_detect, strain)
    return nums


def check_class(sorf, nums, sorf_type, utr_detect, strain):
    if (sorf.attributes["with_TSS"] != "NA") and \
            (sorf.attributes["sRNA"] == "NA") and \
            (sorf.attributes["rbs"] != "NA"):
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "TSS", "sRNA", "RBS", "TSS_RBS",
                   "TSS_sRNA_RBS", "TSS_sRNA", "RBS_sRNA"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "TSS", "sRNA", "RBS", "TSS_RBS",
                   "TSS_sRNA_RBS", "TSS_sRNA", "RBS_sRNA"], utr_detect)
    elif (sorf.attributes["with_TSS"] != "NA") and \
            (sorf.attributes["sRNA"] == "NA"):
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "TSS", "sRNA", "TSS_sRNA"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "TSS", "sRNA", "TSS_sRNA"], utr_detect)
    elif (sorf.attributes["with_TSS"] != "NA") and \
            (sorf.attributes["rbs"] != "NA"):
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "TSS", "RBS", "TSS_RBS"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "TSS", "RBS", "TSS_RBS"], utr_detect)
    elif (sorf.attributes["rbs"] != "NA") and \
            (sorf.attributes["sRNA"] == "NA"):
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "RBS", "sRNA", "RBS_sRNA"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "RBS", "sRNA", "RBS_sRNA"], utr_detect)
    elif sorf.attributes["with_TSS"] != "NA":
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "TSS"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "TSS"], utr_detect)
    elif sorf.attributes["sRNA"] == "NA":
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "sRNA"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "sRNA"], utr_detect)
    elif sorf.attributes["rbs"] != "NA":
        plus_data(nums, "total", [sorf_type, "all"],
                  ["all", "RBS"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"],
                  ["all", "RBS"], utr_detect)
    else:
        plus_data(nums, "total", [sorf_type, "all"], ["all"], utr_detect)
        plus_data(nums, strain, [sorf_type, "all"], ["all"], utr_detect)


def stat(sorf_all, sorf_best, stat_file, utr_detect):
    sorfs_all = read_file(sorf_all)
    sorfs_best = read_file(sorf_best)
    nums = get_stat_num(sorfs_all, utr_detect)
    nums_best = get_stat_num(sorfs_best, utr_detect)
    out = open(stat_file, "w")
    out.write("The filtering condition for the best sORF: \n")
    out.write("1. If TSS file exists, it will select the "
              "sORF which start from TSS.\n")
    out.write("2. If TSS file exists, it will select the "
              "sORF which have a ribosomal binding site ")
    out.write("and the ribosomal binding site should be after a TSS.\n")
    out.write("3. If sRNA file exists and you want to "
              "exclude sORF which overlap with sRNA, ")
    out.write("it will select sORF which have non-overlap with sRNA.\n\n")
    if len(nums) <= 2:
        for strain in nums.keys():
            if strain != "total":
                print_stat(nums, nums_best, strain, out, utr_detect)
    else:
        for strain in nums.keys():
            print_stat(nums, nums_best, strain, out, utr_detect)
    out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/stat_sorf.py
stat_sorf.py
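The statistics entry point above takes the full and the filtered sORF GFF files plus a UTR flag; a minimal invocation with placeholder names:

    from annogesiclib.stat_sorf import stat

    # utr_detect=True additionally breaks counts down by UTR-derived classes
    stat("sORF_all.gff", "sORF_best.gff", "stat_sORF.csv", True)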
import os
import sys
import shutil
from subprocess import call
from annogesiclib.multiparser import Multiparser
from annogesiclib.helper import Helper
from annogesiclib.TSS_upstream import upstream, del_repeat_fasta
from annogesiclib.gen_promoter_table import gen_promoter_table


class MEME(object):
    '''detection of promoter'''

    def __init__(self, args_pro):
        self.multiparser = Multiparser()
        self.helper = Helper()
        self.tss_path = os.path.join(args_pro.tsss, "tmp")
        if args_pro.gffs is not None:
            self.gff_path = os.path.join(args_pro.gffs, "tmp")
        else:
            self.gff_path = None
        self.out_fasta = os.path.join(args_pro.output_folder,
                                      "fasta_classes")
        self.tmp_folder = os.path.join(os.getcwd(), "tmp")
        self.fastas = {"pri": os.path.join(self.tmp_folder, "primary.fa"),
                       "sec": os.path.join(self.tmp_folder, "secondary.fa"),
                       "inter": os.path.join(self.tmp_folder, "internal.fa"),
                       "anti": os.path.join(self.tmp_folder, "antisense.fa"),
                       "orph": os.path.join(self.tmp_folder, "orphan.fa"),
                       "all_no_orph": "without_orphan.fa",
                       "all": "all_type.fa",
                       "tmp_fa": os.path.join(self.tmp_folder, "tmp.fa"),
                       "tmp_all": os.path.join(self.tmp_folder,
                                               "tmp_all.fa")}
        self.all_fasta = os.path.join(args_pro.fastas, "allfasta.fa")
        self.all_tss = os.path.join(self.tss_path, "allfasta_TSS.gff")

    def _gen_and_check_folder(self, out_path, folder, type_):
        sub_out_folder = os.path.join(out_path, type_)
        if folder in os.listdir(sub_out_folder):
            shutil.rmtree(os.path.join(sub_out_folder, folder))
        return sub_out_folder

    def _run_normal_motif(self, input_path, out_path, filename,
                          fasta, width, args_pro, log):
        '''run MEME with specific width'''
        folder = "_".join(["promoter_motifs", filename, str(width), "nt"])
        if (args_pro.program.lower() == "meme") or (
                args_pro.program.lower() == "both"):
            meme_folder = self._gen_and_check_folder(
                out_path, folder, "MEME")
            command = [args_pro.meme_path, "-maxsize", "1000000",
                       "-dna", "-nmotifs", str(args_pro.num_motif),
                       "-w", str(width), "-maxiter", "100",
                       "-evt", str(args_pro.e_value)]
            if args_pro.para is not None:
                command = command + ["-p", args_pro.para]
            log.write(" ".join(command + ["-oc", os.path.join(
                meme_folder, folder),
                os.path.join(input_path, fasta)]) + "\n")
            call(command + ["-oc", os.path.join(meme_folder, folder),
                            os.path.join(input_path, fasta)])
        if (args_pro.program.lower() == "glam2") or (
                args_pro.program.lower() == "both"):
            glam_folder = self._gen_and_check_folder(
                out_path, folder, "GLAM2")
            log.write(" ".join([args_pro.glam2_path, "-O",
                                os.path.join(glam_folder, folder),
                                "-w", str(width), "-b", str(width),
                                "-r", str(args_pro.num_motif),
                                "-n", str(args_pro.end_run), "n",
                                os.path.join(input_path, fasta)]) + "\n")
            call([args_pro.glam2_path, "-O",
                  os.path.join(glam_folder, folder),
                  "-w", str(width), "-b", str(width),
                  "-r", str(args_pro.num_motif),
                  "-n", str(args_pro.end_run), "n",
                  os.path.join(input_path, fasta)])

    def _run_small_motif(self, input_path, out_path, filename,
                         fasta, width, args_pro, log):
        '''run MEME with range of width'''
        data = width.split("-")
        min_width = data[0]
        max_width = data[1]
        folder = "_".join(["promoter_motifs", filename,
                           "-".join([str(min_width), str(max_width)]),
                           "nt"])
        if (args_pro.program.lower() == "meme") or (
                args_pro.program.lower() == "both"):
            meme_folder = self._gen_and_check_folder(
                out_path, folder, "MEME")
            command = [args_pro.meme_path, "-maxsize", "1000000",
                       "-dna", "-nmotifs", str(args_pro.num_motif),
                       "-minsites", "0", "-maxsites", "2",
                       "-minw", str(min_width), "-maxw", str(max_width),
                       "-maxiter", "100", "-evt", str(args_pro.e_value)]
            if args_pro.para is not None:
                command = command + ["-p", args_pro.para]
            log.write(" ".join(command + ["-oc", os.path.join(
                meme_folder, folder),
                os.path.join(input_path, fasta)]) + "\n")
            call(command + ["-oc", os.path.join(meme_folder, folder),
                            os.path.join(input_path, fasta)])
        if (args_pro.program.lower() == "glam2") or (
                args_pro.program.lower() == "both"):
            glam_folder = self._gen_and_check_folder(
                out_path, folder, "GLAM2")
            log.write(" ".join([args_pro.glam2_path, "-O",
                                os.path.join(glam_folder, folder),
                                "-a", str(min_width), "-b", str(max_width),
                                "-r", str(args_pro.num_motif),
                                "-n", str(args_pro.end_run), "n",
                                os.path.join(input_path, fasta)]) + "\n")
            call([args_pro.glam2_path, "-O",
                  os.path.join(glam_folder, folder),
                  "-a", str(min_width), "-b", str(max_width),
                  "-r", str(args_pro.num_motif),
                  "-n", str(args_pro.end_run), "n",
                  os.path.join(input_path, fasta)])

    def _get_fasta_file(self, fasta_path, prefix):
        for fasta in os.listdir(fasta_path):
            if (fasta.endswith(".fa")) and \
                    (prefix == fasta.replace(".fa", "")):
                break
            elif (fasta.endswith(".fna")) and \
                    (prefix == fasta.replace(".fna", "")):
                break
            elif (fasta.endswith(".fasta")) and \
                    (prefix == fasta.replace(".fasta", "")):
                break
        return fasta

    def _check_gff(self, gffs):
        for gff in os.listdir(gffs):
            if gff.endswith(".gff"):
                self.helper.check_uni_attributes(os.path.join(gffs, gff))

    def _move_and_merge_fasta(self, input_path, prefix):
        all_type = os.path.join(self.tmp_folder, self.fastas["all"])
        all_no_orph = os.path.join(self.tmp_folder,
                                   self.fastas["all_no_orph"])
        if self.fastas["all"] in os.listdir(self.tmp_folder):
            os.remove(all_type)
        if self.fastas["all_no_orph"] in os.listdir(self.tmp_folder):
            os.remove(all_no_orph)
        shutil.copyfile(self.fastas["pri"], self.fastas["tmp_fa"])
        self.helper.merge_file(self.fastas["sec"], self.fastas["tmp_fa"])
        self.helper.merge_file(self.fastas["inter"], self.fastas["tmp_fa"])
        self.helper.merge_file(self.fastas["anti"], self.fastas["tmp_fa"])
        shutil.copyfile(self.fastas["tmp_fa"], self.fastas["tmp_all"])
        self.helper.merge_file(self.fastas["orph"], self.fastas["tmp_all"])
        del_repeat_fasta(self.fastas["tmp_fa"], all_no_orph)
        del_repeat_fasta(self.fastas["tmp_all"], all_type)
        os.remove(self.fastas["tmp_fa"])
        os.remove(self.fastas["tmp_all"])
        out_prefix = os.path.join(input_path, prefix)
        shutil.move(self.fastas["pri"], "_".join([
            out_prefix, "allgenome_primary.fa"]))
        shutil.move(self.fastas["sec"], "_".join([
            out_prefix, "allgenome_secondary.fa"]))
        shutil.move(self.fastas["inter"], "_".join([
            out_prefix, "allgenome_internal.fa"]))
        shutil.move(self.fastas["anti"], "_".join([
            out_prefix, "allgenome_antisense.fa"]))
        shutil.move(self.fastas["orph"], "_".join([
            out_prefix, "allgenome_orphan.fa"]))
        shutil.move(all_type, "_".join([
            out_prefix, "allgenome_all_types.fa"]))
        shutil.move(all_no_orph, "_".join([
            out_prefix, "allgenome_without_orphan.fa"]))

    def _split_fasta_by_strain(self, input_path):
        for fasta in os.listdir(input_path):
            if "allgenome" not in fasta:
                os.remove(os.path.join(input_path, fasta))
        out = None
        for fasta in os.listdir(input_path):
            if fasta.endswith(".fa"):
                pre_strain = ""
                num_strain = 0
                with open(os.path.join(input_path, fasta), "r") as f_h:
                    for line in f_h:
                        line = line.strip()
                        if line.startswith(">"):
                            datas = line.split("_")
                            strain = "_".join(datas[2:])
                            if (pre_strain != strain):
                                num_strain += 1
                                filename = fasta.split("allgenome")
                                if out is not None:
                                    out.close()
                                out = open(os.path.join(
                                    input_path, "".join([
                                        filename[0], strain,
                                        filename[-1]])), "a")
                                pre_strain = strain
                            out.write(line + "\n")
                        else:
                            out.write(line + "\n")
                if num_strain == 1:
                    os.remove(os.path.join(input_path,
                                           "".join([filename[0], strain,
                                                    filename[-1]])))
        out.close()

    def _run_program(self, prefixs, args_pro, log, input_fastas):
        log.write("Using MEME or GLAM2 to predict promoters.\n")
        log.write("Please make sure their versions are at least 4.11.1.\n")
        log.write("If you are running in parallel, please make sure you "
                  "have installed MPICH and its version is at least 3.2.\n")
        for prefix in prefixs:
            input_path = os.path.join(self.out_fasta, prefix)
            out_path = os.path.join(args_pro.output_folder, prefix)
            if args_pro.program.lower() == "both":
                self.helper.check_make_folder(os.path.join(out_path, "MEME"))
                self.helper.check_make_folder(os.path.join(out_path,
                                                           "GLAM2"))
            elif args_pro.program.lower() == "meme":
                self.helper.check_make_folder(os.path.join(out_path, "MEME"))
            elif args_pro.program.lower() == "glam2":
                self.helper.check_make_folder(os.path.join(out_path,
                                                           "GLAM2"))
            for fasta in os.listdir(input_path):
                filename = fasta.replace(".fa", "")
                names = filename.split("_")
                if (names[-1] in input_fastas) or (
                        ("_".join(names[-2:]) == "all_types") and (
                        "all_types" in input_fastas)) or (
                        ("_".join(names[-2:]) == "without_orphan") and (
                        "without_orphan" in input_fastas)):
                    for width in args_pro.widths:
                        print("Computing promoters of {0} - {1}".format(
                            fasta, width))
                        log.write("Computing promoters of {0} - "
                                  "length {1}.\n".format(fasta, width))
                        if "-" in width:
                            self._run_small_motif(input_path, out_path,
                                                  filename, fasta, width,
                                                  args_pro, log)
                        else:
                            self._run_normal_motif(input_path, out_path,
                                                   filename, fasta, width,
                                                   args_pro, log)
            log.write("Promoter search for {0} is done.\n".format(prefix))
            log.write("All the output files from MEME or GLAM2 are "
                      "generated and stored in {0}.\n".format(out_path))

    def _combine_file(self, prefixs, args_pro):
        '''combine all TSS file in the input folder to generate the
        global TSS for detecting the global promoter'''
        if args_pro.source:
            for tss in os.listdir(self.tss_path):
                if tss.endswith("_TSS.gff"):
                    self.helper.merge_file(os.path.join(
                        self.tss_path, tss), self.all_tss)
            for fasta in os.listdir(args_pro.fastas):
                if (fasta.endswith(".fa")) or (
                        fasta.endswith(".fna")) or (
                        fasta.endswith(".fasta")):
                    self.helper.merge_file(os.path.join(
                        args_pro.fastas, fasta), self.all_fasta)
        else:
            for tss in os.listdir(os.path.join(
                    args_pro.output_folder, "TSS_classes")):
                if tss.endswith("_TSS.gff"):
                    self.helper.merge_file(os.path.join(
                        self.tss_path, tss), self.all_tss)
            for fasta in os.listdir(args_pro.fastas):
                if (fasta.endswith(".fa")) or (
                        fasta.endswith(".fna")) or (
                        fasta.endswith(".fasta")):
                    self.helper.merge_file(os.path.join(
                        args_pro.fastas, fasta), self.all_fasta)
        print("Generating fasta file of all sequences")
        prefixs.append("allfasta")
        input_path = os.path.join(self.out_fasta, "allfasta")
        self.helper.check_make_folder(os.path.join(
            args_pro.output_folder, "allfasta"))
        self.helper.check_make_folder(os.path.join(
            self.out_fasta, "allfasta"))
        args_pro.source = True
        upstream(self.all_tss, self.all_fasta, None, None, args_pro, None)
        self._move_and_merge_fasta(input_path, "allfasta")

    def _remove_files(self, args_pro):
        self.helper.remove_tmp_dir(args_pro.fastas)
        self.helper.remove_tmp_dir(args_pro.tsss)
        self.helper.remove_tmp_dir(args_pro.gffs)
        if "tmp_wig" in os.listdir(args_pro.output_folder):
            shutil.rmtree(os.path.join(args_pro.output_folder, "tmp_wig"))
        if "allfasta" in os.listdir(os.getcwd()):
            shutil.rmtree("allfasta")
        if "tmp" in os.listdir(os.getcwd()):
            shutil.rmtree("tmp")

    def _gen_table(self, output_folder, prefixs, combine, program, log):
        '''generate the promoter table'''
        log.write("Running gen_promoter_table.py to generate the promoter "
                  "table, which is useful for sRNA prediction.\n")
        log.write("The following files are generated:\n")
        if combine:
            strains = prefixs + ["allfasta"]
        else:
            strains = prefixs
        for strain in strains:
            tss_file = os.path.join(self.tss_path, strain + "_TSS.gff")
            if (program.lower() == "both") or (
                    program.lower() == "meme"):
                for folder in os.listdir(os.path.join(output_folder,
                                                      strain, "MEME")):
                    csv_file = os.path.join(output_folder, strain,
                                            "MEME", folder, "meme.csv")
                    gen_promoter_table(os.path.join(output_folder, strain,
                                                    "MEME", folder,
                                                    "meme.txt"),
                                       csv_file, tss_file, "meme")
                    log.write("\t" + csv_file + "\n")
            if (program.lower() == "both") or (
                    program.lower() == "glam2"):
                for folder in os.listdir(os.path.join(output_folder,
                                                      strain, "GLAM2")):
                    csv_file = os.path.join(output_folder, strain,
                                            "GLAM2", folder, "glam2.csv")
                    gen_promoter_table(os.path.join(output_folder, strain,
                                                    "GLAM2", folder,
                                                    "glam2.txt"),
                                       csv_file, tss_file, "glam2")
                    log.write("\t" + csv_file + "\n")

    def _get_upstream(self, args_pro, prefix, tss, fasta):
        '''get upstream sequence of TSS'''
        if args_pro.source:
            print("Generating fasta file of {0}".format(prefix))
            upstream(os.path.join(self.tss_path, tss),
                     os.path.join(args_pro.fastas, fasta),
                     None, None, args_pro, prefix)
        else:
            if (args_pro.gffs is None):
                print("Error: Please assign proper annotation!")
                sys.exit()
            if "TSS_classes" not in os.listdir(args_pro.output_folder):
                os.mkdir(os.path.join(args_pro.output_folder,
                                      "TSS_classes"))
            print("Classifying TSSs and extracting sequence "
                  "of {0}".format(prefix))
            upstream(os.path.join(self.tss_path, tss),
                     os.path.join(args_pro.fastas, fasta),
                     os.path.join(self.gff_path, prefix + ".gff"),
                     os.path.join(args_pro.output_folder, "TSS_classes",
                                  "_".join([prefix, "TSS.gff"])),
                     args_pro, prefix)

    def _get_used_tss_type(self, args_pro):
        input_fastas = []
        for tss in args_pro.use_tss:
            if int(tss) == 1:
                input_fastas.append("all_types")
            elif int(tss) == 2:
                input_fastas.append("primary")
            elif int(tss) == 3:
                input_fastas.append("secondary")
            elif int(tss) == 4:
                input_fastas.append("internal")
            elif int(tss) == 5:
                input_fastas.append("antisense")
            elif int(tss) == 6:
                input_fastas.append("orphan")
            elif int(tss) == 7:
                input_fastas.append("without_orphan")
            else:
                print("Error: The assignment of --use_tss_typ is wrong!")
                sys.exit()
        return input_fastas

    def run_meme(self, args_pro, log):
        if "allfasta.fa" in os.listdir(args_pro.fastas):
            os.remove(self.all_fasta)
            if "allfasta.fa_folder" in os.listdir(args_pro.fastas):
                shutil.rmtree(os.path.join(args_pro.fastas,
                                           "allfasta.fa_folder"))
        self.multiparser.parser_fasta(args_pro.fastas)
        self.multiparser.parser_gff(args_pro.tsss, "TSS")
        if "allfasta_TSS.gff" in os.listdir(self.tss_path):
            os.remove(self.all_tss)
        if args_pro.gffs is not None:
            self._check_gff(args_pro.gffs)
            self.multiparser.parser_gff(args_pro.gffs, None)
            self.multiparser.combine_gff(args_pro.fastas, self.gff_path,
                                         "fasta", None)
        self._check_gff(args_pro.tsss)
        self.multiparser.combine_gff(args_pro.fastas, self.tss_path,
                                     "fasta", "TSS")
        self.helper.check_make_folder(self.out_fasta)
        self.helper.check_make_folder(self.tmp_folder)
        prefixs = []
        log.write("Running TSS_upstream.py to extract the upstream "
                  "sequences of TSSs.\n")
        log.write("The following files are generated:\n")
        for tss in os.listdir(self.tss_path):
            prefix = tss.replace("_TSS.gff", "")
            prefixs.append(prefix)
            self.helper.check_make_folder(os.path.join(
                args_pro.output_folder, prefix))
            self.helper.check_make_folder(os.path.join(
                self.out_fasta, prefix))
            input_path = os.path.join(self.out_fasta, prefix)
            fasta = self._get_fasta_file(args_pro.fastas, prefix)
            self._get_upstream(args_pro, prefix, tss, fasta)
            self._move_and_merge_fasta(input_path, prefix)
            self._split_fasta_by_strain(input_path)
            for file_ in os.listdir(input_path):
                log.write("\t" + os.path.join(input_path, file_) + "\n")
        if args_pro.combine:
            self._combine_file(prefixs, args_pro)
            for file_ in os.listdir(os.path.join(
                    self.out_fasta, "allfasta")):
                log.write("\t" + os.path.join(
                    self.out_fasta, "allfasta", file_) + "\n")
        input_fastas = self._get_used_tss_type(args_pro)
        self._run_program(prefixs, args_pro, log, input_fastas)
        print("Generating the tables")
        self._gen_table(args_pro.output_folder, prefixs,
                        args_pro.combine, args_pro.program, log)
        self._remove_files(args_pro)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/meme.py
meme.py
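Running the class above only needs an argument container exposing the attributes it reads, so a bare namespace is enough for a sketch; all paths, tool names, and parameter values below are assumptions:

    import argparse
    from annogesiclib.meme import MEME

    args_pro = argparse.Namespace(
        tsss="input/tsss", gffs="input/gffs", fastas="input/fastas",
        output_folder="output/promoters", program="meme",
        meme_path="meme", glam2_path="glam2", e_value=0.05,
        num_motif=10, end_run=10000, para=None,
        widths=["45", "2-10"], use_tss=["1"], combine=False, source=True)
    with open("promoter.log", "w") as log:
        MEME(args_pro).run_meme(args_pro, log)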
import math
import shutil
import matplotlib as mpl
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper
import numpy as np
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')


def plot(utr, utr_pri, utr_sec, filename, source, utr_type, base_5utr):
    '''plot the distribution of length of UTRs'''
    bin_num = np.arange(0, 300, 5)
    if utr_type == "5utr":
        if source and (base_5utr != "transcript"):
            n, bins, hist1 = plt.hist(utr, bin_num, color="#FF9999",
                                      label='secondary',
                                      edgecolor='black', linewidth=1)
            n, bins, hist2 = plt.hist(utr_pri, bin_num, color="#9999FF",
                                      label='primary',
                                      edgecolor='black', linewidth=1)
            plt.legend((hist2[0], hist1[0]),
                       ("Primary TSSs", "Secondary TSSs"))
        else:
            n, bins, hist1 = plt.hist(utr, bin_num, color="#FF9999")
        plt.xlabel("5'UTR_length")
    elif utr_type == "3utr":
        n, bins, hist = plt.hist(utr, bin_num, color="#9999FF",
                                 label='3\'UTR', edgecolor='black',
                                 linewidth=1)
        plt.legend([hist[0]], ["3'UTR"])
        plt.xlabel("3'UTR_length")
    plt.ylabel("Amount")
    plt.savefig(filename)
    plt.clf()


def get_feature(cds):
    if "protein_id" in cds.attributes.keys():
        cds_name = cds.attributes["protein_id"]
    elif "locus_tag" in cds.attributes.keys():
        cds_name = cds.attributes["locus_tag"]
    else:
        strand = Helper().get_strand_name(cds.strand)
        cds_name = "".join([cds.feature, ":", str(cds.start),
                            "-", str(cds.end), "_", strand])
    return cds_name


def check_ta(tas, utr_start, utr_end, seq_id, strand):
    '''check transcript to verify the UTR'''
    detect = False
    ta = None
    for ta in tas:
        if (ta.seq_id == seq_id) and (ta.strand == strand):
            if (ta.start <= utr_start) and (ta.end >= utr_end):
                detect = True
                break
    return detect, ta


def import_utr(tss, utr_strain, utr_all, start, end, tas, length, args_utr):
    detect = False
    if args_utr.source:
        if "Primary" in tss.attributes["type"]:
            if args_utr.base_5utr.lower() == "both":
                detect, ta = check_ta(tas, start, end,
                                      tss.seq_id, tss.strand)
            elif args_utr.base_5utr.lower() == "tss":
                detect = True
            if detect:
                utr_strain["pri"][tss.seq_id].append(length)
                utr_all["pri"].append(length)
        elif "Secondary" in tss.attributes["type"]:
            if args_utr.base_5utr.lower() == "both":
                detect, ta = check_ta(tas, start, end,
                                      tss.seq_id, tss.strand)
            elif args_utr.base_5utr.lower() == "tss":
                detect = True
            if detect:
                utr_strain["sec"][tss.seq_id].append(length)
                utr_all["sec"].append(length)
    else:
        if args_utr.base_5utr.lower() == "both":
            detect, ta = check_ta(tas, start, end, tss.seq_id, tss.strand)
        elif args_utr.base_5utr.lower() == "tss":
            detect = True
        if detect:
            utr_strain["all"][tss.seq_id].append(length)
            utr_all["all"].append(length)
    if args_utr.base_5utr.lower() == "tss":
        ta = None
    return detect, ta


def get_print_string_5utr(num_utr, name_utr, length, tss, cds_name,
                          locus_tag, ta, source, out, start, end, utrs_tss):
    if "Name" not in tss.attributes.keys():
        tss.attributes["Name"] = (tss.feature + ":" + str(tss.start) +
                                  "-" + str(tss.end) + "_" + tss.strand)
    attribute_string = ";".join(
        ["=".join(items) for items in [
            ("ID", "_".join([tss.seq_id, "utr5", str(num_utr)])),
            ("Name", "_".join(["5'UTR", name_utr])),
            ("length", str(length)),
            ("associated_cds", cds_name),
            ("associated_gene", locus_tag),
            ("associated_tss", tss.attributes["Name"])]])
    if source:
        attribute_string = ";".join([
            attribute_string,
            "=".join(["tss_type", tss.attributes["type"]])])
    if ta is not None:
        if "ID" in ta.attributes.keys():
            attribute_string = ";".join([attribute_string,
                                         "Parent=" + ta.attributes["ID"]])
        else:
            attribute_string = ";".join([
                attribute_string,
                "=".join(["Parent",
                          "Transcript:" + str(ta.start) + "-" +
                          str(ta.end) + "_" + ta.strand])])
    out.write("{0}\tANNOgesic\t5UTR\t{1}\t{2}\t.\t{3}\t.\t{4}\n".format(
        tss.seq_id, start, end, tss.strand, attribute_string))
    utrs_tss.append({"strain": tss.seq_id, "start": start,
                     "end": end, "strand": tss.strand})


def get_5utr(tss, near_cds, utr_strain, utr_all, tas, num_utr, num,
             num_tss, cds_name, locus_tag, out, args_utr, utrs_tss,
             check_cdss, pres):
    '''print and import the 5UTR information'''
    detect = False
    if tss.strand == "+":
        start = tss.start
        end = near_cds.start - 1
        length = end - start + 1
        if str(near_cds.start) + "+" not in check_cdss:
            detect, ta = import_utr(tss, utr_strain, utr_all,
                                    start, end, tas, length, args_utr)
            check_cdss.append(str(near_cds.start) + "+")
    else:
        start = near_cds.end + 1
        end = tss.end
        length = end - start + 1
        # The original wrote "or [num == num_tss]", a list literal that is
        # always truthy; the intended comparison uses parentheses.
        if (str(near_cds.end) + "-" not in check_cdss) or (num == num_tss):
            check_cdss.append(str(near_cds.end) + "-")
            if pres["tss"] is not None:
                detect, ta = import_utr(pres["tss"], utr_strain, utr_all,
                                        pres["start"], pres["end"], tas,
                                        pres["len"], args_utr)
        pres["start"] = start
        pres["end"] = end
        pres["len"] = length
        pres["tss"] = tss
    if detect:
        name_utr = '%0*d' % (5, num_utr)
        if length >= 0:
            get_print_string_5utr(num_utr, name_utr, length, tss, cds_name,
                                  locus_tag, ta, args_utr.source, out,
                                  start, end, utrs_tss)
            num_utr += 1
    return num_utr


def detect_cds(cdss, gene):
    '''get the information of CDS'''
    detect = False
    for cds in cdss:
        if "Parent" in cds.attributes.keys():
            if gene.attributes["ID"] in cds.attributes["Parent"].split(","):
                detect = True
                near_cds = cds
                check_utr = True
                cds_name = get_feature(cds)
        if not detect:
            if "locus_tag" in cds.attributes.keys():
                if gene.attributes["locus_tag"] == \
                        cds.attributes["locus_tag"]:
                    near_cds = cds
                    cds_name = cds.attributes["locus_tag"]
                    check_utr = True
                    detect = True
            elif (gene.seq_id == cds.seq_id) and \
                    (gene.strand == cds.strand):
                if (gene.start >= cds.start) and \
                        (gene.end <= cds.end):
                    near_cds = cds
                    cds_name = get_feature(cds)
                    check_utr = True
                    detect = True
    if not detect:
        return None, None, False
    else:
        return near_cds, cds_name, check_utr


def read_file(tss_file, gff_file, ta_file, term_file):
    genes = []
    cdss = []
    terms = []
    tsss = []
    tas = []
    source = False
    gff_f = open(gff_file, "r")
    for entry in Gff3Parser().entries(gff_f):
        if (entry.feature == "CDS"):
            cdss.append(entry)
        elif (entry.feature == "gene"):
            genes.append(entry)
    if ta_file is not None:
        for entry in Gff3Parser().entries(open(ta_file)):
            tas.append(entry)
        tas = sorted(tas, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    if term_file is not None:
        for entry_term in Gff3Parser().entries(open(term_file)):
            terms.append(entry_term)
        terms = sorted(terms, key=lambda k: (k.seq_id, k.start,
                                             k.end, k.strand))
    if tss_file is not None:
        for entry in Gff3Parser().entries(open(tss_file)):
            if "type" in entry.attributes.keys():
                if (entry.attributes["type"] != "Orphan"):
                    source = True
            tsss.append(entry)
        tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start,
                                       k.end, k.strand))
    return genes, cdss, terms, tsss, tas, source


def check_associated_TSSpredator(genes, tss, cdss, check_utr, cds_name,
                                 locus):
    '''get the associated TSS which is generated from TSSpredator'''
    near_cds = None
    for gene in genes:
        if (tss.seq_id == gene.seq_id) and (tss.strand == gene.strand):
            if "locus_tag" in gene.attributes.keys():
                if gene.attributes["locus_tag"] == locus:
                    for cds in cdss:
                        if "Parent" in cds.attributes.keys():
                            if (gene.attributes["ID"] in
                                    cds.attributes["Parent"].split(",")):
                                near_cds = cds
                                check_utr = True
                                cds_name = get_feature(cds)
                        if not check_utr:
                            if "locus_tag" in cds.attributes.keys():
                                if (gene.attributes["locus_tag"] ==
                                        cds.attributes["locus_tag"]):
                                    near_cds = cds
                                    cds_name = cds.attributes["locus_tag"]
                                    check_utr = True
                            elif (gene.seq_id == cds.seq_id) and (
                                    gene.strand == cds.strand):
                                if (gene.start >= cds.start) and (
                                        gene.end <= cds.end):
                                    near_cds = cds
                                    cds_name = get_feature(cds)
                                    check_utr = True
    return check_utr, cds_name, near_cds


def get_5utr_from_TSSpredator(tss, genes, cdss):
    '''It is for TSS file which is generated from ANNOgesic'''
    check_utr = False
    cds_name = "NA"
    if ("Primary" in tss.attributes["type"]) or (
            "Secondary" in tss.attributes["type"]):
        ass_gene = tss.attributes["associated_gene"].split(",")
        tss_type = tss.attributes["type"].split(",")
        for index in range(len(tss_type)):
            if (tss_type[index] == "Primary") or (
                    tss_type[index] == "Secondary"):
                locus_tag = ass_gene[index]
                check_utr, cds_name, near_cds = \
                    check_associated_TSSpredator(genes, tss, cdss,
                                                 check_utr, cds_name,
                                                 locus_tag)
                if not check_utr:
                    for cds in cdss:
                        if (cds.seq_id == tss.seq_id) and (
                                cds.strand == tss.strand):
                            strand = Helper().get_strand_name(cds.strand)
                            if locus_tag == (cds.feature + ":" +
                                             str(cds.start) + "-" +
                                             str(cds.end) + "_" + strand):
                                near_cds = cds
                                cds_name = get_feature(cds)
                                check_utr = True
        if check_utr:
            utr_datas = {"check": check_utr, "cds_name": cds_name,
                         "near_cds": near_cds, "locus": locus_tag}
        else:
            utr_datas = {"check": False, "cds_name": None,
                         "near_cds": None, "locus": None}
    else:
        utr_datas = {"check": False, "cds_name": None,
                     "near_cds": None, "locus": None}
    return utr_datas


def detect_feature_5utr(feas, tss, cdss, length, check_cds):
    locus_tag = None
    near_cds = None
    cds_name = None
    check_utr = False
    for fea in feas:
        if (tss.seq_id == fea.seq_id) and (tss.strand == fea.strand):
            if (tss.strand == "+") and (
                    (fea.start - tss.start) <= length) and (
                    tss.start <= fea.start):
                if "locus_tag" in fea.attributes.keys():
                    locus_tag = fea.attributes["locus_tag"]
                elif "ID" in fea.attributes.keys():
                    locus_tag = fea.attributes["ID"]
                else:
                    locus_tag = (fea.feature + ":" + str(fea.start) +
                                 "-" + str(fea.end) + "_" + fea.strand)
                if check_cds:
                    near_cds, cds_name, check_utr = detect_cds(cdss, fea)
                else:
                    near_cds = fea
                    cds_name = get_feature(fea)
                    check_utr = True
                break
            elif (tss.strand == "-") and (
                    (tss.start - fea.end) <= length) and (
                    tss.start >= fea.end):
                if "locus_tag" in fea.attributes.keys():
                    locus_tag = fea.attributes["locus_tag"]
                elif "ID" in fea.attributes.keys():
                    locus_tag = fea.attributes["ID"]
                else:
                    locus_tag = (fea.feature + ":" + str(fea.start) +
                                 "-" + str(fea.end) + "_" + fea.strand)
                if check_cds:
                    near_cds, cds_name, check_utr = detect_cds(cdss, fea)
                else:
                    near_cds = fea
                    cds_name = get_feature(fea)
                    check_utr = True
                break
    return near_cds, cds_name, check_utr, locus_tag


def get_5utr_from_other(tss, genes, cdss, length):
    '''It is for TSS file which is not generated from ANNOgesic'''
    check_utr = False
    cds_name = "NA"
    if len(genes) != 0:
        near_cds, cds_name, check_utr, locus_tag = detect_feature_5utr(
            genes, tss, cdss, length, True)
    else:
        near_cds, cds_name, check_utr, locus_tag = detect_feature_5utr(
            cdss, tss, cdss, length, False)
    if check_utr:
        utr_datas = {"check": check_utr, "cds_name": cds_name,
                     "near_cds": near_cds, "locus": locus_tag}
    else:
        utr_datas = {"check": False, "cds_name": None,
                     "near_cds": None, "locus": None}
    return utr_datas


def get_attribute_string(num_utr, length, cds, gene_name, ta, id_name,
                         name, feature, feature_name):
    name_utr = '%0*d' % (5, num_utr)
    cds_name = get_feature(cds)
    attribute_string = ";".join(
        ["=".join(items) for items in [
            ("ID", "_".join([ta.seq_id, id_name, str(num_utr)])),
            ("Name", "_".join([name, name_utr])),
            ("length", str(length)),
            ("associated_cds", cds_name),
            ("associated_gene", gene_name)]])
    if feature == "Parent":
        if "ID" in ta.attributes.keys():
            attribute_string = ";".join([attribute_string,
                                         "Parent=" + ta.attributes["ID"]])
        else:
            # The original concatenated a stray tuple here, which would
            # raise a TypeError; build the fallback Parent string directly.
            attribute_string = ";".join(
                [attribute_string,
                 "Parent=" + feature_name + str(ta.start) + "-" +
                 str(ta.end) + "_" + ta.strand])
    elif feature == "associated_term":
        attribute_string = ";".join(
            [attribute_string,
             "associated_term=" + (feature_name + str(ta.start) + "-" +
                                   str(ta.end) + "_" + ta.strand)])
    return attribute_string


def get_gene_name(genes, cds):
    gene_name = "NA"
    for gene in genes:
        if (cds.seq_id == gene.seq_id) and (cds.strand == gene.strand):
            if ("Parent" in cds.attributes.keys()) and (
                    "ID" in gene.attributes.keys()):
                if gene.attributes["ID"] in \
                        cds.attributes["Parent"].split(","):
                    if "locus_tag" in gene.attributes.keys():
                        gene_name = gene.attributes["locus_tag"]
                    else:
                        gene_name = ("Gene:" + str(gene.start) + "-" +
                                     str(gene.end) + "_" + gene.strand)
                    break
            if ((cds.start >= gene.start) and (cds.end <= gene.end)):
                if "locus_tag" in gene.attributes.keys():
                    gene_name = gene.attributes["locus_tag"]
                else:
                    gene_name = ("Gene:" + str(gene.start) + "-" +
                                 str(gene.end) + "_" + gene.strand)
                break
    return gene_name


def set_utr_strain(ta, type_, utr_strain):
    if ta.seq_id not in utr_strain[type_].keys():
        utr_strain[type_][ta.seq_id] = []


def check_repeat(start, end, strain, strand, utrs_tss):
    for utr in utrs_tss:
        if (utr["strain"] == strain) and (utr["strand"] == strand):
            if ((utr["start"] >= start) and (
                    utr["end"] <= end)) or (
                    (utr["start"] <= start) and (
                    utr["end"] >= end)) or (
                    (utr["start"] >= start) and (
                    utr["start"] <= end) and (
                    utr["end"] >= end)) or (
                    (utr["start"] <= start) and (
                    utr["end"] >= start) and (
                    utr["end"] <= end)):
                return True
    return False


def compare_ta(tas, genes, cdss, utr_strain, utr_all, out, args_utr,
               utrs_tss, num_utr):
    '''Comparing CDS and transcript to find the 5UTR'''
    for ta in tas:
        detect = False
        set_utr_strain(ta, "all", utr_strain)
        set_utr_strain(ta, "pri", utr_strain)
        set_utr_strain(ta, "sec", utr_strain)
        for cds in cdss:
            if (ta.seq_id == cds.seq_id) and (ta.strand == cds.strand):
                if ta.strand == "+":
                    if ((cds.start - ta.start) <= args_utr.length) and (
                            (cds.start - ta.start) >= 0):
                        if ((ta.start <= cds.start) and (
                                ta.end >= cds.end)) or (
                                (ta.start <= cds.start) and (
                                ta.end <= cds.end) and (
                                ta.end >= cds.start)):
                            if (not check_repeat(ta.start, cds.start,
                                                 ta.seq_id, ta.strand,
                                                 utrs_tss)):
                                length = cds.start - ta.start
                                utr_strain["all"][ta.seq_id].append(length)
                                utr_all["all"].append(length)
                                gene_name = get_gene_name(genes, cds)
                                string = get_attribute_string(
                                    num_utr, length, cds, gene_name, ta,
                                    "utr5", "5'UTR", "Parent",
                                    "Transcript:")
                                detect = True
                                start = ta.start
                                end = cds.start - 1
                                break
                else:
                    if ((ta.end - cds.end) <= args_utr.length) and (
                            (ta.end - cds.end) >= 0):
                        if ((ta.start <= cds.start) and (
                                ta.end >= cds.end)) or (
                                (ta.start >= cds.start) and (
                                ta.start <= cds.end) and (
                                ta.end >= cds.end)):
                            if (not check_repeat(cds.end, ta.end,
                                                 ta.seq_id, ta.strand,
                                                 utrs_tss)):
                                near_cds = cds
                                detect = True
        if (ta.strand == "-") and (detect):
            length = ta.end - near_cds.end
            utr_strain["all"][ta.seq_id].append(length)
            utr_all["all"].append(length)
            gene_name = get_gene_name(genes, near_cds)
            string = get_attribute_string(
                num_utr, length, near_cds, gene_name, ta,
                "utr5", "5'UTR", "Parent", "Transcript:")
            start = near_cds.end + 1
            end = ta.end
        if detect:
            if end - start > 0:
                string = ";".join([string, "associated_tss=NA",
                                   "tss_type=NA"])
                out.write("{0}\tANNOgesic\t5UTR\t{1}\t{2}\t.\t{3}\t.\t"
                          "{4}\n".format(ta.seq_id, start, end,
                                         ta.strand, string))
                num_utr += 1
    return num_utr


def detect_5utr(tss_file, gff_file, ta_file, out_file, args_utr):
    '''detection of 5UTR'''
    num_utr = 0
    utr_all = {"all": [], "pri": [], "sec": []}
    utr_strain = {"all": {}, "pri": {}, "sec": {}}
    pre_seq_id = ""
    out = open(out_file, "w")
    out.write("##gff-version 3\n")
    genes, cdss, terms, tsss, tas, source = read_file(
        tss_file, gff_file, ta_file, None)
    utrs_tss = []
    check_cdss = []
    pres = {"check": None, "tss": None, "start": -1, "end": -1, "len": -1}
    if (args_utr.source) and (not source):
        args_utr.source = False
    if (args_utr.base_5utr.upper() == "TSS") or (
            args_utr.base_5utr.lower() == "both"):
        num = 0
        for tss in tsss:
            num = num + 1
            if args_utr.source:
                utr_datas = get_5utr_from_TSSpredator(tss, genes, cdss)
            else:
                utr_datas = get_5utr_from_other(tss, genes, cdss,
                                                args_utr.length)
            if utr_datas["check"]:
                if tss.seq_id != pre_seq_id:
                    pre_seq_id = tss.seq_id
                    utr_strain["pri"][tss.seq_id] = []
                    utr_strain["sec"][tss.seq_id] = []
                    utr_strain["all"][tss.seq_id] = []
                num_utr = get_5utr(tss, utr_datas["near_cds"], utr_strain,
                                   utr_all, tas, num_utr, num, len(tsss),
                                   utr_datas["cds_name"],
                                   utr_datas["locus"], out, args_utr,
                                   utrs_tss, check_cdss, pres)
    if (args_utr.base_5utr.lower() == "transcript") or (
            args_utr.base_5utr.lower() == "both"):
        num_utr = compare_ta(tas, genes, cdss, utr_strain, utr_all,
                             out, args_utr, utrs_tss, num_utr)
    out.close()
    Helper().sort_gff(out_file, out_file + "sort")
    shutil.move(out_file + "sort", out_file)
    name = (gff_file.split("/"))[-1].replace(".gff", "")
    plot(utr_all["all"], utr_all["pri"], utr_all["sec"],
         "_".join([name, "all_5utr_length.png"]),
         args_utr.source, "5utr", args_utr.base_5utr)
    if len(utr_strain["all"]) > 1:
        for strain in utr_strain["all"].keys():
            plot(utr_strain["all"][strain], utr_strain["pri"][strain],
                 utr_strain["sec"][strain],
                 "_".join([strain, "5utr_length.png"]),
                 args_utr.source, "5utr", args_utr.base_5utr)


def compare_term(ta, terms, fuzzy):
    '''Comparing of transcript and terminator to get the terminator
    which is associated with transcript'''
    for term in terms:
        if ta.strand == term.strand:
            if term.strand == "+":
                if (math.fabs(ta.end - term.start) <= fuzzy) or \
                        ((ta.end >= term.start) and (ta.end <= term.end)):
                    return term
            else:
                if (math.fabs(ta.start - term.end) <= fuzzy) or \
                        ((ta.start >= term.start) and
                         (ta.start <= term.end)):
                    return term


def get_3utr(ta, near_cds, utr_all, utr_strain, attributes, num_utr,
             out, args_utr, utrs_ta):
    '''print the 3UTR'''
    if ta.strand == "+":
        start = near_cds.end + 1
        end = ta.end
        length = ta.end - near_cds.end
        utr_all.append(length)
        utr_strain[ta.seq_id].append(length)
    else:
        start = ta.start
        end = near_cds.start - 1
        length = near_cds.start - ta.start
        utr_all.append(length)
        utr_strain[ta.seq_id].append(length)
    attributes.append("=".join(["length", str(length)]))
    if "ID" not in ta.attributes.keys():
        attributes.append("=".join([
            "Parent", "Transcript:" + str(ta.start) + "-" +
            str(ta.end) + "_" + ta.strand]))
    else:
        attributes.append("=".join(["Parent", ta.attributes["ID"]]))
    attribute = ";".join(attributes)
    if (length <= args_utr.length) and (length > 0):
        name_utr = '%0*d' % (5, num_utr)
        name = "=".join(["Name", "_".join(["3'UTR", name_utr])])
        id_ = "ID=" + ta.seq_id + "_utr3_" + str(num_utr)
        num_utr += 1
        attribute_string = ";".join([id_, name, attribute])
        out.write("\t".join([ta.seq_id, "ANNOgesic", "3UTR",
                             str(start), str(end), ta.score,
                             ta.strand, ta.phase,
                             attribute_string]) + "\n")
        utrs_ta.append({"strain": ta.seq_id, "start": start,
                        "end": end, "strand": ta.strand})
    return num_utr


def get_gene_string(gene, attributes):
    if "locus_tag" in gene.attributes.keys():
        attributes.append("=".join(["associated_gene",
                                    gene.attributes["locus_tag"]]))
    else:
        gene_string = (gene.feature + ":" + str(gene.start) + "-" +
                       str(gene.end) + "_" + gene.strand)
        attributes.append("=".join(["associated_gene", gene_string]))


def get_near_cds(cdss, genes, ta, attributes, utr_length):
    '''Get the associated CDS of terminator'''
    detect = False
    near_cds = None
    for cds in cdss:
        if (ta.seq_id == cds.seq_id) and (
                ta.strand == cds.strand) and (
                cds.feature == "CDS"):
            if ta.strand == "+":
                if (cds.end <= ta.end) and (
                        (ta.end - cds.end) <= utr_length) and (
                        (ta.end - cds.end) > 0):
                    if (near_cds is None):
                        near_cds = cds
                    else:
                        if (cds.end > near_cds.end):
                            near_cds = cds
                    detect = True
            else:
                if (cds.start >= ta.start) and (
                        (cds.start - ta.start) <= utr_length) and (
                        (cds.start - ta.start) > 0):
                    if (near_cds is None):
                        near_cds = cds
                    else:
                        if (cds.start < near_cds.start):
                            near_cds = cds
                    detect = True
    if detect:
        check_gene = False
        for gene in genes:
            if ("Parent" in near_cds.attributes.keys()) and (
                    "ID" in gene.attributes.keys()):
                if gene.attributes["ID"] in \
                        near_cds.attributes["Parent"].split(","):
                    get_gene_string(gene, attributes)
                    check_gene = True
                    break
            else:
                if (gene.seq_id == near_cds.seq_id) and (
                        gene.strand == near_cds.strand):
                    if ((gene.start >= near_cds.start) and (
                            gene.end <= near_cds.end)) or (
                            (gene.start <= near_cds.start) and (
                            gene.end >= near_cds.end)) or (
                            (gene.start >= near_cds.start) and (
                            gene.start <= near_cds.end) and (
                            gene.end >= near_cds.end)) or (
                            (gene.start <= near_cds.start) and (
                            gene.end >= near_cds.start) and (
                            gene.end <= near_cds.end)):
                        get_gene_string(gene, attributes)
                        check_gene = True
                        break
        if not check_gene:
            # "assoicated_gene" in the original was a typo for the
            # attribute key used everywhere else in this module.
            attributes.append("associated_gene=NA")
        if "protein_id" in near_cds.attributes.keys():
            attributes.append("=".join([
                "associated_cds", near_cds.attributes["protein_id"]]))
        elif "locus_tag" in near_cds.attributes.keys():
            attributes.append("=".join([
                "associated_cds", near_cds.attributes["locus_tag"]]))
        else:
            cds_string = (near_cds.feature + ":" + str(near_cds.start) +
                          "-" + str(near_cds.end) + "_" + near_cds.strand)
            attributes.append("=".join(["associated_cds", cds_string]))
    else:
        near_cds = None
    return near_cds


def compare_term_3utr(terms, cdss, genes, utr_all, utr_strain, args_utr,
                      out, utrs_ta, num_utr):
    '''Comparing of terminator and 3UTR to get the relationship'''
    for term in terms:
        detect = False
        if term.seq_id not in utr_strain.keys():
            utr_strain[term.seq_id] = []
        for cds in cdss:
            if (cds.seq_id == term.seq_id) and (
                    cds.strand == term.strand):
                if term.strand == "+":
                    if (term.end >= cds.end) and (
                            ((term.end - cds.end) <= args_utr.length) or (
                            ((term.start - cds.end) <= args_utr.length) and (
                            term.start >= cds.end))):
                        if (not check_repeat(cds.end, term.end, cds.seq_id,
                                             cds.strand, utrs_ta)):
                            detect = True
                            near_cds = cds
                else:
                    if (term.start <= cds.start) and (
                            ((cds.start - term.start) <=
                             args_utr.length) or (
                            ((cds.start - term.end) <=
                             args_utr.length) and (
                            cds.start >= term.end))):
                        if (not check_repeat(term.start, cds.start,
                                             cds.seq_id, cds.strand,
                                             utrs_ta)):
                            length = term.start - cds.start
                            utr_strain[term.seq_id].append(length)
                            utr_all.append(length)
                            gene_name = get_gene_name(genes, cds)
                            string = get_attribute_string(
                                num_utr, length, cds, gene_name, term,
                                "utr3", "3'UTR", "associated_term",
                                "Terminator:")
                            detect = True
                            start = term.start
                            end = cds.start - 1
                            break
        if (term.strand == "+") and detect:
            length = term.end - near_cds.end
            utr_strain[term.seq_id].append(length)
            utr_all.append(length)
            gene_name = get_gene_name(genes, near_cds)
            string = get_attribute_string(
                num_utr, length, near_cds, gene_name, term,
                "utr3", "3'UTR", "associated_term", "Terminator:")
            detect = True
            start = near_cds.end + 1
            end = term.end
        if detect:
            if end - start > 0:
                out.write("{0}\tANNOgesic\t3UTR\t{1}\t{2}\t.\t{3}\t.\t"
                          "{4}\n".format(term.seq_id, start, end,
                                         term.strand, string))
                num_utr += 1
    return num_utr


def detect_3utr(ta_file, gff_file, term_file, out_file, args_utr):
    '''For detection of 3UTR'''
    num_utr = 0
    utr_all = []
    utr_strain = {}
    pre_seq_id = ""
    out = open(out_file, "w")
    out.write("##gff-version 3\n")
    genes, cdss, terms, tsss, tas, args_utr.source = read_file(
        None, gff_file, ta_file, term_file)
    utrs_ta = []
    if (args_utr.base_3utr == "transcript") or (
            args_utr.base_3utr == "both"):
        for ta in tas:
            if term_file is not None:
                term = compare_term(ta, terms, args_utr.fuzzy)
            else:
                term = None
            attributes = []
            if term_file is not None:
                if term is not None:
                    attributes.append(
                        "=".join(["associated_term",
                                  "Terminator:" + str(term.start) + "-" +
                                  str(term.end) + "_" + term.strand]))
                else:
                    attributes.append("=".join(["associated_term", "NA"]))
            near_cds = get_near_cds(cdss, genes, ta, attributes,
                                    args_utr.length)
            if ta.seq_id != pre_seq_id:
                pre_seq_id = ta.seq_id
                utr_strain[ta.seq_id] = []
            if near_cds is not None:
                num_utr = get_3utr(ta, near_cds, utr_all, utr_strain,
                                   attributes, num_utr, out, args_utr,
                                   utrs_ta)
    if (args_utr.base_3utr == "terminator") or (
            args_utr.base_3utr == "both"):
        num_utr = compare_term_3utr(terms, cdss, genes, utr_all,
                                    utr_strain, args_utr, out,
                                    utrs_ta, num_utr)
    out.close()
    Helper().sort_gff(out_file, out_file + "sort")
    shutil.move(out_file + "sort", out_file)
    name = (gff_file.split("/"))[-1].replace(".gff", "")
    plot(utr_all, None, None, "_".join([name, "all_3utr_length.png"]),
         None, "3utr", "3utr")
    if len(utr_strain) > 1:
        for strain in utr_strain.keys():
            plot(utr_strain[strain], None, None,
                 "_".join([strain, "3utr_length.png"]),
                 None, "3utr", "3utr")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/detect_utr.py
detect_utr.py
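The two entry points above share one argument container. Below is a minimal, illustrative driver, assuming GFF3 input files and an args object carrying only the fields visible in the code above (length, source, base_5utr, base_3utr, fuzzy); all file names and values here are hypothetical, not part of the original module.

    from argparse import Namespace
    from annogesiclib.detect_utr import detect_5utr, detect_3utr

    # Hypothetical settings; only the attributes that detect_utr.py reads.
    args_utr = Namespace(length=300, source=True,
                         base_5utr="both", base_3utr="both", fuzzy=10)
    # 5'UTR: the span between a TSS (or transcript start) and the nearest CDS.
    detect_5utr("tss.gff", "annotation.gff", "transcript.gff",
                "5utr.gff", args_utr)
    # 3'UTR: the span between the nearest CDS end and the
    # transcript/terminator end.
    detect_3utr("transcript.gff", "annotation.gff", "terminator.gff",
                "3utr.gff", args_utr)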
import os import csv import shutil from copy import deepcopy from annogesiclib.gff3 import Gff3Parser def import_data(row, type_, args_srna, term_path): if type_ == "gff": data = {"strain": row[0], "name": row[1], "start": int(row[2]), "end": int(row[3]), "strand": row[4], "conds": row[5], "detect": row[6], "tss_pro": row[7], "end_pro": row[8], "avg": float(row[9]), "overlap_CDS": row[11], "overlap_percent": row[12]} if (term_path is not None) and (args_srna.promoter_table is None): data["track"] = row[10] data["with_term"] = row[13] elif (term_path is None) and (args_srna.promoter_table is not None): data["track"] = row[10] data["promoter"] = row[13] elif (term_path is not None) and ( args_srna.promoter_table is not None): data["track"] = row[10] data["with_term"] = row[13] data["promoter"] = row[14] else: data["track"] = row[10] if args_srna.import_info is None: data["track"] = row[10] return data elif type_ == "nr": if len(row) == 9: return {"strain": row[0], "name": row[1], "strand": row[2], "start": int(row[3]), "end": int(row[4]), "hits": "|".join(row[5:9])} elif len(row) == 6: return {"strain": row[0], "name": row[1], "strand": row[2], "start": int(row[3]), "end": int(row[4]), "hits": row[5]} elif type_ == "sRNA": if len(row) == 8: return {"strain": row[0], "name": row[1], "strand": row[2], "start": int(row[3]), "end": int(row[4]), "hits": "|".join(row[5:])} elif len(row) == 6: return {"strain": row[0], "name": row[1], "strand": row[2], "start": int(row[3]), "end": int(row[4]), "hits": row[5]} def merge_info(blasts): first = True finals = [] if len(blasts) != 0: for blast in blasts: if first: repeat = 0 first = False pre_blast = deepcopy(blast) else: if (pre_blast["strain"] == blast["strain"]) and ( pre_blast["strand"] == blast["strand"]) and ( pre_blast["start"] == blast["start"]) and ( pre_blast["end"] == blast["end"]): if (repeat < 2): pre_blast["hits"] = ";".join([ pre_blast["hits"], blast["hits"]]) repeat += 1 else: repeat = 0 finals.append(pre_blast) pre_blast = blast.copy() finals.append(pre_blast) return finals def compare_srna_table(srna_tables, srna, final, args_srna): '''Get the information from sRNA table which has more details''' for table in srna_tables: tsss = [] pros = [] cands = [] if (table["strain"] == srna.seq_id) and ( table["strand"] == srna.strand) and ( table["start"] == srna.start) and ( table["end"] == srna.end): final = dict(final, **table) start_datas = table["tss_pro"].split(";") end_datas = table["end_pro"].split(";") tsss.append(table["start"]) pros.append(table["end"]) for data in start_datas: if "TSS" in data: if table["start"] != int(data.split(":")[1][:-2]): tsss.append(int(data.split(":")[1][:-2])) elif "Cleavage" in data: if table["end"] != int(data.split(":")[1][:-2]): pros.append(int(data.split(":")[1][:-2])) for data in end_datas: if "Cleavage" in data: if table["end"] != int(data.split(":")[1][:-2]): pros.append(int(data.split(":")[1][:-2])) for tss in tsss: for pro in pros: if ((pro - tss) >= args_srna.min_len) and ( (pro - tss) <= args_srna.max_len): cands.append("-".join([str(tss), str(pro)])) final["candidates"] = ";".join(cands) if ("tex" in table["conds"]) and ( "frag" in table["conds"]): final["type"] = "TEX+/-;Fragmented" elif ("tex" in table["conds"]): final["type"] = "TEX+/-" elif ("frag" in table["conds"]): final["type"] = "Fragmented" return final def compare_blast(blasts, srna, final, hit): for blast in blasts: if (srna.seq_id == blast["strain"]) and ( srna.strand == blast["strand"]) and ( srna.start == blast["start"]) and ( 
srna.end == blast["end"]): final[hit] = blast["hits"] return final def compare_promoter(final, args_srna): '''modify the score of sRNA by comparing with promoter''' if "promoter" in final.keys(): if final["promoter"] != "NA": final["score"] = final["avg"]*args_srna.rank_promoter else: final["score"] = final["avg"] else: final["score"] = final["avg"] return final def check_keys(ref_key, final_key, srna, final): if ref_key in srna.attributes.keys(): final[final_key] = srna.attributes[ref_key] else: final[final_key] = "NA" def compare(srnas, srna_tables, nr_blasts, srna_blasts, args_srna): '''Check sRNA candidate pass the filters or not''' finals = [] for srna in srnas: final = {} check_keys("2d_energy", "energy", srna, final) check_keys("nr_hit", "nr_hit_num", srna, final) check_keys("sRNA_hit", "sRNA_hit_num", srna, final) check_keys("sORF", "sORF", srna, final) check_keys("with_term", "with_term", srna, final) check_keys("end_pro", "end_pro", srna, final) check_keys("promoter", "promoter", srna, final) if srna.attributes["sRNA_type"] == "intergenic": final["utr"] = "Intergenic" elif srna.attributes["sRNA_type"] == "in_CDS": final["utr"] = "In_CDS" elif srna.attributes["sRNA_type"] == "antisense": final["utr"] = "Antisense" else: if "," in srna.attributes["sRNA_type"]: final["utr"] = "5'UTR_derived;3'UTR_derived" elif srna.attributes["sRNA_type"] == "5utr": final["utr"] = "5'UTR_derived" elif srna.attributes["sRNA_type"] == "3utr": final["utr"] = "3'UTR_derived" elif srna.attributes["sRNA_type"] == "interCDS": final["utr"] = "InterCDS" final = compare_srna_table(srna_tables, srna, final, args_srna) final = compare_blast(nr_blasts, srna, final, "nr_hit") final = compare_blast(srna_blasts, srna, final, "sRNA_hit") final = compare_promoter(final, args_srna) finals.append(final) return finals def change_srna_name(final): '''get the proper name of sRNA''' names = [] num = 0 for hit in final["sRNA_hit"].split(";"): if hit != "NA": hit_name = hit.split("|")[-3] hit_name = hit_name[0].upper() + hit_name[1:] num += 1 if "Sau" in hit_name: sau = hit_name.split("-") if len(sau) == 1: hit_name = hit_name[:3] + "-" + hit_name[3:] if hit_name not in names: names.append(hit_name) if num == 3: break return names def print_file(finals, out, srnas, out_gff): rank = 1 for final in finals: names = [final["name"]] if "nr_hit" not in final.keys(): final["nr_hit"] = "NA" if "sRNA_hit" not in final.keys(): final["sRNA_hit"] = "NA" if "with_term" not in final.keys(): final["with_term"] = "NA" if "promoter" not in final.keys(): final["promoter"] = "NA" if final["sRNA_hit"] != "NA": names = change_srna_name(final) length = final["end"] - final["start"] out.write("\t".join([str(rank), final["strain"], "/".join(names), str(final["start"]), str(final["end"]), final["strand"], final["tss_pro"], final["end_pro"], final["candidates"], final["type"], str(final["avg"]), final["track"], final["energy"], final["utr"], final["sORF"], final["nr_hit_num"], final["sRNA_hit_num"], final["nr_hit"], final["sRNA_hit"], final["overlap_CDS"], str(final["overlap_percent"]), final["with_term"], final["promoter"], str(length)]) + "\n") rank += 1 for srna in srnas: for final in finals: if (srna.seq_id == final["strain"]) and ( srna.start == final["start"]) and ( srna.end == final["end"]) and ( srna.strand == final["strand"]): if ("sRNA_hit" in final.keys()): if final["sRNA_hit"] != "NA": names = change_srna_name(final) srna.attributes["Name"] = "/".join(names) srna.attributes["gene"] = "/".join(names) attribute_string = ";".join( 
["=".join(items) for items in srna.attributes.items()]) out_gff.write("\t".join([srna.info_without_attributes, attribute_string]) + "\n") def read_table(srna_table_file, nr_blast, srna_blast_file, args_srna, term_path): srna_tables = [] nr_blasts = [] srna_blasts = [] f_h = open(srna_table_file, "r") for row in csv.reader(f_h, delimiter='\t'): srna_tables.append(import_data(row, "gff", args_srna, term_path)) f_h.close() if os.path.exists(nr_blast): f_h = open(nr_blast, "r") for row in csv.reader(f_h, delimiter='\t'): nr_blasts.append(import_data(row, "nr", args_srna, term_path)) f_h.close() if os.path.exists(srna_blast_file): f_h = open(srna_blast_file, "r") for row in csv.reader(f_h, delimiter='\t'): srna_blasts.append(import_data(row, "sRNA", args_srna, term_path)) f_h.close() return srna_tables, nr_blasts, srna_blasts def read_gff(srna_gff): srnas = [] for entry in Gff3Parser().entries(open(srna_gff)): srnas.append(entry) srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) return srnas def gen_srna_table(srna_gff, srna_table_file, nr_blast, srna_blast_file, args_srna, out_file, term_path): '''generate the sRNA table for more details''' srnas = read_gff(srna_gff) srna_tables, nr_blasts, srna_blasts = read_table( srna_table_file, nr_blast, srna_blast_file, args_srna, term_path) out = open(out_file, "w") tmp_gff = out_file.replace(".csv", ".gff") out_gff = open(tmp_gff, "w") out_gff.write("##gff-version 3\n") out.write("\t".join([ "Rank", "Genome", "Name", "Start", "End", "Strand", "Start_with_TSS/Cleavage_site", "End_with_cleavage", "Candidates", "Lib_type", "Best_avg_coverage", "Track/Coverage", "Normalized_secondary_energy_change(by_length)", "sRNA_types", "Conflict_sORF", "nr_hit_number", "sRNA_hit_number", "nr_hit_top3|ID|e-value|score", "sRNA_hit|e-value|score", "Overlap_CDS", "Overlap_percent", "End_with_terminator", "Associated_promoter", "sRNA_length"]) + "\n") nr_blasts = merge_info(nr_blasts) srna_blasts = merge_info(srna_blasts) finals = compare(srnas, srna_tables, nr_blasts, srna_blasts, args_srna) sort_finals = sorted(finals, key=lambda x: (x["score"]), reverse=True) print_file(sort_finals, out, srnas, out_gff) out_gff.close() shutil.move(tmp_gff, srna_gff) def print_best(detect, out, srna): no_print = False for key, value in detect.items(): if not value: no_print = True if not no_print: out.write(srna.info + "\n") def check_energy(srna, import_info, energy, detect): '''check the folding energy of sRNA''' if import_info is not None: if ("sec_str" in import_info) and ( "2d_energy" in srna.attributes.keys()): if float(srna.attributes["2d_energy"]) < energy: detect["energy"] = True else: detect["energy"] = True else: detect["energy"] = True def check_tss(import_info, srna, detect): '''check the sRNA is associated with TSS or not''' if import_info is not None: if "tss" in import_info: if "with_TSS" in srna.attributes.keys(): if srna.attributes["with_TSS"] != "NA": detect["TSS"] = True elif (srna.attributes["sRNA_type"] != "intergenic") and ( srna.attributes["sRNA_type"] != "in_CDS") and ( srna.attributes["sRNA_type"] != "antisense"): if (("3utr" in srna.attributes["sRNA_type"]) or ( "interCDS" in srna.attributes["sRNA_type"])) and ( srna.attributes["start_cleavage"] != "NA"): detect["TSS"] = True else: detect["TSS"] = True else: detect["TSS"] = True def check_nr_hit(srna, nr_hits_num, detect, import_info): '''check the sRNA has hit in nr database or not''' if import_info is not None: if ("nr_hit" in srna.attributes.keys()) and ( "blast_nr" in import_info): 
if (srna.attributes["nr_hit"] == "NA") or ( int(srna.attributes["nr_hit"]) <= nr_hits_num): detect["nr_hit"] = True else: detect["nr_hit"] = True else: detect["nr_hit"] = True def check_sorf(import_info, srna, detect): '''check the sRNA is overlap with sORF or not''' if import_info is not None: if ("sorf" in import_info): if ("sORF" in srna.attributes.keys()): if srna.attributes["sORF"] == "NA": detect["sORF"] = True else: detect["sORF"] = True else: detect["sORF"] = True def check_srna_hit(srna, import_info, detect): '''check the sRNA has hit in sRNA database or not''' if import_info is not None: if ("sRNA_hit" in srna.attributes.keys()) and ( "blast_srna" in import_info): if (srna.attributes["sRNA_hit"] != "NA"): for key in detect.keys(): detect[key] = True else: count = 0 for value in detect.values(): if value: count += 1 if count == 4: detect["sRNA_hit"] = True else: detect["sRNA_hit"] = True else: detect["sRNA_hit"] = True def check_term(import_info, srna, detect): '''check the sRNA is associated with terminator or not''' if import_info is not None: if "term" in import_info: if ("with_term" in srna.attributes.keys()): if srna.attributes["with_term"] != "NA": detect["term"] = True elif ("end_cleavage" in srna.attributes.keys()): if srna.attributes["end_cleavage"] != "NA": detect["term"] = True else: detect["term"] = True else: detect["term"] = True def check_promoter(import_info, srna, detect): '''check the sRNA is associated with promoter or not''' if import_info is not None: if "promoter" in import_info: if ("promoter" in srna.attributes.keys()): if srna.attributes["promoter"] != "NA": detect["promoter"] = True else: detect["promoter"] = True else: detect["promoter"] = True def gen_best_srna(srna_file, out_file, args_srna): '''generate the best sRNA candidates''' srnas = read_gff(srna_file) out = open(out_file, "w") out.write("##gff-version 3\n") for srna in srnas: detect = {"energy": False, "TSS": False, "nr_hit": False, "sRNA_hit": False, "sORF": False, "term": False, "promoter": False} check_energy(srna, args_srna.import_info, args_srna.energy, detect) check_tss(args_srna.import_info, srna, detect) check_nr_hit(srna, args_srna.nr_hits_num, detect, args_srna.import_info) check_sorf(args_srna.import_info, srna, detect) check_srna_hit(srna, args_srna.import_info, detect) check_term(args_srna.import_info, srna, detect) check_promoter(args_srna.import_info, srna, detect) print_best(detect, out, srna) out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gen_srna_output.py
gen_srna_output.py
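The final ranking in gen_srna_table sorts candidates by the score computed in compare_promoter: a candidate backed by a detected promoter has its best average coverage multiplied by rank_promoter before the descending sort. A small self-contained sketch of that rule; the weighting factor and coverage values are illustrative only.

    # Illustrative restatement of compare_promoter plus the final sort.
    rank_promoter = 2.0  # assumed stand-in for args_srna.rank_promoter
    finals = [
        {"name": "srna_00001", "avg": 50.0, "promoter": "NA"},
        {"name": "srna_00002", "avg": 40.0, "promoter": "Promoter_1"},
    ]
    for final in finals:
        if final.get("promoter", "NA") != "NA":
            final["score"] = final["avg"] * rank_promoter
        else:
            final["score"] = final["avg"]
    ranked = sorted(finals, key=lambda x: x["score"], reverse=True)
    # srna_00002 (score 80.0) now outranks srna_00001 (score 50.0).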
class Paths(object): '''Setup the folders of ANNOgesic''' def __init__(self, base_path="."): self.base_path = base_path self._set_folder_names() def _set_folder_names(self): """Set the name of folders used in a project.""" self.input_folder = "%s/input" % (self.base_path) self.output_folder = "%s/output" % (self.base_path) self._set_input_folder_names() self._set_reference_input_folder_names() self._set_wig_folder_names() self._set_bam_folder_names() self._set_output_folder_names() self._set_target_folder_names() self._set_tsspredator_folder_names() self._set_transterm_folder_names() self._set_processing_folder_names() self._set_transcript_folder_names() self._set_utr_folder_names() self._set_srna_folder_names() self._set_sorf_folder_names() self._set_operon_folder_names() self._set_circrna_folder_names() self._set_goterm_folder_names() self._set_starget_folder_names() self._set_snp_folder_names() self._set_ppi_folder_names() self._set_sublocal_folder_names() self._set_ribos_folder_names() self._set_thermo_folder_names() self._set_crispr_folder_names() self.version_path = "%s/used_annogesic_version.txt" % (self.base_path) def _set_input_folder_names(self): self.reference_input_folder = "%s/references" % self.input_folder self.wig_folder = "%s/wigs" % self.input_folder self.mutation_table_folder = "%s/mutation_tables" % self.input_folder self.database_folder = "%s/databases" % self.input_folder self.manual_TSS_folder = "%s/manual_TSSs" % self.input_folder self.manual_pro_folder = "%s/manual_processing_sites" % ( self.input_folder) self.read_folder = "%s/reads" % self.input_folder self.bam_folder = "%s/BAMs" % self.input_folder self.riborfam_folder = "%s/riboswitch_ID_file" % self.input_folder self.thermorfam_folder = "%s/RNA_thermometer_ID_file" % self.input_folder def _set_output_folder_names(self): self.target_folder = "%s/updated_references" % self.output_folder self.ratt_folder = "%s/annotation_transfer" % self.output_folder self.tsspredator_folder = "%s/TSSs" % self.output_folder self.utr_folder = "%s/UTRs" % self.output_folder self.transterm_folder = "%s/terminators" % self.output_folder self.transcript_output_folder = ( "%s/transcripts" % self.output_folder) self.processing_site_folder = "%s/processing_sites" % self.output_folder self.srna_folder = "%s/sRNAs" % self.output_folder self.sorf_folder = "%s/sORFs" % self.output_folder self.promoter_output_folder = "%s/promoters" % ( self.output_folder) self.operon_output_folder = "%s/operons" % self.output_folder self.circrna_output_folder = "%s/circRNAs" % self.output_folder self.goterm_output_folder = "%s/GO_terms" % self.output_folder self.starget_output_folder = "%s/sRNA_targets" % self.output_folder self.snp_output_folder = "%s/SNP_calling" % self.output_folder self.ppi_output_folder = "%s/PPI_networks" % self.output_folder self.sublocal_output_folder = "%s/subcellular_localization" % ( self.output_folder) self.ribos_output_folder = "%s/riboswitches" % self.output_folder self.thermo_output_folder = "%s/RNA_thermometers" % self.output_folder self.crispr_output_folder = "%s/crisprs" % self.output_folder def _set_transcript_folder_names(self): self.transcript_base_folder = "%s/transcripts" % ( self.output_folder) self.transcript_gff_folder = "%s/gffs" % self.transcript_base_folder self.transcript_stat_folder = "%s/statistics" % ( self.transcript_base_folder) self.transcript_table_folder = "%s/tables" % ( self.transcript_base_folder) def _set_reference_input_folder_names(self): self.reference_base_folder = "%s/references" % 
self.input_folder self.ref_annotation_folder = "%s/annotations" % ( self.reference_base_folder) self.ref_fasta_folder = "%s/fasta_files" % ( self.reference_base_folder) def _set_wig_folder_names(self): self.wig_base_folder = "%s/wigs" % self.input_folder self.frag_folder = "%s/fragment" % ( self.wig_base_folder) self.tex_folder = "%s/tex_notex" % ( self.wig_base_folder) def _set_bam_folder_names(self): self.bam_base_folder = "%s/BAMs" % self.input_folder self.bam_ref_folder = "%s/BAMs_map_related_genomes" % self.bam_base_folder self.bam_tar_folder = "%s/BAMs_map_reference_genomes" % self.bam_base_folder self.bam_ref_frag_folder = "%s/fragment" % ( self.bam_ref_folder) self.bam_tar_frag_folder = "%s/fragment" % ( self.bam_tar_folder) self.bam_ref_tex_folder = "%s/tex_notex" % ( self.bam_ref_folder) self.bam_tar_tex_folder = "%s/tex_notex" % ( self.bam_tar_folder) def _set_target_folder_names(self): self.target_base_folder = "%s/updated_references" % self.output_folder self.tar_fasta_folder = "%s/fasta_files" % ( self.target_base_folder) self.tar_annotation_folder = "%s/annotations" % ( self.target_base_folder) def _set_tsspredator_folder_names(self): self.tsspredator_base_folder = "%s/TSSs" % self.output_folder self.tss_to_gff_folder = "%s/gffs" % ( self.tsspredator_base_folder) self.tss_statistics_folder = "%s/statistics" % ( self.tsspredator_base_folder) self.tss_Master_folder = "%s/MasterTables" % ( self.tsspredator_base_folder) self.tss_config_folder = "%s/configs" % ( self.tsspredator_base_folder) def _set_processing_folder_names(self): self.processing_base_folder = "%s/processing_sites" % self.output_folder self.processing_to_gff_folder = "%s/gffs" % ( self.processing_base_folder) self.processing_statistics_folder = "%s/statistics" % ( self.processing_base_folder) self.processing_screenshot_folder = "%s/screenshots" % ( self.processing_base_folder) self.processing_Master_folder = "%s/MasterTables" % ( self.processing_base_folder) self.processing_config_folder = "%s/configs" % ( self.processing_base_folder) def _set_transterm_folder_names(self): self.transterm_base_folder = "%s/terminators" % self.output_folder self.term_to_gff_folder = "%s/gffs" % ( self.transterm_base_folder) self.term_to_table_folder = "%s/tables" % ( self.transterm_base_folder) self.transtermhp_folder = "%s/transtermhp_results" % ( self.transterm_base_folder) self.term_statistics_folder = "%s/statistics" % ( self.transterm_base_folder) def _set_utr_folder_names(self): self.utr_base_folder = "%s/UTRs" % self.output_folder self.utr5_folder = "%s/5UTRs" % ( self.utr_base_folder) self.utr3_folder = "%s/3UTRs" % ( self.utr_base_folder) self.utr3_stat_folder = "%s/statistics" % ( self.utr3_folder) self.utr3_gff_folder = "%s/gffs" % ( self.utr3_folder) self.utr5_stat_folder = "%s/statistics" % ( self.utr5_folder) self.utr5_gff_folder = "%s/gffs" % ( self.utr5_folder) def _set_srna_folder_names(self): self.srna_base_folder = "%s/sRNAs" % self.output_folder self.srna_gff_folder = "%s/gffs" % ( self.srna_base_folder) self.srna_table_folder = "%s/tables" % ( self.srna_base_folder) self.srna_plot_folder = "%s/figs" % ( self.srna_base_folder) self.srna_sec_plot_folder = "%s/figs/sec_plots" % ( self.srna_base_folder) self.srna_dot_plot_folder = "%s/figs/dot_plots" % ( self.srna_base_folder) self.srna_mountain_folder = "%s/figs/mountain_plots" % ( self.srna_base_folder) self.srna_blast_folder = "%s/blast_results_and_misc" % ( self.srna_base_folder) self.srna_stat_folder = "%s/statistics" % ( self.srna_base_folder) 
self.srna_gff_class_folder = "%s/for_classes" % ( self.srna_gff_folder) self.srna_gff_best_folder = "%s/best_candidates" % ( self.srna_gff_folder) self.srna_gff_all_folder = "%s/all_candidates" % ( self.srna_gff_folder) self.srna_table_class_folder = "%s/for_classes" % ( self.srna_table_folder) self.srna_table_best_folder = "%s/best_candidates" % ( self.srna_table_folder) self.srna_table_all_folder = "%s/all_candidates" % ( self.srna_table_folder) def _set_sorf_folder_names(self): self.sorf_base_folder = "%s/sORFs" % self.output_folder self.sorf_gff_folder = "%s/gffs" % ( self.sorf_base_folder) self.sorf_table_folder = "%s/tables" % ( self.sorf_base_folder) self.sorf_stat_folder = "%s/statistics" % ( self.sorf_base_folder) self.sorf_gff_best_folder = "%s/best_candidates" % ( self.sorf_gff_folder) self.sorf_gff_all_folder = "%s/all_candidates" % ( self.sorf_gff_folder) self.sorf_table_best_folder = "%s/best_candidates" % ( self.sorf_table_folder) self.sorf_table_all_folder = "%s/all_candidates" % ( self.sorf_table_folder) def _set_operon_folder_names(self): self.operon_base_folder = "%s/operons" % self.output_folder self.operon_gff_folder = "%s/gffs" % ( self.operon_base_folder) self.operon_table_folder = "%s/tables" % ( self.operon_base_folder) self.operon_statistics_folder = "%s/statistics" % ( self.operon_base_folder) def _set_circrna_folder_names(self): self.circrna_base_folder = "%s/circRNAs" % self.output_folder self.circrna_align_folder = "%s/segemehl_alignment_files" % ( self.circrna_base_folder) self.circrna_splice_folder = "%s/segemehl_splice_results" % ( self.circrna_base_folder) self.circrna_circ_folder = "%s/circRNA_tables" % ( self.circrna_base_folder) self.circrna_stat_folder = "%s/statistics" % ( self.circrna_base_folder) self.circrna_gff_folder = "%s/gffs" % ( self.circrna_base_folder) def _set_goterm_folder_names(self): self.goterm_base_folder = "%s/GO_terms" % self.output_folder self.goterm_all_folder = "%s/all_CDSs" % self.goterm_base_folder self.goterm_express_folder = "%s/expressed_CDSs" % ( self.goterm_base_folder) self.goterm_express_result_folder = "%s/GO_term_results" % ( self.goterm_express_folder) self.goterm_express_stat_folder = "%s/statistics" % ( self.goterm_express_folder) self.goterm_all_result_folder = "%s/GO_term_results" % ( self.goterm_all_folder) self.goterm_all_stat_folder = "%s/statistics" % ( self.goterm_all_folder) def _set_starget_folder_names(self): self.starget_base_folder = "%s/sRNA_targets" % self.output_folder self.starget_RNAplex_folder = "%s/RNAplex_results" % ( self.starget_base_folder) self.starget_RNAup_folder = "%s/RNAup_results" % ( self.starget_base_folder) self.starget_IntaRNA_folder = "%s/IntaRNA_results" % ( self.starget_base_folder) self.starget_merge_folder = "%s/merged_results" % ( self.starget_base_folder) self.starget_srna_seq_folder = "%s/sRNA_seqs" % ( self.starget_base_folder) self.starget_target_seq_folder = "%s/target_seqs" % ( self.starget_base_folder) def _set_snp_folder_names(self): self.snp_base_folder = "%s/SNP_calling" % self.output_folder self.ref_snp_folder = "%s/compare_related_and_reference_genomes" % self.snp_base_folder self.tar_snp_folder = "%s/mutations_of_reference_genomes" % self.snp_base_folder self.snp_ref_stat_folder = "%s/statistics" % ( self.ref_snp_folder) self.snp_tar_stat_folder = "%s/statistics" % ( self.tar_snp_folder) self.snp_ref_table_folder = "%s/SNP_tables" % ( self.ref_snp_folder) self.snp_tar_table_folder = "%s/SNP_tables" % ( self.tar_snp_folder) self.snp_ref_raw_folder = 
"%s/SNP_raw_outputs" % ( self.ref_snp_folder) self.snp_tar_raw_folder = "%s/SNP_raw_outputs" % ( self.tar_snp_folder) self.snp_ref_seq_folder = "%s/seqs" % ( self.ref_snp_folder) self.snp_tar_seq_folder = "%s/seqs" % ( self.tar_snp_folder) self.snp_ref_seq_extend_BAQ_folder = "%s/extend_BAQ" % ( self.snp_ref_seq_folder) self.snp_tar_seq_extend_BAQ_folder = "%s/extend_BAQ" % ( self.snp_tar_seq_folder) self.snp_ref_seq_with_BAQ_folder = "%s/with_BAQ" % ( self.snp_ref_seq_folder) self.snp_tar_seq_with_BAQ_folder = "%s/with_BAQ" % ( self.snp_tar_seq_folder) self.snp_ref_seq_without_BAQ_folder = "%s/without_BAQ" % ( self.snp_ref_seq_folder) self.snp_tar_seq_without_BAQ_folder = "%s/without_BAQ" % ( self.snp_tar_seq_folder) def _set_ppi_folder_names(self): self.ppi_base_folder = "%s/PPI_networks" % self.output_folder self.ppi_all_folder = "%s/all_results" % ( self.ppi_base_folder) self.ppi_best_folder = "%s/best_results" % ( self.ppi_base_folder) self.ppi_fig_folder = "%s/figures" % ( self.ppi_base_folder) def _set_sublocal_folder_names(self): self.sublocal_base_folder = "%s/subcellular_localization" % ( self.output_folder) self.sublocal_all_folder = "%s/all_CDSs" % self.sublocal_base_folder self.sublocal_express_folder = "%s/expressed_CDSs" % ( self.sublocal_base_folder) self.sublocal_all_results_folder = "%s/psortb_results" % ( self.sublocal_all_folder) self.sublocal_all_stat_folder = "%s/statistics" % ( self.sublocal_all_folder) self.sublocal_express_results_folder = "%s/psortb_results" % ( self.sublocal_express_folder) self.sublocal_express_stat_folder = "%s/statistics" % ( self.sublocal_express_folder) def _set_ribos_folder_names(self): self.ribos_base_folder = "%s/riboswitches" % self.output_folder self.ribos_gff_folder = "%s/gffs" % ( self.ribos_base_folder) self.ribos_stat_folder = "%s/statistics" % ( self.ribos_base_folder) self.ribos_table_folder = "%s/tables" % ( self.ribos_base_folder) self.ribos_rfam_folder = "%s/scan_Rfam_results" % ( self.ribos_base_folder) def _set_thermo_folder_names(self): self.thermo_base_folder = "%s/RNA_thermometers" % self.output_folder self.thermo_gff_folder = "%s/gffs" % ( self.thermo_base_folder) self.thermo_stat_folder = "%s/statistics" % ( self.thermo_base_folder) self.thermo_table_folder = "%s/tables" % ( self.thermo_base_folder) self.thermo_rfam_folder = "%s/scan_Rfam_results" % ( self.thermo_base_folder) def _set_crispr_folder_names(self): self.crispr_base_folder = "%s/crisprs" % self.output_folder self.crispr_gff_folder = "%s/gffs" % ( self.crispr_base_folder) self.crispr_stat_folder = "%s/statistics" % ( self.crispr_base_folder) self.crispr_data_folder = "%s/CRT_results" % ( self.crispr_base_folder) def required_folders(self, folder_type): if (folder_type == "root"): return (self.required_base_folders() + self.required_input_folders() + self.required_reference_input_folders() + self.required_wig_folders() + self.required_bam_folders()) else: return (self.required_base_folders() + self.required_input_folders() + self.required_reference_input_folders() + self.required_wig_folders() + self.required_bam_folders() + self.required_output_folders(folder_type)) def required_base_folders(self): return [self.input_folder, self.output_folder] def required_input_folders(self): return [self.reference_input_folder, self.wig_folder, self.mutation_table_folder, self.read_folder, self.bam_folder, self.database_folder, self.manual_TSS_folder, self.manual_pro_folder, self.riborfam_folder, self.thermorfam_folder] def required_output_folders(self, folder_type): 
folder_dict = {"get_target_fasta": ( [self.target_folder] + self.required_target_folders()), "annotation_transfer": ( [self.ratt_folder] + self.required_target_folders()), "TSS": ( [self.tsspredator_folder] + self.required_tsspredator_folders()), "processing": ( [self.processing_site_folder] + self.required_processing_folders()), "terminator": ( [self.transterm_folder] + self.required_transterm_folders()), "transcript": ( [self.transcript_output_folder] + self.required_transcript_folders()), "utr": [self.utr_folder] + self.required_utr_folders(), "srna": ( [self.srna_folder] + self.required_srna_folders()), "sorf": ( [self.sorf_folder] + self.required_sorf_folders()), "promoter": [self.promoter_output_folder], "circrna": ( [self.circrna_output_folder] + self.required_circrna_folders()), "go_term": ( [self.goterm_output_folder] + self.required_goterm_folders()), "srna_target": ( [self.starget_output_folder] + self.required_starget_folders()), "snp": ( [self.snp_output_folder] + self.required_snp_folders()), "ppi_network": ( [self.ppi_output_folder] + self.required_ppi_folders()), "subcellular_localization": ( [self.sublocal_output_folder] + self.required_sublocal_folders()), "riboswitch": ( [self.ribos_output_folder] + self.required_ribos_folders()), "thermometer": ( [self.thermo_output_folder] + self.required_thermo_folders()), "crispr": ( [self.crispr_output_folder] + self.required_crispr_folders()), "operon": ( [self.operon_output_folder] + self.required_operon_folders())} return folder_dict[folder_type] def required_reference_input_folders(self): return [self.ref_annotation_folder, self.ref_fasta_folder] def required_wig_folders(self): return [self.tex_folder, self.frag_folder] def required_bam_folders(self): return [self.bam_ref_folder, self.bam_tar_folder, self.bam_ref_tex_folder, self.bam_tar_tex_folder, self.bam_ref_frag_folder, self.bam_tar_frag_folder] def required_target_folders(self): return [self.tar_annotation_folder, self.tar_fasta_folder] def required_tsspredator_folders(self): return [self.tss_to_gff_folder, self.tss_statistics_folder, self.tss_Master_folder, self.tss_config_folder] def required_transterm_folders(self): return [self.term_to_gff_folder, self.term_statistics_folder, self.transtermhp_folder, self.term_to_table_folder] def required_processing_folders(self): return [self.processing_to_gff_folder, self.processing_statistics_folder, self.processing_Master_folder, self.processing_config_folder] def required_transcript_folders(self): return [self.transcript_gff_folder, self.transcript_stat_folder, self.transcript_table_folder] def required_utr_folders(self): return [self.utr5_folder, self.utr3_folder, self.utr5_stat_folder, self.utr5_gff_folder, self.utr3_stat_folder, self.utr3_gff_folder] def required_srna_folders(self): return [self.srna_gff_folder, self.srna_plot_folder, self.srna_sec_plot_folder, self.srna_dot_plot_folder, self.srna_mountain_folder, self.srna_table_folder, self.srna_blast_folder, self.srna_stat_folder, self.srna_gff_class_folder, self.srna_gff_best_folder, self.srna_gff_all_folder, self.srna_table_class_folder, self.srna_table_best_folder, self.srna_table_all_folder] def required_sorf_folders(self): return [self.sorf_gff_folder, self.sorf_table_folder, self.sorf_stat_folder, self.sorf_gff_best_folder, self.sorf_gff_all_folder, self.sorf_table_best_folder, self.sorf_table_all_folder] def required_operon_folders(self): return [self.operon_gff_folder, self.operon_table_folder, self.operon_statistics_folder] def required_circrna_folders(self): 
return [self.circrna_align_folder, self.circrna_splice_folder, self.circrna_circ_folder, self.circrna_stat_folder, self.circrna_gff_folder] def required_goterm_folders(self): return [self.goterm_all_folder, self.goterm_express_folder, self.goterm_express_result_folder, self.goterm_express_stat_folder, self.goterm_all_result_folder, self.goterm_all_stat_folder] def required_starget_folders(self): return [self.starget_RNAplex_folder, self.starget_RNAup_folder, self.starget_IntaRNA_folder, self.starget_merge_folder, self.starget_srna_seq_folder, self.starget_target_seq_folder] def required_snp_folders(self): return [self.ref_snp_folder, self.tar_snp_folder, self.snp_ref_stat_folder, self.snp_tar_stat_folder, self.snp_ref_table_folder, self.snp_tar_table_folder, self.snp_ref_raw_folder, self.snp_tar_raw_folder, self.snp_ref_seq_folder, self.snp_tar_seq_folder, self.snp_ref_seq_extend_BAQ_folder, self.snp_tar_seq_extend_BAQ_folder, self.snp_ref_seq_with_BAQ_folder, self.snp_tar_seq_with_BAQ_folder, self.snp_ref_seq_without_BAQ_folder, self.snp_tar_seq_without_BAQ_folder] def required_ppi_folders(self): return [self.ppi_all_folder, self.ppi_best_folder, self.ppi_fig_folder] def required_sublocal_folders(self): return [self.sublocal_all_folder, self.sublocal_express_folder, self.sublocal_all_results_folder, self.sublocal_all_stat_folder, self.sublocal_express_results_folder, self.sublocal_express_stat_folder] def required_ribos_folders(self): return [self.ribos_gff_folder, self.ribos_table_folder, self.ribos_stat_folder, self.ribos_rfam_folder] def required_thermo_folders(self): return [self.thermo_gff_folder, self.thermo_table_folder, self.thermo_stat_folder, self.thermo_rfam_folder] def required_crispr_folders(self): return [self.crispr_gff_folder, self.crispr_stat_folder, self.crispr_data_folder]
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/paths.py
paths.py
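Paths only derives folder names from a base path; creating the directories is left to the caller (typically via required_folders). A brief usage sketch, with an illustrative project directory:

    from annogesiclib.paths import Paths

    paths = Paths(base_path="my_project")  # hypothetical project directory
    print(paths.srna_gff_best_folder)
    # -> my_project/output/sRNAs/gffs/best_candidates
    # All folders the sRNA subcommand expects to exist:
    srna_folders = paths.required_folders("srna")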
import os import sys import csv import shutil from subprocess import call from annogesiclib.multiparser import Multiparser from annogesiclib.helper import Helper from annogesiclib.gff3 import Gff3Parser class Crispr(object): '''Detection of CRISPR''' def __init__(self, args_cris): self.multiparser = Multiparser() self.helper = Helper() self.gff_parser = Gff3Parser() self.gff_path = os.path.join(args_cris.gffs, "tmp") self.fasta_path = os.path.join(args_cris.fastas, "tmp") self.stat_folder = os.path.join(args_cris.out_folder, "statistics") self.gff_out = os.path.join(args_cris.out_folder, "gffs") self.all_out = os.path.join(args_cris.out_folder, "gffs", "all_candidates") self.best_out = os.path.join(args_cris.out_folder, "gffs", "best_candidates") self.helper.check_make_folder(self.all_out) self.helper.check_make_folder(self.best_out) self.data_folder = os.path.join(args_cris.out_folder, "CRT_results") self.helper.check_make_folder(self.data_folder) self.helper.check_make_folder(self.stat_folder) def _run_crt(self, args_cris, log): '''Running CRT''' print("Running CRT") log.write("Using CRT to predict CRISPRs.\n") log.write("Please make sure the version of CRT is at least 1.2.\n") for seq in os.listdir(self.fasta_path): prefix = ".".join(seq.split(".")[:-1]) log.write(" ".join([ "java", "-cp", args_cris.crt_path, "crt", "-minNR", str(args_cris.min_num_r), "-minRL", str(args_cris.min_len_r), "-maxRL", str(args_cris.max_len_r), "-minSL", str(args_cris.min_len_s), "-maxSL", str(args_cris.max_len_s), "-searchWL", str(args_cris.win_size), os.path.join(self.fasta_path, seq), os.path.join(self.data_folder, prefix + ".txt")]) + "\n") call(["java", "-cp", args_cris.crt_path, "crt", "-minNR", str(args_cris.min_num_r), "-minRL", str(args_cris.min_len_r), "-maxRL", str(args_cris.max_len_r), "-minSL", str(args_cris.min_len_s), "-maxSL", str(args_cris.max_len_s), "-searchWL", str(args_cris.win_size), os.path.join(self.fasta_path, seq), os.path.join(self.data_folder, prefix + ".txt")]) log.write("\t" + os.path.join(self.data_folder, prefix + ".txt") + " is generated.\n") def _read_gff(self, txt): gffs = [] gh = open(os.path.join(self.gff_path, txt.replace(".txt", ".gff")), "r") for entry in Gff3Parser().entries(gh): if (self.helper.feature_without_notgene(entry)): gffs.append(entry) gh.close() return gffs def _compare_gff(self, strain, start, end, gffs, bh, indexs, ignore_hypo): '''Compare CRISPR and genome annotation to remove the false positives''' overlap = False id_ = None for gff in gffs: if (gff.seq_id == strain): if ((gff.start <= start) and (gff.end >= end)) or ( (gff.start >= start) and (gff.end <= end)) or ( (gff.start <= start) and (gff.end > start) and ( gff.end <= end)) or ( (gff.start >= start) and (gff.start < end) and ( gff.end >= end)): if "product" in gff.attributes.keys(): if ((not ignore_hypo) and ("hypothetical protein" in gff.attributes["product"])) or ( "hypothetical protein" not in gff.attributes["product"]): overlap = True if not overlap: id_ = "CRISPR_" + str(indexs["best"]) attribute = ";".join(["ID=" + strain + "_" + id_, "method=CRT"]) bh.write("\t".join([strain, "ANNOgesic", "CRISPR", str(start), str(end), ".", ".", ".", attribute]) + "\n") indexs["best"] += 1 return overlap, id_ def _print_repeat(self, row, strain, file_h, indexs, id_, best): '''Print the repeat units''' if best: num = indexs["re_best"] else: num = indexs["re_all"] if (not row[0].startswith("-")) and ( not row[0].startswith("Repeats:")) and ( not row[0].startswith("CRISPR")) and ( not 
row[0].startswith("POSITION")): start = row[0].strip() end = str(int(start) + len(row[2].strip()) - 1) attribute = ";".join(["ID=" + strain + "_Repeat_" + str(num), "method=CRT", "Parent=" + id_]) file_h.write("\t".join([strain, "ANNOgesic", "repeat_unit", start, end, ".", ".", ".", attribute]) + "\n") num += 1 if row[0].startswith("Repeats:"): indexs["run"] = False return num def _convert_gff(self, ignore_hypo): '''Convert the final CRT output to gff format''' for txt in os.listdir(self.data_folder): gffs = self._read_gff(txt) fh = open(os.path.join(self.data_folder, txt), "r") oh = open(os.path.join( self.all_out, txt.replace(".txt", "_CRISPR.gff")), "w") bh = open(os.path.join( self.best_out, txt.replace(".txt", "_CRISPR.gff")), "w") indexs = {"all": 0, "re_all": 0, "best": 0, "re_best": 0, "run": False} for row in csv.reader(fh, delimiter='\t'): if len(row) != 0: if row[0].startswith("ORGANISM:"): strain = row[0].split(" ")[-1] elif row[0].startswith("CRISPR"): end = row[0].split("-")[-1].strip() start = row[0].split("-")[0].split(":")[-1].strip() id_ = "CRISPR_" + str(indexs["all"]) attribute = ";".join(["ID=" + strain + "_" + id_, "method=CRT"]) oh.write("\t".join([ strain, "ANNOgesic", "CRISPR", start, end, ".", ".", ".", attribute]) + "\n") overlap, over_id = self._compare_gff( strain, int(start), int(end), gffs, bh, indexs, ignore_hypo) indexs["all"] += 1 indexs["run"] = True if indexs["run"]: indexs["re_all"] = self._print_repeat( row, strain, oh, indexs, id_, False) if not overlap: indexs["re_best"] = self._print_repeat( row, strain, bh, indexs, over_id, True) fh.close() oh.close() bh.close() def _stat_and_correct(self, stats, folder): '''do statistics and print the final gff file''' for gff in os.listdir(folder): prefix = gff.replace("_CRISPR.gff", "") stats[prefix] = {"all": {"cri": 0, "re": {}}} gh = open(os.path.join(folder, gff), "r") oh = open("tmp_cri.gff", "w") oh.write("##gff-version 3\n") cr_num = 0 re_num = 0 first = True for entry in Gff3Parser().entries(gh): if entry.seq_id not in stats[prefix].keys(): stats[prefix][entry.seq_id] = {"cri": 0, "re": {}} if entry.feature == "CRISPR": id_ = "CRISPR_" + str(cr_num) attribute = ";".join(["ID=" + entry.seq_id + "_" + id_, "method=CRT", "Name=" + id_]) cr_num += 1 if first: first = False else: if repeat not in stats[prefix][entry.seq_id]["re"].keys(): stats[prefix][entry.seq_id]["re"][repeat] = 1 else: stats[prefix][entry.seq_id]["re"][repeat] += 1 if repeat not in stats[prefix]["all"]["re"].keys(): stats[prefix]["all"]["re"][repeat] = 1 else: stats[prefix]["all"]["re"][repeat] += 1 repeat = 0 stats[prefix][entry.seq_id]["cri"] += 1 stats[prefix]["all"]["cri"] += 1 elif entry.feature == "repeat_unit": attribute = ";".join(["ID=" + entry.seq_id + "_Repeat_" + str(re_num), "method=CRT", "Parent=" + id_, "Name=Repeat_" + str(re_num)]) re_num += 1 repeat += 1 oh.write("\t".join([entry.info_without_attributes, attribute]) + "\n") if not first: if repeat not in stats[prefix][entry.seq_id]["re"].keys(): stats[prefix][entry.seq_id]["re"][repeat] = 1 else: stats[prefix][entry.seq_id]["re"][repeat] += 1 if repeat not in stats[prefix]["all"]["re"].keys(): stats[prefix]["all"]["re"][repeat] = 1 else: stats[prefix]["all"]["re"][repeat] += 1 gh.close() oh.close() os.remove(os.path.join(folder, gff)) shutil.move("tmp_cri.gff", os.path.join(folder, gff)) def _print_file(self, sh, cri_res_all, cri_res_best): sh.write("\tthe number of CRISPR - {0}\n".format( cri_res_all["cri"])) for index, num in cri_res_all["re"].items(): 
sh.write("\t\tCRISPR with {0} repeat units - {1}\n".format( index, num)) sh.write("\tthe number of CRISPR which not overlap " "with genome annotation - {0}\n".format( cri_res_best["cri"])) for index, num in cri_res_best["re"].items(): sh.write("\t\tCRISPR with {0} repeat units - {1}\n".format( index, num)) def _print_stat(self, stats): '''print the statistics file''' for prefix, strains in stats["all"].items(): sh = open(os.path.join(self.stat_folder, prefix + ".csv"), "w") if len(strains) == 1: sh.write("No CRISPR can be detected") elif len(strains) <= 2: for strain, cri_res in strains.items(): if strain != "all": sh.write(strain + ":\n") self._print_file(sh, cri_res, stats["best"][prefix][strain]) else: sh.write("All strains:\n") self._print_file(sh, stats["all"][prefix]["all"], stats["best"][prefix]["all"]) for strain, cri_res in strains.items(): if strain != "all": sh.write(strain + ":\n") if strain not in stats["best"][prefix].keys(): stats["best"][prefix][strain] = {"cri": 0, "re": {}} self._print_file(sh, cri_res, stats["best"][prefix][strain]) sh.close() def run_crispr(self, args_cris, log): '''detection of CRISPR''' self.multiparser.parser_fasta(args_cris.fastas) self.multiparser.parser_gff(args_cris.gffs, None) self._run_crt(args_cris, log) log.write("Converting the results to gff3 format.\n") log.write("The following files are generated:\n") self._convert_gff(args_cris.ignore_hypo) print("All candidates:") self.multiparser.combine_gff(args_cris.gffs, self.all_out, None, "CRISPR") print("Best candidates:") self.multiparser.combine_gff(args_cris.gffs, self.best_out, None, "CRISPR") for folder in (self.all_out, self.best_out): for file_ in os.listdir(folder): log.write("\t" + os.path.join(folder, file_) + "\n") stats = {"all": {}, "best": {}} log.write("Doing statistics and update results.\n") self._stat_and_correct(stats["all"], self.all_out) self._stat_and_correct(stats["best"], self.best_out) self._print_stat(stats) log.write("The following files are generated:\n") for file_ in os.listdir(self.stat_folder): log.write("\t" + os.path.join(self.stat_folder, file_) + "\n") log.write("The following files are updated:\n") for folder in (self.all_out, self.best_out): for file_ in os.listdir(folder): log.write("\t" + os.path.join(folder, file_) + "\n") self.helper.remove_tmp_dir(args_cris.gffs) self.helper.remove_tmp_dir(args_cris.fastas)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/crispr.py
crispr.py
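The interval test in _compare_gff enumerates four containment/partial-overlap cases; apart from how the exact boundaries are treated, they amount to the standard interval-overlap condition. A minimal restatement with illustrative coordinates:

    # Sketch of the overlap logic in _compare_gff; boundary handling
    # differs slightly from the original's four explicit clauses.
    def overlaps(gff_start, gff_end, start, end):
        return gff_start <= end and gff_end >= start

    # A CRISPR array at 100-250 vs. an annotated CDS at 200-400:
    print(overlaps(200, 400, 100, 250))  # True -> excluded from best set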
import os
import sys
from annogesiclib.projectcreator import ProjectCreator
from annogesiclib.paths import Paths
from annogesiclib.get_input import get_file
from annogesiclib.converter import Converter
from annogesiclib.get_target_fasta import TargetFasta
from annogesiclib.ratt import RATT
from annogesiclib.tsspredator import TSSpredator
from annogesiclib.optimize import optimize_tss
from annogesiclib.color_png import ColorPNG
from annogesiclib.terminator import Terminator
from annogesiclib.transcript import TranscriptDetection
from annogesiclib.utr import UTRDetection
from annogesiclib.srna import sRNADetection
from annogesiclib.sorf import sORFDetection
from annogesiclib.meme import MEME
from annogesiclib.operon import OperonDetection
from annogesiclib.circrna import CircRNADetection
from annogesiclib.goterm import GoTermFinding
from annogesiclib.srna_target import sRNATargetPrediction
from annogesiclib.snp import SNPCalling
from annogesiclib.ppi import PPINetwork
from annogesiclib.sublocal import SubLocal
from annogesiclib.ribos import Ribos
from annogesiclib.crispr import Crispr
from annogesiclib.merge_feature import run_merge
from annogesiclib.overlap import deal_overlap
from annogesiclib.screen import Screen
from annogesiclib.args_container import ArgsContainer
from annogesiclib.helper import Helper

project_creator = ProjectCreator()


class Controller(object):
    """Manage the actions of the subcommands.

    The Controller takes care of providing the arguments like path names
    and the parallel processing of tasks.
    """

    def __init__(self, args):
        """Create an instance."""
        self._args = args
        if (len(args.__dict__) > 3):
            if not os.path.exists(args.project_path):
                print("Error: --project_path does not exist!")
                sys.exit()
            self._paths = Paths(args.project_path)
        self.args_container = ArgsContainer()
        self.helper = Helper()

    def check_folder(self, folders, flags, log):
        '''Check for empty or wrongly assigned folders'''
        for folder, flag in zip(folders, flags):
            if folder is None:
                log.write("{0} of {1} is not found. Please check it!".format(
                    folder, flag))
                print("Error: {0} of {1} is not found. "
                      "Please check it!".format(folder, flag))
                sys.exit()
            else:
                if os.path.exists(folder):
                    if len(os.listdir(folder)) == 0:
                        log.write("{0} is an empty folder!".format(flag))
                        print("Error: {0} is an empty folder!".format(flag))
                        sys.exit()
                else:
                    log.write("{0} of {1} is not found. "
                              "Please check it!".format(folder, flag))
                    print("Error: {0} of {1} is not found. "
                          "Please check it!".format(folder, flag))
                    sys.exit()

    def check_multi_files(self, input_files, flags, log):
        if input_files is not None:
            for files, flag in zip(input_files, flags):
                if files is not None:
                    for file_ in files:
                        if not os.path.exists(file_):
                            print("Error: {0} in {1} does "
                                  "not exist!".format(file_, flag))
                            log.write(file_ + " does not exist\n")
                            sys.exit()
                        else:
                            log.write(file_ + " exists\n")

    def check_parameter(self, paras, names, log):
        '''Check whether the parameters are assigned correctly'''
        for i in range(len(paras)):
            if paras[i] is None:
                print("Error: {0} can not be None. 
" "Please check it!".format(names[i])) log.write(file_ + " need to be assigned.\n") sys.exit() def check_execute_file(self, exe, log): detect = False exe_folder = "" log.write("Checking " + exe + "\n") if os.path.exists(exe): detect = True full_exe = os.path.realpath(exe) log.write(full_exe + " is found.\n") else: exes = [] for folder in os.environ["PATH"].split(":"): if os.path.isfile(os.path.join(folder, exe)): exe_folder = folder detect = True full_exe = exe if os.path.join(folder, exe) not in exes: exes.append(os.path.join(folder, exe)) log.write(os.path.join(folder, exe) + " is found.\n") if not detect: if os.path.exists(os.path.realpath(exe)): full_exe = os.path.realpath(exe) log.write(full_exe + " is found.\n") else: print("Error: {0} can't be found!".format(exe)) print("Please assign the correct path!") sys.exit() if (os.path.isfile(full_exe)) or ( os.path.isfile(os.path.join(exe_folder, exe))): log.write("The execute path is " + os.popen( "which " + exe).read()) return full_exe else: log.write(full_exe + " is not found.\n") print("Error: {0} is not a file!".format(exe)) sys.exit() def check_file(self, files, names, require, log): '''Check the path of file''' for i in range(len(files)): if require: if files[i] is None: print("Error: {0} can not be None. " "Please check it!".format(names[i])) log.write(names[i] + " is None.\n") sys.exit() else: if not os.path.isfile(files[i]): print("Error: {0} is not found. " "Please check it!".format(files[i])) log.write(files[i] + " does not exist.\n") sys.exit() else: if files[i] is not None: if not os.path.isfile(files[i]): print("Error: {0} is not found. " "Please check it!".format(files[i])) log.write(files[i] + " does not exist.\n") sys.exit() def create_project(self, version): """Create a new project.""" project_creator.create_root_folder(self._args.project_path) project_creator.create_subfolders(self._paths.required_folders("root")) project_creator.create_version_file( self._paths.version_path, version) sys.stdout.write("Created folder \"%s\" and required subfolders.\n" % ( self._args.project_path)) def get_input(self): """Download required files from website.""" print("Running get input files") log = open(os.path.join(self._paths.reference_input_folder, "log.txt"), "w") if self._args.ftp_path is None: print("Error: Please assign the path for downloading the data!") sys.exit() annotation_folder = self._paths.ref_annotation_folder fasta_folder = self._paths.ref_fasta_folder self.helper.check_make_folder(self._paths.ref_annotation_folder) self.helper.check_make_folder(self._paths.ref_fasta_folder) if self._args.ref_gff is True: log.write("Get gff files\n") get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "gff", log) get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "_genomic.gff.gz", log) if self._args.ref_fasta is True: log.write("Get fasta files\n") get_file(self._args.ftp_path, self._paths.ref_fasta_folder, "fna", log) get_file(self._args.ftp_path, self._paths.ref_fasta_folder, "_genomic.fna.gz", log) if self._args.ref_gbk is True: log.write("Get gbk files\n") get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "gbk", log) get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "gbff", log) get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "_genomic.gbff.gz", log) if self._args.ref_ptt is True: log.write("Get ptt files\n") get_file(self._args.ftp_path, self._paths.ref_annotation_folder, "ptt", log) if self._args.ref_rnt is True: log.write("Get rnt files\n") 
        get_file(self._args.ftp_path, self._paths.ref_annotation_folder,
                 "rnt", log)
        if self._args.convert_embl is True:
            annotation_files = os.listdir(self._paths.ref_annotation_folder)
            if len(annotation_files) == 0:
                sys.stdout.write("No gff files!\n")
            else:
                log.write("Running converter.py for converting gbk file "
                          "to embl format\n")
                Converter().convert_gbk2embl(self._paths.ref_annotation_folder)
        log.close()

    def get_target_fasta(self):
        """Get target fasta"""
        print("Running update genome fasta")
        project_creator.create_subfolders(
            self._paths.required_folders("get_target_fasta"))
        log = open(os.path.join(self._paths.target_folder, "log.txt"), "w")
        self.check_multi_files([self._args.related_fasta_files],
                               ["--related_fasta_files"], log)
        self.check_file([self._args.mutation_table], ["--mutation_table"],
                        True, log)
        target = TargetFasta(self._paths.tar_fasta_folder,
                             self._args.related_fasta_files)
        target.get_target_fasta(
            self._args.mutation_table, self._paths.tar_fasta_folder,
            self._args.related_fasta_files, self._args.updated_seq_name,
            self._paths.target_base_folder, log)
        log.close()

    def ratt(self):
        """Run RATT to transfer annotation file from reference to target."""
        print("Running annotation transfer")
        project_creator.create_subfolders(
            self._paths.required_folders("get_target_fasta"))
        project_creator.create_subfolders(
            self._paths.required_folders("annotation_transfer"))
        log = open(os.path.join(self._paths.ratt_folder, "log.txt"), "w")
        if self._args.transfer_type not in (
                "Strain", "Assembly", "Species", "Assembly.Repetitive",
                "Strain.Repetitive", "Species.Repetitive", "Multiple",
                "Free"):
            log.write("Incorrect --transfer_type. Please assign 'Strain', "
                      "'Assembly', 'Species', 'Assembly.Repetitive', "
                      "'Strain.Repetitive', 'Species.Repetitive', "
                      "'Multiple' or 'Free'\n")
            print("Error: please assign correct --transfer_type!")
            sys.exit()
        if (self._args.related_embl_files is None) and (
                self._args.related_gbk_files is None):
            print("Error: please assign proper embl or genbank files")
            log.write("--related_gbk_files and --related_embl_files "
                      "can not be both None.\n")
            sys.exit()
        elif (self._args.related_embl_files is not None) and (
                self._args.related_gbk_files is not None):
            log.write("Please choose --related_gbk_files as input or "
                      "--related_embl_files as input. Do not assign both.\n")
            print("Error: please choose embl as input or genbank as input")
            sys.exit()
        self._args.ratt_path = self.check_execute_file(self._args.ratt_path,
                                                       log)
        self.check_multi_files(
            [self._args.target_fasta_files, self._args.related_fasta_files],
            ["--target_fasta_files", "--related_fasta_files"], log)
        self.check_parameter([self._args.element, self._args.compare_pair],
                             ["--element", "--compare_pair"], log)
        args_ratt = self.args_container.container_ratt(
            self._args.ratt_path, self._args.element,
            self._args.transfer_type, self._args.related_embl_files,
            self._args.related_gbk_files, self._args.target_fasta_files,
            self._args.related_fasta_files, self._paths.ratt_folder,
            self._paths.tar_annotation_folder, self._args.compare_pair)
        ratt = RATT(args_ratt)
        ratt.annotation_transfer(args_ratt, log)
        log.close()

    def tsspredator(self):
        """Run TSSpredator for predicting TSS candidates."""
        if self._args.program.lower() == "tss":
            print("Running TSS prediction")
            project_creator.create_subfolders(
                self._paths.required_folders("TSS"))
            out_folder = self._paths.tsspredator_folder
            log = open(os.path.join(self._paths.tsspredator_folder,
                                    "log.txt"), "w")
            log.write("Running TSS prediction.\n")
        elif self._args.program.lower() == "ps":
            print("Running processing site prediction")
            out_folder = self._paths.processing_site_folder
            project_creator.create_subfolders(
                self._paths.required_folders("processing"))
            log = open(os.path.join(self._paths.processing_site_folder,
                                    "log.txt"), "w")
            log.write("Running PS prediction.\n")
        else:
            print("Error: No such program!")
            sys.exit()
        self.check_multi_files(
            [self._args.fasta_files, self._args.annotation_files,
             self._args.compare_overlap_gff, self._args.manual_files,
             self._args.compare_transcript_files],
            ["--fasta_files", "--annotation_files", "--compare_overlap_gff",
             "--manual_files", "--compare_transcript_files"], log)
        self.check_parameter([self._args.tex_notex_libs,
                              self._args.condition_names],
                             ["--tex_notex_libs", "--condition_names"], log)
        self._args.tsspredator_path = self.check_execute_file(
            self._args.tsspredator_path, log)
        args_tss = self.args_container.container_tsspredator(
            self._args.tsspredator_path, self._args.program,
            self._args.fasta_files, self._args.annotation_files,
            self._args.tex_notex_libs, self._args.condition_names,
            self._args.output_id, self._args.auto_load_optimized_parameters,
            self._args.genome_order, self._args.height,
            self._args.height_reduction, self._args.factor,
            self._args.factor_reduction, self._args.base_height,
            self._args.enrichment_factor, self._args.processing_factor,
            self._args.replicate_tex, out_folder, self._args.validate_gene,
            self._args.manual_files, self._args.curated_sequence_length,
            self._args.compare_transcript_files, self._args.tolerance,
            self._args.utr_length, self._args.cluster,
            self._args.re_check_orphan, self._args.remove_overlap_feature,
            self._args.compare_overlap_gff, self._args.remove_low_expression)
        tsspredator = TSSpredator(args_tss)
        tsspredator.run_tsspredator(args_tss, log)
        log.close()

    def optimize(self):
        """Optimize TSSpredator"""
        if self._args.program.lower() == "tss":
            print("Running optimization of TSS prediction")
            project_creator.create_subfolders(
                self._paths.required_folders("TSS"))
            out_folder = self._paths.tsspredator_folder
            if "optimized_TSSpredator" not in os.listdir(out_folder):
                os.mkdir(os.path.join(out_folder, "optimized_TSSpredator"))
            log = open(os.path.join(out_folder, "optimized_TSSpredator",
                                    "log.txt"), "w")
            log.write("Running optimization of TSS prediction\n")
        elif self._args.program.lower() == "ps":
            out_folder = self._paths.processing_site_folder
            project_creator.create_subfolders(
                self._paths.required_folders("processing"))
            if "optimized_TSSpredator" not in os.listdir(out_folder):
                os.mkdir(os.path.join(out_folder, "optimized_TSSpredator"))
            log = open(os.path.join(out_folder, "optimized_TSSpredator",
                                    "log.txt"), "w")
            log.write("Running optimization of PS prediction\n")
            print("Running optimization of processing site prediction")
        else:
            print("Error: No such program!")
            sys.exit()
        self.check_multi_files(
            [self._args.fasta_files, self._args.annotation_files,
             self._args.manual_files],
            ["--fasta_files", "--annotation_files", "--manual_files"], log)
        self._args.tsspredator_path = self.check_execute_file(
            self._args.tsspredator_path, log)
        self.check_parameter([self._args.tex_notex_libs,
                              self._args.condition_names],
                             ["--tex_notex_libs", "--condition_names"], log)
        args_ops = self.args_container.container_optimize(
            self._args.tsspredator_path, self._args.fasta_files,
            self._args.annotation_files, self._args.manual_files,
            out_folder, self._args.max_height,
            self._args.max_height_reduction, self._args.max_factor,
            self._args.max_factor_reduction, self._args.max_base_height,
            self._args.max_enrichment_factor,
            self._args.max_processing_factor, self._args.utr_length,
            self._args.tex_notex_libs, self._args.condition_names,
            self._args.output_id, self._args.cluster,
            self._args.curated_sequence_length, self._args.parallels,
            self._args.program, self._args.replicate_tex, self._args.steps)
        optimize_tss(args_ops, log)
        log.close()

    def color(self):
        """Color the screenshots"""
        print("Running png files coloring")
        if not os.path.exists(os.path.join(self._args.screenshot_folder,
                                           "screenshots")):
            print("The folder -- screenshots needs to be found in "
                  "{0}.".format(self._args.screenshot_folder))
            sys.exit()
        log = open(os.path.join(self._args.screenshot_folder,
                                "screenshots", "color_log.txt"), "w")
        self.check_parameter([self._args.track_number], ["--track_number"],
                             log)
        self.check_folder([self._args.screenshot_folder],
                          ["--screenshot_folder"], log)
        self._args.imagemagick_covert_path = self.check_execute_file(
            self._args.imagemagick_covert_path, log)
        color = ColorPNG()
        color.generate_color_png(
            self._args.track_number, self._args.screenshot_folder,
            self._args.imagemagick_covert_path, log)
        log.close()

    def terminator(self):
        """Run TransTermHP and the gene-converged method for detecting
        terminators"""
        print("Running terminator prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("terminator"))
        log = open(os.path.join(self._paths.transterm_folder, "log.txt"),
                   "w")
        if self._args.transterm_path is None:
            print("Please assign the path of transterm in TransTermHP.")
        self.check_multi_files(
            [self._args.fasta_files, self._args.annotation_files,
             self._args.transcript_files, self._args.srna_files],
            ["--fasta_files", "--annotation_files", "--transcript_files",
             "--srna_files"], log)
        for prop in ("transterm_path", "expterm_path", "rnafold_path"):
            setattr(self._args, prop,
                    self.check_execute_file(getattr(self._args, prop), log))
        args_term = self.args_container.container_terminator(
            self._args.transterm_path, self._args.expterm_path,
            self._args.rnafold_path, self._paths.transterm_folder,
            self._args.fasta_files, self._args.annotation_files,
            self._args.transcript_files, self._args.srna_files,
            self._args.decrease, self._args.highest_coverage,
            self._args.tolerance_detect_coverage,
            self._args.tolerance_within_transcript,
            self._args.tolerance_downstream_transcript,
            self._args.tolerance_within_gene,
            self._args.tolerance_downstream_gene,
            self._paths.transtermhp_folder, self._args.tex_notex_libs,
            self._args.frag_libs, self._args.tex_notex,
            self._args.replicate_tex, self._args.replicate_frag,
            self._args.min_loop_length, self._args.max_loop_length,
            self._args.min_stem_length, self._args.max_stem_length,
            self._args.min_u_tail, self._args.miss_rate,
            self._args.mutation_u_tail, self._args.keep_multi_term,
            self._args.window_size, self._args.window_shift)
        terminator = Terminator(args_term)
        terminator.run_terminator(args_term, log)
        log.close()

    def transcript(self):
        """Run transcript detection"""
        project_creator.create_subfolders(
            self._paths.required_folders("transcript"))
        log = open(os.path.join(self._paths.transcript_output_folder,
                                "log.txt"), "w")
        print("Running transcript detection")
        self.check_multi_files(
            [self._args.annotation_files, self._args.tss_files,
             self._args.terminator_files],
            ["--annotation_files", "--tss_files", "--terminator_files"],
            log)
        args_tran = self.args_container.container_transcript(
            self._args.tex_notex, self._args.modify_transcript,
            self._args.length, self._args.annotation_files,
            self._args.height, self._args.width, self._args.tolerance,
            self._args.tolerance_coverage, self._args.replicate_tex,
            self._args.replicate_frag,
            self._paths.transcript_output_folder, self._args.tss_files,
            self._args.tss_tolerance, self._args.tex_notex_libs,
            self._args.frag_libs, self._args.compare_feature_genome,
            self._args.terminator_files, self._args.terminator_tolerance,
            self._args.max_length_distribution)
        transcript = TranscriptDetection(args_tran)
        transcript.run_transcript(args_tran, log)

    def utr_detection(self):
        """Run UTR detection."""
        print("Running UTR detection")
        project_creator.create_subfolders(self._paths.required_folders("utr"))
        log = open(os.path.join(self._paths.utr_folder, "log.txt"), "w")
        self.check_multi_files(
            [self._args.annotation_files, self._args.terminator_files,
             self._args.transcript_files, self._args.tss_files],
            ["--annotation_files", "--terminator_files",
             "--transcript_files", "--tss_files"], log)
        args_utr = self.args_container.container_utr(
            self._args.tss_files, self._args.annotation_files,
            self._args.transcript_files, self._args.terminator_files,
            self._args.terminator_tolerance, self._paths.utr_folder,
            self._args.tss_source, self._args.base_5utr,
            self._args.utr_length, self._args.base_3utr)
        utr = UTRDetection(args_utr)
        utr.run_utr_detection(args_utr, log)

    def _check_filter_input(self, files, info, filters, log):
        if files is None:
            print("Error: The {0} has to be provided "
                  "if \"{1}\" is in --filter_info!".format(info, filters))
            log.write("The {0} has to be provided "
                      "if \"{1}\" is in --filter_info!\n".format(info,
                                                                 filters))
            sys.exit()

    def _check_database(self, database, flag, info, log):
        wrong = False
        if database is None:
            wrong = True
        elif not os.path.isfile(database):
            if (os.path.isfile(database + ".fa")) or (
                    os.path.isfile(database + ".fna")) or (
                    os.path.isfile(database + ".fasta")):
                wrong = False
            else:
                wrong = True
        if wrong:
            print("Error: {0} is required if {1} is in --filter_info. "
                  "But the assignment of {0} is empty or wrong. "
                  "Please check the {0} or remove {1} from "
                  "--filter_info!".format(flag, info))
            log.write("{0} is required if {1} is in --filter_info. "
                      "But the assignment of {0} is empty or wrong. "
                      "Please check the {0} or remove {1} from "
                      "--filter_info!\n".format(flag, info))
            sys.exit()

    def srna_detection(self):
        """sRNA detection."""
        print("Running sRNA prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("srna"))
        log = open(os.path.join(self._paths.srna_folder, "log.txt"), "w")
        self.check_multi_files(
            [self._args.annotation_files, self._args.transcript_files,
             self._args.fasta_files, self._args.sorf_files,
             self._args.terminator_files, self._args.promoter_tables,
             self._args.processing_site_files],
            ["--annotation_files", "--transcript_files", "--fasta_files",
             "--sorf_files", "--terminator_files", "--promoter_tables",
             "--processing_site_files"], log)
        for info in self._args.filter_info:
            if "sec_str" == info:
                if not self._args.compute_sec_structures:
                    log.write("If you want to use the secondary structure "
                              "to filter the false positives, "
                              "--compute_sec_structures needs to be "
                              "switched on.\n")
                    print("Error: --compute_sec_structures is not switched "
                          "on, but sec_str is still in --filter_info.")
                    sys.exit()
                self._check_filter_input(
                    self._args.fasta_files, "fasta file", "sec_str", log)
                for prop in ("rnafold_path", "relplot_path",
                             "mountain_path"):
                    setattr(self._args, prop, self.check_execute_file(
                        getattr(self._args, prop), log))
            elif ("blast_nr" == info) or ("blast_srna" == info):
                for prop in ("blastn_path", "blastx_path",
                             "makeblastdb_path"):
                    setattr(self._args, prop, self.check_execute_file(
                        getattr(self._args, prop), log))
                if "blast_nr" == info:
                    self._check_database(self._args.nr_database_path,
                                         "--nr_database_path", "blast_nr",
                                         log)
                if "blast_srna" == info:
                    self._check_database(self._args.srna_database_path,
                                         "--srna_database_path",
                                         "blast_srna", log)
            elif "sorf" == info:
                self._check_filter_input(
                    self._args.sorf_files, "sORF", "sorf", log)
            elif "term" == info:
                self._check_filter_input(self._args.terminator_files,
                                         "terminator", "term", log)
            elif "promoter" == info:
                self._check_filter_input(self._args.promoter_tables,
                                         "Promoter", "promoter", log)
            elif "tss" == info:
                self._check_filter_input(self._args.tss_files, "TSS",
                                         "tss", log)
            else:
                if "none" != info.lower():
                    print("Error: Please check the --filter_info, "
                          "an invalid value was assigned!")
                    log.write("An invalid value was assigned to "
                              "--filter_info.\n")
                    sys.exit()
        log.write("--filter_info and databases are assigned correctly.\n")
        if self._args.utr_derived_srna:
            if self._args.tss_files is None:
                print("Error: The TSS has to be provided "
                      "if you want to compute UTR-derived sRNA!")
                log.write("The TSS has to be provided "
                          "if you want to compute UTR-derived sRNA!\n")
                sys.exit()
        if self._args.search_poly_u != 0:
            if self._args.fasta_files is None:
                print("Error: The fasta files have to be provided "
                      "if you want to extend the 3'end of sRNA by "
                      "searching for a poly U tail!")
                log.write("The fasta files have to be provided "
                          "if you want to extend the 3'end of sRNA by "
                          "searching for a poly U tail!\n")
                sys.exit()
        if (self._args.nr_format) and (self._args.nr_database_path is None):
            print("Error: The function for formatting the nr database was "
                  "switched on, but no nr database was assigned.")
            sys.exit()
        if (self._args.srna_format) and (
                self._args.srna_database_path is None):
            print("Error: The function for formatting the srna database "
                  "was switched on, but no srna database was assigned.")
            sys.exit()
        args_srna = self.args_container.container_srna(
            self._args.rnafold_path, self._args.relplot_path,
            self._args.mountain_path, self._args.blastn_path,
            self._args.blastx_path, self._args.makeblastdb_path,
            self._paths.srna_folder, self._args.utr_derived_srna,
            self._args.annotation_files, self._args.tss_files,
            self._args.transcript_files,
            self._args.tss_intergenic_antisense_tolerance,
            self._args.tss_5utr_tolerance, self._args.tss_3utr_tolerance,
            self._args.tss_intercds_tolerance, self._args.filter_info,
            self._args.processing_site_files, self._args.fasta_files,
            self._args.mountain_plot, self._args.nr_format,
            self._args.srna_format, self._args.srna_database_path,
            self._args.nr_database_path, self._args.cutoff_energy,
            self._args.parallel_blast, self._args.blast_score_srna,
            self._args.blast_score_nr,
            self._args.min_intergenic_tex_coverage,
            self._args.min_intergenic_notex_coverage,
            self._args.min_intergenic_fragmented_coverage,
            self._args.min_complete_5utr_transcript_coverage,
            self._args.min_antisense_tex_coverage,
            self._args.min_antisense_notex_coverage,
            self._args.min_antisense_fragmented_coverage,
            self._args.min_utr_tex_coverage,
            self._args.min_utr_notex_coverage,
            self._args.min_utr_fragmented_coverage,
            self._args.max_length, self._args.min_length,
            self._args.tex_notex_libs, self._args.frag_libs,
            self._args.replicate_tex, self._args.replicate_frag,
            self._args.tex_notex, self._args.blast_e_nr,
            self._args.blast_e_srna, self._args.detect_srna_in_cds,
            self._args.decrease_intergenic_antisense,
            self._args.decrease_utr,
            self._args.tolerance_intergenic_antisense,
            self._args.tolerance_utr, self._args.cutoff_nr_hit,
            self._args.sorf_files, self._args.overlap_percent_cds,
            self._args.terminator_files,
            self._args.terminator_tolerance_in_srna,
            self._args.terminator_tolerance_out_srna,
            self._args.ignore_hypothetical_protein, self._args.tss_source,
            self._args.min_all_utr_coverage, self._args.promoter_tables,
            self._args.ranking_time_promoter, self._args.promoter_names,
            self._args.compute_sec_structures, self._args.search_poly_u,
            self._args.min_u_poly_u, self._args.mutation_poly_u,
            self._args.exclude_srna_in_annotation_file)
        srna = sRNADetection(args_srna)
        srna.run_srna_detection(args_srna, log)

    def sorf_detection(self):
        """sORF detection."""
        print("Running sORF prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("sorf"))
        log = open(os.path.join(self._paths.sorf_folder, "log.txt"), "w")
        self.check_multi_files(
            [self._args.transcript_files, self._args.annotation_files,
             self._args.fasta_files, self._args.srna_files,
             self._args.tss_files],
            ["--transcript_files", "--annotation_files", "--fasta_files",
             "--srna_files", "--tss_files"], log)
        args_sorf = self.args_container.container_sorf(
            self._paths.sorf_folder, self._args.utr_derived_sorf,
            self._args.transcript_files, self._args.annotation_files,
            self._args.tss_files, self._args.utr_length,
            self._args.min_length, self._args.max_length,
            self._args.cutoff_intergenic_coverage,
            self._args.cutoff_antisense_coverage,
            self._args.cutoff_5utr_coverage,
            self._args.cutoff_3utr_coverage,
            self._args.cutoff_intercds_coverage, self._args.fasta_files,
            self._args.tex_notex_libs, self._args.frag_libs,
            self._args.tex_notex, self._args.replicate_tex,
            self._args.replicate_frag, self._args.srna_files,
            self._args.start_codon, self._args.stop_codon,
            self._args.cutoff_base_coverage, self._args.rbs_seq,
            self._args.tolerance_rbs, self._args.rbs_not_after_tss,
            self._args.print_all_combination, self._args.best_no_srna,
            self._args.best_no_tss, self._args.ignore_hypothetical_protein,
            self._args.min_rbs_distance, self._args.max_rbs_distance,
            self._args.tolerance_3end, self._args.tolerance_5end,
            self._args.contain_multi_stop)
        sorf = sORFDetection(args_sorf)
        sorf.run_sorf_detection(args_sorf, log)

    def meme(self):
        """Promoter detection"""
        print("Running promoter detection")
        project_creator.create_subfolders(
            self._paths.required_folders("promoter"))
        log = open(os.path.join(self._paths.promoter_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.tss_files, self._args.fasta_files],
            ["--tss_files", "--fasta_files"], log)
        if not self._args.tss_source:
            self.check_multi_files([self._args.annotation_files],
                                   ["--annotation_files"], log)
        if (self._args.program == "both") or (
                self._args.program == "meme"):
            self._args.meme_path = self.check_execute_file(
                self._args.meme_path, log)
        if (self._args.program == "both") or (
                self._args.program == "glam2"):
            self._args.glam2_path = self.check_execute_file(
                self._args.glam2_path, log)
        args_pro = self.args_container.container_promoter(
            self._args.meme_path, self._args.glam2_path,
            self._paths.promoter_output_folder, self._args.tex_libs,
            self._args.tss_files, self._args.fasta_files,
            self._args.num_motifs, self._args.nt_before_tss,
            self._args.motif_width, self._args.tss_source,
            self._args.annotation_files, self._args.end_run,
            self._args.combine_all, self._args.e_value,
            self._args.parallels, self._args.program,
            self._args.use_tss_type)
        meme = MEME(args_pro)
        meme.run_meme(args_pro, log)

    def operon(self):
        """Operon detection"""
        print("Running operon detection")
        project_creator.create_subfolders(
            self._paths.required_folders("operon"))
        log = open(os.path.join(self._paths.operon_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.tss_files, self._args.annotation_files,
             self._args.transcript_files, self._args.terminator_files],
            ["--tss_files", "--annotation_files", "--transcript_files",
             "--terminator_files"], log)
        args_op = self.args_container.container_operon(
            self._args.tss_files, self._args.annotation_files,
            self._args.transcript_files, self._args.terminator_files,
            self._args.tss_tolerance, self._args.terminator_tolerance,
            self._args.min_length, self._paths.operon_output_folder,
            self._paths.operon_statistics_folder)
        operon = OperonDetection(args_op)
        operon.run_operon(args_op, log)

    def circrna(self):
        """circRNA detection"""
        print("Running circular RNA prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("circrna"))
        log = open(os.path.join(self._paths.circrna_output_folder,
                                "log.txt"), "w")
        if self._args.read_files:
            self._args.segemehl_path = self.check_execute_file(
                self._args.segemehl_path, log)
        for prop in ("testrealign_path", "samtools_path"):
            setattr(self._args, prop,
                    self.check_execute_file(getattr(self._args, prop), log))
        self.check_multi_files(
            [self._args.fasta_files, self._args.annotation_files],
            ["--fasta_files", "--annotation_files"], log)
        args_circ = self.args_container.container_circrna(
            self._args.parallels, self._args.fasta_files,
            self._args.annotation_files, self._args.bam_files,
            self._args.read_files, self._paths.circrna_stat_folder,
            self._args.support_reads, self._args.segemehl_path,
            self._args.testrealign_path, self._args.samtools_path,
            self._args.start_ratio, self._args.end_ratio,
            self._args.ignore_hypothetical_protein,
            self._paths.circrna_output_folder)
        circ = CircRNADetection(args_circ)
        circ.run_circrna(args_circ, log)

    def goterm(self):
        """GO term discovery"""
        print("Running GO term mapping")
        project_creator.create_subfolders(
            self._paths.required_folders("go_term"))
        log = open(os.path.join(self._paths.goterm_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.annotation_files, self._args.transcript_files],
            ["--annotation_files", "--transcript_files"], log)
        self.check_file([self._args.uniprot_id, self._args.go_obo,
                         self._args.goslim_obo],
                        ["--uniprot_id", "--go_obo", "--goslim_obo"],
                        True, log)
        args_go = self.args_container.container_goterm(
            self._args.annotation_files,
            self._paths.goterm_output_folder, self._args.uniprot_id,
            self._args.go_obo, self._args.goslim_obo,
            self._args.transcript_files)
        goterm = GoTermFinding(args_go)
        goterm.run_go_term(args_go, log)

    def srna_target(self):
        """sRNA target prediction"""
        print("Running sRNA target prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("srna_target"))
        log = open(os.path.join(self._paths.starget_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.fasta_files, self._args.srna_files,
             self._args.annotation_files],
            ["--fasta_files", "--srna_files", "--annotation_files"], log)
        if "RNAup" in self._args.program:
            self._args.rnaup_path = self.check_execute_file(
                self._args.rnaup_path, log)
        if "RNAplex" in self._args.program:
            for prop in ("rnaplfold_path", "rnaplex_path"):
                setattr(self._args, prop, self.check_execute_file(
                    getattr(self._args, prop), log))
        if "IntaRNA" in self._args.program:
            self._args.intarna_path = self.check_execute_file(
                self._args.intarna_path, log)
            if self._args.mode_intarna is None:
                print("Error: --mode_IntaRNA needs to be assigned!")
                sys.exit()
        args_tar = self.args_container.container_srna_target(
            self._args.rnaplfold_path, self._args.rnaplex_path,
            self._args.rnaup_path, self._args.intarna_path,
            self._args.annotation_files, self._args.fasta_files,
            self._args.srna_files, self._args.query_srnas,
            self._args.program, self._args.interaction_length,
            self._args.window_size_target_rnaplex,
            self._args.span_target_rnaplex,
            self._args.window_size_srna_rnaplfold,
            self._args.span_srna_rnaplfold,
            self._args.unstructured_region_rnaplex_target,
            self._args.unstructured_region_rnaplex_srna,
            self._args.unstructured_region_rnaup,
            self._args.energy_threshold_rnaplex,
            self._args.duplex_distance_rnaplex, self._args.top,
            self._paths.starget_output_folder,
            self._args.parallels_rnaplex, self._args.parallels_rnaup,
            self._args.parallels_intarna, self._args.continue_rnaup,
            self._args.slide_window_size_srna_intarna,
            self._args.max_loop_length_srna_intarna,
            self._args.slide_window_size_target_intarna,
            self._args.max_loop_length_target_intarna,
            self._args.mode_intarna, self._args.potential_target_start,
            self._args.potential_target_end, self._args.target_feature)
        srnatarget = sRNATargetPrediction(args_tar)
        srnatarget.run_srna_target_prediction(args_tar, log)

    def snp(self):
        """SNP transcript detection"""
        print("Running SNP/mutations calling")
        project_creator.create_subfolders(
            self._paths.required_folders("snp"))
        log = open(os.path.join(self._paths.snp_output_folder, "log.txt"),
                   "w")
        self.check_multi_files(
            [self._args.fasta_files], ["--fasta_files"], log)
        if (self._args.bam_type != "related_genome") and (
                self._args.bam_type != "reference_genome"):
            print("Error: Please assign \"related_genome\" or "
                  "\"reference_genome\" to --bam_type!")
            sys.exit()
        if (self._args.ploidy != "haploid") and (
                self._args.ploidy != "diploid"):
            print("Error: Please assign \"haploid\" or "
                  "\"diploid\" to --chromosome_type!")
            sys.exit()
        if (self._args.caller != "c") and (self._args.caller != "m"):
            print("Error: Please assign \"c\" or \"m\" to --caller!")
            sys.exit()
        for prop in ("bcftools_path", "samtools_path"):
            setattr(self._args, prop,
                    self.check_execute_file(getattr(self._args, prop), log))
        args_snp = self.args_container.container_snp(
            self._args.samtools_path, self._args.bcftools_path,
            self._args.bam_type, self._args.program,
            self._args.fasta_files, self._args.bam_files,
            self._args.quality, self._args.read_depth_range,
            self._paths.snp_output_folder, self._args.indel_fraction,
            self._args.ploidy, self._args.rg_tag, self._args.caller,
            self._args.filter_tag_info, self._args.dp4_cutoff)
        snp = SNPCalling(args_snp)
        snp.run_snp_calling(args_snp, log)

    def ppi(self):
        """PPI network retrieval"""
        project_creator.create_subfolders(
            self._paths.required_folders("ppi_network"))
        log = open(os.path.join(self._paths.ppi_output_folder, "log.txt"),
                   "w")
        print("Running protein-protein interaction networks prediction")
        self.check_multi_files([self._args.annotation_files],
                               ["--annotation_files"], log)
        self.check_parameter([self._args.query_strains,
                              self._args.species_string],
                             ["--query_strains", "--species_string"], log)
        args_ppi = self.args_container.container_ppi(
            self._args.annotation_files, self._args.query_strains,
            self._args.without_strain_pubmed, self._args.species_string,
            self._args.score, self._paths.ppi_output_folder,
            self._args.node_size, self._args.query)
        ppi = PPINetwork(self._paths.ppi_output_folder)
        ppi.retrieve_ppi_network(args_ppi, log)

    def sublocal(self):
        """Subcellular localization prediction"""
        print("Running subcellular localization prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("subcellular_localization"))
        log = open(os.path.join(self._paths.sublocal_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.annotation_files, self._args.fasta_files,
             self._args.transcript_files],
            ["--annotation_files", "--fasta_files", "--transcript_files"],
            log)
        if (self._args.bacteria_type != "positive") and (
                self._args.bacteria_type != "negative"):
            print("Error: Please assign \"positive\" or "
                  "\"negative\" to --bacteria_type!")
            sys.exit()
        self._args.psortb_path = self.check_execute_file(
            self._args.psortb_path, log)
        args_sub = self.args_container.container_sublocal(
            self._args.psortb_path, self._args.annotation_files,
            self._args.fasta_files, self._args.bacteria_type,
            self._args.difference_multi,
            self._paths.sublocal_output_folder,
            self._args.transcript_files)
        sublocal = SubLocal(args_sub)
        sublocal.run_sub_local(args_sub, log)

    def ribos(self):
        """Riboswitch and RNA thermometer prediction"""
        print("Running riboswitch and RNA thermometer prediction")
        log_t = None
        log_r = None
        if self._args.program == "both":
            project_creator.create_subfolders(
                self._paths.required_folders("riboswitch"))
            project_creator.create_subfolders(
                self._paths.required_folders("thermometer"))
            log_r = open(os.path.join(self._paths.ribos_output_folder,
                                      "log.txt"), "w")
            log_t = open(os.path.join(self._paths.thermo_output_folder,
                                      "log.txt"), "w")
            self.check_file([self._args.riboswitch_id_file,
                             self._args.rfam_path],
                            ["--riboswitch_id_file", "--rfam_path"],
                            True, log_r)
            self.check_file([self._args.rna_thermometer_id_file,
                             self._args.rfam_path],
                            ["--rna_thermometer_id_file", "--rfam_path"],
                            True, log_t)
            ribos_path = self._paths.ribos_output_folder
            thermo_path = self._paths.thermo_output_folder
        elif self._args.program == "thermometer":
            project_creator.create_subfolders(
                self._paths.required_folders("thermometer"))
            log_t = open(os.path.join(self._paths.thermo_output_folder,
                                      "log.txt"), "w")
            self.check_file([self._args.rna_thermometer_id_file,
                             self._args.rfam_path],
                            ["--rna_thermometer_id_file", "--rfam_path"],
                            True, log_t)
            ribos_path = None
            thermo_path = self._paths.thermo_output_folder
        elif self._args.program == "riboswitch":
            project_creator.create_subfolders(
                self._paths.required_folders("riboswitch"))
            log_r = open(os.path.join(self._paths.ribos_output_folder,
                                      "log.txt"), "w")
            self.check_file([self._args.riboswitch_id_file,
                             self._args.rfam_path],
                            ["--riboswitch_id_file", "--rfam_path"],
                            True, log_r)
            ribos_path = self._paths.ribos_output_folder
            thermo_path = None
        else:
            print("Error: Please assign \"thermometer\", \"riboswitch\" "
                  "or \"both\" in --program!")
            sys.exit()
        for log in (log_t, log_r):
            if log is not None:
                self.check_multi_files(
                    [self._args.annotation_files, self._args.fasta_files,
                     self._args.tss_files, self._args.transcript_files],
                    ["--annotation_files", "--fasta_files", "--tss_files",
                     "--transcript_files"], log)
                self._args.cmscan_path = self.check_execute_file(
                    self._args.cmscan_path, log)
                self._args.cmpress_path = self.check_execute_file(
                    self._args.cmpress_path, log)
        args_ribo = self.args_container.container_ribos(
            self._args.program, self._args.rna_thermometer_id_file,
            self._args.cmscan_path, self._args.cmpress_path,
            self._args.riboswitch_id_file, self._args.annotation_files,
            self._args.fasta_files, self._args.tss_files,
            self._args.transcript_files, self._args.rfam_path,
            ribos_path, thermo_path, self._args.cutoff,
            self._args.output_all, self._paths.database_folder,
            self._args.tolerance, self._args.without_rbs,
            self._args.rbs_seq, self._args.tolerance_rbs,
            self._args.utr_length)
        ribos = Ribos(args_ribo)
        ribos.run_ribos(args_ribo, log_t, log_r)

    def crispr(self):
        """CRISPR prediction"""
        print("Running CRISPR prediction")
        project_creator.create_subfolders(
            self._paths.required_folders("crispr"))
        log = open(os.path.join(self._paths.crispr_output_folder,
                                "log.txt"), "w")
        self.check_multi_files(
            [self._args.fasta_files, self._args.annotation_files],
            ["--fasta_files", "--annotation_files"], log)
        self._args.crt_path = self.check_execute_file(self._args.crt_path,
                                                      log)
        args_cris = self.args_container.container_cris(
            self._args.fasta_files, self._args.annotation_files,
            self._args.crt_path, self._args.window_size,
            self._args.min_number_repeats, self._args.min_length_repeat,
            self._args.Max_length_repeat, self._args.min_length_spacer,
            self._args.Max_length_spacer,
            self._paths.crispr_output_folder,
            self._args.ignore_hypothetical_protein)
        cris = Crispr(args_cris)
        cris.run_crispr(args_cris, log)

    def merge(self):
        """Merge all features"""
        print("Merging all features to one gff file")
        merge_folder = os.path.join(self._paths.output_folder,
                                    "merge_all_features")
        self.helper.check_make_folder(merge_folder)
        log = open(os.path.join(merge_folder, "log.txt"), "w")
        other_features = self._args.other_features_files
        self.check_multi_files([[self._args.transcript_file],
                                other_features],
                               ["--transcript_file",
                                "--other_features_files"], log)
        self.check_parameter([self._args.output_prefix],
                             ["--output_prefix"], log)
        run_merge(merge_folder, self._args.transcript_file,
                  self._args.other_features_files,
                  self._args.terminator_tolerance,
                  self._args.tss_tolerance,
                  os.path.join(merge_folder, self._args.output_prefix),
                  log)
        if self._args.source_for_overlapping is not None:
            deal_overlap(merge_folder, self._args.source_for_overlapping)

    def screen(self):
        """Generate screenshots"""
        print("Running screenshot generation")
        out_folder = os.path.join(self._args.output_folder, "screenshots")
        if os.path.exists(out_folder):
            print("Error: The {0} already exists!".format(out_folder))
            sys.exit()
        else:
            os.mkdir(out_folder)
        log = open(os.path.join(out_folder, "log.txt"), "w")
        self.check_file([self._args.main_gff, self._args.fasta_file],
                        ["--main_gff", "--fasta_file"], True, log)
        if self._args.side_gffs is not None:
            for gff in self._args.side_gffs:
                gff = gff.strip()
                if not os.path.isfile(gff):
                    print("Error: The --side_gffs do not exist!")
                    sys.exit()
        if self._args.output_folder is None:
            log.write("No --output_folder can be found.\n")
            print("Error: Please assign --output_folder!")
            sys.exit()
        if (self._args.present != "expand") and (
                self._args.present != "collapse") and (
                self._args.present != "squish"):
            log.write("Please assign \"expand\", "
                      "\"collapse\" or \"squish\" to --present.\n")
            print("Error: Please assign \"expand\", "
                  "\"collapse\" or \"squish\" to --present!")
            sys.exit()
        args_sc = self.args_container.container_screen(
            self._args.main_gff, self._args.side_gffs,
            self._args.fasta_file, self._args.height,
            self._args.tex_notex_libs, self._args.frag_libs,
            self._args.present, self._args.output_folder)
        screen = Screen(args_sc, out_folder)
        screen.screenshot(args_sc, log)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/controller.py
controller.py
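A minimal sketch (not part of ANNOgesic; run_subcommand, the folder key "example", paths.example_folder, args.input_files, container_example, and module_cls.run are hypothetical names) of the control-flow template that every Controller subcommand above follows: create the output subfolders, open a log, validate the inputs, pack the CLI arguments into a container object, and hand the container plus log to the analysis module.

import os
import sys


def run_subcommand(paths, args, args_container, project_creator, module_cls):
    # 1. Create the output subfolders for this subcommand.
    project_creator.create_subfolders(paths.required_folders("example"))
    # 2. Open a per-subcommand log file inside the output folder.
    log = open(os.path.join(paths.example_folder, "log.txt"), "w")
    # 3. Validate the required inputs before doing any work.
    if args.input_files is None:
        log.write("Missing --input_files.\n")
        print("Error: please assign --input_files!")
        sys.exit()
    # 4. Pack the parsed CLI arguments into a container object, then
    #    run the analysis module with the container and the log handle.
    container = args_container.container_example(args.input_files)
    module_cls(container).run(container, log)
    log.close()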
import sys
import os
import gc
import numpy as np
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.coverage_detection import coverage_comparison
from annogesiclib.coverage_detection import replicate_comparison, get_repmatch
from annogesiclib.lib_reader import read_wig, read_libs
from annogesiclib.gen_TSS_type import compare_tss_cds, fix_primary_type
from annogesiclib.helper import Helper
from annogesiclib.args_container import ArgsContainer


def get_differential_cover(num, checks, cover_sets, poss, cover,
                           args_srna, cover_pos):
    go_out = False
    if checks["detect_diff"]:
        if (num == args_srna.fuzzy_inter) or (
                cover_sets["diff"] == 0) or (
                (cover > cover_sets["diff"]) and (
                cover / cover_sets["diff"]) > (
                1 + args_srna.decrease_inter)):
            poss["stop_point"] = cover_pos
            go_out = True
        elif (cover <= cover_sets["diff"]):
            if (cover / cover_sets["diff"]) <= (
                    args_srna.decrease_inter / 2):
                num += 1
            else:
                num = 0
            cover_sets["diff"] = cover
            cover_sets["low"] = cover
        elif (cover > cover_sets["diff"]) and (
                (cover / cover_sets["diff"]) <= (
                1 + args_srna.decrease_inter)):
            num += 1
    if (not checks["first"]) and (cover_sets["high"] > 0):
        if ((cover_sets["low"] / cover_sets["high"]) <
                args_srna.decrease_inter) and (
                cover_sets["low"] > -1):
            checks["detect_diff"] = True
            cover_sets["diff"] = cover
    return go_out


def check_coverage_pos(start, end, cover, cutoff_coverage, cover_sets,
                       checks, poss, strand, cover_pos):
    go_out = False
    if (start <= cover_pos) and (end >= cover_pos):
        if cover > cutoff_coverage:
            cover_sets["total"] = cover_sets["total"] + cover
            checks["first"] = coverage_comparison(
                cover, cover_sets, poss, checks["first"], strand, cover_pos)
        else:
            cover_sets["total"] = cover_sets["total"] + cover
            checks["first"] = coverage_comparison(
                cover, cover_sets, poss, checks["first"], strand, cover_pos)
    else:
        if (strand == "+") and (cover_pos > end):
            poss["stop_point"] = cover_pos
            go_out = True
        elif (strand == "-") and (cover_pos < start):
            poss["stop_point"] = cover_pos
            go_out = True
    return go_out


def check_start_and_end(start, end, covers):
    if (start - 2) < 0:
        c_start = 0
    else:
        c_start = start - 2
    if (end + 2) > len(covers):
        c_end = len(covers)
    else:
        c_end = end + 2
    return c_start, c_end


def get_best(wigs, strain, strand, start, end, type_, args_srna, cutoff):
    cover_sets = {"low": -1, "high": -1, "total": 0, "diff": 0}
    poss = {"high": 0, "low": 0, "stop_point": -1}
    srna_covers = {}
    for wig_strain, conds in wigs.items():
        if wig_strain == strain:
            for cond, tracks in conds.items():
                srna_covers[cond] = []
                for lib_name, covers in tracks.items():
                    track = lib_name.split("|")[-3]
                    lib_strand = lib_name.split("|")[-2]
                    lib_type = lib_name.split("|")[-1]
                    cover_sets["total"] = 0
                    cover_sets["diff"] = 0
                    checks = {"first": True, "detect_diff": False}
                    num = 0
                    c_start, c_end = check_start_and_end(start, end, covers)
                    covers = covers[c_start: c_end]
                    if strand == "-":
                        covers = covers[::-1]
                    go_out = False
                    pos = 0
                    for cover in covers:
                        if strand == "+":
                            cover_pos = c_start + pos
                        else:
                            cover_pos = c_end - pos
                        if lib_strand == strand:
                            go_out = check_coverage_pos(
                                start, end, cover, cutoff, cover_sets,
                                checks, poss, strand, cover_pos)
                            if go_out:
                                break
                            if type_ == "differential":
                                go_out = get_differential_cover(
                                    num, checks, cover_sets, poss,
                                    cover, args_srna, cover_pos)
                                if go_out:
                                    break
                        pos += 1
                    if strand == "+":
                        diff = poss["stop_point"] - start
                    else:
                        diff = end - poss["stop_point"]
                    avg = cover_sets["total"] / float(diff + 1)
                    if avg > float(cutoff):
                        srna_covers[cond].append(
                            {"track": track, "high": cover_sets["high"],
                             "low": cover_sets["low"], "avg": avg,
                             "pos": poss["stop_point"], "type": lib_type})
    return srna_covers


def get_attribute_string(srna_datas, tss_pro, num, name, srna_type, strain):
    attribute_string = ";".join(
        ["=".join(items) for items in (
            ["ID", strain + "_srna" + str(num)],
            ["Name", "_".join(["sRNA", name])],
            ["sRNA_type", srna_type])])
    datas = tss_pro.split(";")
    tss = ""
    pro = ""
    for data in datas:
        if "TSS" in data:
            if len(tss) == 0:
                tss = tss + data
            else:
                tss = ";".join([tss, data])
        elif "Cleavage" in data:
            if len(pro) == 0:
                pro = pro + data
            else:
                pro = ";".join([pro, data])
    if len(tss) == 0:
        tss = "NA"
    if len(pro) == 0:
        pro = "NA"
    with_tss = "=".join(["with_TSS", tss])
    with_pro = "=".join(["end_cleavage", pro])
    if srna_datas is None:
        if (tss != "NA") and (pro != "NA"):
            attribute_string = ";".join([attribute_string, with_tss,
                                         with_pro])
        elif tss != "NA":
            attribute_string = ";".join([attribute_string, with_tss])
        elif pro != "NA":
            attribute_string = ";".join([attribute_string, with_pro])
    else:
        srna_data_string = ";".join(
            ["=".join(items) for items in (
                ["best_avg_coverage", str(srna_datas["best"])],
                ["best_high_coverage", str(srna_datas["high"])],
                ["best_low_coverage", str(srna_datas["low"])])])
        if (tss != "NA") and (pro != "NA"):
            attribute_string = ";".join([attribute_string, with_tss,
                                         with_pro, srna_data_string])
        elif tss != "NA":
            attribute_string = ";".join([attribute_string, with_tss,
                                         srna_data_string])
        elif pro != "NA":
            attribute_string = ";".join([attribute_string, with_pro,
                                         srna_data_string])
        else:
            attribute_string = ";".join([attribute_string,
                                         srna_data_string])
    return attribute_string


def print_file(string, tss, srna_datas, srna_type, args_srna, strain):
    name = '%0*d' % (5, args_srna.nums["uni"])
    datas = string.split("\t")
    if srna_datas is None:
        args_srna.out_table.write(
            "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t".format(
                datas[0], name, datas[3], datas[4], datas[6],
                "NA", "NA", "NA", "NA", "NA"))
        attribute_string = get_attribute_string(
            srna_datas, tss, args_srna.nums["uni"], name, srna_type, strain)
        args_srna.output.write("\t".join([string, attribute_string]) + "\n")
        args_srna.out_table.write(tss + "\n")
    else:
        args_srna.out_table.write(
            "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t".format(
                datas[0], name, datas[3], datas[4], datas[6],
                ";".join(srna_datas["conds"].keys()),
                ";".join(srna_datas["conds"].values()),
                srna_datas["best"], srna_datas["high"],
                srna_datas["low"]))
        attribute_string = get_attribute_string(
            srna_datas, tss, args_srna.nums["uni"], name, srna_type, strain)
        args_srna.output.write("\t".join([string, attribute_string]) + "\n")
        if srna_datas["detail"] is not None:
            args_srna.out_table.write(tss + "\t")
            first = True
            for data in srna_datas["detail"]:
                if first:
                    args_srna.out_table.write(
                        "{0}(avg={1};high={2};low={3})".format(
                            data["track"], data["avg"], data["high"],
                            data["low"]))
                    first = False
                else:
                    args_srna.out_table.write(
                        ";{0}(avg={1};high={2};low={3})".format(
                            data["track"], data["avg"], data["high"],
                            data["low"]))
            args_srna.out_table.write("\n")
    args_srna.nums["uni"] += 1


def get_coverage(start, end, strain, wigs, strand, ta, tss,
                 cutoff_coverage, notex, args_srna):
    srna_covers = get_best(wigs, strain, strand, start, end, "total",
                           args_srna, cutoff_coverage)
    srna_datas = replicate_comparison(args_srna, srna_covers, strand,
                                      "normal", None, None, None, notex,
                                      cutoff_coverage, args_srna.texs)
    string = "\t".join([str(field) for field in [
        ta.seq_id, "ANNOgesic", "ncRNA", str(start), str(end),
        ".", ta.strand, "."]])
    if srna_datas["best"] != 0:
        print_file(string, tss, srna_datas, ta.attributes["sRNA_type"],
                   args_srna, strain)


def check_pro(ta, start, end, srna_datas, type_, cutoff,
              wigs, notex, args_srna):
    '''check the processing site for long non-coding RNA'''
    pro_pos = -1
    detect_pro = "NA"
    for pro in args_srna.pros:
        if (pro.seq_id == ta.seq_id) and (pro.strand == ta.strand):
            if ta.strand == "+":
                if (pro.start >= ta.start) and (pro.start <= ta.end) and (
                        (pro.start - start) >= args_srna.min_len) and (
                        (pro.start - start) <= args_srna.max_len):
                    pro_pos = pro.start
                    detect_pro = "".join(["Cleavage:", str(pro.start),
                                          "_", pro.strand])
                if pro.start > ta.end:
                    break
            if ta.strand == "-":
                if (pro.start >= ta.start) and (pro.start <= ta.end) and (
                        (end - pro.start) >= args_srna.min_len) and (
                        (end - pro.start) <= args_srna.max_len):
                    pro_pos = pro.start
                    detect_pro = "".join(["Cleavage:", str(pro.start),
                                          "_", pro.strand])
                    break
                if pro.start > ta.end:
                    break
    new_srna_datas = None
    if ta.strand == "+":
        if ((type_ == "within") and (srna_datas["pos"] < pro_pos)) or (
                (type_ == "longer") and (pro_pos != -1)):
            srna_covers = get_best(wigs, ta.seq_id, ta.strand, start,
                                   pro_pos, "total", args_srna, cutoff)
            new_srna_datas = replicate_comparison(
                args_srna, srna_covers, ta.strand, "normal", None, None,
                None, notex, cutoff, args_srna.texs)
            if new_srna_datas["best"] <= cutoff:
                new_srna_datas = None
    else:
        if ((type_ == "within") and (srna_datas["pos"] > pro_pos) and (
                pro_pos != -1)) or (
                (type_ == "longer") and (pro_pos != -1)):
            srna_covers = get_best(wigs, ta.seq_id, ta.strand, pro_pos,
                                   end, "total", args_srna, cutoff)
            new_srna_datas = replicate_comparison(
                args_srna, srna_covers, ta.strand, "normal", None, None,
                None, notex, cutoff, args_srna.texs)
            if new_srna_datas["best"] <= cutoff:
                new_srna_datas = None
    return pro_pos, new_srna_datas, detect_pro


def exchange_to_pro(args_srna, srna_datas, ta, start, end,
                    cutoff, wigs, notex):
    detect = False
    if srna_datas["high"] != 0:
        if ((srna_datas["pos"] - start) >= args_srna.min_len) and (
                (srna_datas["pos"] - start) <= args_srna.max_len):
            pro_pos, pro_datas, pro = check_pro(
                ta, start, end, srna_datas, "within", cutoff,
                wigs, notex, args_srna)
            if pro_datas is not None:
                srna_datas = pro_datas
                srna_datas["pos"] = pro_pos
                detect = True
            else:
                if srna_datas["best"] > cutoff:
                    detect = True
        else:
            pro_pos, pro_datas, pro = check_pro(
                ta, start, end, srna_datas, "longer", cutoff,
                wigs, notex, args_srna)
            if pro_datas is not None:
                srna_datas = pro_datas
                srna_datas["pos"] = pro_pos
                detect = True
    else:
        pro = None
    return detect, srna_datas, pro


def detect_wig_pos(wigs, ta, start, end, tss, cutoff, notex, args_srna):
    '''searching for the coverage decrease'''
    srna_covers = get_best(wigs, ta.seq_id, ta.strand, start, end,
                           "differential", args_srna, cutoff)
    srna_datas = replicate_comparison(
        args_srna, srna_covers, ta.strand, "normal", None, None, None,
        notex, cutoff, args_srna.texs)
    detect, srna_datas, pro = exchange_to_pro(args_srna, srna_datas, ta,
                                              start, end, cutoff, wigs,
                                              notex)
    if ta.strand == "+":
        if (detect) and (
                (srna_datas["pos"] - start) >= args_srna.min_len) and (
                (srna_datas["pos"] - start) <= args_srna.max_len):
            string = "\t".join([str(field) for field in [
                ta.seq_id, "ANNOgesic", "ncRNA", str(start),
                str(srna_datas["pos"]), ".", ta.strand, "."]])
            if pro != "NA":
                tss = ";".join([tss, pro])
            print_file(string, tss, srna_datas,
                       ta.attributes["sRNA_type"], args_srna, ta.seq_id)
    else:
        if (detect) and (
                (end - srna_datas["pos"]) >= args_srna.min_len) and (
                (end - srna_datas["pos"]) <= args_srna.max_len):
            string = "\t".join([str(field) for field in [
                ta.seq_id, "ANNOgesic", "ncRNA", str(srna_datas["pos"]),
                str(end), ".", ta.strand, "."]])
            if pro != "NA":
                tss = ";".join([tss, pro])
            print_file(string, tss, srna_datas,
                       ta.attributes["sRNA_type"], args_srna, ta.seq_id)


def detect_longer(ta, args_srna, cdss, wigs_f, wigs_r):
    '''deal with the long non-coding RNA'''
    notex = None
    if len(args_srna.tsss) != 0:
        for tss in args_srna.tsss:
            cutoff = get_tss_type(tss, args_srna.cutoff_coverage, ta, cdss,
                                  args_srna.file_type, args_srna.break_tran)
            if args_srna.notex is not None:
                notex = get_tss_type(tss, args_srna.notex, ta, cdss,
                                     "notex", args_srna.break_tran)
            if cutoff is not None:
                if (tss.strand == ta.strand) and (tss.seq_id == ta.seq_id):
                    if tss.strand == "+":
                        compare_ta_tss(
                            tss.start, ta.start - args_srna.fuzzy, ta.end,
                            ta, tss, ta.end - tss.start, cutoff, notex,
                            wigs_f, args_srna)
                        if (tss.start >= ta.start - args_srna.fuzzy) and (
                                tss.start <= ta.end) and (
                                (ta.end - tss.start) > args_srna.max_len):
                            if len(wigs_f) != 0:
                                detect_wig_pos(
                                    wigs_f, ta, tss.start, ta.end,
                                    "".join(["TSS:", str(tss.start),
                                             "_", tss.strand]),
                                    cutoff, notex, args_srna)
                    else:
                        compare_ta_tss(
                            tss.end, ta.start, ta.end + args_srna.fuzzy,
                            ta, tss, tss.end - ta.start, cutoff, notex,
                            wigs_r, args_srna)
                        if (tss.end >= ta.start) and (
                                tss.end <= ta.end + args_srna.fuzzy) and (
                                tss.end - ta.start > args_srna.max_len):
                            if len(wigs_r) != 0:
                                detect_wig_pos(
                                    wigs_r, ta, ta.start, tss.end,
                                    "".join(["TSS:", str(tss.end),
                                             "_", tss.strand]),
                                    cutoff, notex, args_srna)
    if len(args_srna.tsss) == 0:
        cutoff = get_tss_type(None, args_srna.cutoff_coverage, ta, cdss,
                              args_srna.file_type, args_srna.break_tran)
        if (len(wigs_f) != 0) and (len(wigs_r) != 0):
            if ta.strand == "+":
                detect_wig_pos(wigs_f, ta, ta.start, ta.end, "NA",
                               cutoff, notex, args_srna)
            else:
                detect_wig_pos(wigs_r, ta, ta.start, ta.end, "NA",
                               cutoff, notex, args_srna)


def get_tss_type(tss, cutoff_coverage, ta, cdss, file_type, break_tran):
    '''get the cutoff based on the TSS type for the core computation.
    If there are multiple TSS types, the highest cutoff is taken.'''
    types = []
    for type_, cover in cutoff_coverage.items():
        if cover is not None:
            types.append(type_)
    cover = None
    break_ = False
    if tss is None:
        cover = cutoff_coverage["no_tss"]
    else:
        if "type" in tss.attributes.keys():
            for type_ in types:
                if type_ in tss.attributes["type"].lower():
                    if cover is None:
                        cover = cutoff_coverage[type_]
                        cover, break_ = check_break_tran(
                            tss, ta, cdss, cover, file_type,
                            break_tran, type_)
                    elif cover < cutoff_coverage[type_]:
                        if (break_) and (
                                (type_.lower() == "primary") or (
                                type_.lower() == "secondary")):
                            pass
                        else:
                            cover = cutoff_coverage[type_]
        else:
            cover = cutoff_coverage["no_tss"]
    return cover


def check_break_tran(tss, ta, cdss, cover, file_type, break_tran, type_):
    '''check the primary or secondary TSS which is associated with a
    transcript that does not contain a CDS'''
    break_ = False
    if "type" in tss.attributes.keys():
        if ("primary" in tss.attributes["type"].lower()) or (
                "secondary" in tss.attributes["type"].lower()):
            overlap = False
            for cds in cdss:
                if (cds.seq_id == ta.seq_id) and (
                        cds.strand == ta.strand):
                    if ((cds.start <= ta.start) and (
                            cds.end >= ta.end)) or (
                            (cds.start >= ta.start) and (
                            cds.end <= ta.end)) or (
                            (cds.start <= ta.start) and (
                            cds.end <= ta.end) and (
                            cds.end >= ta.start)) or (
                            (cds.start >= ta.start) and (
                            cds.start <= ta.end) and (
                            cds.end >= ta.end)):
                        overlap = True
                    elif cds.start > ta.end:
                        break
            if not overlap:
                break_ = True
                if file_type == "tex":
                    tmp_cover = break_tran[0]
                elif file_type == "notex":
                    tmp_cover = break_tran[1]
                elif file_type == "frag":
                    tmp_cover = break_tran[2]
                if (type_.lower() == "primary") or (
                        type_.lower() == "secondary"):
                    cover = tmp_cover
                else:
                    if tmp_cover > cover:
                        cover = tmp_cover
    return cover, break_


def compare_ta_tss(tss_pos, ta_start, ta_end, ta, tss, diff,
                   cutoff_coverage, notex, wigs, args_srna):
    if (tss_pos >= ta_start) and (tss_pos <= ta_end) and (
            diff >= args_srna.min_len) and (diff <= args_srna.max_len):
        if tss.strand == "+":
            start = tss_pos
            end = ta_end
        else:
            start = ta_start
            end = tss_pos
        if len(wigs) != 0:
            get_coverage(start, end, ta.seq_id, wigs, tss.strand, ta,
                         "".join(["TSS:", str(tss.start), "_",
                                  tss.strand]),
                         cutoff_coverage, notex, args_srna)
        else:
            string = "\t".join([str(field) for field in [
                ta.seq_id, "ANNOgesic", "ncRNA", str(start), str(end),
                ta.score, ta.strand, ta.phase]])
            print_file(string,
                       "".join(["TSS:", str(tss.start), "_", tss.strand]),
                       None, ta.attributes["sRNA_type"], args_srna,
                       ta.seq_id)
        if args_srna.detects is not None:
            args_srna.detects["uni_with_tss"] = True


def detect_include_tss(ta, args_srna, cdss, wigs_f, wigs_r):
    '''compare sRNA with TSS'''
    args_srna.detects["uni_with_tss"] = False
    notex = None
    for tss in args_srna.tsss:
        cutoff = get_tss_type(tss, args_srna.cutoff_coverage, ta, cdss,
                              args_srna.file_type, args_srna.break_tran)
        if args_srna.notex is not None:
            notex = get_tss_type(tss, args_srna.notex, ta, cdss, "notex",
                                 args_srna.break_tran)
        if cutoff is not None:
            if (tss.strand == ta.strand) and (tss.seq_id == ta.seq_id):
                if tss.strand == "+":
                    compare_ta_tss(
                        tss.start, ta.start - args_srna.fuzzy, ta.end, ta,
                        tss, ta.end - tss.start, cutoff, notex, wigs_f,
                        args_srna)
                    if tss.start > ta.end:
                        break
                else:
                    compare_ta_tss(
                        tss.end, ta.start, ta.end + args_srna.fuzzy, ta,
                        tss, tss.end - ta.start, cutoff, notex, wigs_r,
                        args_srna)
                    if tss.end > ta.end + args_srna.fuzzy:
                        break
    if not args_srna.detects["uni_with_tss"]:
        if (ta.strand == "+") and (len(wigs_f) != 0):
            get_coverage(ta.start, ta.end, ta.seq_id, wigs_f, "+", ta,
                         "False", args_srna.cutoff_coverage["no_tss"],
                         notex, args_srna)
        elif (ta.strand == "-") and (len(wigs_r) != 0):
            get_coverage(ta.start, ta.end, ta.seq_id, wigs_r, "-", ta,
                         "False", args_srna.cutoff_coverage["no_tss"],
                         notex, args_srna)
        elif (len(wigs_f) == 0) and (len(wigs_r) == 0):
            print_file(
                ta.info_without_attributes.replace("Transcript", "ncRNA"),
                "False", None, ta.attributes["sRNA_type"], args_srna,
                ta.seq_id)


def get_proper_tss(tss_file, cutoff_coverage):
    '''get the associated TSS'''
    types = []
    gff_parser = Gff3Parser()
    for type_, cover in cutoff_coverage.items():
        if cover is not None:
            types.append(type_)
    tsss = []
    num_tss = 0
    if tss_file is not None:
        tss_f = open(tss_file, "r")
        for entry in gff_parser.entries(tss_f):
            if "type" in entry.attributes.keys():
                for type_ in types:
                    if type_ in entry.attributes["type"].lower():
                        tsss.append(entry)
                        num_tss += 1
                        break
            else:
                tsss.append(entry)
                num_tss += 1
        tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
        tss_f.close()
    return tsss, num_tss


def read_data(args_srna):
    cdss = []
    tas = []
    pros = []
    genes = []
    ncs = []
    num_cds = 0
    num_ta = 0
    num_pro = 0
    gff_parser = Gff3Parser()
    g_f = open(args_srna.gff_file, "r")
    for entry in gff_parser.entries(g_f):
        if Helper().feature_without_notgene(entry):
            import_ = False
            if args_srna.ex_srna:
                import_ = True
            else:
                if entry.feature != "ncRNA":
                    import_ = True
            if import_:
                if ("product" in entry.attributes.keys()) and (
                        args_srna.hypo):
                    if "hypothetical protein" not in (
                            entry.attributes["product"]):
                        cdss.append(entry)
                        num_cds += 1
                else:
                    cdss.append(entry)
                    num_cds += 1
        if entry.feature == "gene":
            genes.append(entry)
    if args_srna.pro_file is not None:
        pro_f = open(args_srna.pro_file, "r")
        for entry in gff_parser.entries(pro_f):
            pros.append(entry)
            num_pro += 1
        pros = sorted(pros, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
        pro_f.close()
    t_h = open(args_srna.tran_file)
    for entry_ta in gff_parser.entries(t_h):
        tas.append(entry_ta)
        num_ta += 1
    nums = {"cds": num_cds, "ta": num_ta, "pro": num_pro, "uni": 0}
    cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start, k.end,
                                         k.strand))
    g_f.close()
    t_h.close()
    return nums, cdss, tas, pros, genes, ncs


def read_tss(tss_file):
    tsss = []
    if tss_file is not None:
        tss_f = open(tss_file, "r")
        gff_parser = Gff3Parser()
        for entry in gff_parser.entries(tss_f):
            tsss.append(entry)
        tss_f.close()
    num_tss = None
    return tsss, num_tss


def check_overlap(cds, ta):
    if ((cds.end < ta.end) and (cds.end > ta.start) and (
            cds.start <= ta.start)) or (
            (cds.start > ta.start) and (cds.start < ta.end) and (
            cds.end >= ta.end)) or (
            (cds.end >= ta.end) and (cds.start <= ta.start)) or (
            (cds.end <= ta.end) and (cds.start >= ta.start)):
        return True


def compare_ta_cds(cdss, ta, detects):
    for cds in cdss:
        if (cds.strand == ta.strand) and (cds.seq_id == ta.seq_id):
            if check_overlap(cds, ta):
                detects["overlap"] = True
                ta.attributes["sRNA_type"] = "in_CDS"
        elif (cds.strand != ta.strand) and (cds.seq_id == ta.seq_id):
            if check_overlap(cds, ta):
                detects["anti"] = True
                ta.attributes["sRNA_type"] = "antisense"
    if (not detects["overlap"]) and (not detects["anti"]):
        ta.attributes["sRNA_type"] = "intergenic"


def check_srna_condition(ta, args_srna, cdss, wigs_f, wigs_r):
    '''check the long sRNA, get the coverage of the sRNA and
    check the TSS'''
    if ((ta.end - ta.start) >= args_srna.min_len) and (
            (ta.end - ta.start) <= args_srna.max_len):
        if len(args_srna.tsss) != 0:
            detect_include_tss(ta, args_srna, cdss, wigs_f, wigs_r)
        else:
            if (ta.strand == "+") and (len(wigs_f) != 0):
                get_coverage(ta.start, ta.end, ta.seq_id, wigs_f, "+",
                             ta, "NA",
                             args_srna.cutoff_coverage["no_tss"],
                             args_srna.notex["no_tss"], args_srna)
            elif (ta.strand == "-") and (len(wigs_r) != 0):
                get_coverage(ta.start, ta.end, ta.seq_id, wigs_r, "-",
                             ta, "NA",
                             args_srna.cutoff_coverage["no_tss"],
                             args_srna.notex["no_tss"], args_srna)
            if (len(wigs_f) == 0) and (len(wigs_r) == 0):
                print_file(
                    ta.info_without_attributes.replace("Transcript",
                                                       "ncRNA"),
                    "NA", None, ta.attributes["sRNA_type"], args_srna,
                    ta.seq_id)
    if (ta.end - ta.start) > args_srna.max_len:
        detect_longer(ta, args_srna, cdss, wigs_f, wigs_r)


def get_cutoff(cutoffs, out_folder, file_type):
    '''set the cutoff of intergenic and antisense sRNA'''
    out = open(os.path.join(out_folder, "tmp_cutoff_inter"), "a")
    coverages = {}
    num_cutoff = 0
    for cutoff in cutoffs:
        if (cutoff != "0") and (num_cutoff == 0):
            coverages["primary"] = float(cutoff)
        elif (cutoff != "0") and (num_cutoff == 1):
            coverages["secondary"] = float(cutoff)
        elif (cutoff != "0") and (num_cutoff == 2):
            coverages["internal"] = float(cutoff)
        elif (cutoff != "0") and (num_cutoff == 3):
            coverages["antisense"] = float(cutoff)
        elif (cutoff != "0") and (num_cutoff == 4):
            coverages["orphan"] = float(cutoff)
        num_cutoff += 1
    low = None
    for cover in coverages.values():
        if cover != 0:
            if low is None:
                low = cover
            elif cover < float(low):
                low = cover
    if low is None:
        print("Error: The coverage cutoffs can not all be 0.")
        sys.exit()
    coverages["no_tss"] = float(low)
    for tss, cover in coverages.items():
        out.write("\t".join([file_type, tss, str(cover)]) + "\n")
    out.close()
    return coverages


def get_intergenic_antisense_cutoff(args_srna):
    '''set the cutoff of intergenic and antisense sRNA,
    and also deal with the notex library'''
    cutoff_coverage = get_cutoff(args_srna.cutoffs, args_srna.out_folder,
                                 args_srna.file_type)
    notex = None
    if args_srna.cut_notex is not None:
        notex = get_cutoff(args_srna.cut_notex, args_srna.out_folder,
                           "notex")
    return cutoff_coverage, notex


def free_memory(paras):
    for data in paras:
        del data
    gc.collect()


def intergenic_srna(args_srna, libs, texs, wigs_f, wigs_r, tss_file):
    '''get intergenic and antisense sRNA'''
    inter_cutoff_coverage, inter_notex = get_intergenic_antisense_cutoff(
        args_srna)
    anti_cutoff_coverage, anti_notex = get_intergenic_antisense_cutoff(
        args_srna)
    nums, cdss, tas, pros, genes, ncs = read_data(args_srna)
    tsss, num_tss = read_tss(tss_file)
    detects = {"overlap": False, "uni_with_tss": False, "anti": False}
    output = open(args_srna.output_file, "w")
    out_table = open(args_srna.output_table, "w")
    output.write("##gff-version 3\n")
    for ta in tas:
        detects["overlap"] = False
        detects["anti"] = False
        compare_ta_cds(cdss, ta, detects)
        if (detects["overlap"]) and (not args_srna.in_cds):
            continue
        else:
            if not detects["anti"]:
                cutoff_coverage = inter_cutoff_coverage
                notex = inter_notex
            else:
                cutoff_coverage = anti_cutoff_coverage
                notex = anti_notex
            args_srna = ArgsContainer().extend_inter_container(
                args_srna, tsss, pros, nums, output, out_table, texs,
                detects, cutoff_coverage, notex)
            check_srna_condition(ta, args_srna, cdss, wigs_f, wigs_r)
    file_name = args_srna.output_file.split(".")
    file_name = file_name[0] + ".stat"
    output.close()
    out_table.close()
    paras = [tsss, tas, pros, genes, cdss]
    free_memory(paras)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sRNA_intergenic.py
sRNA_intergenic.py
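A small standalone example (the numbers are invented for illustration) of the mapping that get_cutoff above performs: the five positional cutoff values correspond to the primary, secondary, internal, antisense and orphan TSS classes, zero values are dropped, and the smallest remaining value becomes the "no_tss" fallback cutoff.

cutoffs = ["0.5", "0", "0", "0.2", "0.3"]  # hypothetical --cutoff values
names = ["primary", "secondary", "internal", "antisense", "orphan"]
coverages = {n: float(c) for n, c in zip(names, cutoffs) if c != "0"}
coverages["no_tss"] = min(coverages.values())
print(coverages)
# {'primary': 0.5, 'antisense': 0.2, 'orphan': 0.3, 'no_tss': 0.2}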
import os
import sys
from annogesiclib.helper import Helper
from annogesiclib.detect_utr import detect_3utr, detect_5utr
from annogesiclib.multiparser import Multiparser


class UTRDetection(object):
    '''detection of UTR'''

    def __init__(self, args_utr):
        self.helper = Helper()
        self.multiparser = Multiparser()
        self.tss_path = os.path.join(args_utr.tsss, "tmp")
        self.tran_path = os.path.join(args_utr.trans, "tmp")
        self.utr5_path = os.path.join(args_utr.out_folder, "5UTRs")
        self.utr3_path = os.path.join(args_utr.out_folder, "3UTRs")
        self.utr5_stat_path = os.path.join(self.utr5_path, "statistics")
        self.utr3_stat_path = os.path.join(self.utr3_path, "statistics")

    def _check_folder(self, folder):
        if folder is None:
            print("Error: Lack required files!")
            sys.exit()

    def _check_gff(self, folder):
        for gff in os.listdir(folder):
            if gff.endswith(".gff"):
                self.helper.check_uni_attributes(os.path.join(folder, gff))

    def _compute_utr(self, args_utr, log):
        log.write("Running detect_utr.py to detect UTRs.\n")
        for gff in os.listdir(args_utr.gffs):
            if gff.endswith(".gff"):
                prefix = gff[:-4]
                tss = self.helper.get_correct_file(
                    self.tss_path, "_TSS.gff", prefix, None, None)
                tran = self.helper.get_correct_file(
                    self.tran_path, "_transcript.gff", prefix, None, None)
                if args_utr.terms:
                    term = self.helper.get_correct_file(
                        os.path.join(args_utr.terms, "tmp"),
                        "_term.gff", prefix, None, None)
                else:
                    term = None
                print("Computing 5'UTRs of {0}".format(prefix))
                detect_5utr(tss, os.path.join(args_utr.gffs, gff), tran,
                            os.path.join(self.utr5_path, "gffs",
                                         "_".join([prefix, "5UTR.gff"])),
                            args_utr)
                print("Computing 3'UTRs of {0}".format(prefix))
                detect_3utr(tran, os.path.join(args_utr.gffs, gff), term,
                            os.path.join(self.utr3_path, "gffs",
                                         "_".join([prefix, "3UTR.gff"])),
                            args_utr)
                self.helper.move_all_content(
                    os.getcwd(), self.utr5_stat_path, ["_5utr_length.png"])
                self.helper.move_all_content(
                    os.getcwd(), self.utr3_stat_path, ["_3utr_length.png"])
        log.write("The following files are generated:\n")
        for folder in (os.path.join(self.utr5_path, "gffs"),
                       os.path.join(self.utr3_path, "gffs"),
                       self.utr5_stat_path, self.utr3_stat_path):
            for file_ in os.listdir(folder):
                log.write("\t" + os.path.join(folder, file_) + "\n")

    def run_utr_detection(self, args_utr, log):
        self._check_folder(args_utr.tsss)
        self._check_folder(args_utr.gffs)
        self._check_folder(args_utr.trans)
        self._check_gff(args_utr.tsss)
        self._check_gff(args_utr.gffs)
        self._check_gff(args_utr.trans)
        if args_utr.terms is not None:
            self._check_gff(args_utr.terms)
        self.multiparser.parser_gff(args_utr.gffs, None)
        self.multiparser.parser_gff(args_utr.tsss, "TSS")
        self.multiparser.combine_gff(args_utr.gffs, self.tss_path,
                                     None, "TSS")
        self.multiparser.parser_gff(args_utr.trans, "transcript")
        self.multiparser.combine_gff(args_utr.gffs, self.tran_path,
                                     None, "transcript")
        if args_utr.terms:
            self.multiparser.parser_gff(args_utr.terms, "term")
            self.multiparser.combine_gff(args_utr.gffs,
                                         os.path.join(args_utr.terms,
                                                      "tmp"),
                                         None, "term")
        self._compute_utr(args_utr, log)
        self.helper.remove_tmp_dir(args_utr.gffs)
        self.helper.remove_tmp_dir(args_utr.tsss)
        self.helper.remove_tmp_dir(args_utr.trans)
        self.helper.remove_tmp_dir(args_utr.terms)
        self.helper.remove_tmp(self.utr5_path)
        self.helper.remove_tmp(self.utr3_path)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/utr.py
utr.py
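A minimal sketch (find_by_prefix is a hypothetical helper, not the real Helper.get_correct_file) of the filename convention _compute_utr above relies on: an annotation file such as NC_000915.gff is paired with NC_000915_TSS.gff and NC_000915_transcript.gff purely by shared prefix.

import os


def find_by_prefix(folder, prefix, suffix):
    """Return the path of <prefix><suffix> inside folder, or None."""
    candidate = os.path.join(folder, prefix + suffix)
    return candidate if os.path.isfile(candidate) else None

# e.g. find_by_prefix("tss/tmp", "NC_000915", "_TSS.gff")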
from annogesiclib.gff3 import Gff3Parser
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt


def ellipse(x, y, angle, face, al, plt):
    ellipse_ = mpl.patches.Ellipse(xy=(x, y), width=0.8, height=0.3,
                                   angle=angle, facecolor=face, alpha=al)
    plt.gca().add_artist(ellipse_)
    return plt


def line(x, y, angle, plt):
    line_ = mpl.patches.Ellipse(xy=(x, y), width=0.8, height=0.3,
                                angle=angle, facecolor="none",
                                edgecolor="#000000", linewidth=3)
    plt.gca().add_artist(line_)
    return plt


def plot_text(plt, xy1, xy2, tss_type, size, color_text):
    plt.text(xy1, xy2, tss_type, ha="center", va="center",
             fontsize=size, fontweight='bold', color=color_text)


def text_total(xy, tss_type, num, plt):
    plot_text(plt, xy[0], xy[1], tss_type, 15, "black")
    if tss_type != "Orphan":
        plot_text(plt, xy[0], xy[1] - 0.05, str(num), 15, "black")


def text(xy, tss_type, num, plt):
    if (tss_type == "Primary") or (
            tss_type == "Antisense") or (
            tss_type == "Antisense_Primary"):
        plot_text(plt, xy[0], xy[1], str(num), 16, "white")
    else:
        plot_text(plt, xy[0], xy[1], str(num), 16, "black")


def check_tss_class(total_types, strain, tss, tss_type):
    if tss_type not in total_types[strain].keys():
        total_types[strain][tss_type] = 0
    if tss_type in tss.attributes["type"]:
        total_types[strain][tss_type] += 1


def import_types(tsss):
    types = {"all": {}}
    total_types = {"all": {}}
    for strain, datas in tsss.items():
        if strain not in types.keys():
            types[strain] = {}
            total_types[strain] = {}
        for tss in datas:
            check_tss_class(total_types, strain, tss, "Primary")
            check_tss_class(total_types, strain, tss, "Secondary")
            check_tss_class(total_types, strain, tss, "Internal")
            check_tss_class(total_types, strain, tss, "Antisense")
            check_tss_class(total_types, strain, tss, "Orphan")
            sorted_types = sorted(tss.attributes["type"].split(","))
            ty = None
            for tss_type in sorted_types:
                if ty is None:
                    ty = tss_type
                else:
                    if tss_type not in ty:
                        ty = ty + "_" + tss_type
            if ty not in types[strain].keys():
                types[strain][ty] = 0
            types[strain][ty] += 1
    return types, total_types


def read_gff(tss_file):
    tsss = {"all": []}
    tss_num = {"all": 0}
    pre_strain = ""
    gff_parser = Gff3Parser()
    f_h = open(tss_file)
    for entry in gff_parser.entries(f_h):
        if pre_strain != entry.seq_id:
            tsss[entry.seq_id] = []
            tss_num[entry.seq_id] = 0
            pre_strain = entry.seq_id
        tsss[entry.seq_id].append(entry)
        tsss["all"].append(entry)
        tss_num[entry.seq_id] += 1
        tss_num["all"] += 1
    for strain in tsss.keys():
        tsss[strain] = sorted(tsss[strain],
                              key=lambda k: (k.seq_id, k.start))
    f_h.close()
    return tsss, tss_num


def plot(types, file_type, feature_name, total_types, tss_num):
    for strain, tss_types in types.items():
        if len(types.keys()) <= 2:
            if strain == "all":
                continue
        plt.figure(figsize=(12, 6))
        coordinate_total = {"Primary": (0.05, 0.85),
                            "Secondary": (0.2, 0.95),
                            "Internal": (0.575, 0.95),
                            "Antisense": (0.7, 0.85),
                            "Orphan": (0.8, 0.3)}
        if feature_name == "processing site":
            plot_text(plt, 0.05, 0.05, "Total processing sites", 15,
                      "black")
            plot_text(plt, 0.05, 0, str(tss_num[strain]), 15, "black")
        elif feature_name == "TSS":
            plot_text(plt, 0.025, 0.05, "Total TSSs", 15, "black")
            plot_text(plt, 0.025, 0, str(tss_num[strain]), 15, "black")
        for tss_type, num in total_types[strain].items():
            text_total(coordinate_total[tss_type], tss_type, num, plt)
        ellipse(0.5, 0.4, 70, "#E83241", 1.0, plt)
        ellipse(0.25, 0.4, -70, "#6648DC", 0.8, plt)
        ellipse(0.38, 0.495, 70, "#13C139", 0.5, plt)
        ellipse(0.37, 0.495, -70, "#E8D632", 0.4, plt)
        circ = mpl.patches.Ellipse(xy=(0.8, 0.2), width=0.09, height=0.15,
                                   facecolor='none', edgecolor="#000000",
                                   linewidth=3)
        plt.gca().add_artist(circ)
        line(0.25, 0.4, -70, plt)
        line(0.37, 0.495, -70, plt)
        line(0.5, 0.4, 70, plt)
        line(0.38, 0.495, 70, plt)
        coordinates = {"Primary": (0.15, 0.5), "Secondary": (0.275, 0.75),
                       "Internal": (0.476, 0.75),
                       "Antisense": (0.625, 0.5),
                       "Primary_Secondary": (0.225, 0.625),
                       "Internal_Primary": (0.25, 0.225),
                       "Antisense_Primary": (0.375, 0.075),
                       "Internal_Secondary": (0.375, 0.625),
                       "Antisense_Secondary": (0.5, 0.225),
                       "Antisense_Internal": (0.525, 0.625),
                       "Internal_Primary_Secondary": (0.3, 0.45),
                       "Antisense_Primary_Secondary": (0.42, 0.18),
                       "Antisense_Internal_Primary": (0.335, 0.18),
                       "Antisense_Internal_Secondary": (0.45, 0.45),
                       "Antisense_Internal_Primary_Secondary": (0.375,
                                                                0.3),
                       "Orphan": (0.8, 0.19)}
        for tss_type, xy in coordinates.items():
            if tss_type not in tss_types.keys():
                tss_types[tss_type] = 0
            text(xy, tss_type, tss_types[tss_type], plt)
        plt.axis('off')
        plt.savefig("_".join([file_type, "venn", strain + ".png"]))
        plt.clf()


def plot_venn(tss_file, file_type):
    if file_type == "processing":
        feature_name = "processing site"
    else:
        feature_name = "TSS"
    types = {"all": {}}
    total_types = {"all": {}}
    tsss, tss_num = read_gff(tss_file)
    types, total_types = import_types(tsss)
    plot(types, file_type, feature_name, total_types, tss_num)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/plot_TSS_venn.py
plot_TSS_venn.py
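A minimal usage sketch for the plot_TSS_venn module above; the input path is a placeholder for a TSS GFF file whose entries carry a "type" attribute:

from annogesiclib.plot_TSS_venn import plot_venn

# "NC_000915.1_TSS.gff" is a hypothetical path. file_type "TSS" selects the
# TSS wording; "processing" would select processing-site wording instead.
plot_venn("NC_000915.1_TSS.gff", "TSS")
# Writes one TSS_venn_<strain>.png per strain into the working directory.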
import csv
import itertools


def _boolean(data):
    if data == "False":
        result = False
    else:
        result = True
    return result


def row_to_location(row):
    if row[4] == "0":
        sub = False
        nosub = True
    else:
        sub = True
        nosub = False
    tss = _boolean(row[6])
    term = _boolean(row[8])
    return {"have no sub-operons": nosub, "have sub-operons": sub,
            "start with tss": tss, "stop with terminator": term}


def plus_num(num_total, strain, type_):
    num_total["total"][type_] += 1
    num_total[strain][type_] += 1
    num_total["total"]["total"] += 1
    num_total[strain]["total"] += 1


def print_stat(operons, total_num, class_operon, out):
    num_features = {}
    out.write("Total number of operons is {0}\n".format(total_num))
    out.write("The sub operon and features:\n")
    for operon in operons:
        # count every combination of up to four features that an operon has
        for it in range(1, 5):
            for features in itertools.combinations(operon.keys(), it):
                check_key = 0
                for key in features:
                    if operon[key]:
                        if it == 1:
                            if key in num_features.keys():
                                num_features[key] += 1
                            else:
                                num_features[key] = 1
                        check_key += 1
                if (check_key == it) and (it != 1):
                    key = " and ".join(features)
                    if key in num_features.keys():
                        num_features[key] += 1
                    else:
                        num_features[key] = 1
    for key, value in num_features.items():
        out.write("\tthe number of operons which {0} = {1} ({2})\n".format(
            key, value, float(value) / float(total_num)))
    out.write("mono/polycistronic:\n")
    out.write("\tmonocistronic: {0} ({1})\n".format(
        class_operon["mono"],
        float(class_operon["mono"]) / float(class_operon["total"])))
    out.write("\tpolycistronic: {0} ({1})\n".format(
        class_operon["poly"],
        float(class_operon["poly"]) / float(class_operon["total"])))


def stat(input_file, out_file):
    out = open(out_file, "w")
    operons = {}
    operons_all = []
    tmp_id = ""
    f_h = open(input_file, "r")
    pre_seq_id = ""
    total_num = {}
    total_num_all = 0
    class_operon = {}
    class_operon["total"] = {"na": 0, "mono": 0, "poly": 0, "total": 0}
    for row in csv.reader(f_h, delimiter="\t"):
        if row[0] != "Operon_ID":
            if row[0] != tmp_id:
                if pre_seq_id != row[1]:
                    pre_seq_id = row[1]
                    operons[row[1]] = []
                    total_num[row[1]] = 0
                    class_operon[row[1]] = {"na": 0, "mono": 0,
                                            "poly": 0, "total": 0}
                operons[row[1]].append(row_to_location(row))
                operons_all.append(row_to_location(row))
                total_num[row[1]] += 1
                total_num_all += 1
                if row[-1] == "NA":
                    plus_num(class_operon, row[1], "na")
                elif len(row[-1].split(",")) == 1:
                    plus_num(class_operon, row[1], "mono")
                elif len(row[-1].split(",")) > 1:
                    plus_num(class_operon, row[1], "poly")
            tmp_id = row[0]
    if len(operons) > 1:
        out.write("All genomes:\n")
        print_stat(operons_all, total_num_all, class_operon["total"], out)
    for strain in operons.keys():
        out.write("\n" + strain + ":\n")
        print_stat(operons[strain], total_num[strain],
                   class_operon[strain], out)
    out.close()
    f_h.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/stat_operon.py
stat_operon.py
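A minimal usage sketch for the stat_operon module above; both paths are placeholders, and the input is an ANNOgesic operon table:

from annogesiclib.stat_operon import stat

# Reads "test_operon.csv" (hypothetical path) and writes per-genome
# feature counts and mono/polycistronic ratios to the stat file.
stat("test_operon.csv", "stat_test_operon.csv")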
import os
from annogesiclib.multiparser import Multiparser
from annogesiclib.helper import Helper
from annogesiclib.detect_operon import operon
from annogesiclib.stat_operon import stat
from annogesiclib.combine_gff import combine_gff


class OperonDetection(object):
    '''detection of operons'''

    def __init__(self, args_op):
        self.multiparser = Multiparser()
        self.helper = Helper()
        if args_op.tsss is not None:
            self.tss_path = os.path.join(args_op.tsss, "tmp")
        else:
            self.tss_path = None
        self.tran_path = os.path.join(args_op.trans, "tmp")
        self.table_path = os.path.join(args_op.output_folder, "tables")
        if args_op.terms is not None:
            self._check_gff(args_op.terms, "term")
            self.term_path = os.path.join(args_op.terms, "tmp")
        else:
            self.term_path = None

    def _check_gff(self, gffs, type_):
        for gff in os.listdir(gffs):
            if gff.endswith(".gff"):
                self.helper.check_uni_attributes(os.path.join(gffs, gff))

    def _detect_operon(self, prefixs, args_op, log):
        log.write("Running detect_operon.py to detect operons.\n")
        log.write("The following files are generated:\n")
        for prefix in prefixs:
            out_gff = os.path.join(args_op.output_folder, "gffs",
                                   "_".join([prefix, "operon.gff"]))
            out_table = os.path.join(self.table_path,
                                     "_".join([prefix, "operon.csv"]))
            print("Detecting operons of {0}".format(prefix))
            if self.tss_path is None:
                tss = False
            else:
                tss = self.helper.get_correct_file(
                    self.tss_path, "_TSS.gff", prefix, None, None)
            tran = self.helper.get_correct_file(
                self.tran_path, "_transcript.gff", prefix, None, None)
            gff = self.helper.get_correct_file(
                args_op.gffs, ".gff", prefix, None, None)
            if self.term_path is None:
                term = False
            else:
                term = self.helper.get_correct_file(
                    self.term_path, "_term.gff", prefix, None, None)
            operon(tran, tss, gff, term, args_op.tss_fuzzy,
                   args_op.term_fuzzy, args_op.length, out_table, out_gff)
            log.write("\t" + out_table + "\n")
            log.write("\t" + out_gff + "\n")

    def _check_and_parser_gff(self, args_op):
        self._check_gff(args_op.gffs, "gff")
        self._check_gff(args_op.trans, "tran")
        self.multiparser.parser_gff(args_op.gffs, None)
        self.multiparser.parser_gff(args_op.trans, "transcript")
        self.multiparser.combine_gff(args_op.gffs, self.tran_path,
                                     None, "transcript")
        if args_op.tsss is not None:
            self._check_gff(args_op.tsss, "tss")
            self.multiparser.parser_gff(args_op.tsss, "TSS")
            self.multiparser.combine_gff(args_op.gffs, self.tss_path,
                                         None, "TSS")
        if args_op.terms is not None:
            self._check_gff(args_op.terms, "term")
            self.multiparser.parser_gff(args_op.terms, "term")
            self.multiparser.combine_gff(args_op.gffs, self.term_path,
                                         None, "term")

    def _stat(self, table_path, stat_folder, log):
        log.write("Running stat_operon.py to do statistics.\n")
        for table in os.listdir(table_path):
            if table.endswith("_operon.csv"):
                filename = "_".join(["stat", table])
                out_stat = os.path.join(stat_folder, filename)
                stat(os.path.join(table_path, table), out_stat)
                log.write("\t" + out_stat + "\n")

    def run_operon(self, args_op, log):
        self._check_and_parser_gff(args_op)
        prefixs = []
        for gff in os.listdir(args_op.gffs):
            if gff.endswith(".gff"):
                prefixs.append(gff.replace(".gff", ""))
        self._detect_operon(prefixs, args_op, log)
        self._stat(self.table_path, args_op.stat_folder, log)
        self.helper.remove_tmp_dir(args_op.gffs)
        self.helper.remove_tmp_dir(args_op.tsss)
        self.helper.remove_tmp_dir(args_op.trans)
        if args_op.terms is not None:
            self.helper.remove_tmp_dir(args_op.terms)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/operon.py
operon.py
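A minimal usage sketch for the operon module above. In the real pipeline the argument object is built by ANNOgesic's argument container; the SimpleNamespace below only mimics the fields this class reads, and every folder name is a placeholder:

import sys
from types import SimpleNamespace
from annogesiclib.operon import OperonDetection

# Hypothetical folders and cutoffs; not the official way to drive ANNOgesic.
args_op = SimpleNamespace(
    tsss="tsss", trans="transcripts", terms=None, gffs="gffs",
    output_folder="operon_out", stat_folder="operon_out/statistics",
    tss_fuzzy=5, term_fuzzy=30, length=20)
OperonDetection(args_op).run_operon(args_op, sys.stdout)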
import os
import csv
import shutil
from annogesiclib.gff3 import Gff3Parser


def compare_srna_gff(gffs, strain, strand, start, end, srna_types, file_type):
    '''append the antisense type if the sRNA overlaps a feature on the
    opposite strand; returns the (possibly extended) type string'''
    for gff in gffs:
        if (strain == gff.seq_id) and (strand != gff.strand):
            if ((start <= gff.start) and (end >= gff.end)) or (
                    (start >= gff.start) and (end <= gff.end)) or (
                    (start <= gff.start) and (end <= gff.end) and (
                    end >= gff.start)) or (
                    (start >= gff.start) and (start <= gff.end) and (
                    end >= gff.end)):
                if file_type == "gff":
                    if "antisense" not in srna_types:
                        srna_types = srna_types + "," + "antisense"
                else:
                    if "Antisense" not in srna_types:
                        srna_types = srna_types + "," + "Antisense"
    return srna_types


def srna_antisense(srna_gff, srna_table, gff_file):
    tmp_srna_gff = srna_gff + "tmp"
    tmp_srna_table = srna_table + "tmp"
    out = open(tmp_srna_gff, "w")
    out.write("##gff-version 3\n")
    out_t = open(tmp_srna_table, "w")
    out_t.write("\t".join([
        "Rank", "Genome", "Name", "Start", "End", "Strand",
        "Start_with_TSS/Cleavage_site", "End_with_cleavage", "Candidates",
        "Lib_type", "Best_avg_coverage", "Best_highest_coverage",
        "Best_lower_coverage", "Track/Coverage",
        "Normalized_secondary_energy_change(by_length)", "sRNA_types",
        "Confliction_of_sORF", "nr_hit_number", "sRNA_hit_number",
        "nr_hit_top3|ID|e-value", "sRNA_hit|e-value", "Overlap_CDS",
        "Overlap_percent", "End_with_terminator"]) + "\n")
    srnas = []
    sf = open(srna_gff, "r")
    for entry in Gff3Parser().entries(sf):
        srnas.append(entry)
    tabs = []
    fh = open(srna_table, "r")
    for row in csv.reader(fh, delimiter='\t'):
        if row[0] != "rank":
            tabs.append({"info": row, "strain": row[1], "strand": row[5],
                         "start": int(row[3]), "end": int(row[4]),
                         "srna_type": row[15]})
        else:
            out_t.write("\t".join(row) + "\n")
    gffs = []
    gf = open(gff_file, "r")
    for entry in Gff3Parser().entries(gf):
        gffs.append(entry)
    for srna in srnas:
        # strings are immutable, so the updated type string has to be
        # stored back into the attribute before printing
        srna.attributes["sRNA_type"] = compare_srna_gff(
            gffs, srna.seq_id, srna.strand, srna.start, srna.end,
            srna.attributes["sRNA_type"], "gff")
        attribute_string = ";".join(
            ["=".join(items) for items in srna.attributes.items()])
        out.write("\t".join([srna.info_without_attributes,
                             attribute_string]) + "\n")
    for tab in tabs:
        tab["srna_type"] = compare_srna_gff(
            gffs, tab["strain"], tab["strand"], tab["start"], tab["end"],
            tab["srna_type"], "table")
        tab["info"][15] = tab["srna_type"]
        out_t.write("\t".join(tab["info"]) + "\n")
    sf.close()
    fh.close()
    gf.close()
    out.close()
    out_t.close()
    os.remove(srna_gff)
    shutil.move(tmp_srna_gff, srna_gff)
    os.remove(srna_table)
    shutil.move(tmp_srna_table, srna_table)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sRNA_antisense.py
sRNA_antisense.py
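A minimal usage sketch for the sRNA_antisense module above; all three paths are placeholders:

from annogesiclib.sRNA_antisense import srna_antisense

# The sRNA gff and table are rewritten in place, with the antisense type
# appended wherever an sRNA overlaps an annotated feature on the
# opposite strand of "test_annotation.gff" (hypothetical paths).
srna_antisense("test_sRNA.gff", "test_sRNA.csv", "test_annotation.gff")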
import os
import sys
from annogesiclib.helper import Helper
from annogesiclib.gff3 import Gff3Parser


def assign_name(entry):
    if entry.attributes["locus_tag"] == entry.attributes["Name"]:
        return None
    else:
        return entry.attributes["Name"]


def print_fasta(entry, seq, out, gene, seq_id):
    if gene is not None:
        if "locus_tag" in gene.attributes.keys():
            locus = gene.attributes["locus_tag"]
        else:
            locus = "NA"
    else:
        locus = "NA"
    if "ID" in entry.attributes.keys():
        out.write(">{0}_{1}-{2}_{3}\n{4}\n".format(
            "_".join([str(seq_id), locus, entry.attributes["ID"]]),
            entry.start, entry.end, entry.strand, seq))
    else:
        out.write(">{0}_{1}-{2}_{3}\n{4}\n".format(
            "_".join([str(seq_id), locus, "NA"]),
            entry.start, entry.end, entry.strand, seq))


def read_file(seq_file, gff_file, target_folder, features):
    fastas = []
    cdss_f = []
    cdss_r = []
    genes = []
    with open(seq_file, "r") as seq_f:
        for line in seq_f:
            if line.startswith(">"):
                continue
            else:
                line = line.strip()
                fastas.append(line)
    fasta = "".join(fastas)
    g_h = open(gff_file)
    for entry in Gff3Parser().entries(g_h):
        if os.path.exists(os.path.join(
                target_folder, "_".join([entry.seq_id, "target.fa"]))):
            os.remove(os.path.join(
                target_folder, "_".join([entry.seq_id, "target.fa"])))
        for feature in features:
            if (entry.feature == feature) and (entry.strand == "+"):
                cdss_f.append(entry)
            elif (entry.feature == feature) and (entry.strand == "-"):
                cdss_r.append(entry)
        if entry.feature == "gene":
            genes.append(entry)
    g_h.close()
    cdss_f = sorted(cdss_f, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    cdss_r = sorted(cdss_r, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    genes = sorted(genes, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    return fasta, cdss_f, cdss_r, genes, entry.seq_id


def check_parent_gene(cds, genes):
    target_gene = None
    for gene in genes:
        if (gene.seq_id == cds.seq_id) and (
                gene.strand == cds.strand) and (
                gene.start > cds.end):
            break
        elif "Parent" in cds.attributes.keys():
            if gene.attributes["ID"] in cds.attributes["Parent"].split(","):
                target_gene = gene
    if target_gene is None:
        # fall back to overlap-based assignment of the parent gene
        for gene in genes:
            if (gene.seq_id == cds.seq_id) and (gene.strand == cds.strand):
                if ((cds.start <= gene.start) and (
                        cds.end >= gene.end)) or (
                        (cds.start >= gene.start) and (
                        cds.end <= gene.end)) or (
                        (cds.start <= gene.start) and (
                        cds.end <= gene.end) and (
                        cds.end >= gene.start)) or (
                        (cds.start >= gene.start) and (
                        cds.start <= gene.end) and (
                        cds.end >= gene.end)):
                    target_gene = gene
                if (cds.start == gene.start) and (
                        cds.end == gene.end):
                    target_gene = gene
                    break
    return target_gene


def deal_cds_forward(cdss_f, target_folder, fasta, genes, tar_start,
                     tar_end, seq_id):
    '''for forward strand'''
    pre_id = ""
    out = None
    for cds in cdss_f:
        if cds.seq_id != pre_id:
            out = open(os.path.join(
                target_folder, "_".join([cds.seq_id, "target.fa"])), "w")
            pre_id = cds.seq_id
        if cds.start > tar_start:
            start = cds.start - tar_start
        else:
            start = 1
        if ((cds.start + tar_end) < len(fasta)) and (
                (cds.end - cds.start) >= tar_end):
            end = cds.start + tar_end - 1
        elif cds.start + tar_end >= len(fasta):
            end = len(fasta)
        elif (cds.end - cds.start) < tar_end:
            end = cds.end
        seq = Helper().extract_gene(fasta, start, end, cds.strand)
        target = cds
        target_gene = check_parent_gene(cds, genes)
        print_fasta(target, seq, out, target_gene, seq_id)
    if out is not None:
        out.close()


def deal_cds_reverse(cdss_r, target_folder, fasta, genes, tar_start,
                     tar_end, seq_id):
    '''for the reverse strand'''
    pre_id = ""
    out = None
    for cds in cdss_r:
        if cds.seq_id != pre_id:
            out = open(os.path.join(
                target_folder, "_".join([cds.seq_id, "target.fa"])), "a")
            pre_id = cds.seq_id
        if (len(fasta) - cds.end) > tar_start:
            end = cds.end + tar_start
        else:
            end = len(fasta)
        if ((cds.end - tar_end) > 1) and ((cds.end - cds.start) >= tar_end):
            start = cds.end - tar_end - 1
        elif cds.end - tar_end < 1:
            start = 1
        elif (cds.end - cds.start) < tar_end:
            start = cds.start
        seq = Helper().extract_gene(fasta, start, end, cds.strand)
        target = cds
        target_gene = check_parent_gene(cds, genes)
        print_fasta(target, seq, out, target_gene, seq_id)
    if out is not None:
        out.close()


def potential_target(gff_file, seq_file, target_folder, args_tar, prefixs):
    '''get the sequence of the potential target of sRNA'''
    fasta, cdss_f, cdss_r, genes, seq_id = read_file(
        seq_file, gff_file, target_folder, args_tar.features)
    sort_cdss_f = sorted(cdss_f, key=lambda k: (k.seq_id, k.start,
                                                k.end, k.strand))
    deal_cds_forward(sort_cdss_f, target_folder, fasta, genes,
                     args_tar.tar_start, args_tar.tar_end,
                     prefixs.index(seq_id))
    sort_cdss_r = sorted(cdss_r, reverse=True,
                         key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    deal_cds_reverse(sort_cdss_r, target_folder, fasta, genes,
                     args_tar.tar_start, args_tar.tar_end,
                     prefixs.index(seq_id))
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/potential_target.py
potential_target.py
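A minimal usage sketch for the potential_target module above; the paths are placeholders, and the prefixs list is assumed to contain the sequence IDs of the annotation:

from types import SimpleNamespace
from annogesiclib.potential_target import potential_target

# tar_start/tar_end delimit the region around each CDS start that is
# extracted as the putative interaction site (hypothetical inputs; the
# seq IDs in "test.gff" are assumed to be "test").
args_tar = SimpleNamespace(features=["CDS"], tar_start=200, tar_end=150)
potential_target("test.gff", "test.fa", "targets", args_tar, ["test"])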
import os
import csv
import shutil
from annogesiclib.gff3 import Gff3Parser


def read_file(gff_file, args_srna):
    srnas = []
    for entry in Gff3Parser().entries(open(gff_file)):
        # drop any previously assigned promoter attribute
        attributes = {}
        for key, value in entry.attributes.items():
            if "promoter" not in key:
                attributes[key] = value
        entry.attributes = attributes
        srnas.append(entry)
    srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    fh = open(args_srna.promoter_table, "r")
    pros = []
    for row in csv.reader(fh, delimiter='\t'):
        if (row[0] != "Genome") and (row[3] in args_srna.promoter_name):
            pros.append({"strain": row[0], "pos": row[1],
                         "strand": row[2], "name": row[3]})
    fh.close()
    return srnas, pros


def print_table(srna_table, out_t, srnas):
    fh = open(srna_table, "r")
    for row in csv.reader(fh, delimiter='\t'):
        for srna in srnas:
            if (row[0] == srna.seq_id) and (
                    int(row[2]) == srna.start) and (
                    int(row[3]) == srna.end) and (
                    row[4] == srna.strand):
                if "promoter" in srna.attributes.keys():
                    promoter = [srna.attributes["promoter"]]
                else:
                    promoter = ["NA"]
                out_t.write("\t".join(row + promoter) + "\n")
    fh.close()


def compare_srna_promoter(srna_gff, srna_table, args_srna):
    '''compare sRNAs and promoters to find the sRNAs which are associated
    with promoters; used for the ranking of sRNAs'''
    srnas, pros = read_file(srna_gff, args_srna)
    out_g = open("tmp_srna.gff", "w")
    out_t = open("tmp_srna.csv", "w")
    out_g.write("##gff-version 3\n")
    for srna in srnas:
        tsss = []
        detect = False
        if "with_TSS" in srna.attributes.keys():
            if srna.attributes["with_TSS"] != "NA":
                datas = srna.attributes["with_TSS"].split(",")
                for data in datas:
                    info = data.split(":")[-1]
                    tss = info.split("_")
                    tsss.append({"pos": tss[0], "strand": tss[-1]})
        if len(tsss) != 0:
            for tss in tsss:
                for pro in pros:
                    if (srna.seq_id == pro["strain"]) and (
                            tss["strand"] == pro["strand"]) and (
                            tss["pos"] == pro["pos"]):
                        detect = True
                        if "promoter" not in srna.attributes.keys():
                            srna.attributes["promoter"] = pro["name"]
                        else:
                            srna.attributes["promoter"] = ",".join([
                                srna.attributes["promoter"], pro["name"]])
        if detect:
            out_g.write(srna.info + ";promoter=" +
                        srna.attributes["promoter"] + "\n")
        else:
            out_g.write(srna.info + ";promoter=NA" + "\n")
    print_table(srna_table, out_t, srnas)
    os.remove(srna_gff)
    os.remove(srna_table)
    out_t.close()
    out_g.close()
    shutil.move("tmp_srna.gff", srna_gff)
    shutil.move("tmp_srna.csv", srna_table)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/compare_srna_promoter.py
compare_srna_promoter.py
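A minimal usage sketch for the compare_srna_promoter module above; the paths and motif name are placeholders:

from types import SimpleNamespace
from annogesiclib.compare_srna_promoter import compare_srna_promoter

# promoter_name selects which motifs from the promoter table count as
# promoters; the sRNA gff/table are rewritten in place (hypothetical paths).
args_srna = SimpleNamespace(promoter_table="promoter.csv",
                            promoter_name=["MOTIF_1"])
compare_srna_promoter("test_sRNA.gff", "test_sRNA.csv", args_srna)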
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def print_file(datas, out, feature):
    for data in datas:
        if feature not in data.attributes.keys():
            data.attributes[feature] = "NA"
        else:
            data.attributes[feature] = ",".join(data.attributes[feature])
        data.attribute_string = ";".join(
            ["=".join(items) for items in data.attributes.items()])
        out.write("\t".join([data.info_without_attributes,
                             data.attribute_string]) + "\n")


def del_attributes(feature, entry):
    '''remove the unneeded attributes'''
    attributes = {}
    for key, value in entry.attributes.items():
        if feature not in key:
            attributes[key] = value
    return attributes


def srna_sorf_comparison(sRNA_file, sORF_file, sRNA_out, sORF_out):
    '''compare sRNAs and sORFs; can be used as a filter for sRNA detection'''
    sorfs = []
    srnas = []
    out_r = open(sRNA_out, "w")
    out_o = open(sORF_out, "w")
    out_r.write("##gff-version 3\n")
    out_o.write("##gff-version 3\n")
    for entry in Gff3Parser().entries(open(sRNA_file)):
        entry.attributes = del_attributes("sORF", entry)
        srnas.append(entry)
    srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    for entry in Gff3Parser().entries(open(sORF_file)):
        entry.attributes = del_attributes("sRNA", entry)
        sorfs.append(entry)
    sorfs = sorted(sorfs, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    for srna in srnas:
        for sorf in sorfs:
            if (srna.seq_id == sorf.seq_id) and (srna.strand == sorf.strand):
                if ((srna.start <= sorf.start) and (
                        srna.end >= sorf.end)) or (
                        (srna.start >= sorf.start) and (
                        srna.end <= sorf.end)) or (
                        (srna.start <= sorf.start) and (
                        srna.end >= sorf.start) and (
                        srna.end <= sorf.end)) or (
                        (srna.start >= sorf.start) and (
                        srna.start <= sorf.end) and (
                        srna.end >= sorf.end)):
                    # record the overlap on both entries
                    if "sORF" not in srna.attributes.keys():
                        srna.attributes["sORF"] = []
                    strand = Helper().get_strand_name(sorf.strand)
                    srna.attributes["sORF"].append("".join([
                        "sORF:", str(sorf.start), "-",
                        str(sorf.end), "_", strand]))
                    if "sRNA" not in sorf.attributes.keys():
                        sorf.attributes["sRNA"] = []
                    strand = Helper().get_strand_name(srna.strand)
                    sorf.attributes["sRNA"].append("".join([
                        "sRNA:", str(srna.start), "-",
                        str(srna.end), "_", strand]))
    print_file(sorfs, out_o, "sRNA")
    print_file(srnas, out_r, "sORF")
    out_r.close()
    out_o.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/compare_sRNA_sORF.py
compare_sRNA_sORF.py
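A minimal usage sketch for the compare_sRNA_sORF module above; all four paths are placeholders:

from annogesiclib.compare_sRNA_sORF import srna_sorf_comparison

# Each output gff gains a cross-reference attribute (sORF=... on sRNAs,
# sRNA=... on sORFs) for every overlap found (hypothetical paths).
srna_sorf_comparison("sRNA.gff", "sORF.gff",
                     "sRNA_with_sORF.gff", "sORF_with_sRNA.gff")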
import os
import csv
from annogesiclib.gff3 import Gff3Parser


def output_coverage(table_file, gff_file, cutoff_cover, stat_file,
                    out_folder):
    out = open(os.path.join(out_folder, "tmp_srna_table"), "w")
    out_g = open(os.path.join(out_folder, "tmp_srna_gff"), "w")
    out.write("\t".join([
        "Rank", "Genome", "Name", "Start", "End", "Strand",
        "Start_with_TSS/Cleavage_site", "End_with_cleavage", "Candidates",
        "Lib_type", "Best_avg_coverage", "Best_highest_coverage",
        "Best_lower_coverage", "Track/Coverage",
        "Normalized_secondary_energy_change(by_length)",
        "UTR_derived/Intergenic", "Confliction_of_sORF", "nr_hit_number",
        "sRNA_hit_number", "nr_hit_top3|ID|e-value", "sRNA_hit|e-value",
        "Overlap_CDS", "Overlap_percent", "End_with_terminator"]) + "\n")
    out_g.write("##gff-version 3\n")
    stat_out = open(stat_file, "w")
    # bins for the coverage/frequency statistics
    nums = {5: 0}
    for i in range(10, 100, 10):
        nums[i] = 0
    for i in range(100, 1000, 100):
        nums[i] = 0
    for i in range(1000, 5000, 500):
        nums[i] = 0
    gffs = []
    gh = open(gff_file, "r")
    for entry in Gff3Parser().entries(gh):
        gffs.append(entry)
    fh = open(table_file, "r")
    rank = 1
    new_gffs = []
    for row in csv.reader(fh, delimiter='\t'):
        if row[0] != "rank":
            for cutoff in nums.keys():
                if float(row[10]) >= cutoff:
                    nums[cutoff] += 1
            if float(row[10]) >= cutoff_cover:
                row[0] = str(rank)
                out.write("\t".join(row) + "\n")
                rank += 1
                for gff in gffs:
                    if (row[1] == gff.seq_id) and (
                            row[3] == str(gff.start)) and (
                            row[4] == str(gff.end)) and (
                            row[5] == gff.strand):
                        new_gffs.append(gff)
    sort_gffs = sorted(new_gffs, key=lambda k: (k.seq_id, k.start,
                                                k.end, k.strand))
    for gff in sort_gffs:
        out_g.write(gff.info + "\n")
    coverlist = sorted(nums, key=lambda key: nums[key])
    stat_out.write("coverage\tfrequency\n")
    for cover in coverlist:
        stat_out.write("\t".join([str(cover), str(nums[cover])]) + "\n")
    gh.close()
    fh.close()
    out.close()
    out_g.close()
    stat_out.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/output_cutoff_table.py
output_cutoff_table.py
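A minimal usage sketch for the output_cutoff_table module above; all paths are placeholders:

from annogesiclib.output_cutoff_table import output_coverage

# Candidates whose best average coverage (column 11 of the table) falls
# below 10.0 are dropped, and a coverage/frequency table is written
# (hypothetical paths; "out" must be an existing folder).
output_coverage("srna_table.csv", "srna.gff", 10.0,
                "stat_coverage.csv", "out")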
import os
import shutil
import csv
from annogesiclib.multiparser import Multiparser
from annogesiclib.seq_editer import SeqEditer
from annogesiclib.helper import Helper


class TargetFasta(object):
    '''update the reference sequences for sRNA target prediction'''

    def __init__(self, tar_folder, ref_folder):
        self.multiparser = Multiparser()
        self.seq_editer = SeqEditer()
        self.helper = Helper()
        self.folders = {"tmp_tar": os.path.join(tar_folder, "tmp")}

    def gen_folder(self, out_folder, ref_files):
        new_ref_folder = os.path.join(out_folder, "tmp_reference")
        self.helper.check_make_folder(new_ref_folder)
        for file_ in ref_files:
            shutil.copy(file_, new_ref_folder)
        self.folders["tmp_ref"] = os.path.join(new_ref_folder, "tmp")
        self.multiparser.parser_fasta(new_ref_folder)
        if os.path.exists(os.path.join(out_folder, "fasta_files")):
            shutil.rmtree(os.path.join(out_folder, "fasta_files"))
        os.mkdir(os.path.join(out_folder, "fasta_files"))
        if os.path.exists(self.folders["tmp_tar"]):
            shutil.rmtree(self.folders["tmp_tar"])
        os.mkdir(self.folders["tmp_tar"])
        return new_ref_folder

    def get_target_fasta(self, mut_table, tar_folder, ref_files,
                         out_name, out_folder, log):
        new_ref_folder = self.gen_folder(out_folder, ref_files)
        log.write("Running seq_editor.py for updating sequences.\n")
        self.seq_editer.modify_seq(self.folders["tmp_ref"], mut_table,
                                   self.folders["tmp_tar"], out_name)
        print("Updating the reference sequences")
        mh = open(mut_table, "r")
        pre_strain = None
        out = None
        strain_num = 0
        for row in csv.reader(mh, delimiter='\t'):
            if not row[0].startswith("#"):
                if pre_strain != row[0]:
                    strain_num = strain_num + 1
                    tmp_tar_name = "_".join([out_name, row[0]]) + ".fa"
                    fasta = os.path.join(out_folder, "fasta_files",
                                         tmp_tar_name)
                    if out is not None:
                        out.close()
                    out = open(fasta, "w")
                    if tmp_tar_name in os.listdir(self.folders["tmp_tar"]):
                        with open(os.path.join(
                                self.folders["tmp_tar"],
                                tmp_tar_name)) as f_h:
                            for line in f_h:
                                out.write(line)
                    else:
                        print("Error: No updated information of "
                              "{0}.fa".format(row[0]))
                pre_strain = row[0]
        mh.close()
        if out is not None:
            out.close()
        out_seq = out_name + ".fa"
        if os.path.exists(out_seq):
            os.remove(out_seq)
        if strain_num == 1:
            # a single strain gets one renamed fasta entry
            o_s = open(out_seq, "w")
            for seq in os.listdir(os.path.join(out_folder, "fasta_files")):
                if seq.endswith(".fa"):
                    with open(os.path.join(
                            out_folder, "fasta_files", seq)) as t_h:
                        for line in t_h:
                            if len(line) != 0:
                                if line.startswith(">"):
                                    o_s.write(">" + out_name + "\n")
                                else:
                                    o_s.write(line)
                    os.remove(os.path.join(out_folder, "fasta_files", seq))
            o_s.close()
        else:
            # multiple strains are concatenated into one multi-fasta file
            for seq in os.listdir(os.path.join(out_folder, "fasta_files")):
                if seq.endswith(".fa"):
                    os.system(" ".join(["cat", os.path.join(
                        out_folder, "fasta_files", seq), ">>", out_seq]))
                    os.remove(os.path.join(out_folder, "fasta_files", seq))
        shutil.move(out_seq, os.path.join(out_folder, "fasta_files", out_seq))
        shutil.rmtree(self.folders["tmp_tar"])
        shutil.rmtree(self.folders["tmp_ref"])
        if "tmp_reference" in os.listdir(out_folder):
            shutil.rmtree(new_ref_folder)
        log.write("\t" + os.path.join(out_folder, "fasta_files", out_seq) +
                  " is generated.\n")
        print("Please use the new fasta files to do the mapping again.")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/get_target_fasta.py
get_target_fasta.py
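A minimal usage sketch for the get_target_fasta module above; every path is a placeholder, and the mutation table is assumed to follow ANNOgesic's mutation-table format:

import sys
from annogesiclib.get_target_fasta import TargetFasta

# Hypothetical folders and file names; in the real pipeline these come
# from ANNOgesic's command-line arguments.
runner = TargetFasta("target", "reference")
runner.get_target_fasta("mutation_table.csv", "target",
                        ["reference/NC_000915.1.fa"], "updated",
                        "output", sys.stdout)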
import copy
import numpy as np
from annogesiclib.helper import Helper
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.lib_reader import read_libs, read_wig
from annogesiclib.coverage_detection import replicate_comparison, get_repmatch


def check_start_and_end(start, end, covers):
    if (start - 2) < 0:
        c_start = 0
    else:
        c_start = start - 2
    if (end + 2) > len(covers):
        c_end = len(covers)
    else:
        c_end = end + 2
    return c_start, c_end


def get_coverage(sorf, wigs, strand, coverages, medianlist, cutoffs,
                 min_cutoff):
    high_cover = -1
    low_cover = -1
    sorf_covers = {}
    for wig_strain, conds in wigs.items():
        if wig_strain == sorf["strain"]:
            for cond, tracks in conds.items():
                sorf_covers[cond] = []
                for lib_name, covers in tracks.items():
                    track = lib_name.split("|")[-3]
                    lib_strand = lib_name.split("|")[-2]
                    lib_type = lib_name.split("|")[-1]
                    total_cover = 0
                    first = True
                    c_start, c_end = check_start_and_end(
                        sorf["start"], sorf["end"], covers)
                    covers = covers[c_start: c_end]
                    if strand == "-":
                        covers = covers[::-1]
                    pos = 0
                    for cover in covers:
                        if lib_strand == strand:
                            if strand == "+":
                                cover_pos = c_start + pos
                            else:
                                cover_pos = c_end - pos
                            if (sorf["start"] <= cover_pos) and (
                                    sorf["end"] >= cover_pos):
                                total_cover = total_cover + cover
                                if first:
                                    first = False
                                    high_cover = cover
                                    low_cover = cover
                                else:
                                    if high_cover < cover:
                                        high_cover = cover
                                    if low_cover > cover:
                                        low_cover = cover
                        pos += 1
                    avg = total_cover / float(sorf["end"] - sorf["start"] + 1)
                    if medianlist is not None:
                        cutoff_cover = get_cutoff(sorf, track, coverages,
                                                  medianlist, min_cutoff)
                    else:
                        cutoff_cover = coverages
                    if cutoffs is not None:
                        cutoffs[track] = cutoff_cover
                    if avg > float(cutoff_cover):
                        sorf_covers[cond].append({
                            "track": track, "high": high_cover,
                            "low": low_cover, "avg": avg,
                            "type": lib_type, "pos": sorf["start"]})
    return sorf_covers


def import_sorf(inter, sorfs, start, end, type_, fasta, rbs):
    sorfs.append({"strain": inter.seq_id, "strand": inter.strand,
                  "start": start, "end": end,
                  "starts": [str(start)], "ends": [str(end)],
                  "seq": fasta[start-1:end], "type": type_,
                  "print": False, "rbs": rbs})


def detect_rbs_site(fasta, start, inter, args_sorf):
    '''detect the ribosome binding site'''
    detect = []
    for ribo_seq in args_sorf.rbs_seq:
        pre_miss = len(ribo_seq)
        get = False
        for nts in range(0, start):
            miss = 0
            for index in range(len(ribo_seq)):
                if miss > args_sorf.fuzzy_rbs:
                    break
                else:
                    if fasta[nts:(nts + len(ribo_seq))][index] != \
                            ribo_seq[index]:
                        miss += 1
            if (miss <= args_sorf.fuzzy_rbs) and (
                    len(fasta[nts:(nts + len(ribo_seq))]) >= len(ribo_seq)):
                get = True
                if miss <= pre_miss:
                    # a strictly better match resets the collected positions
                    if miss < pre_miss:
                        detect = []
                    if inter.strand == "+":
                        detect.append(inter.start + nts)
                    else:
                        detect.append(inter.start + (len(fasta) - nts) - 1)
                    pre_miss = miss
        if get:
            break
    if len(detect) == 0:
        detect = ["NA"]
    return detect


def check_terminal_seq(seq, start, end, args_sorf, source, inter,
                       sorfs, rbs):
    '''check the sequences which are located at the two ends'''
    detect = None
    for i in [0, 1, -1, 2, -2]:
        fasta = Helper().extract_gene(seq, start + i, end + i, inter.strand)
        if (fasta[:3] in args_sorf.start_codon) and (
                fasta[-3:] in args_sorf.stop_codon):
            detect = i
    if detect is not None:
        start = start + detect
        end = end + detect
        import_sorf(inter, sorfs, start, end, source, seq, rbs)


def detect_start_stop(inters, seq, args_sorf):
    '''detect start/stop codon pairs whose distance is a multiple of three'''
    sorfs = []
    for inter in inters:
        if inter.start <= 0:
            inter.start = 1
        if inter.end >= len(seq[inter.seq_id]):
            inter.end = len(seq[inter.seq_id])
        fasta = Helper().extract_gene(
            seq[inter.seq_id], inter.start, inter.end, inter.strand)
        starts = []
        stops = []
        for frame in range(0, 3):
            for index in range(frame, len(fasta), 3):
                if fasta[index:index + 3] in args_sorf.start_codon:
                    starts.append(index)
                elif fasta[index:index + 3] in args_sorf.stop_codon:
                    stops.append(index)
        for start in starts:
            get_stop = False
            for stop in stops:
                if ((stop - start) > 0) and (((stop - start) % 3) == 0):
                    if (not args_sorf.multi_stop) and get_stop:
                        break
                    else:
                        get_stop = True
                    if ((stop - start) <= args_sorf.max_len) and (
                            (stop - start) >= args_sorf.min_len):
                        rbs = detect_rbs_site(fasta, start, inter, args_sorf)
                        if (len(rbs) == 1) and (rbs[0] == "NA"):
                            pass
                        else:
                            if (inter.source == "intergenic") or (
                                    inter.source == "antisense"):
                                if inter.strand == "+":
                                    check_terminal_seq(
                                        seq[inter.seq_id],
                                        inter.start + start,
                                        inter.start + stop + 2,
                                        args_sorf, inter.source,
                                        inter, sorfs, rbs)
                                else:
                                    check_terminal_seq(
                                        seq[inter.seq_id],
                                        inter.start + (len(fasta) - stop - 3),
                                        inter.start + (len(fasta) - start - 1),
                                        args_sorf, inter.source,
                                        inter, sorfs, rbs)
                            elif inter.source == "UTR_derived":
                                if inter.strand == "+":
                                    check_terminal_seq(
                                        seq[inter.seq_id],
                                        inter.start + start,
                                        inter.start + stop + 2,
                                        args_sorf,
                                        inter.attributes["UTR_type"],
                                        inter, sorfs, rbs)
                                else:
                                    check_terminal_seq(
                                        seq[inter.seq_id],
                                        inter.start + (len(fasta) - stop - 3),
                                        inter.start + (len(fasta) - start - 1),
                                        args_sorf,
                                        inter.attributes["UTR_type"],
                                        inter, sorfs, rbs)
    return sorfs


def read_data(inter_gff, tss_file, srna_gff, fasta, utr_detect):
    seq = {}
    inters = []
    tsss = []
    srnas = []
    fh = open(inter_gff)
    for entry in Gff3Parser().entries(fh):
        if ((entry.source == "UTR_derived") and utr_detect) or (
                (entry.source == "intergenic") or (
                entry.source == "antisense")):
            inters.append(entry)
    inters = sorted(inters, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
    fh.close()
    if tss_file is not None:
        fh = open(tss_file)
        for entry in Gff3Parser().entries(fh):
            tsss.append(entry)
        tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
        fh.close()
    else:
        tsss = None
    if srna_gff is not None:
        fh = open(srna_gff)
        for entry in Gff3Parser().entries(fh):
            new = {}
            for key, value in entry.attributes.items():
                if "sORF" not in key:
                    new[key] = value
            entry.attributes = copy.deepcopy(new)
            srnas.append(entry)
        srnas = sorted(srnas, key=lambda k: (k.seq_id, k.start,
                                             k.end, k.strand))
        fh.close()
    else:
        srnas = None
    with open(fasta, "r") as s_f:
        for line in s_f:
            line = line.strip()
            if line.startswith(">"):
                strain = line[1:]
                seq[strain] = ""
            else:
                seq[strain] = seq[strain] + line
    return inters, tsss, srnas, seq


def check_tss(sorf, tss, utr_fuzzy, checks):
    if ((sorf["start"] - tss.start <= utr_fuzzy) and (
            sorf["start"] - tss.start >= 0) and (
            sorf["strand"] == "+")) or (
            (tss.start - sorf["end"] <= utr_fuzzy) and (
            tss.start - sorf["end"] >= 0) and (
            sorf["strand"] == "-")):
        sorf["start_TSS"] = str(tss.start) + "_" + tss.strand
        sorf["with_TSS"].append("TSS:" + str(tss.start) + "_" + tss.strand)
        checks["start"] = True
        checks["import"] = True
        rbss = []
        if sorf["rbs"][0] != "NA":
            if sorf["strand"] == "+":
                for rbs in sorf["rbs"]:
                    if rbs >= tss.start:
                        rbss.append(rbs)
            else:
                for rbs in sorf["rbs"]:
                    if rbs <= tss.start:
                        rbss.append(rbs)
        if len(rbss) != 0:
            checks["rbs"] = rbss


def compare_sorf_tss(sorfs, tsss, tss_file, args_sorf):
    sorfs_all = []
    sorfs_best = []
    if tss_file is not None:
        for sorf in sorfs:
            checks = {"start": False, "rbs": False, "import": False}
            sorf["with_TSS"] = []
            for tss in tsss:
                checks["import"] = False
                if (sorf["strain"] == tss.seq_id) and (
                        sorf["strand"] == tss.strand):
                    check_tss(sorf, tss, args_sorf.utr_length, checks)
                    if not checks["import"]:
                        # record TSSs which are located inside the sORF
                        if (tss.start >= sorf["start"]) and (
                                tss.start <= sorf["end"]):
                            sorf["with_TSS"].append(
                                "TSS_" + str(tss.start) + tss.strand)
            if not checks["start"]:
                sorf["start_TSS"] = "NA"
            if len(sorf["with_TSS"]) == 0:
                sorf["with_TSS"] = ["NA"]
            if (checks["rbs"] and (not args_sorf.noafter_tss) and (
                    not args_sorf.no_tss)):
                sorf["rbs"] = checks["rbs"]
                sorfs_best.append(copy.deepcopy(sorf))
            elif ((sorf["rbs"][0] != "NA") and (args_sorf.noafter_tss) and (
                    not args_sorf.no_tss) and (checks["start"])):
                sorfs_best.append(copy.deepcopy(sorf))
            elif ((sorf["rbs"][0] != "NA") and (args_sorf.noafter_tss) and (
                    args_sorf.no_tss)):
                sorfs_best.append(copy.deepcopy(sorf))
            sorfs_all.append(sorf)
    else:
        for sorf in sorfs:
            sorf["with_TSS"] = ["NA"]
            sorf["start_TSS"] = "NA"
            if sorf["rbs"][0] != "NA":
                sorfs_best.append(copy.deepcopy(sorf))
            sorfs_all.append(sorf)
    return sorfs_all, sorfs_best


def compare_sorf_srna(sorfs, srnas, srna_gff):
    if srna_gff is not None:
        for sorf in sorfs:
            sorf["srna"] = []
            for srna in srnas:
                if (sorf["strain"] == srna.seq_id) and (
                        sorf["strand"] == srna.strand):
                    if ((srna.start <= sorf["start"]) and (
                            srna.end >= sorf["end"])) or (
                            (srna.start >= sorf["start"]) and (
                            srna.end <= sorf["end"])) or (
                            (srna.start <= sorf["start"]) and (
                            srna.end >= sorf["start"]) and (
                            srna.end <= sorf["end"])) or (
                            (srna.start >= sorf["start"]) and (
                            srna.start <= sorf["end"]) and (
                            srna.end >= sorf["end"])):
                        sorf["srna"].append(
                            "sRNA:" + str(srna.start) + "-" +
                            str(srna.end) + "_" + srna.strand)
            if len(sorf["srna"]) == 0:
                sorf["srna"] = ["NA"]
    else:
        for sorf in sorfs:
            sorf["srna"] = ["NA"]


def import_overlap(sorf2, final, sorf1, first):
    if final["start"] > sorf2["start"]:
        final["start"] = sorf2["start"]
    if final["end"] < sorf2["end"]:
        final["end"] = sorf2["end"]
    if first:
        final["candidate"] = []
        final["candidate"].append("_".join(["-".join([
            str(sorf1["start"]), str(sorf1["end"])]),
            "TSS:" + sorf1["start_TSS"],
            "RBS:" + str(sorf1["rbs"][0])]))
        first = False
    if "_".join(["-".join([str(sorf2["start"]), str(sorf2["end"])]),
                 "TSS:" + sorf2["start_TSS"],
                 "RBS:" + str(sorf2["rbs"][0])]) not in final["candidate"]:
        final["candidate"].append("_".join(["-".join([
            str(sorf2["start"]), str(sorf2["end"])]),
            "TSS:" + sorf2["start_TSS"],
            "RBS:" + str(sorf2["rbs"][0])]))
    if str(sorf2["start"]) not in final["starts"]:
        final["starts"].append(str(sorf2["start"]))
    if str(sorf2["end"]) not in final["ends"]:
        final["ends"].append(str(sorf2["end"]))
    if sorf2["rbs"] != ["NA"]:
        if (len(final["rbs"]) == 1) and (final["rbs"] == ["NA"]):
            final["rbs"] = sorf2["rbs"]
        else:
            if sorf2["rbs"][0] not in final["rbs"]:
                final["rbs"] = final["rbs"] + sorf2["rbs"]
    if sorf2["srna"] != "NA":
        if final["srna"] == "NA":
            final["srna"] = sorf2["srna"]
        else:
            for over_srna in sorf2["srna"]:
                if over_srna not in final["srna"]:
                    final["srna"].append(over_srna)
    return first, final


def merge(sorfs, seq):
    '''merge the overlapping sORFs'''
    finals = []
    for sorf1 in sorfs:
        final = copy.deepcopy(sorf1)
        first = True
        if not sorf1["print"]:
            sorf1["print"] = True
            for sorf2 in sorfs:
                overlap = False
                if (final["strain"] == sorf2["strain"]) and (
                        final["strand"] == sorf2["strand"]):
                    if (final["start"] >= sorf2["start"]) and (
                            final["end"] <= sorf2["end"]):
                        overlap = True
                    elif (final["start"] >= sorf2["start"]) and (
                            final["start"] <= sorf2["end"]) and (
                            final["end"] >= sorf2["end"]):
                        overlap = True
                    elif (final["start"] <= sorf2["start"]) and (
                            final["end"] >= sorf2["start"]) and (
                            final["end"] <= sorf2["end"]):
                        overlap = True
                    elif (final["start"] <= sorf2["start"]) and (
                            final["end"] >= sorf2["end"]):
                        overlap = True
                    elif sorf2["start"] > final["end"]:
                        break
                if overlap:
                    sorf2["print"] = True
                    first, final = import_overlap(sorf2, final, sorf1, first)
            final["seq"] = Helper().extract_gene(
                seq[final["strain"]], final["start"],
                final["end"], final["strand"])
            new = {}
            for key, value in final.items():
                if "print" not in key:
                    new[key] = value
            final = copy.deepcopy(new)
            finals.append(final)
    return finals


def assign_utr_cutoff(coverages, utr_type, medians, track, min_cutoff):
    if track in medians.keys():
        if coverages[utr_type] == "median":
            cutoff = medians[track]["median"]
        elif coverages[utr_type] == "mean":
            cutoff = medians[track]["mean"]
        else:
            cutoff = float(coverages[utr_type])
    else:
        if (coverages[utr_type] != "median") and (
                coverages[utr_type] != "mean"):
            cutoff = float(coverages[utr_type])
        else:
            cutoff = min_cutoff
    return cutoff


def get_cutoff(sorf, track, coverages, medians, min_cutoff):
    if sorf["type"] == "intergenic":
        cutoff_cover = float(coverages["inter"])
    elif sorf["type"] == "antisense":
        cutoff_cover = float(coverages["anti"])
    elif ("5utr" in sorf["type"]) and ("3utr" in sorf["type"]):
        cutoff_utr3 = assign_utr_cutoff(
            coverages, "3utr", medians[sorf["strain"]]["3utr"],
            track, min_cutoff)
        cutoff_utr5 = assign_utr_cutoff(
            coverages, "5utr", medians[sorf["strain"]]["5utr"],
            track, min_cutoff)
        cutoff_cover = min(cutoff_utr5, cutoff_utr3)
    elif "5utr" in sorf["type"]:
        cutoff_cover = assign_utr_cutoff(
            coverages, "5utr", medians[sorf["strain"]]["5utr"],
            track, min_cutoff)
    elif "3utr" in sorf["type"]:
        cutoff_cover = assign_utr_cutoff(
            coverages, "3utr", medians[sorf["strain"]]["3utr"],
            track, min_cutoff)
    elif "interCDS" in sorf["type"]:
        cutoff_cover = assign_utr_cutoff(
            coverages, "interCDS", medians[sorf["strain"]]["interCDS"],
            track, min_cutoff)
    return cutoff_cover


def get_attribute(num, name, start_tss, sorf, type_):
    if (type_ == "intergenic") or (type_ == "antisense"):
        attribute_string = ";".join(
            ["=".join(items) for items in (
                ["ID", sorf["strain"] + "_sorf" + str(num)],
                ["Name", "sORF_" + name],
                ["start_TSS", start_tss],
                ["with_TSS", ",".join(sorf["with_TSS"])],
                ["sRNA", ",".join(sorf["srna"])],
                ["rbs", ",".join(sorf["rbs"])],
                ["frame_shift", str(sorf["shift"])],
                ["sORF_type", type_])])
    else:
        attribute_string = ";".join(
            ["=".join(items) for items in (
                ["ID", sorf["strain"] + "_sorf" + str(num)],
                ["Name", "sORF_" + name],
                ["start_TSS", start_tss],
                ["with_TSS", ",".join(sorf["with_TSS"])],
                ["sORF_type", sorf["type"]],
                ["sRNA", ",".join(sorf["srna"])],
                ["rbs", ",".join(sorf["rbs"])],
                ["frame_shift", str(sorf["shift"])])])
    return attribute_string


def check_start_and_tss_point(sorf):
    '''search for the associated TSS'''
    tsss = []
    for tss in sorf["with_TSS"]:
        if tss != "NA":
            if (int(tss.replace("TSS:", "")[:-2]) >= int(sorf["start"])) and (
                    int(tss.replace("TSS:", "")[:-2]) <= int(sorf["end"])):
                tsss.append(tss)
        else:
            tsss.append(tss)
    sorf["with_TSS"] = copy.deepcopy(tsss)


def compare_rbs_start(sorf, min_rbs, max_rbs):
    '''search for the associated ribosome binding site'''
    detect = False
    if (len(sorf["rbs"]) == 1) and (sorf["rbs"][0] == "NA"):
        pass
    else:
        new_rbss = []
        for rbs in sorf["rbs"]:
            if rbs != "NA":
                if sorf["strand"] == "+":
                    for start in sorf["starts"]:
                        if ((int(start) - int(rbs)) >= min_rbs + 6) and (
                                (int(start) - int(rbs)) <= max_rbs + 6) and (
                                rbs not in new_rbss):
                            new_rbss.append(rbs)
                            detect = True
                else:
                    for end in sorf["ends"]:
                        if ((int(rbs) - int(end)) >= min_rbs + 6) and (
                                (int(rbs) - int(end)) <= max_rbs + 6) and (
                                rbs not in new_rbss):
                            new_rbss.append(rbs)
                            detect = True
        if not detect:
            sorf["rbs"] = ["NA"]
        else:
            sorf["rbs"] = new_rbss
    return detect


def gen_new_candidates(sorf, min_rbs, max_rbs):
    new_candidates = []
    for start in sorf["starts"]:
        for end in sorf["ends"]:
            if ((int(end) - int(start) + 1) % 3) == 0:
                for rbs in sorf["rbs"]:
                    if (sorf["strand"] == "+") and (
                            (int(start) - rbs) >= (min_rbs + 6)) and (
                            (int(start) - rbs) <= (max_rbs + 6)):
                        if sorf["with_TSS"] == ["NA"]:
                            new_candidates.append("_".join([
                                "-".join([start, end]), "NA",
                                "RBS:" + str(rbs)]))
                        else:
                            for tss in sorf["with_TSS"]:
                                if int(tss.split(":")[-1][:-2]) > int(start):
                                    break
                                pre_tss = tss
                            new_candidates.append("_".join([
                                "-".join([start, end]),
                                pre_tss.replace("TSS_", "TSS:"),
                                "RBS:" + str(rbs)]))
                    elif (sorf["strand"] == "-") and (
                            (rbs - int(end)) >= (min_rbs + 6)) and (
                            (rbs - int(end)) <= (max_rbs + 6)):
                        if sorf["with_TSS"] == ["NA"]:
                            new_candidates.append("_".join([
                                "-".join([start, end]), "NA",
                                "RBS:" + str(rbs)]))
                        else:
                            for tss in sorf["with_TSS"]:
                                if int(tss.split(":")[-1][:-2]) <= int(start):
                                    break
                            new_candidates.append("_".join([
                                "-".join([start, end]),
                                tss.replace("TSS_", "TSS:"),
                                "RBS:" + str(rbs)]))
    return new_candidates


def check_candidates_srnas(sorf, min_rbs, max_rbs):
    '''assign the sRNAs which overlap with the sORF to the
    corresponding candidates'''
    new_candidates = []
    for cand in sorf["candidate"]:
        infos = cand.split("_")
        start = infos[0].split("-")[0]
        end = infos[0].split("-")[1]
        rbs = int(infos[-1].split(":")[-1])
        if (start in sorf["starts"]) and (
                end in sorf["ends"]) and (
                rbs in sorf["rbs"]):
            new_candidates.append(cand)
    if len(new_candidates) == 0:
        new_candidates = gen_new_candidates(sorf, min_rbs, max_rbs)
    new_srnas = []
    if (len(sorf["srna"]) == 1) and (sorf["srna"][0] == "NA"):
        pass
    else:
        for srna in sorf["srna"]:
            if srna != "NA":
                srna_strand = srna.split("_")[-1]
                if srna_strand == "r":
                    strand = "-"
                else:
                    strand = "+"
                srna_end = int(srna.split("_")[-2].split("-")[-1])
                srna_start = int(
                    srna.split("_")[-2].split("-")[0].split(":")[-1])
                if strand == sorf["strand"]:
                    if ((srna_start <= int(sorf["start"])) and (
                            srna_end >= int(sorf["end"]))) or (
                            (srna_start >= int(sorf["start"])) and (
                            srna_end <= int(sorf["end"]))) or (
                            (srna_start <= int(sorf["start"])) and (
                            srna_end >= int(sorf["start"])) and (
                            srna_end <= int(sorf["end"]))) or (
                            (srna_start >= int(sorf["start"])) and (
                            srna_start <= int(sorf["end"])) and (
                            srna_end >= int(sorf["end"]))):
                        new_srnas.append(srna)
    sorf["candidate"] = new_candidates
    if len(new_srnas) != 0:
        sorf["srna"] = new_srnas
    else:
        sorf["srna"] = ["NA"]


def assign_sorf(sorf, starts, ends, fasta):
    sorf["starts"] = starts
    sorf["ends"] = ends
    sorf["start"] = min(map(int, starts))
    sorf["end"] = max(map(int, ends))
    sorf["seq"] = Helper().extract_gene(
        fasta[sorf["strain"]], sorf["start"], sorf["end"], sorf["strand"])


def check_start_end(sorf, args_sorf, fasta, run):
    '''check whether the start and end points can form a proper protein
    (distance is a multiple of three)'''
    if (len(sorf["rbs"]) == 1) and (sorf["rbs"][0] == "NA"):
        pass
    else:
        if sorf["strand"] == "+":
            starts = []
            for start in sorf["starts"]:
                if int(start) < min(sorf["rbs"]):
                    continue
                else:
                    for rbs in sorf["rbs"]:
                        if ((int(start) - int(rbs)) >=
                                args_sorf.min_rbs + 6) and (
                                (int(start) - int(rbs)) <=
                                args_sorf.max_rbs + 6) and (
                                start not in starts):
                            starts.append(start)
            ends = []
            for end in sorf["ends"]:
                for start in starts:
                    if ((int(end) - int(start) + 1) % 3 == 0) and (
                            (int(end) - int(start) + 1) >=
                            args_sorf.min_len) and (
                            (int(end) - int(start) + 1) <=
                            args_sorf.max_len) and (
                            end not in ends):
                        ends.append(end)
        else:
            ends = []
            for end in sorf["ends"]:
                if int(end) > max(sorf["rbs"]):
                    continue
                else:
                    for rbs in sorf["rbs"]:
                        if ((int(rbs) - int(end)) >=
                                args_sorf.min_rbs + 6) and (
                                (int(rbs) - int(end)) <=
                                args_sorf.max_rbs + 6) and (
                                end not in ends):
                            ends.append(end)
            starts = []
            for start in sorf["starts"]:
                for end in ends:
                    if ((int(end) - int(start) + 1) % 3 == 0) and (
                            (int(end) - int(start) + 1) >=
                            args_sorf.min_len) and (
                            (int(end) - int(start) + 1) <=
                            args_sorf.max_len) and (
                            start not in starts):
                        starts.append(start)
        if (len(starts) != 0) and (len(ends) != 0):
            assign_sorf(sorf, starts, ends, fasta)
        if run == "final":
            check_candidates_srnas(sorf, args_sorf.min_rbs,
                                   args_sorf.max_rbs)


def detect_frame_shift(sorf):
    '''check the frame shift'''
    stand = sorf["starts"][0]
    shift = {"0": False, "1": False, "2": False}
    sorf["shift"] = 0
    for start in sorf["starts"]:
        if ((int(start) - int(stand)) % 3) == 0:
            shift["0"] = True
        elif ((int(start) - int(stand)) % 3) == 1:
            shift["1"] = True
        elif ((int(start) - int(stand)) % 3) == 2:
            shift["2"] = True
    for key, value in shift.items():
        if value:
            sorf["shift"] += 1


def print_file(sorf, sorf_datas, num, out_g, out_t, file_type, args_sorf):
    name = '%0*d' % (5, num)
    if (sorf["type"] == "intergenic") or (sorf["type"] == "antisense"):
        if sorf["type"] == "intergenic":
            type_ = "Intergenic"
        else:
            type_ = "Antisense"
        for index in range(len(sorf["rbs"])):
            if (sorf["rbs"][index] == "NA") and (len(sorf["rbs"]) == 1):
                pass
            else:
                sorf["rbs"][index] = "RBS_" + str(sorf["rbs"][index])
        attribute_string = get_attribute(num, name, sorf["start_TSS"],
                                         sorf, type_.lower())
    else:
        if ("3utr" in sorf["type"]) and ("5utr" in sorf["type"]):
            type_ = "3'UTR_derived;5'UTR_derived"
        elif "3utr" in sorf["type"]:
            type_ = "3'UTR_derived"
        elif "5utr" in sorf["type"]:
            type_ = "5'UTR_derived"
        elif "interCDS" in sorf["type"]:
            type_ = "interCDS"
        for index in range(len(sorf["rbs"])):
            if (sorf["rbs"][index] == "NA") and (len(sorf["rbs"]) == 1):
                pass
            else:
                sorf["rbs"][index] = "RBS_" + str(sorf["rbs"][index])
        attribute_string = get_attribute(num, name, sorf["start_TSS"],
                                         sorf, "utr")
    info = "\t".join([str(field) for field in [
        sorf["strain"], "ANNOgesic", "sORF", str(sorf["start"]),
        str(sorf["end"]), ".", sorf["strand"], ".", attribute_string]])
    out_g.write(info + "\n")
    if ("frag" in ";".join(sorf_datas["conds"].keys())) and (
            "tex" in ";".join(sorf_datas["conds"].keys())):
        lib_type = "TEX+/-;Fragmented"
    elif "frag" in ";".join(sorf_datas["conds"].keys()):
        lib_type = "Fragmented"
    elif "tex" in ";".join(sorf_datas["conds"].keys()):
        lib_type = "TEX+/-"
    print_table(out_t, sorf, name, type_, lib_type, sorf_datas, args_sorf)


def print_table(out_t, sorf, name, type_, lib_type, sorf_datas, args_sorf):
    out_t.write("\t".join([sorf["strain"], "sORF_" + name,
                           str(sorf["start"]), str(sorf["end"]),
                           sorf["strand"], type_,
                           ";".join(sorf["with_TSS"]),
                           ";".join(sorf["rbs"]),
                           ";".join(sorf["starts"]),
                           ";".join(sorf["ends"]),
                           ";".join(sorf["srna"]),
                           str(sorf["shift"]), lib_type,
                           str(sorf_datas["best"])]) + "\t")
    first = True
    for data in sorf_datas["detail"]:
        if first:
            out_t.write("{0}({1})".format(data["track"], data["avg"]))
            first = False
        else:
            out_t.write(";{0}({1})".format(data["track"], data["avg"]))
    out_t.write("\t" + sorf["seq"])
    if args_sorf.print_all:
        out_t.write("\t" + ";".join(sorf["candidate"]))
    out_t.write("\n")


def get_inter_coverage(inters, inter_covers):
    for datas in inters:
        for cond, covers in datas.items():
            for inter in covers:
                if inter["track"] not in inter_covers.keys():
                    inter_covers[inter["track"]] = []
                inter_covers[inter["track"]].append(inter["avg"])


def detect_utr_type(inter, utr_type, med_inters, wigs, strand, background):
    '''detect the type of the UTR-derived sORF'''
    if inter.attributes["UTR_type"] == utr_type:
        inter_datas = {}
        inter_datas["strain"] = inter.seq_id
        inter_datas["strand"] = inter.strand
        inter_datas["start"] = inter.start
        inter_datas["end"] = inter.end
        inter_datas = get_coverage(inter_datas, wigs, strand,
                                   background, None, None, background)
        med_inters[inter.seq_id][utr_type].append(inter_datas)


def median_score(lst, cutoff):
    '''if the cutoff is assigned as a percentage ("p_..."), return the
    corresponding percentile value of the list'''
    if type(cutoff) is str:
        if "p_" in cutoff:
            per = float(cutoff.split("_")[-1])
            sortedLst = sorted(lst)
            lstLen = len(lst)
            index = int((lstLen - 1) * per)
            if lstLen != 0:
                return sortedLst[index]
            else:
                return 0
    else:
        return cutoff


def mean_score(lst):
    total = 0
    for li in lst:
        total = total + li
    if len(lst) != 0:
        return (total / len(lst))
    else:
        return 0


def validate_tss(starts, ends, sorf, utr_fuzzy):
    '''compare the sORF with TSSs'''
    tsss = []
    start_pos = "NA"
    if sorf["with_TSS"][0] != "NA":
        for tss in sorf["with_TSS"]:
            tss_start = int(tss.replace("TSS:", "")[:-2])
            if sorf["strand"] == "+":
                if (tss_start >= min(starts) - utr_fuzzy) and (
                        tss_start <= max(ends)):
                    tsss.append(tss)
                    if (tss_start >= min(starts) - utr_fuzzy) and (
                            tss_start <= min(starts)):
                        start_pos = tss
            else:
                if (tss_start >= min(starts)) and (
                        tss_start <= max(ends) + utr_fuzzy):
                    tsss.append(tss)
                    if (tss_start <= min(ends) + utr_fuzzy) and (
                            tss_start >= min(ends)):
                        start_pos = tss
                        break
    else:
        tsss = ["NA"]
    return (tsss, start_pos)


def validate_srna(starts, ends, sorf):
    '''compare the sORF with sRNAs'''
    srnas = []
    for srna in sorf["srna"]:
        if srna == "NA":
            break
        else:
            datas = srna.split(":")[1][:-2].split("-")
            start = int(datas[0])
            end = int(datas[1])
            for index in range(0, len(starts)):
                if ((start <= starts[index]) and (
                        end >= ends[index])) or (
                        (start >= starts[index]) and (
                        end <= ends[index])) or (
                        (start >= starts[index]) and (
                        start <= ends[index]) and (
                        end >= ends[index])) or (
                        (start <= starts[index]) and (
                        end >= starts[index]) and (
                        end <= ends[index])):
                    srnas.append(srna)
                    break
    if len(srnas) == 0:
        srnas = ["NA"]
    return srnas


def get_best(sorfs, tss_file, srna_file, args_sorf):
    '''apply the filters to get the best results'''
    final_sorfs = []
    for sorf in sorfs:
        if tss_file is not None:
            if (sorf["with_TSS"][0] != "NA") or (args_sorf.no_tss):
                cands = []
                starts = []
                ends = []
                tmp_sorf = copy.deepcopy(sorf)
                for candidate in sorf["candidate"]:
                    tss = candidate.split("_TSS:")[1].split("_RBS:")[0]
                    rbs = candidate.split("_TSS:")[1].split("_RBS:")[-1]
                    if (tss != "NA") or (args_sorf.no_tss):
                        datas = candidate.split("_TSS:")[0].split("-")
                        cands.append(
                            "-".join([str(datas[0]), str(datas[1])]) +
                            "_TSS:" + tss + "_RBS:" + rbs)
                        starts.append(int(datas[0]))
                        ends.append(int(datas[1]))
                tmp_sorf["start"] = min(starts)
                tmp_sorf["end"] = max(ends)
                tmp_sorf["starts"] = sorf["starts"]
                tmp_sorf["ends"] = sorf["ends"]
                tmp_sorf["candidate"] = cands
                tsss_datas = validate_tss(starts, ends, sorf,
                                          args_sorf.utr_length)
                tmp_sorf["with_TSS"] = tsss_datas[0]
                tmp_sorf["start_TSS"] = tsss_datas[1]
                if srna_file is not None:
                    tmp_sorf["sRNA"] = validate_srna(starts, ends, sorf)
                    if (args_sorf.no_srna) and (
                            tmp_sorf["sRNA"][0] == "NA"):
                        final_sorfs.append(tmp_sorf)
                    elif not args_sorf.no_srna:
                        final_sorfs.append(tmp_sorf)
        elif srna_file is not None:
            tmp_sorf = copy.deepcopy(sorf)
            if (args_sorf.no_srna) and (tmp_sorf["srna"][0] == "NA"):
                final_sorfs.append(sorf)
            elif not args_sorf.no_srna:
                final_sorfs.append(tmp_sorf)
    if len(final_sorfs) == 0:
        final_sorfs = sorfs
    return final_sorfs


def coverage_and_output(sorfs, mediandict, wigs, out_g, out_t, file_type,
                        fasta, coverages, args_sorf, texs, run):
    '''get the coverage of sORFs and print them out'''
    if run == "final":
        out_g.write("##gff-version 3\n")
        if args_sorf.print_all:
            out_t.write("\t".join([
                "Genome", "Name", "Start", "End", "Strand", "Type", "TSS",
                "Ribosome_binding_site", "All_start_points",
                "All_stop_points", "Conflict_sRNA", "Frame_shift",
                "Lib_type", "Best_avg_coverage", "Track_detail", "Seq",
                "Combinations"]) + "\n")
        else:
            out_t.write("\t".join([
                "Genome", "Name", "Start", "End", "Strand", "Type", "TSS",
                "Ribosome_binding_site", "All_start_points",
                "All_stop_points", "Conflict_sRNA", "Frame_shift",
                "Lib_type", "Best_avg_coverage", "Track_detail",
                "Seq"]) + "\n")
    num = 0
    final_sorfs = []
    for sorf in sorfs:
        if ((compare_rbs_start(sorf, args_sorf.min_rbs,
                               args_sorf.max_rbs)) and (
                file_type == "best")) or (file_type == "all"):
            if file_type == "best":
                check_start_end(sorf, args_sorf, fasta, run)
            detect_frame_shift(sorf)
            cutoffs = {}
            if sorf["strand"] == "+":
                sorf_covers = get_coverage(
                    sorf, wigs["forward"], "+", coverages,
                    mediandict, cutoffs, args_sorf.background)
            else:
                sorf_covers = get_coverage(
                    sorf, wigs["reverse"], "-", coverages,
                    mediandict, cutoffs, args_sorf.background)
            if len(sorf_covers) != 0:
                sorf_info = replicate_comparison(
                    args_sorf, sorf_covers, sorf["strand"], "sORF", None,
                    cutoffs, None, cutoffs, None, texs)
                if len(sorf_info["conds"].keys()) != 0:
                    if run != "final":
                        final_sorfs.append(sorf)
                    else:
                        print_file(sorf, sorf_info, num, out_g, out_t,
                                   file_type, args_sorf)
                        num += 1
    if run != "final":
        return final_sorfs


def detect_inter_type(inters, wigs, background):
    '''detect the types of intergenic sORFs'''
    med_inters = {}
    strain = ""
    for inter in inters:
        if inter.seq_id != strain:
            strain = inter.seq_id
            med_inters[inter.seq_id] = {"5utr": [], "3utr": [],
                                        "interCDS": []}
        if (inter.source == "UTR_derived") and (inter.strand == "+"):
            detect_utr_type(inter, "5utr", med_inters,
                            wigs["forward"], "+", background)
            detect_utr_type(inter, "3utr", med_inters,
                            wigs["forward"], "+", background)
            detect_utr_type(inter, "interCDS", med_inters,
                            wigs["forward"], "+", background)
        elif (inter.source == "UTR_derived") and (inter.strand == "-"):
            detect_utr_type(inter, "5utr", med_inters,
                            wigs["reverse"], "-", background)
            detect_utr_type(inter, "3utr", med_inters,
                            wigs["reverse"], "-", background)
            detect_utr_type(inter, "interCDS", med_inters,
                            wigs["reverse"], "-", background)
    return med_inters


def set_median(covers, mediandict, coverages):
    for strain, utrs in covers.items():
        mediandict[strain] = {"3utr": {}, "5utr": {}, "interCDS": {}}
        for utr, tracks in utrs.items():
            for track, avgs in tracks.items():
                if track not in mediandict[strain][utr].keys():
                    mediandict[strain][utr][track] = {}
                mediandict[strain][utr][track] = {"median": median_score(
                    avgs, coverages[utr])}
    for utr, value in coverages.items():
        if type(value) is str:
            if "p_" in value:
                coverages[utr] = "median"


def compute_candidate_best(sorfs_best):
    for sorf in sorfs_best:
        sorf["candidate"] = []
        sorf["candidate"].append("_".join(["-".join([
            str(sorf["start"]), str(sorf["end"])]),
            "TSS:" + sorf["start_TSS"],
            "RBS:" + str(sorf["rbs"][0])]))


def set_coverage(args_sorf):
    '''set the cutoffs based on the sORF types'''
    if "n_" in args_sorf.cutoff_3utr:
        args_sorf.cutoff_3utr = float(
            args_sorf.cutoff_3utr.split("_")[-1])
    if "n_" in args_sorf.cutoff_5utr:
        args_sorf.cutoff_5utr = float(
            args_sorf.cutoff_5utr.split("_")[-1])
    if "n_" in args_sorf.cutoff_intercds:
        args_sorf.cutoff_intercds = float(
            args_sorf.cutoff_intercds.split("_")[-1])
    coverages = {"3utr": args_sorf.cutoff_3utr,
                 "5utr": args_sorf.cutoff_5utr,
                 "inter": args_sorf.cutoff_inter,
                 "interCDS": args_sorf.cutoff_intercds,
                 "anti": args_sorf.cutoff_anti}
    return coverages


def sorf_detection(fasta, srna_gff, inter_gff, tss_file, wig_f_file,
                   wig_r_file, out_prefix, args_sorf):
    coverages = set_coverage(args_sorf)
    libs, texs = read_libs(args_sorf.libs, args_sorf.merge_wigs)
    inters, tsss, srnas, seq = read_data(inter_gff, tss_file, srna_gff,
                                         fasta, args_sorf.utr_detect)
    wigs = {"forward": read_wig(wig_f_file, "+", libs),
            "reverse": read_wig(wig_r_file, "-", libs)}
    med_inters = detect_inter_type(inters, wigs, args_sorf.background)
    inter_covers = {}
    mediandict = {}
    for strain, meds in med_inters.items():
        inter_covers[strain] = {"5utr": {}, "3utr": {}, "interCDS": {}}
        for type_, covers in meds.items():
            get_inter_coverage(covers, inter_covers[strain][type_])
    set_median(inter_covers, mediandict, coverages)
    out_ag = open("_".join([out_prefix, "all.gff"]), "w")
    out_at = open("_".join([out_prefix, "all.csv"]), "w")
    out_bg = open("_".join([out_prefix, "best.gff"]), "w")
    out_bt = open("_".join([out_prefix, "best.csv"]), "w")
    sorfs = detect_start_stop(inters, seq, args_sorf)
    sorfs_all, sorfs_best = compare_sorf_tss(sorfs, tsss, tss_file,
                                             args_sorf)
    compare_sorf_srna(sorfs_all, srnas, srna_gff)
    compare_sorf_srna(sorfs_best, srnas, srna_gff)
    sorfs_all = sorted(sorfs_all, key=lambda k: (k["strain"], k["start"],
                                                 k["end"], k["strand"]))
    sorfs_best = sorted(sorfs_best, key=lambda k: (k["strain"], k["start"],
                                                   k["end"], k["strand"]))
    final_all = coverage_and_output(
        sorfs_all, mediandict, wigs, out_ag, out_at, "all",
        seq, coverages, args_sorf, texs, "first")
    final_best = coverage_and_output(
        sorfs_best, mediandict, wigs, out_bg, out_bt, "best",
        seq, coverages, args_sorf, texs, "first")
    final_all = merge(final_all, seq)
    final_best = merge(final_best, seq)
    final_best = get_best(final_best, tss_file, srna_gff, args_sorf)
    coverage_and_output(final_all, mediandict, wigs, out_ag, out_at,
                        "all", seq, coverages, args_sorf, texs, "final")
    coverage_and_output(final_best, mediandict, wigs, out_bg, out_bt,
                        "best", seq, coverages, args_sorf, texs, "final")
    out_ag.close()
    out_at.close()
    out_bg.close()
    out_bt.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sORF_detection.py
sORF_detection.py
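A small self-contained illustration of the "p_" cutoff convention used by the sORF_detection module above (the full sorf_detection entry point takes a large argument object supplied by ANNOgesic's pipeline, so only two pure helpers are demonstrated):

from annogesiclib.sORF_detection import median_score, mean_score

# A cutoff given as "p_0.5" means "take the value at the 50th percentile
# of the observed coverages"; a plain number is passed through unchanged.
coverages = [2.0, 4.0, 10.0, 40.0, 100.0]
print(median_score(coverages, "p_0.5"))  # -> 10.0
print(median_score(coverages, 5.0))      # -> 5.0
print(mean_score(coverages))             # -> 31.2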
import os
import sys
import csv
import shutil
from annogesiclib.seq_editer import SeqEditer
from annogesiclib.helper import Helper


class Multiparser(object):

    def __init__(self):
        self.seq_editer = SeqEditer()
        self.helper = Helper()
        self.tmp_fa = "tmp.fa"
        self.tmp_gff = "tmp.gff"
        self.tmp_wig_forward = "tmp_forward.wig"
        self.tmp_wig_reverse = "tmp_reverse.wig"

    def combine_fasta(self, ref_folder, tar_folder, ref_feature):
        '''Combine multiple fasta files.'''
        tar_merge = os.path.join(tar_folder, "merge_tmp")
        change = False
        if ref_feature is None:
            ref_feature = ""
        else:
            ref_feature = "_" + ref_feature
        self.helper.check_make_folder(tar_merge)
        for folder in os.listdir(ref_folder):
            files = []
            if "_folder" in folder:
                datas = folder.split("_folder")
                if ref_feature == "":
                    prefix = datas[0][:-4]
                elif ref_feature == "_fasta":
                    if datas[0].endswith(".fa"):
                        prefix = datas[0][:-3]
                    elif datas[0].endswith(".fna"):
                        prefix = datas[0][:-4]
                    elif datas[0].endswith(".fasta"):
                        prefix = datas[0][:-6]
                else:
                    datas = datas[0][:-4]
                    datas = datas.split(ref_feature)
                    prefix = datas[0]
                print("Merging fasta files of " + prefix)
                for file_ in os.listdir("/".join([ref_folder, folder])):
                    if ref_feature == "":
                        files.append(file_[:-4])
                    elif ref_feature == "_fasta":
                        files.append(file_[:-3])
                    else:
                        filename = file_.split(ref_feature)
                        files.append(filename[0])
                for tar in os.listdir(tar_folder):
                    if tar.endswith(".fa") or \
                       tar.endswith(".fna") or \
                       tar.endswith(".fasta"):
                        filename = ".".join((tar.split("."))[:-1])
                        for file_ in files:
                            if filename == file_:
                                self.helper.merge_file(
                                    os.path.join(tar_folder, tar),
                                    os.path.join(tar_folder, self.tmp_fa))
                                change = True
                if change:
                    change = False
                    shutil.move(os.path.join(tar_folder, self.tmp_fa),
                                os.path.join(tar_merge, prefix + ".fa"))
        self.helper.remove_all_content(tar_folder, ".fa", "file")
        self.helper.move_all_content(tar_merge, tar_folder, None)
        shutil.rmtree(tar_merge)

    def get_prefix(self, folder, ref_feature):
        datas = folder.split("_folder")
        if ref_feature == "":
            prefix = datas[0][:-4]
        elif ref_feature == "_fasta":
            if datas[0].endswith(".fa"):
                prefix = datas[0][:-3]
            elif datas[0].endswith(".fna"):
                prefix = datas[0][:-4]
            elif datas[0].endswith(".fasta"):
                prefix = datas[0][:-6]
        else:
            datas = datas[0][:-4]
            datas = datas.split(ref_feature)
            prefix = datas[0]
        return prefix

    def combine_wig(self, ref_folder, tar_folder, ref_feature, libs):
        '''Combine multiple wig files.'''
        tar_merge = os.path.join(tar_folder, "merge_tmp")
        change_f = False
        change_r = False
        if ref_feature is None:
            ref_feature = ""
        else:
            ref_feature = "_" + ref_feature
        self.helper.check_make_folder(tar_merge)
        for folder in os.listdir(ref_folder):
            files = []
            if "_folder" in folder:
                prefix = self.get_prefix(folder, ref_feature)
                print("Merging wig files of " + prefix)
                for file_ in os.listdir(os.path.join(ref_folder, folder)):
                    if ref_feature == "":
                        files.append(file_[:-4])
                    elif ref_feature == "_fasta":
                        files.append(file_[:-3])
                    else:
                        filename = file_.split(ref_feature)
                        files.append(filename[0])
                for tar in os.listdir(tar_folder):
                    filename = tar.split("_STRAIN_")
                    for file_ in files:
                        if (tar.endswith(".wig")) and (
                                file_ == filename[-1][:-4]):
                            for lib in libs:
                                if (filename[0] in lib) and (lib[-1] == "+"):
                                    self.helper.merge_file(
                                        os.path.join(tar_folder, tar),
                                        os.path.join(tar_folder,
                                                     self.tmp_wig_forward))
                                    change_f = True
                                elif (filename[0] in lib) and (lib[-1] == "-"):
                                    self.helper.merge_file(
                                        os.path.join(tar_folder, tar),
                                        os.path.join(tar_folder,
                                                     self.tmp_wig_reverse))
                                    change_r = True
                if change_f and change_r:
                    change_f = False
                    change_r = False
                    shutil.move(os.path.join(tar_folder,
                                             self.tmp_wig_forward),
                                os.path.join(tar_merge,
                                             prefix + "_forward.wig"))
                    shutil.move(os.path.join(tar_folder,
                                             self.tmp_wig_reverse),
                                os.path.join(tar_merge,
                                             prefix + "_reverse.wig"))
                else:
                    print("Error: comparing input files of {0} failed. "
                          "Please check the seq IDs of all gff, fasta and "
                          "wig files; they should be the same.\nPlease "
                          "also check the wiggle files, which should "
                          "contain forward and reverse files.".format(
                              prefix))
                    sys.exit()
        self.helper.remove_all_content(tar_folder, ".wig", "file")
        self.helper.move_all_content(tar_merge, tar_folder, None)
        shutil.rmtree(tar_merge)

    def combine_gff(self, ref_folder, tar_folder, ref_feature, tar_feature):
        '''Combine multiple gff files.'''
        tar_merge = os.path.join(tar_folder, "merge_tmp")
        change = False
        if tar_feature is None:
            tar_feature = ""
        else:
            tar_feature = "_" + tar_feature
        if ref_feature is None:
            ref_feature = ""
        else:
            ref_feature = "_" + ref_feature
        self.helper.check_make_folder(tar_merge)
        for folder in os.listdir(ref_folder):
            files = []
            if "_folder" in folder:
                datas = folder.split("_folder")
                if ref_feature == "":
                    prefix = datas[0][:-4]
                elif ref_feature == "_fasta":
                    if datas[0].endswith(".fa"):
                        prefix = datas[0][:-3]
                    elif datas[0].endswith(".fna"):
                        prefix = datas[0][:-4]
                    elif datas[0].endswith(".fasta"):
                        prefix = datas[0][:-6]
                else:
                    datas = datas[0][:-4]
                    datas = datas.split(ref_feature)
                    prefix = datas[0]
                print("Merging gff files of " + prefix + tar_feature)
                for file_ in os.listdir(os.path.join(ref_folder, folder)):
                    if ref_feature == "":
                        files.append(file_[:-4])
                    elif ref_feature == "_fasta":
                        files.append(file_[:-3])
                    else:
                        filename = file_.split(ref_feature)
                        files.append(filename[0])
                for tar in os.listdir(tar_folder):
                    for file_ in files:
                        if (".gff" in tar) and (
                                file_ + tar_feature == tar[:-4]):
                            self.helper.merge_file(
                                os.path.join(tar_folder, tar),
                                os.path.join(tar_folder, self.tmp_gff))
                            change = True
                if change:
                    change = False
                    shutil.move(os.path.join(tar_folder, self.tmp_gff),
                                os.path.join(tar_folder, "merge_tmp",
                                             prefix + tar_feature + ".gff"))
        self.helper.remove_all_content(tar_folder, ".gff", "file")
        self.helper.move_all_content(tar_merge, tar_folder, None)
        shutil.rmtree(tar_merge)

    def parser_fasta(self, fastas):
        '''Parse the fasta files based on strain (sequence ID).'''
        par_tmp = os.path.join(fastas, "tmp")
        first = True
        out = None
        out_t = None
        detect = False
        for fasta in os.listdir(fastas):
            if (fasta.endswith(".fasta") or
                    fasta.endswith(".fa") or
                    fasta.endswith(".fna")):
                detect = True
                self.seq_editer.modify_header(os.path.join(fastas, fasta))
        self.helper.check_make_folder(par_tmp)
        if not detect:
            print("Error: There are folders which contain no fasta files! "
                  "The files should end with .fa, .fna or .fasta!")
            sys.exit()
        for fasta in os.listdir(fastas):
            if ("_folder" not in fasta) and ("tmp" != fasta):
                if (fasta.endswith(".fa")) or \
                   (fasta.endswith(".fna")) or \
                   (fasta.endswith(".fasta")):
                    out_path = os.path.join(fastas, fasta + "_folder")
                    print("Parsing " + fasta)
                    self.helper.check_make_folder(out_path)
                    with open(os.path.join(fastas, fasta), "r") as f_f:
                        for line in f_f:
                            if line[0] == ">":
                                line = line.strip()
                                if ("|" in line) and (
                                        len(line.split("|")) > 4):
                                    strain = line.split("|")
                                    name = strain[3]
                                else:
                                    name = line[1:]
                                if first:
                                    first = False
                                else:
                                    out.close()
                                    out_t.close()
                                out = open(os.path.join(
                                    out_path, name + ".fa"), "w")
                                out_t = open(os.path.join(
                                    par_tmp, name + ".fa"), "w")
                                out.write(">" + name + "\n")
                                out_t.write(">" + name + "\n")
                            else:
                                out.write(line)
                                out_t.write(line)
        if out is not None:
            out.close()
        if out_t is not None:
            out_t.close()

    def parser_gff(self, gff_folder, feature):
        '''Parse gff files based on strain (sequence ID).'''
        par_tmp = os.path.join(gff_folder, "tmp")
        out = None
        out_t = None
        first = True
        detect = False
        if feature is None:
            feature = ""
        else:
            feature = "_" + feature
        self.helper.check_make_folder(par_tmp)
        for filename in os.listdir(gff_folder):
            pre_seq_id = ""
            if ("_folder" not in filename) and ("tmp" != filename):
                out_path = os.path.join(gff_folder, filename + "_folder")
                if ".gff" in filename:
                    detect = True
                    print("Parsing " + filename)
                    self.helper.check_make_folder(out_path)
                    self.helper.sort_gff(os.path.join(gff_folder, filename),
                                         os.path.join(gff_folder, "tmp.gff"))
                    f_h = open(os.path.join(gff_folder, "tmp.gff"), "r")
                    lens = {}
                    for row in csv.reader(f_h, delimiter="\t"):
                        if not (row[0].startswith("#")):
                            if (row[2] == "source") or (
                                    row[2] == "region") or (
                                    row[2] == "remark"):
                                lens[row[0]] = int(row[4])
                    f_h.close()
                    f_h = open(os.path.join(gff_folder, "tmp.gff"), "r")
                    for row in csv.reader(f_h, delimiter="\t"):
                        if row[0].startswith("#"):
                            continue
                        else:
                            if ("|" in row[0]) and (
                                    len(row[0].split("|")) > 4):
                                strain = row[0].split("|")
                                name = strain[3]
                            else:
                                name = row[0]
                            if pre_seq_id == name:
                                if name in lens.keys():
                                    if lens[name] < int(row[4]):
                                        row[4] = str(lens[name])
                                out.write("\t".join([name] +
                                                    row[1:]) + "\n")
                                out_t.write("\t".join([name] +
                                                      row[1:]) + "\n")
                            else:
                                if first:
                                    first = False
                                else:
                                    out.close()
                                    out_t.close()
                                out = open(os.path.join(
                                    out_path, name + feature + ".gff"), "w")
                                out_t = open(os.path.join(
                                    par_tmp, name + feature + ".gff"), "w")
                                pre_seq_id = name
                                if name in lens.keys():
                                    if lens[name] < int(row[4]):
                                        row[4] = str(lens[name])
                                out.write("\t".join([name] +
                                                    row[1:]) + "\n")
                                out_t.write("\t".join([name] +
                                                      row[1:]) + "\n")
                    f_h.close()
        if not detect:
            print("Error: There are folders which contain no gff3 files! "
                  "The files should end with .gff!")
            sys.exit()
        if os.path.exists(os.path.join(gff_folder, "tmp.gff")):
            os.remove(os.path.join(gff_folder, "tmp.gff"))
        if out is not None:
            out.close()
        if out_t is not None:
            out_t.close()

    def parser_wig(self, wig_folder):
        '''Parse the wig files based on strain (sequence ID).'''
        par_tmp = os.path.join(wig_folder, "tmp")
        first = True
        out = None
        out_t = None
        detect = False
        self.helper.check_make_folder(par_tmp)
        for filename in os.listdir(wig_folder):
            track_info = ""
            if ("_folder" not in filename) and ("tmp" != filename):
                out_path = os.path.join(wig_folder, filename + "_folder")
                if ".wig" in filename:
                    detect = True
                    print("Parsing {0}".format(filename))
                    self.helper.check_make_folder(out_path)
                    with open(os.path.join(wig_folder, filename),
                              "r") as w_f:
                        for line in w_f:
                            if (not line.startswith("#")) and (
                                    len(line) != 0):
                                line = line.split(" ")
                                if (line[0] == "track"):
                                    track_info = " ".join(line)
                                if (line[0] == "variableStep") or (
                                        line[0] == "fixedStep"):
                                    chrom = line[1].split("=")
                                    if ("|" in chrom[1]) and (
                                            len(chrom[1].split("|")) > 4):
                                        strain = chrom[1].split("|")
                                        name = strain[3].strip()
                                        weird = True
                                    else:
                                        name = chrom[1].strip()
                                        weird = False
                                    if first:
                                        first = False
                                    else:
                                        out.close()
                                        out_t.close()
                                    out = open("".join([
                                        os.path.join(out_path,
                                                     filename[:-4]),
                                        "_STRAIN_", name, ".wig"]), "w")
                                    out_t = open("".join([
                                        os.path.join(wig_folder, "tmp",
                                                     filename[:-4]),
                                        "_STRAIN_", name, ".wig"]), "w")
                                    if track_info != "":
                                        out.write(track_info)
                                        out_t.write(track_info)
                                    if weird:
                                        f_line = "".join([
                                            line[0], " chrom=" + name,
                                            line[-1]])
                                    else:
                                        f_line = " ".join(line)
                                    out.write(f_line)
                                    out_t.write(f_line)
                                if (line[0] != "track") and (
                                        line[0] != "variableStep"):
                                    out.write(" ".join(line))
                                    out_t.write(" ".join(line))
        if not detect:
            print("Error: There are folders which contain no wig files! "
                  "The files should end with .wig!")
            sys.exit()
        if out is not None:
            out.close()
        if out_t is not None:
            out_t.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/multiparser.py
multiparser.py
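
For orientation, here is a minimal usage sketch of Multiparser; it is not part of the package, and the input paths below are hypothetical. It mirrors how the pipeline modules use this class: split multi-sequence inputs into per-sequence files, then regroup a feature folder so that each genome gets one merged gff.

# Hypothetical usage sketch of Multiparser (illustrative paths only).
# parser_* split multi-sequence files into "<file>_folder" directories
# plus a shared "tmp" directory; combine_* regroup the parsed files so
# their layout matches a reference folder's sequence IDs.
from annogesiclib.multiparser import Multiparser

multiparser = Multiparser()
multiparser.parser_fasta("input/fastas")       # per-sequence fasta files
multiparser.parser_gff("input/gffs", None)     # per-sequence annotations
multiparser.parser_gff("input/tsss", "TSS")    # per-sequence TSS files
# Merge the parsed TSS files back so each genome in input/gffs gets one
# "<prefix>_TSS.gff"; ref_feature=None means the reference folders were
# built from plain ".gff" files.
multiparser.combine_gff("input/gffs", "input/tsss/tmp", None, "TSS")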
from annogesiclib.splice_parser import SpliceParser
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper


def get_feature(cds):
    '''Get a proper feature name.'''
    if "locus_tag" in cds.attributes.keys():
        feature = cds.attributes["locus_tag"]
    elif "protein_id" in cds.attributes.keys():
        feature = cds.attributes["protein_id"]
    elif "ID" in cds.attributes.keys():
        strand = Helper().get_strand_name(cds.strand)
        feature = "".join([cds.attributes["ID"], ":",
                           str(cds.start), "-", str(cds.end),
                           "_", strand])
    else:
        strand = Helper().get_strand_name(cds.strand)
        feature = "".join([cds.feature, ":",
                           str(cds.start), "-", str(cds.end),
                           "_", strand])
    return feature


def detect_conflict(gffs, circ, num, out, out_best, args_circ):
    '''Remove false positives which overlap with known annotations.'''
    detect = False
    gff = None
    for gff in gffs:
        if (gff.seq_id == circ.strain) and (
                gff.strand == circ.strand):
            # The annotation conflicts if it overlaps the circRNA
            # candidate in any of the four possible ways: overlapping
            # the start, lying inside, overlapping the end, or
            # covering the whole candidate.
            if ((gff.start <= circ.start) and (
                    gff.end >= circ.start) and (
                    gff.end <= circ.end)) or (
                    (gff.start >= circ.start) and (
                    gff.end <= circ.end)) or (
                    (gff.start >= circ.start) and (
                    gff.start <= circ.end) and (
                    gff.end >= circ.end)) or (
                    (gff.start <= circ.start) and (
                    gff.end >= circ.end)):
                detect = True
                break
    if detect:
        feature = get_feature(gff)
        out.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n".format(
            circ.strain, circ.strand, circ.start, circ.end,
            feature, circ.supported_reads,
            float(circ.supported_reads) / float(circ.start_site_reads),
            float(circ.supported_reads) / float(circ.end_site_reads)))
    else:
        start_read = (float(circ.supported_reads) /
                      float(circ.start_site_reads))
        end_read = (float(circ.supported_reads) /
                    float(circ.end_site_reads))
        if (circ.supported_reads >= args_circ.support) and (
                start_read >= args_circ.start_ratio) and (
                end_read >= args_circ.end_ratio):
            out_best.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}"
                           "\t{6}\t{7}\n".format(
                               circ.strain, circ.strand, circ.start,
                               circ.end, "NA", circ.supported_reads,
                               start_read, end_read))
        out.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n".format(
            circ.strain, circ.strand, circ.start, circ.end,
            "NA", circ.supported_reads, start_read, end_read))
    return detect


def import_num(support, nums, strain):
    if support not in nums[strain].keys():
        nums[strain][support] = 0
    if support not in nums["all"].keys():
        nums["all"][support] = 0
    nums[strain][support] += 1
    nums["all"][support] += 1


def print_file(nums, stat, strain):
    for key in sorted(nums[strain].keys()):
        stat.write("\tthe number of potential circular RNAs, ")
        stat.write("more than {0} supported it = {1}\n".format(
            key, nums[strain][key]))


def read_file(input_file, gff_file, hypo):
    circs = []
    gffs = []
    ps = SpliceParser()
    high = 0
    splice_fh = open(input_file)
    for entry in ps.parser(splice_fh):
        if entry.supported_reads > high:
            high = entry.supported_reads
        circs.append(entry)
    gff_parser = Gff3Parser()
    for entry in gff_parser.entries(open(gff_file)):
        if ("product" in entry.attributes.keys()) and (hypo):
            if "hypothetical protein" not in entry.attributes["product"]:
                gffs.append(entry)
        else:
            gffs.append(entry)
    gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start,
                                       k.end, k.strand))
    circs = sorted(circs, key=lambda x: (x.strain, x.supported_reads),
                   reverse=True)
    splice_fh.close()
    return circs, gffs, high


def get_circrna(circs, gffs, high, out, out_best, args_circ):
    '''Search the splice data to find potential circRNAs.'''
    num_circular = {}
    num_circular["all"] = 0
    num_support = {}
    num_support["all"] = {}
    num_conflict = {}
    num_conflict["all"] = {}
    pre_seq_id = ""
    num = 0
    for circ in circs:
        if pre_seq_id != circ.strain:
            num_support[circ.strain] = {}
            num_conflict[circ.strain] = {}
            num_circular[circ.strain] = 0
        if (circ.situation != "F") and (circ.splice_type == "C"):
            num_circular[circ.strain] += 1
            num_circular["all"] += 1
            detect = detect_conflict(gffs, circ, num, out,
                                     out_best, args_circ)
            for support in range(0, high + 5, 5):
                if circ.supported_reads >= int(support):
                    import_num(support, num_support, circ.strain)
                    if detect is False:
                        if (float(circ.supported_reads) /
                                float(circ.start_site_reads) >=
                                args_circ.start_ratio) and (
                                float(circ.supported_reads) /
                                float(circ.end_site_reads) >=
                                args_circ.end_ratio):
                            import_num(support, num_conflict,
                                       circ.strain)
            num += 1
        pre_seq_id = circ.strain
    return {"circular": num_circular, "support": num_support,
            "conflict": num_conflict}


def detect_circrna(input_file, gff_file, output_file, args_circ,
                   statistics):
    circs, gffs, high = read_file(input_file, gff_file, args_circ.hypo)
    out = open(output_file, "w")
    out.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n".format(
        "Genome", "Strand", "Start", "End", "Annotation_overlap",
        "Supported_reads", "Supported_reads/Reads_at_start",
        "Supported_reads/Reads_at_end"))
    out_best = open(output_file.replace("_all.csv", "_best.csv"), "w")
    out_best.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n".format(
        "Genome", "Strand", "Start", "End", "Annotation_overlap",
        "Supported_reads", "Supported_reads/Reads_at_start",
        "Supported_reads/Reads_at_end"))
    nums = get_circrna(circs, gffs, high, out, out_best, args_circ)
    stat = open(statistics, "w")
    stat.write("All genomes:\n")
    stat.write("\tBefore filtering:\n")
    stat.write("\tthe number of all circular RNAs = {0}\n".format(
        nums["circular"]["all"]))
    print_file(nums["support"], stat, "all")
    stat.write("\n\tAfter filtering:\n")
    stat.write("\t\twithout conflict with annotation\n")
    stat.write("\t\tsupport read ratio of starting "
               "point is larger than {0}\n".format(
                   args_circ.start_ratio))
    stat.write("\t\tsupport read ratio of end point "
               "is larger than {0}\n".format(
                   args_circ.end_ratio))
    print_file(nums["conflict"], stat, "all")
    if len(nums["circular"]) > 2:
        for strain in nums["circular"].keys():
            if strain != "all":
                stat.write("\n{0}:\n".format(strain))
                stat.write("\tBefore filtering:\n")
                stat.write("\tthe number of all circular RNAs = "
                           "{0}\n".format(nums["circular"][strain]))
                print_file(nums["support"], stat, strain)
                stat.write("\n\tAfter filtering:\n")
                stat.write("\t\twithout conflict with annotation\n")
                stat.write("\t\tsupport read ratio of starting point "
                           "is larger than {0}\n".format(
                               args_circ.start_ratio))
                stat.write("\t\tsupport read ratio of end point "
                           "is larger than {0}\n".format(
                               args_circ.end_ratio))
                print_file(nums["conflict"], stat, strain)
    out.close()
    out_best.close()
    stat.close()
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/circRNA_detection.py
circRNA_detection.py
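
A minimal driver sketch for detect_circrna follows; the file names and thresholds are illustrative, not ANNOgesic defaults. The args_circ attributes mirror the ones this module actually reads (support, start_ratio, end_ratio, hypo), and the output file name must contain "_all.csv" so the "_best.csv" companion file can be derived from it.

# Hypothetical invocation of detect_circrna (assumed paths/values).
from types import SimpleNamespace
from annogesiclib.circRNA_detection import detect_circrna

args_circ = SimpleNamespace(
    support=10,       # minimum supported reads for "best" candidates
    start_ratio=0.5,  # supported_reads / reads_at_start threshold
    end_ratio=0.5,    # supported_reads / reads_at_end threshold
    hypo=False)       # keep CDSs annotated as hypothetical proteins
detect_circrna("splice_sites.bed", "genome.gff",
               "circRNA_all.csv", args_circ, "stat_circRNA.csv")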
import os
import csv
import shutil
from annogesiclib.gff3 import Gff3Parser


def read_gff(gff_file):
    datas = []
    for entry in Gff3Parser().entries(open(gff_file)):
        datas.append(entry)
    datas = sorted(datas, key=lambda k: (k.seq_id, k.start,
                                         k.end, k.strand))
    return datas


def print_table(srna_table, out_t, srnas):
    fh = open(srna_table, "r")
    for row in csv.reader(fh, delimiter='\t'):
        for srna in srnas:
            if (row[0] == srna.seq_id) and (
                    int(row[2]) == srna.start) and (
                    int(row[3]) == srna.end) and (
                    row[4] == srna.strand):
                if "with_term" in srna.attributes.keys():
                    with_term = [srna.attributes["with_term"]]
                else:
                    with_term = ["NA"]
                out_t.write("\t".join(row + with_term) + "\n")


def compare_srna_term(srna_gff, srna_table, term_file, fuzzy_b, fuzzy_a):
    '''Compare sRNAs with terminators to find the sRNAs
    which are associated with a terminator.'''
    srnas = read_gff(srna_gff)
    terms = read_gff(term_file)
    out_g = open("tmp_srna.gff", "w")
    out_t = open("tmp_srna.csv", "w")
    out_g.write("##gff-version 3\n")
    for srna in srnas:
        detect = False
        for term in terms:
            if (srna.seq_id == term.seq_id) and (
                    srna.strand == term.strand):
                if (srna.strand == "+"):
                    if (((srna.end - term.end) <= fuzzy_b) and (
                            srna.end >= term.end) and (
                            srna.start < term.start)) or (
                            ((term.start - srna.end) <= fuzzy_a) and (
                            term.start >= srna.end)) or (
                            (srna.end > term.start) and (
                            srna.end < term.end) and (
                            srna.start < term.start)):
                        term_string = (term.feature + ":" +
                                       str(term.start) + "-" +
                                       str(term.end) + "_" + term.strand)
                        srna.attributes["with_term"] = term_string
                        detect = True
                        break
                else:
                    if (((term.start - srna.start) <= fuzzy_b) and (
                            term.start >= srna.start) and (
                            term.end < srna.end)) or (
                            ((srna.start - term.end) <= fuzzy_a) and (
                            srna.start >= term.end)) or (
                            (srna.start > term.start) and (
                            srna.start < term.end) and (
                            srna.end > term.end)):
                        term_string = (term.feature + ":" +
                                       str(term.start) + "-" +
                                       str(term.end) + "_" + term.strand)
                        srna.attributes["with_term"] = term_string
                        detect = True
                        break
        if "end_cleavage" in srna.attributes.keys():
            if (srna.attributes["end_cleavage"] != "NA") and (
                    "with_term" not in srna.attributes.keys()):
                srna.attributes["with_term"] = \
                    srna.attributes["end_cleavage"]
            elif (srna.attributes["end_cleavage"] != "NA") and (
                    "with_term" in srna.attributes.keys()):
                srna.attributes["with_term"] = ",".join([
                    srna.attributes["with_term"],
                    srna.attributes["end_cleavage"]])
        if detect:
            out_g.write(srna.info + ";with_term=" +
                        srna.attributes["with_term"] + "\n")
        else:
            out_g.write(srna.info + ";with_term=NA" + "\n")
    print_table(srna_table, out_t, srnas)
    os.remove(srna_gff)
    os.remove(srna_table)
    out_t.close()
    out_g.close()
    shutil.move("tmp_srna.gff", srna_gff)
    shutil.move("tmp_srna.csv", srna_table)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/compare_srna_term.py
compare_srna_term.py
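
A short usage sketch of compare_srna_term; the file names are hypothetical. Note that the function writes temporary files into the current working directory and then replaces both input files in place with terminator-annotated versions, so callers should keep copies if the originals are needed.

# Hypothetical call to compare_srna_term. fuzzy_b / fuzzy_a are the
# allowed distances (in nt) between an sRNA end and a terminator lying
# inside the sRNA (before its end) or just downstream of it (after).
from annogesiclib.compare_srna_term import compare_srna_term

compare_srna_term("NC_000913_sRNA.gff", "NC_000913_sRNA.csv",
                  "NC_000913_term.gff", 30, 30)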
import os
import sys
import csv
import time
from subprocess import call
from annogesiclib.helper import Helper
from annogesiclib.plot_PPI import plot_ppi
from annogesiclib.converter import Converter
from annogesiclib.gff3 import Gff3Parser


class PPINetwork(object):
    '''Detection of protein-protein interactions (PPI).'''

    def __init__(self, out_folder):
        self.helper = Helper()
        self.converter = Converter()
        self.gffparser = Gff3Parser()
        self.tmp_id = os.path.join(out_folder, "tmp_id_list")
        self.all_result = os.path.join(out_folder, "all_results")
        self.best_result = os.path.join(out_folder, "best_results")
        self.fig = os.path.join(out_folder, "figures")
        self.ref_tags = {}
        self.with_strain = "with_strain"
        self.without_strain = "without_strain"
        self.tmp_files = {"log": "tmp_log",
                          "action": "tmp_action.log",
                          "pubmed": "tmp_pubmed.log",
                          "specific": os.path.join(
                              out_folder, "tmp_specific"),
                          "nospecific": os.path.join(
                              out_folder, "tmp_nospecific"),
                          "wget_action": os.path.join(
                              out_folder, "tmp_action")}

    def _make_folder_no_exist(self, path, folder):
        if folder not in os.listdir(path):
            os.mkdir(os.path.join(path, folder))

    def _make_subfolder(self, path, strain, ptt):
        os.mkdir(os.path.join(path, strain))
        os.mkdir(os.path.join(path, strain, ptt))

    def _run_wget(self, source, folder, err, log):
        log.write(" ".join(["wget", source, "-O", folder]) + "\n")
        call(["wget", source, "-O", folder], stderr=err)
        time.sleep(2)

    def _wget_id(self, strain, locus, strain_id, files, log):
        detect_id = False
        if strain == strain_id["ptt"]:
            print("Retrieving STRING ID for {0} of {1} -- {2}".format(
                locus, strain_id["string"], strain_id["file"]))
            id_source = ("http://string-db.org/api/tsv/get_string_ids?"
                         "identifier={0}&species={1}").format(
                             locus, strain_id["string"])
            self._run_wget(id_source,
                           os.path.join(files["id_list"], locus),
                           files["id_log"], log)
            detect_id = True
        return detect_id

    def _retrieve_id(self, strain_id, genes, files, log):
        log.write("Retrieving STRING ID for {0}.\n".format(
            strain_id["ptt"]))
        for gene in genes:
            if gene["gene"] != "-":
                detect_id = self._wget_id(gene["strain"], gene["gene"],
                                          strain_id, files, log)
                self.ref_tags[gene["gene"]] = gene["locus_tag"]
            else:
                detect_id = self._wget_id(gene["strain"],
                                          gene["locus_tag"],
                                          strain_id, files, log)
                self.ref_tags[gene["locus_tag"]] = gene["locus_tag"]
            if not detect_id:
                log.write("{0} is not found in {1}.\n".format(
                    gene, strain_id["file"]))
                print("Error: There is no {0} in {1}".format(
                    gene, strain_id["file"]))
        log.write("The temporary files are generated and stored in the "
                  "following folders:\n")
        log.write("\t" + os.path.join(
            files["id_list"], gene["locus_tag"]) + "\n")

    def _get_prefer_name(self, row_a, strain_id, files, querys, log):
        prefername = ""
        filename = row_a.split(".")
        if ((filename[1] not in os.listdir(files["id_list"])) and (
                "all" not in querys)) or ("all" in querys):
            self._wget_id(strain_id["ptt"], filename[1], strain_id,
                          files, log)
        if (filename[1] in os.listdir(files["id_list"])) or (
                "all" in querys):
            if (filename[1] in os.listdir(files["id_list"])):
                id_h = open(os.path.join(files["id_list"],
                                         filename[1]), "r")
                for row_i in csv.reader(id_h, delimiter="\t"):
                    if row_a == row_i[1]:
                        prefername = row_i[4]
                id_h.close()
        return prefername

    def _print_title(self, out, id_file, id_folder):
        id_h = open(os.path.join(id_folder, id_file), "r")
        prefername = id_file
        for row_i in csv.reader(id_h, delimiter="\t"):
            prefername = row_i[3]
        id_h.close()
        if prefername not in self.ref_tags.keys():
            locus = id_file
        else:
            locus = self.ref_tags[prefername]
        out.write("Interaction of {0} | {1}\n".format(
            locus, prefername))
        out.write("Genome\tstringId_A\tstringId_B\tpreferredName_A\t"
                  "preferredName_B\tncbiTaxonId\t"
                  "STRING_score\tPubmed_id\tPubmed_score\n")

    def _get_pubmed(self, row, strain_id, score, id_file, first_output,
                    ptt, files, paths, args_ppi, log):
        prefer1 = self._get_prefer_name(row[0], strain_id, files,
                                        args_ppi.querys, log)
        prefer2 = self._get_prefer_name(row[1], strain_id, files,
                                        args_ppi.querys, log)
        if (len(prefer1) > 0) and (len(prefer2) > 0):
            if args_ppi.no_specific:
                pubmed_source = (
                    "http://www.ncbi.nlm.nih.gov/CBBresearch/"
                    "Wilbur/IRET/PIE/getppi.cgi?term={0}+AND+{1}").format(
                        prefer1, prefer2)
                self._run_wget(pubmed_source,
                               self.tmp_files["nospecific"],
                               files["pubmed_log"], log)
            strain_id["pie"] = "+".join(strain_id["pie"].split(" "))
            pubmed_source = (
                "http://www.ncbi.nlm.nih.gov/CBBresearch/Wilbur"
                "/IRET/PIE/getppi.cgi?term={0}+AND+{1}+AND+{2}").format(
                    prefer1, prefer2, strain_id["pie"])
            self._run_wget(pubmed_source, self.tmp_files["specific"],
                           files["pubmed_log"], log)
            row[0] = row[0].split(".")[-1]
            row[1] = row[1].split(".")[-1]
            self._merge_information(
                first_output, self.tmp_files["specific"],
                files["all_specific"], files["best_specific"], row,
                args_ppi.score, id_file, files["id_list"], "specific",
                os.path.join(paths["all"], self.with_strain),
                os.path.join(paths["best"], self.with_strain), ptt)
            if args_ppi.no_specific:
                self._merge_information(
                    first_output, self.tmp_files["nospecific"],
                    files["all_nospecific"], files["best_nospecific"],
                    row, args_ppi.score, id_file, files["id_list"],
                    "nospecific",
                    os.path.join(paths["all"], self.without_strain),
                    os.path.join(paths["best"], self.without_strain),
                    ptt)

    def _print_single_file(self, out_single, row_a, ptt, row):
        if row == "NA":
            out_single.write("\t".join(
                [ptt, "\t".join(row_a[:6]), "NA", "NA"]) + "\n")
        else:
            out_single.write("\t".join(
                [ptt, "\t".join(row_a[:6]), "\t".join(row)]) + "\n")

    def _merge_information(self, first_output, filename, out_all,
                           out_best, row_a, score, id_file, id_folder,
                           file_type, all_folder, best_folder, ptt):
        if os.path.getsize(filename) != 0:
            f_h = open(filename, "r")
            out_all_single = open(os.path.join(
                all_folder, ptt,
                "_".join([row_a[0], row_a[1] + ".csv"])), "w")
            out_best_single = open(os.path.join(
                best_folder, ptt,
                "_".join([row_a[0], row_a[1] + ".csv"])), "w")
            self._print_title(out_all_single, id_file, id_folder)
            self._print_title(out_best_single, id_file, id_folder)
            detect = False
            for row in csv.reader(f_h, delimiter="\t"):
                self._print_single_file(out_all_single, row_a, ptt, row)
                if first_output["_".join([file_type, "all"])]:
                    first_output["_".join([file_type, "all"])] = False
                    self._print_title(out_all, id_file, id_folder)
                out_all.write("\t".join([ptt, "\t".join(row_a[:6]),
                                         "\t".join(row)]) + "\n")
                if (float(row[1]) >= score):
                    detect = True
                    self._print_single_file(out_best_single, row_a,
                                            ptt, row)
                    if first_output["_".join([file_type, "best"])]:
                        first_output["_".join(
                            [file_type, "best"])] = False
                        self._print_title(out_best, id_file, id_folder)
                    out_best.write("\t".join([ptt, "\t".join(row_a[:6]),
                                              "\t".join(row)]) + "\n")
            f_h.close()
            if not detect:
                os.remove(os.path.join(
                    best_folder, ptt,
                    "_".join([row_a[0], row_a[1] + ".csv"])))
            out_all_single.close()
            out_best_single.close()
        else:
            out_all_single = open(os.path.join(
                all_folder, ptt,
                "_".join([row_a[0], row_a[1] + ".csv"])), "w")
            self._print_title(out_all_single, id_file, id_folder)
            self._print_single_file(out_all_single, row_a, ptt, "NA")
            if first_output["_".join([file_type, "all"])]:
                first_output["_".join([file_type, "all"])] = False
                self._print_title(out_all, id_file, id_folder)
            out_all.write("\t".join([ptt, "\t".join(row_a),
                                     "NA", "NA"]) + "\n")
            out_all_single.close()

    def _detect_protein(self, strain_id, args_ppi):
        fh = open(os.path.join(args_ppi.ptts, strain_id["file"]), "r")
        genes = []
        for row in csv.reader(fh, delimiter="\t"):
            if (len(row) == 1) and ("-" in row[0]) and (
                    ".." in row[0]):
                name = (row[0].split("-"))[0].strip().split(
                    ",")[0].strip()
            if ("all" in args_ppi.querys):
                if (len(row) > 1) and (row[0] != "Location"):
                    genes.append({"strain": name, "locus_tag": row[4],
                                  "gene": row[5]})
            else:
                for query in args_ppi.querys:
                    datas = query.split(":")
                    strain = datas[0]
                    start = datas[1]
                    end = datas[2]
                    strand = datas[3]
                    if (len(row) > 1) and (row[0] != "Location") and (
                            name == strain) and (
                            start == row[0].split("..")[0]) and (
                            end == row[0].split("..")[1]) and (
                            strand == row[1]):
                        genes.append({"strain": name,
                                      "locus_tag": row[4],
                                      "gene": row[5]})
        fh.close()
        return genes

    def _setup_nospecific(self, paths, strain_id, files):
        self._make_subfolder(
            paths["all"], self.without_strain, strain_id["ptt"])
        self._make_subfolder(
            paths["best"], self.without_strain, strain_id["ptt"])
        self._make_subfolder(
            paths["fig"], self.without_strain, strain_id["ptt"])
        filename_nostrain = "_".join(
            [strain_id["file"].replace(".ptt", ""),
             self.without_strain + ".csv"])
        files["all_nospecific"] = open(os.path.join(
            paths["all"], filename_nostrain), "w")
        files["best_nospecific"] = open(os.path.join(
            paths["best"], filename_nostrain), "w")

    def _setup_folder_and_read_file(self, strain_id, pre_file,
                                    files, paths, args_ppi):
        if strain_id["file"].endswith(".ptt"):
            if strain_id["file"] != pre_file:
                self.helper.check_make_folder(
                    "_".join([self.tmp_id, strain_id["file"]]))
                paths["all"] = os.path.join(
                    self.all_result, strain_id["file"][:-4])
                paths["best"] = os.path.join(
                    self.best_result, strain_id["file"][:-4])
                paths["fig"] = os.path.join(
                    self.fig, strain_id["file"][:-4])
                self.helper.check_make_folder(
                    os.path.join(self.all_result,
                                 strain_id["file"][:-4]))
                self.helper.check_make_folder(
                    os.path.join(self.best_result,
                                 strain_id["file"][:-4]))
                self.helper.check_make_folder(
                    os.path.join(self.fig, strain_id["file"][:-4]))
                self._make_subfolder(
                    paths["all"], self.with_strain, strain_id["ptt"])
                self._make_subfolder(
                    paths["best"], self.with_strain, strain_id["ptt"])
                self._make_subfolder(
                    paths["fig"], self.with_strain, strain_id["ptt"])
                filename_strain = "_".join(
                    [strain_id["file"].replace(".ptt", ""),
                     self.with_strain + ".csv"])
                files["all_specific"] = open(os.path.join(
                    paths["all"], filename_strain), "w")
                files["best_specific"] = open(os.path.join(
                    paths["best"], filename_strain), "w")
                if args_ppi.no_specific:
                    self._setup_nospecific(paths, strain_id, files)
                files["id_list"] = "_".join([self.tmp_id,
                                             strain_id["file"]])
                files["id_log"] = open(os.path.join(
                    files["id_list"], self.tmp_files["log"]), "w")
                files["action_log"] = open(os.path.join(
                    args_ppi.out_folder,
                    self.tmp_files["action"]), "w")
                files["pubmed_log"] = open(os.path.join(
                    args_ppi.out_folder,
                    self.tmp_files["pubmed"]), "w")
                pre_file = strain_id["file"]
                if strain_id["file"] in os.listdir(args_ppi.ptts):
                    genes = self._detect_protein(strain_id, args_ppi)
            else:
                self._make_folder_no_exist(os.path.join(
                    paths["all"], self.with_strain), strain_id["ptt"])
                self._make_folder_no_exist(os.path.join(
                    paths["best"], self.with_strain), strain_id["ptt"])
                if args_ppi.no_specific:
                    self._make_folder_no_exist(
                        os.path.join(paths["all"], self.without_strain),
                        strain_id["ptt"])
                    self._make_folder_no_exist(
                        os.path.join(paths["best"],
                                     self.without_strain),
                        strain_id["ptt"])
        else:
            print("Error: Wrong .ptt file!")
            sys.exit()
        return genes

    def _wget_actions(self, files, id_file, strain_id, out_folder, log):
        detect = False
        t_h = open(os.path.join(files["id_list"], id_file), "r")
        print("Retrieving STRING actions for {0} of {1} -- {2}".format(
            id_file, strain_id["string"], strain_id["file"]))
        for row in csv.reader(t_h, delimiter="\t"):
            if row[0].startswith("queryIndex"):
                continue
            else:
                detect = True
                if row[2] == strain_id["string"]:
                    action_source = (
                        "http://string-db.org/api/tsv/"
                        "interaction_partners?"
                        "identifier={0}&species={1}").format(
                            row[1], row[2])
                    self._run_wget(
                        action_source, self.tmp_files["wget_action"],
                        files["action_log"], log)
        t_h.close()
        if not detect:
            log.write(id_file + " cannot be found in STRING.\n")
            print("Warning: " + id_file + " cannot be found in STRING!")
        return detect

    def _retrieve_actions(self, files, strain_id, paths, args_ppi, log):
        '''Get the interactions of the proteins.'''
        log.write("Using STRING and PIE to retrieve the interaction "
                  "information for {0}.\n".format(strain_id["ptt"]))
        for id_file in os.listdir(files["id_list"]):
            if id_file != self.tmp_files["log"]:
                detect_id = self._wget_actions(files, id_file, strain_id,
                                               args_ppi.out_folder, log)
                if detect_id:
                    a_h = open(self.tmp_files["wget_action"], "r")
                    pre_row = []
                    first = True
                    detect = False
                    first_output = {"specific_all": True,
                                    "specific_best": True,
                                    "nospecific_all": True,
                                    "nospecific_best": True}
                    print("Retrieving Pubmed IDs for {0} of {1} "
                          "-- {2}".format(id_file, strain_id["string"],
                                          strain_id["file"]))
                    for row_a in csv.reader(a_h, delimiter="\t"):
                        if row_a == []:
                            print("No interaction can be detected")
                            break
                        if row_a[0].startswith("stringId_A"):
                            continue
                        else:
                            detect = True
                            if first:
                                first = False
                                score = row_a[5]
                            else:
                                if (row_a[0] != pre_row[0]) or (
                                        row_a[1] != pre_row[1]):
                                    self._get_pubmed(
                                        pre_row, strain_id, score,
                                        id_file, first_output,
                                        strain_id["ptt"], files,
                                        paths, args_ppi, log)
                                    score = row_a[5]
                                else:
                                    score = score + ";" + row_a[5]
                            pre_row = row_a
                    if detect:
                        detect = False
                        self._get_pubmed(
                            row_a, strain_id, score, id_file,
                            first_output, strain_id["ptt"], files,
                            paths, args_ppi, log)
                self._list_files(args_ppi, paths, files, log)
                if detect_id:
                    a_h.close()

    def _list_files(self, args_ppi, paths, files, log):
        log.write("The temporary files are generated and stored in the "
                  "following folders:\n")
        if args_ppi.no_specific:
            folders = [files["id_list"], self.tmp_files["wget_action"],
                       self.tmp_files["specific"],
                       self.tmp_files["nospecific"]]
        else:
            folders = [files["id_list"], self.tmp_files["wget_action"],
                       self.tmp_files["specific"]]
        for folder in folders:
            log.write("\t" + os.path.join(folder) + "\n")
        log.write("The files for storing the interaction information "
                  "are generated and stored in the following "
                  "folders:\n")
        for data in (paths["all"], paths["best"]):
            for entry in os.listdir(data):
                if os.path.isdir(os.path.join(data, entry)):
                    for file_ in os.listdir(os.path.join(data, entry)):
                        log.write("\t" + os.path.join(
                            data, entry, file_) + "\n")
        log.write("The merged tables are generated:\n")
        for data in (paths["all"], paths["best"]):
            for entry in os.listdir(data):
                if os.path.isfile(os.path.join(data, entry)):
                    log.write("\t" + os.path.join(data, entry) + "\n")

    def _plot(self, args_ppi, files, log):
        log.write("Running plot_PPI.py to generate plots of PPI.\n")
        log.write("The figures of PPI networks are generated and stored "
                  "in the following folders:\n")
        if args_ppi.no_specific:
            files["all_nospecific"].close()
            files["best_nospecific"].close()
        files["all_specific"].close()
        files["best_specific"].close()
        for folder in os.listdir(self.all_result):
            if folder in os.listdir(self.fig):
                print("Plotting {0}".format(folder))
                out_folder_spe = os.path.join(self.fig, folder,
                                              self.with_strain)
                plot_ppi(os.path.join(
                    self.all_result, folder,
                    "_".join([folder, self.with_strain + ".csv"])),
                    args_ppi.score, out_folder_spe, args_ppi.size)
                for file_ in os.listdir(out_folder_spe):
                    log.write("\t" + os.path.join(
                        out_folder_spe, file_) + "\n")
                if args_ppi.no_specific:
                    out_folder_nospe = os.path.join(self.fig, folder,
                                                    self.without_strain)
                    plot_ppi(os.path.join(
                        self.all_result, folder,
                        "_".join([folder,
                                  self.without_strain + ".csv"])),
                        args_ppi.score, out_folder_nospe, args_ppi.size)
                    for file_ in os.listdir(out_folder_nospe):
                        log.write("\t" + os.path.join(
                            out_folder_nospe, file_) + "\n")

    def _remove_tmps(self, args_ppi):
        self.helper.remove_all_content(
            os.path.join(args_ppi.out_folder), "tmp", "file")
        self.helper.remove_all_content(
            os.path.join(args_ppi.out_folder), "tmp", "dir")
        for file_ in os.listdir(args_ppi.ptts):
            if file_.startswith("PPI_"):
                os.remove(os.path.join(args_ppi.ptts, file_))
        self.helper.remove_all_content(
            os.path.join(args_ppi.out_folder), "temp", "dir")

    def check_query(self, args_ppi, log):
        if "all" not in args_ppi.querys:
            for query in args_ppi.querys:
                detect = False
                datas = query.split(":")
                for gff in os.listdir(args_ppi.ptts):
                    gff_f = open(os.path.join(args_ppi.ptts, gff), "r")
                    for entry in Gff3Parser().entries(gff_f):
                        if (entry.seq_id == datas[0]) and (
                                entry.start == int(datas[1])) and (
                                entry.end == int(datas[2])) and (
                                entry.strand == datas[3]):
                            detect = True
                            break
                if not detect:
                    log.write(query + " is not found in gff file.\n")
                    print("Error: {0} is not found in gff file!".format(
                        query))
                    sys.exit()

    def retrieve_ppi_network(self, args_ppi, log):
        '''Retrieve PPI from STRING with PIE and draw the network.'''
        strain_ids = []
        paths = {}
        files = {}
        self.check_query(args_ppi, log)
        log.write("Running converter.py to generate ptt and rnt "
                  "files.\n")
        log.write("The following files are generated:\n")
        for strain in args_ppi.strains:
            datas = strain.split(":")
            ptt_file = "PPI_" + datas[0].replace(".gff", ".ptt")
            rnt_file = "PPI_" + datas[0].replace(".gff", ".rnt")
            self.converter.convert_gff2rntptt(
                os.path.join(args_ppi.ptts, datas[0]), datas[1], "0",
                os.path.join(args_ppi.ptts, ptt_file),
                os.path.join(args_ppi.ptts, rnt_file), None, None)
            strain_ids.append({"file": ptt_file,
                               "ptt": datas[1],
                               "string": datas[2],
                               "pie": datas[3]})
            log.write("\t" + os.path.join(args_ppi.ptts,
                                          ptt_file) + "\n")
            log.write("\t" + os.path.join(args_ppi.ptts,
                                          rnt_file) + "\n")
        strain_ids.sort(key=lambda x: x["file"])
        pre_file = ""
        for strain_id in strain_ids:
            genes = self._setup_folder_and_read_file(strain_id, pre_file,
                                                     files, paths,
                                                     args_ppi)
            s_h = open(args_ppi.species, "r")
            for row in csv.reader(s_h, delimiter="\t"):
                if row[0] != "##":
                    if row[0] == strain_id["string"]:
                        break
                    elif row[2] == strain_id["string"]:
                        strain_id["string"] = row[0]
                        break
                    elif row[3] == strain_id["string"]:
                        strain_id["string"] = row[0]
                        break
            self._retrieve_id(strain_id, genes, files, log)
            self._retrieve_actions(files, strain_id, paths, args_ppi,
                                   log)
        self._plot(args_ppi, files, log)
        self._remove_tmps(args_ppi)
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/ppi.py
ppi.py
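
A hypothetical invocation of PPINetwork follows. The attribute names on args_ppi mirror the ones this module reads (ptts, strains, querys, species, no_specific, score, size, out_folder), but the values and paths are illustrative; real runs normally go through the ANNOgesic command line, which also prepares the output folder layout.

# Hypothetical driver for PPINetwork (assumed paths and values).
from types import SimpleNamespace
from annogesiclib.ppi import PPINetwork

args_ppi = SimpleNamespace(
    ptts="input/gffs",            # folder holding the annotation gff files
    strains=["NC_000913.gff:NC_000913:511145:Escherichia coli"],
    querys=["all"],               # or ["strain:start:end:strand", ...]
    species="species.v10.txt",    # STRING species mapping table
    no_specific=True,             # also query PIE without the species term
    score=0.0,                    # PIE score cutoff for "best" results
    size=4000,                    # figure size passed to plot_ppi
    out_folder="output/PPI")
log = open("output/PPI/log.txt", "w")
PPINetwork(args_ppi.out_folder).retrieve_ppi_network(args_ppi, log)
log.close()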
import os
import gc
import sys
import shutil
import time
from subprocess import call, Popen
from annogesiclib.multiparser import Multiparser
from annogesiclib.helper import Helper
from annogesiclib.sRNA_intergenic import intergenic_srna
from annogesiclib.TSS_upstream import upstream
from annogesiclib.sRNA_utr_derived import utr_derived_srna
from annogesiclib.merge_sRNA import merge_srna_gff
from annogesiclib.merge_sRNA import merge_srna_table
from annogesiclib.extract_sRNA_info import extract_energy, extract_blast
from annogesiclib.plot_mountain import plot_mountain_plot
from annogesiclib.sRNA_class import classify_srna
from annogesiclib.gen_srna_output import gen_srna_table, gen_best_srna
from annogesiclib.blast_class import blast_class
from annogesiclib.compare_sRNA_sORF import srna_sorf_comparison
from annogesiclib.change_db_format import change_format
from annogesiclib.compare_srna_term import compare_srna_term
from annogesiclib.compare_srna_promoter import compare_srna_promoter
from annogesiclib.print_rank_all import print_rank_all
from annogesiclib.sRNA_filter_frag import filter_frag
from annogesiclib.sRNA_filter_min_utr import filter_utr
from annogesiclib.sRNA_antisense import srna_antisense
from annogesiclib.args_container import ArgsContainer
from annogesiclib.lib_reader import read_wig, read_libs
from annogesiclib.extract_sec_info import extract_info_sec, modify_header
from annogesiclib.get_srna_poly_u import get_srna_poly_u
from annogesiclib.reorganize_table import reorganize_table
from annogesiclib.check_srna_overlap import check_overlap


class sRNADetection(object):
    '''Detection of sRNA.'''

    def __init__(self, args_srna):
        self.args_container = ArgsContainer()
        self.helper = Helper()
        self.multiparser = Multiparser()
        self.gff_output = os.path.join(args_srna.out_folder, "gffs")
        self.table_output = os.path.join(args_srna.out_folder, "tables")
        self.stat_path = os.path.join(args_srna.out_folder, "statistics")
        self.tss_path = self._check_folder_exist(args_srna.tss_folder)
        self.pro_path = self._check_folder_exist(args_srna.pro_folder)
        self.sorf_path = self._check_folder_exist(args_srna.sorf_file)
        self.fasta_path = self._check_folder_exist(args_srna.fastas)
        self.tran_path = os.path.join(args_srna.trans, "tmp")
        self.term_path = self._check_folder_exist(args_srna.terms)
        self.merge_wigs = os.path.join(args_srna.out_folder, "merge_wigs")
        self.prefixs = {"merge": os.path.join(
                            args_srna.out_folder, "tmp_merge"),
                        "utr": os.path.join(
                            args_srna.out_folder, "tmp_utrsrna"),
                        "normal": os.path.join(
                            args_srna.out_folder, "tmp_normal"),
                        "in_cds": os.path.join(
                            args_srna.out_folder, "tmp_incds"),
                        "merge_table": os.path.join(
                            args_srna.out_folder, "tmp_merge_table"),
                        "utr_table": os.path.join(
                            args_srna.out_folder, "tmp_utrsrna_table"),
                        "normal_table": os.path.join(
                            args_srna.out_folder, "tmp_normal_table"),
                        "in_cds_table": os.path.join(
                            args_srna.out_folder, "tmp_incds_table"),
                        "basic": os.path.join(
                            args_srna.out_folder, "tmp_basic"),
                        "energy": os.path.join(
                            args_srna.out_folder, "tmp_energy")}
        self.tmps = {"nr": os.path.join(args_srna.out_folder, "tmp_nr"),
                     "srna": os.path.join(args_srna.out_folder,
                                          "tmp_sRNA")}
        self.best_table = os.path.join(self.table_output,
                                       "best_candidates")
        self.table_output = os.path.join(args_srna.out_folder, "tables")
        self.stat_path = os.path.join(args_srna.out_folder, "statistics")
        self.all_best = {"all_gff": os.path.join(
                             self.gff_output, "all_candidates"),
                         "best_gff": os.path.join(
                             self.gff_output, "best_candidates"),
                         "all_table": os.path.join(
                             self.table_output, "all_candidates"),
                         "best_table": os.path.join(
                             self.table_output, "best_candidates")}

    def _check_folder_exist(self, folder):
        if folder is not None:
            path = os.path.join(folder, "tmp")
        else:
            path = None
        return path

    def _check_gff(self, gffs):
        for gff in os.listdir(gffs):
            if gff.endswith(".gff"):
                self.helper.check_uni_attributes(os.path.join(gffs, gff))

    def _run_format(self, blastdb, database, type_, db_file, err, log):
        log.write("Please make sure the version of BLAST+ is at least "
                  "2.2.28+.\n")
        log.write(" ".join([blastdb, "-in", database, "-dbtype", type_,
                            "-out", db_file]) + "\n")
        call([blastdb, "-in", database, "-dbtype", type_,
              "-out", db_file], stderr=err)
        log.write("Done!\n")

    def _wait_process(self, processes):
        for p in processes:
            p.wait()
            if p.stdout:
                p.stdout.close()
            if p.stdin:
                p.stdin.close()
            if p.stderr:
                p.stderr.close()
            try:
                p.kill()
            except OSError:
                pass
            time.sleep(5)

    def _formatdb(self, database, type_, out_folder, blastdb,
                  database_type, log):
        err = open(os.path.join(out_folder, "log.txt"), "w")
        if database_type == "sRNA":
            change_format(database, "tmp_srna_database")
            os.remove(database)
            shutil.move("tmp_srna_database", database)
            log.write("Formatting sRNA database.\n")
        else:
            log.write("Formatting nr database.\n")
        db_file = ".".join(database.split(".")[:-1])
        self._run_format(blastdb, database, type_, db_file, err, log)
        err.close()
        if (database.endswith(".fa")) or (
                database.endswith(".fna")) or (
                database.endswith(".fasta")):
            database = ".".join(database.split(".")[:-1])
        return database

    def _merge_frag_tex_file(self, files, args_srna):
        '''Merge the results of fragmented and TEX-treated libs.'''
        if (args_srna.frag_wigs is not None) and (
                args_srna.tex_wigs is not None):
            self.helper.merge_file(files["frag_gff"], files["tex_gff"])
            self.helper.merge_file(files["frag_csv"], files["tex_csv"])
            shutil.move(files["tex_csv"], files["merge_csv"])
            self.helper.sort_gff(files["tex_gff"], files["merge_gff"])
            os.remove(files["frag_csv"])
            os.remove(files["frag_gff"])
            os.remove(files["tex_gff"])
        elif (args_srna.frag_wigs is not None):
            shutil.move(files["frag_csv"], files["merge_csv"])
            self.helper.sort_gff(files["frag_gff"], files["merge_gff"])
            os.remove(files["frag_gff"])
        elif (args_srna.tex_wigs is not None):
            shutil.move(files["tex_csv"], files["merge_csv"])
            self.helper.sort_gff(files["tex_gff"], files["merge_gff"])

    def _read_lib_wig(self, args_srna):
        libs, texs = read_libs(args_srna.input_libs,
                               args_srna.wig_folder)
        wigs_f = read_wig(args_srna.wig_f_file, "+", libs)
        wigs_r = read_wig(args_srna.wig_r_file, "-", libs)
        return [libs, texs, wigs_f, wigs_r]

    def _run_normal(self, prefix, gff, tran, fuzzy_tss, args_srna, log):
        '''Detection of intergenic and antisense sRNAs.'''
        tex_datas = None
        frag_datas = None
        if "tmp_cutoff_inter" in os.listdir(args_srna.out_folder):
            os.remove(os.path.join(args_srna.out_folder,
                                   "tmp_cutoff_inter"))
        files = {"frag_gff": None, "frag_csv": None,
                 "tex_gff": None, "tex_csv": None,
                 "merge_gff": None, "merge_csv": None}
        if self.tss_path is not None:
            if ("TSS_classes" in os.listdir(args_srna.out_folder)) and (
                    not args_srna.source):
                tss = os.path.join(args_srna.out_folder, "TSS_classes",
                                   prefix + "_TSS.gff")
            else:
                tss = self.helper.get_correct_file(
                    self.tss_path, "_TSS.gff", prefix, None, None)
        else:
            tss = None
        if self.pro_path is not None:
            pro = self.helper.get_correct_file(
                self.pro_path, "_processing.gff", prefix, None, None)
        else:
            pro = None
        if args_srna.frag_wigs is not None:
            files["frag_gff"] = os.path.join(
                args_srna.out_folder, "_".join(["tmp_frag", prefix]))
            files["frag_csv"] = os.path.join(
                args_srna.out_folder,
                "_".join(["tmp_frag_table", prefix]))
            args_srna = self.args_container.container_intersrna(
                "frag", files, args_srna, prefix,
                os.path.join(args_srna.gffs, gff), tran, tss,
                pro, fuzzy_tss)
            frag_datas = self._read_lib_wig(args_srna)
            log.write("Running sRNA_intergenic.py to detect intergenic "
                      "sRNAs for {0} based on fragmented libs.\n".format(
                          prefix))
            intergenic_srna(args_srna, frag_datas[0], frag_datas[1],
                            frag_datas[2], frag_datas[3], tss)
        if args_srna.tex_wigs is not None:
            files["tex_gff"] = os.path.join(
                args_srna.out_folder, "_".join(["tmp_tex", prefix]))
            files["tex_csv"] = os.path.join(
                args_srna.out_folder,
                "_".join(["tmp_tex_table", prefix]))
            args_srna = self.args_container.container_intersrna(
                "tex", files, args_srna, prefix,
                os.path.join(args_srna.gffs, gff), tran, tss,
                pro, fuzzy_tss)
            tex_datas = self._read_lib_wig(args_srna)
            log.write("Running sRNA_intergenic.py to detect intergenic "
                      "sRNAs for {0} based on dRNA-Seq libs.\n".format(
                          prefix))
            intergenic_srna(args_srna, tex_datas[0], tex_datas[1],
                            tex_datas[2], tex_datas[3], tss)
        files["merge_csv"] = "_".join([self.prefixs["normal_table"],
                                       prefix])
        files["merge_gff"] = "_".join([self.prefixs["normal"], prefix])
        self._merge_frag_tex_file(files, args_srna)
        return tss, frag_datas, tex_datas

    def _run_utrsrna(self, gff, tran, prefix, tss, pro, args_srna,
                     frag_datas, tex_datas, log):
        '''Detection of UTR-derived sRNAs.'''
        if "tmp_median" in os.listdir(args_srna.out_folder):
            os.remove(os.path.join(args_srna.out_folder, "tmp_median"))
        files = {"frag_gff": None, "frag_csv": None,
                 "tex_gff": None, "tex_csv": None,
                 "merge_gff": None, "merge_csv": None}
        if args_srna.tex_wigs is not None:
            files["tex_gff"] = os.path.join(
                args_srna.out_folder, "_".join(["tmp_utr_tex", prefix]))
            files["tex_csv"] = os.path.join(
                args_srna.out_folder,
                "_".join(["tmp_utr_tex_table", prefix]))
            args_srna = self.args_container.container_utrsrna(
                os.path.join(args_srna.gffs, gff), tran, tss, files,
                pro, os.path.join(self.fasta_path, prefix + ".fa"),
                "tex", prefix, args_srna)
            log.write("Running sRNA_utr_derived.py to detect "
                      "UTR-derived sRNAs for {0} based on dRNA-Seq "
                      "data.\n".format(prefix))
            utr_derived_srna(args_srna, tex_datas[0], tex_datas[1],
                             tex_datas[2], tex_datas[3])
        if args_srna.frag_wigs is not None:
            files["frag_gff"] = os.path.join(
                args_srna.out_folder,
                "_".join(["tmp_utr_frag", prefix]))
            files["frag_csv"] = os.path.join(
                args_srna.out_folder,
                "_".join(["tmp_utr_frag_table", prefix]))
            args_srna = self.args_container.container_utrsrna(
                os.path.join(args_srna.gffs, gff), tran, tss, files,
                pro, os.path.join(self.fasta_path, prefix + ".fa"),
                "frag", prefix, args_srna)
            log.write("Running sRNA_utr_derived.py to detect "
                      "UTR-derived sRNAs for {0} based on fragmented "
                      "libs.\n".format(prefix))
            utr_derived_srna(args_srna, frag_datas[0], frag_datas[1],
                             frag_datas[2], frag_datas[3])
        files["merge_csv"] = "_".join([self.prefixs["utr_table"],
                                       prefix])
        files["merge_gff"] = "_".join([self.prefixs["utr"], prefix])
        self._merge_frag_tex_file(files, args_srna)
        log.write("Running sRNA_filter_min_utr.py to filter out the "
                  "UTR-derived sRNAs which are too short.\n")
        filter_utr(files["merge_gff"], files["merge_csv"],
                   args_srna.min_utr)

    def _check_database(self, formatdb, database):
        if formatdb:
            if (database.endswith(".fa")) or (
                    database.endswith(".fna")) or (
                    database.endswith(".fasta")):
                return database
            else:
                folders = database.split("/")
                filename = folders[-1]
                folder = "/".join(folders[:-1])
                for fasta in os.listdir(folder):
                    if (fasta.endswith(".fa")) or (
                            fasta.endswith(".fna")) or (
                            fasta.endswith(".fasta")):
                        if ".".join(fasta.split(".")[:-1]) == filename:
                            database = os.path.join(folder, fasta)
                            return database
        else:
            return database
        print("Error: The nr database or sRNA database is not in fasta "
              "format, or the file name does not end with "
              ".fa, .fna or .fasta!")
        sys.exit()

    def _check_necessary_file(self, args_srna, log):
        if (args_srna.gffs is None) or (args_srna.trans is None) or (
                (args_srna.tex_wigs is None) and (
                args_srna.frag_wigs is None)):
            print("Error: Lack of required files!")
            log.write("The annotation gff files, transcript files, or "
                      "wiggle files were not assigned.\n")
            sys.exit()
        if args_srna.utr_srna:
            if (args_srna.tss_folder is None):
                print("Error: Lack of required TSS files for "
                      "UTR-derived sRNA detection!")
                log.write("TSS files are required for detecting "
                          "UTR-derived sRNAs.\n")
                sys.exit()
            if (args_srna.pro_folder is None):
                print("Warning: Lack of processing site files for "
                      "UTR-derived sRNA detection!")
                print("It may affect the results!")
        self._check_gff(args_srna.gffs)
        self._check_gff(args_srna.trans)
        args_srna.nr_database = self._check_database(
            args_srna.nr_format, args_srna.nr_database)
        args_srna.srna_database = self._check_database(
            args_srna.srna_format, args_srna.srna_database)
        if args_srna.tss_folder is not None:
            self._check_gff(args_srna.tss_folder)
            self.multiparser.parser_gff(args_srna.tss_folder, "TSS")
            self.multiparser.combine_gff(args_srna.gffs, self.tss_path,
                                         None, "TSS")
        if args_srna.pro_folder is not None:
            self._check_gff(args_srna.pro_folder)
            self.multiparser.parser_gff(args_srna.pro_folder,
                                        "processing")
            self.multiparser.combine_gff(args_srna.gffs, self.pro_path,
                                         None, "processing")
        if args_srna.sorf_file is not None:
            self._check_gff(args_srna.sorf_file)
            self.multiparser.parser_gff(args_srna.sorf_file, "sORF")
            self.multiparser.combine_gff(args_srna.gffs, self.sorf_path,
                                         None, "sORF")
        if args_srna.import_info is not None:
            if args_srna.utr_srna or (
                    "sec_str" in args_srna.import_info) or (
                    args_srna.nr_database is not None) or (
                    args_srna.srna_database is not None):
                if args_srna.fastas is None:
                    print("Error: Fasta file is not assigned!")
                    log.write("Fasta file is not assigned.\n")
                    sys.exit()
        if args_srna.terms is not None:
            self._check_gff(args_srna.terms)
            self.multiparser.parser_gff(args_srna.terms, "term")
            self.multiparser.combine_gff(args_srna.gffs, self.term_path,
                                         None, "term")
        else:
            self.term_path = None

    def _merge_tex_frag_datas(self, tex_datas, frag_datas):
        if (tex_datas is not None) and (frag_datas is not None):
            for index in [2, 3]:
                for strain, conds in frag_datas[index].items():
                    if strain not in tex_datas[index].keys():
                        tex_datas[index][strain] = conds
                    else:
                        for cond, tracks in conds.items():
                            tex_datas[index][strain][cond] = tracks
        elif (tex_datas is None) and (frag_datas is not None):
            tex_datas = frag_datas
        return tex_datas

    def _run_program(self, args_srna, log):
        prefixs = []
        tss = None
        for gff in os.listdir(args_srna.gffs):
            if gff.endswith(".gff"):
                prefix = gff.replace(".gff", "")
                prefixs.append(prefix)
                tran = self.helper.get_correct_file(
                    self.tran_path, "_transcript.gff", prefix,
                    None, None)
                gffs = {"merge": "_".join([self.prefixs["merge"],
                                           prefix]),
                        "utr": "_".join([self.prefixs["utr"], prefix]),
                        "normal": "_".join([self.prefixs["normal"],
                                            prefix])}
                csvs = {"merge": "_".join([
                            self.prefixs["merge_table"], prefix]),
                        "utr": "_".join([self.prefixs["utr_table"],
                                         prefix]),
                        "normal": "_".join([
                            self.prefixs["normal_table"], prefix])}
                if not args_srna.source:
                    if "TSS_classes" not in os.listdir(
                            args_srna.out_folder):
                        os.mkdir(os.path.join(args_srna.out_folder,
                                              "TSS_classes"))
                    print("Classifying TSSs of {0}".format(prefix))
                    upstream(os.path.join(self.tss_path,
                                          prefix + "_TSS.gff"),
                             None,
                             os.path.join(args_srna.gffs,
                                          prefix + ".gff"),
                             os.path.join(args_srna.out_folder,
                                          "TSS_classes",
                                          "_".join([prefix,
                                                    "TSS.gff"])),
                             args_srna, prefix)
                print("Running sRNA detection of {0}".format(prefix))
                tss, frag_datas, tex_datas = self._run_normal(
                    prefix, gff, tran, args_srna.fuzzy_tsss["inter"],
                    args_srna, log)
                if args_srna.utr_srna:
                    print("Running UTR-derived sRNA detection of "
                          "{0}".format(prefix))
                    if tss is None:
                        tss = self.helper.get_correct_file(
                            self.tss_path, "_TSS.gff", prefix,
                            None, None)
                    if self.pro_path is not None:
                        pro = self.helper.get_correct_file(
                            self.pro_path, "_processing.gff",
                            prefix, None, None)
                    else:
                        pro = None
                    if tss is not None:
                        self._run_utrsrna(gff, tran, prefix, tss, pro,
                                          args_srna, frag_datas,
                                          tex_datas, log)
                tex_datas = self._merge_tex_frag_datas(tex_datas,
                                                       frag_datas)
                del frag_datas
                gc.collect()
                self._merge_srna(args_srna, gffs, csvs, prefix,
                                 os.path.join(args_srna.gffs, gff),
                                 tss, tex_datas)
                del tex_datas
                filter_frag(csvs["merge"], gffs["merge"])
                self.helper.sort_gff(gffs["merge"],
                                     "_".join([self.prefixs["basic"],
                                               prefix]))
                log.write("\t" + "_".join([self.prefixs["basic"],
                                           prefix]) +
                          " is generated to temporarily store sRNA "
                          "candidates.\n")
                log.write("\t" + csvs["merge"] + " is generated to "
                          "temporarily store the detailed information "
                          "of sRNA candidates.\n")
        return prefixs

    def _merge_srna(self, args_srna, gffs, csvs, prefix,
                    gff_file, tss, tex_datas):
        print("Merging data of sRNA")
        merge_srna_gff(gffs, args_srna.in_cds,
                       args_srna.cutoff_overlap, gff_file,
                       args_srna.ex_srna)
        merge_srna_table(gffs["merge"], csvs, tex_datas[2],
                         tex_datas[3], tss, args_srna)

    def _run_RNAfold(self, seq_file, rnafold, sec_file, log):
        log.write("Running RNAfold to predict secondary structures.\n")
        log.write("Please make sure the version of the Vienna package "
                  "is at least 2.3.2.\n")
        log.write(" ".join(["cat", seq_file, "|",
                            rnafold, "-p", ">", sec_file]) + "\n")
        os.system(" ".join(["cat", seq_file, "|",
                            rnafold, "-p", ">", sec_file]))
        log.write("Done!\n")
        log.write("\t" + sec_file + " is generated.\n")

    def _get_seq_sec(self, fasta_path, out_folder, prefix, sec_path,
                     dot_path, rnafold, log):
        '''Extract the secondary structure energy.'''
        detect = False
        for fasta in os.listdir(fasta_path):
            if fasta.endswith(".fa") and (
                    ".".join(fasta.split(".")[:-1]) == prefix):
                detect = True
                break
        if detect:
            detect = False
            seq_file = os.path.join(out_folder,
                                    "_".join(["sRNA_seq", prefix]))
            sec_file = os.path.join(out_folder,
                                    "_".join(["sRNA_2d", prefix]))
            index_file = os.path.join(out_folder,
                                      "_".join(["sRNA_index", prefix]))
            log.write("Running helper.py to get the sequences of sRNAs "
                      "for {0}.\n".format(prefix))
            self.helper.get_seq("_".join([self.prefixs["basic"],
                                          prefix]),
                                os.path.join(fasta_path, fasta),
                                index_file)
            modify_header(seq_file, index_file)
            log.write("\t" + seq_file + " is generated.\n")
        else:
            print("Error: There is no fasta file of {0}!".format(
                prefix))
            print("Please check your imported information.")
            log.write("No fasta file of {0}.\n".format(prefix))
            sys.exit()
        tmp_path = os.path.join(out_folder, "tmp_srna")
        self.helper.check_make_folder(tmp_path)
        main_path = os.getcwd()
        os.chdir(tmp_path)
        sec_file = os.path.join(main_path, sec_file)
        seq_file = os.path.join(main_path, seq_file)
        index_file = os.path.join(main_path, index_file)
        tmp_sec_path = os.path.join(main_path, sec_path)
        tmp_dot_path = os.path.join(main_path, dot_path)
        self._run_RNAfold(seq_file, rnafold, sec_file, log)
        extract_info_sec(sec_file, seq_file, index_file)
        os.remove(index_file)
        log.write("Running extract_sRNA_info.py to extract the energy "
                  "information for {0}.\n".format(prefix))
        extract_energy(os.path.join(
                           main_path,
                           "_".join([self.prefixs["basic"], prefix])),
                       sec_file,
                       os.path.join(
                           main_path,
                           "_".join([self.prefixs["energy"], prefix])))
        log.write("\t" + os.path.join(
            main_path, "_".join([self.prefixs["energy"], prefix])) +
            " is generated to temporarily store energy information.\n")
        for ps in os.listdir(os.getcwd()):
            new_ps = ps.replace("|", "_")
            shutil.move(ps, new_ps)
        return {"sec": tmp_sec_path, "dot": tmp_dot_path,
                "main": main_path,
                "tmp": os.path.join(main_path, tmp_path)}

    def _run_replot(self, relplot_pl, tmp_paths, file_, dot_file,
                    rel_file, log):
        log.write(" ".join([relplot_pl,
                            os.path.join(tmp_paths["tmp"], file_),
                            os.path.join(tmp_paths["tmp"], dot_file),
                            ">", os.path.join(tmp_paths["tmp"],
                                              rel_file)]) + "\n")
        os.system(" ".join([relplot_pl,
                            os.path.join(tmp_paths["tmp"], file_),
                            os.path.join(tmp_paths["tmp"], dot_file),
                            ">", os.path.join(tmp_paths["tmp"],
                                              rel_file)]))

    def _replot_sec(self, relplot_pl, tmp_paths, prefix, log):
        log.write("Running relplot.pl for {0}.\n".format(prefix))
        for file_ in os.listdir(os.getcwd()):
            if file_.endswith("ss.ps"):
                dot_file = file_.replace("ss.ps", "dp.ps")
                rel_file = file_.replace("ss.ps", "rss.ps")
                print("Relplotting {0}".format(file_))
                self._run_replot(relplot_pl, tmp_paths, file_,
                                 dot_file, rel_file, log)
        log.write("Done!\n")
        os.mkdir(os.path.join(tmp_paths["sec"], prefix))
        os.mkdir(os.path.join(tmp_paths["dot"], prefix))
        self.helper.move_all_content(
            tmp_paths["tmp"],
            os.path.join(tmp_paths["sec"], prefix), ["rss.ps"])
        self.helper.move_all_content(
            tmp_paths["tmp"],
            os.path.join(tmp_paths["dot"], prefix), ["dp.ps"])
        log.write("All plots are stored in {0} and {1}.\n".format(
            os.path.join(tmp_paths["sec"], prefix),
            os.path.join(tmp_paths["dot"], prefix)))

    def _run_mountain(self, mountain_pl, dot_path, dot_file, out, log):
        log.write(" ".join([mountain_pl,
                            os.path.join(dot_path, dot_file)]) + "\n")
        call([mountain_pl, os.path.join(dot_path, dot_file)],
             stdout=out)

    def _plot_mountain(self, mountain, moun_path, tmp_paths, prefix,
                       mountain_pl, log):
        if mountain:
            tmp_moun_path = os.path.join(tmp_paths["main"], moun_path)
            os.mkdir(os.path.join(tmp_moun_path, prefix))
            txt_path = os.path.join(tmp_paths["tmp"], "tmp_txt")
            self.helper.check_make_folder(txt_path)
            print("Generating mountain plots of {0}".format(prefix))
            dot_path = os.path.join(tmp_paths["dot"], prefix)
            log.write("Running mountain.pl for {0}.\n".format(prefix))
            for dot_file in os.listdir(dot_path):
                if dot_file.endswith("dp.ps"):
                    moun_txt = os.path.join(tmp_paths["tmp"],
                                            "mountain.txt")
                    out = open(moun_txt, "w")
                    moun_file = dot_file.replace("dp.ps",
                                                 "mountain.pdf")
                    print("Generating {0}".format(moun_file))
                    self._run_mountain(mountain_pl, dot_path, dot_file,
                                       out, log)
                    plot_mountain_plot(moun_txt, moun_file)
                    shutil.move(moun_file,
                                os.path.join(tmp_moun_path, prefix,
                                             moun_file))
                    out.close()
                    os.remove(moun_txt)
            log.write("Done!\n")
            log.write("All plots are stored in {0}.".format(
                os.path.join(tmp_moun_path, prefix)))

    def _compute_2d_and_energy(self, args_srna, prefixs, log):
        print("Running energy calculation")
        moun_path = os.path.join(args_srna.out_folder, "figs",
                                 "mountain_plots")
        sec_path = os.path.join(args_srna.out_folder, "figs",
                                "sec_plots")
        dot_path = os.path.join(args_srna.out_folder, "figs",
                                "dot_plots")
        self.helper.remove_all_content(sec_path, None, "dir")
        self.helper.remove_all_content(dot_path, None, "dir")
        self.helper.remove_all_content(moun_path, None, "dir")
        for prefix in prefixs:
            tmp_paths = self._get_seq_sec(
                self.fasta_path, args_srna.out_folder, prefix,
                sec_path, dot_path, args_srna.rnafold, log)
            self._replot_sec(args_srna.relplot_pl, tmp_paths, prefix,
                             log)
            self._plot_mountain(args_srna.mountain, moun_path,
                                tmp_paths, prefix,
                                args_srna.mountain_pl, log)
            os.chdir(tmp_paths["main"])
            shutil.move("_".join([self.prefixs["energy"], prefix]),
                        "_".join([self.prefixs["basic"], prefix]))
            log.write("_".join([self.prefixs["basic"], prefix]) +
                      " is updated, and " +
                      "_".join([self.prefixs["energy"], prefix]) +
                      " is deleted.\n")
            shutil.rmtree(os.path.join(args_srna.out_folder,
                                       "tmp_srna"))

    def _run_blast(self, program, database, e, seq_file, blast_file,
                   strand, para_num, processes, log):
        if para_num == 1:
            log.write(" ".join([program, "-db", database, "-evalue",
                                str(e), "-strand", strand, "-query",
                                seq_file, "-out",
                                blast_file]) + "\n")
            call([program, "-db", database, "-evalue", str(e),
                  "-strand", strand, "-query", seq_file,
                  "-out", blast_file])
        else:
            log.write(" ".join([program, "-db", database, "-evalue",
                                str(e), "-strand", strand, "-query",
                                seq_file, "-out",
                                blast_file]) + "\n")
            p = Popen([program, "-db", database, "-evalue", str(e),
                       "-strand", strand, "-query", seq_file,
                       "-out", blast_file])
            processes.append(p)

    def _run_para_blast(self, program, database, e, seq_file,
                        blast_file, strand, paras, log):
        srnas = {}
        with open(seq_file) as fh:
            for line in fh:
                line = line.strip()
                if line.startswith(">"):
                    name = line
                    srnas[name] = ""
                else:
                    srnas[name] = line
        file_num = int(len(srnas) / paras)
        processes = []
        if (file_num == 0) or (paras == 1):
            self._run_blast(program, database, e, seq_file, blast_file,
                            strand, 1, processes, log)
        else:
            cur_para = 0
            line_num = 0
            first = True
            seq_files = []
            log.write("{0} is split into {1} subset files.\n".format(
                seq_file, paras))
            for name, seq in srnas.items():
                if (line_num >= file_num) or first:
                    if (not first) and (cur_para < paras):
                        out.close()
                    first = False
                    if cur_para < paras:
                        out = open("_".join([seq_file,
                                             str(cur_para)]), "w")
                        seq_files.append("_".join([seq_file,
                                                   str(cur_para)]))
                        line_num = 0
                        cur_para += 1
                if line_num < file_num:
                    out.write(name + "\n")
                    out.write(seq + "\n")
                if (cur_para == paras) and (line_num >= file_num):
                    out.write(name + "\n")
                    out.write(seq + "\n")
                line_num += 1
            out.close()
            for para in range(paras):
                self._run_blast(
                    program, database, e,
                    "_".join([seq_file, str(para)]),
                    "_".join([blast_file, strand, str(para)]),
                    strand, paras, processes, log)
            self._wait_process(processes)
            for para in range(paras):
                cur_blast_file = "_".join([blast_file, strand,
                                           str(para)])
                self.helper.merge_file(cur_blast_file, blast_file)
                os.remove(cur_blast_file)
            for file_ in seq_files:
                os.remove(file_)
        log.write("Done!\n")
        if (os.path.exists(blast_file)):
            log.write("\t" + blast_file + " is generated.\n")

    def _get_strand_fasta(self, seq_file, out_folder):
        tmp_plus = os.path.join(out_folder, "tmp_plus.fa")
        tmp_minus = os.path.join(out_folder, "tmp_minus.fa")
        out_p = open(tmp_plus, "w")
        out_m = open(tmp_minus, "w")
        strand = ""
        with open(seq_file) as sh:
            for line in sh:
                line = line.strip()
                if line.startswith(">"):
                    if line[-1] == "+":
                        out_p.write(line + "\n")
                        strand = "plus"
                    elif line[-1] == "-":
                        out_m.write(line + "\n")
                        strand = "minus"
                else:
                    if strand == "plus":
                        out_p.write(line + "\n")
                    elif strand == "minus":
                        out_m.write(line + "\n")
        out_p.close()
        out_m.close()
        return tmp_plus, tmp_minus

    def _blast(self, database, database_format, data_type, args_srna,
               prefixs, program, database_type, e, filters, log):
        if (database is None):
            log.write("No database was assigned!\n")
            print("Error: No database was assigned!")
        else:
            if database_format:
                database = self._formatdb(database, data_type,
                                          args_srna.out_folder,
                                          args_srna.blastdb,
                                          database_type, log)
            for prefix in prefixs:
                blast_file = os.path.join(
                    args_srna.out_folder, "blast_results_and_misc",
                    "_".join([database_type, "blast",
                              prefix + ".txt"]))
                if os.path.exists(blast_file):
                    os.remove(blast_file)
                srna_file = "_".join([self.prefixs["basic"], prefix])
                out_file = os.path.join(
                    args_srna.out_folder,
                    "_".join(["tmp", database_type, prefix]))
                print("Running Blast of {0} in {1}".format(prefix,
                                                           database))
                seq_file = os.path.join(
                    args_srna.out_folder,
                    "_".join(["sRNA_seq", prefix]))
                if (seq_file not in os.listdir(
                        args_srna.out_folder)) or ((
                        database_type == "nr") and (
                        "sec_str" in filters)):
                    log.write("Running helper.py to extract the "
                              "sequences of sRNAs.\n")
                    self.helper.get_seq(
                        srna_file,
                        os.path.join(self.fasta_path, prefix + ".fa"),
                        seq_file)
                    log.write("\t" + seq_file + " is generated.\n")
                if database_type == "nr":
                    log.write("Running BLAST+ against the nr database "
                              "for {0}.\n".format(prefix))
                    log.write("Make sure the version of BLAST+ is at "
                              "least 2.2.28+.\n")
                    tmp_plus, tmp_minus = self._get_strand_fasta(
                        seq_file, args_srna.out_folder)
                    tmp_blast = os.path.join(args_srna.out_folder,
                                             "blast_results_and_misc",
                                             "tmp_blast.txt")
                    if os.path.exists(tmp_blast):
                        os.remove(tmp_blast)
                    self._run_para_blast(program, database, e,
                                         tmp_plus, tmp_blast, "plus",
                                         args_srna.para_blast, log)
                    self._run_para_blast(program, database, e,
                                         tmp_minus, blast_file,
                                         "minus",
                                         args_srna.para_blast, log)
                    self.helper.merge_file(tmp_blast, blast_file)
                    os.remove(tmp_plus)
                    os.remove(tmp_minus)
                else:
                    log.write("Running BLAST+ against the sRNA "
                              "database for {0}.\n".format(prefix))
                    log.write("Make sure the version of BLAST+ is at "
                              "least 2.2.28+.\n")
                    self._run_para_blast(program, database, e,
                                         seq_file, blast_file, "both",
                                         args_srna.para_blast, log)
                log.write("Running extract_sRNA_info.py to extract "
                          "BLAST information.\n")
                extract_blast(blast_file, srna_file, out_file,
                              out_file + ".csv", database_type,
                              args_srna.blast_score_s,
                              args_srna.blast_score_n)
                log.write(srna_file + " is updated.\n")
                shutil.move(out_file, srna_file)

    def _class_srna(self, prefixs, args_srna, log):
        '''Classify the sRNAs based on the filters.'''
        if (args_srna.import_info is not None) or (
                args_srna.srna_database is not None) or (
                args_srna.nr_database is not None) or (
                self.sorf_path is not None) or (
                self.tss_path is not None) or (
                self.term_path is not None) or (
                args_srna.promoter_table is not None):
            log.write("Running sRNA_class.py to classify sRNAs based "
                      "on input files and --filter_info.\n")
            log.write("The following files are generated:\n")
            for prefix in prefixs:
                print("Classifying sRNA of {0}".format(prefix))
                class_gff = os.path.join(self.gff_output,
                                         "for_classes")
                class_table = os.path.join(self.table_output,
                                           "for_classes")
                self.helper.check_make_folder(
                    os.path.join(class_table, prefix))
                self.helper.check_make_folder(
                    os.path.join(class_gff, prefix))
                class_gff = os.path.join(class_gff, prefix)
                class_table = os.path.join(class_table, prefix)
                self.helper.check_make_folder(class_table)
                self.helper.check_make_folder(class_gff)
                out_stat = os.path.join(
                    self.stat_path,
                    "_".join(["stat_sRNA_class", prefix + ".csv"]))
                classify_srna(os.path.join(
                                  self.all_best["all_gff"],
                                  "_".join([prefix, "sRNA.gff"])),
                              class_gff, out_stat, args_srna)
                log.write("\t" + out_stat + "\n")
                for srna in os.listdir(class_gff):
                    out_table = os.path.join(
                        class_table, srna.replace(".gff", ".csv"))
                    gen_srna_table(
                        os.path.join(class_gff, srna),
                        "_".join([self.prefixs["merge_table"],
                                  prefix]),
                        "_".join([self.tmps["nr"], prefix + ".csv"]),
                        "_".join([self.tmps["srna"],
                                  prefix + ".csv"]),
                        args_srna, out_table, self.term_path)
                for folder in (class_gff, class_table):
                    for file_ in os.listdir(folder):
                        log.write("\t" + os.path.join(
                            folder, file_) + "\n")

    def _get_best_result(self, prefixs, args_srna, log):
        '''Get the best results based on the filters.'''
        log.write("Running gen_srna_output.py to select the best "
                  "candidates.\n")
        log.write("The following files are generated:\n")
        for prefix in prefixs:
            best_gff = os.path.join(self.all_best["best_gff"],
                                    "_".join([prefix, "sRNA.gff"]))
            best_table = os.path.join(self.all_best["best_table"],
                                      "_".join([prefix, "sRNA.csv"]))
            gen_best_srna(os.path.join(self.all_best["all_gff"],
                                       "_".join([prefix,
                                                 "sRNA.gff"])),
                          best_gff, args_srna)
            gen_srna_table(os.path.join(
                               self.all_best["best_gff"],
                               "_".join([prefix, "sRNA.gff"])),
                           "_".join([self.prefixs["merge_table"],
                                     prefix]),
                           "_".join([self.tmps["nr"],
                                     prefix + ".csv"]),
                           "_".join([self.tmps["srna"],
                                     prefix + ".csv"]),
                           args_srna, best_table, self.term_path)
            log.write("\t" + best_gff + "\n")
            log.write("\t" + best_table + "\n")

    def _remove_file(self, args_srna):
        self.helper.remove_all_content(args_srna.out_folder,
                                       "tmp_", "dir")
        self.helper.remove_all_content(args_srna.out_folder,
                                       "tmp_", "file")
        self.helper.remove_tmp_dir(args_srna.fastas)
        self.helper.remove_tmp_dir(args_srna.gffs)
        self.helper.remove_tmp(self.gff_output)
        if "temp_wig" in os.listdir(args_srna.out_folder):
            shutil.rmtree(os.path.join(args_srna.out_folder,
                                       "temp_wig"))
        if (args_srna.frag_wigs is not None) and (
                args_srna.tex_wigs is not None):
            shutil.rmtree(args_srna.merge_wigs)
        self.helper.remove_tmp_dir(args_srna.trans)
        if args_srna.tss_folder is not None:
            self.helper.remove_tmp_dir(args_srna.tss_folder)
        if args_srna.pro_folder is not None:
            self.helper.remove_tmp_dir(args_srna.pro_folder)
        if args_srna.sorf_file is not None:
            self.helper.remove_tmp_dir(args_srna.sorf_file)
        if "tmp_median" in os.listdir(args_srna.out_folder):
            os.remove(os.path.join(args_srna.out_folder, "tmp_median"))
        if self.term_path is not None:
            self.helper.remove_tmp_dir(args_srna.terms)
        tmp_blast = os.path.join(args_srna.out_folder,
                                 "blast_results_and_misc",
                                 "tmp_blast.txt")
        if os.path.exists(tmp_blast):
            os.remove(tmp_blast)

    def _filter_srna(self, args_srna, prefixs, log):
        '''Apply the filters to the sRNA candidates.'''
        if args_srna.compute_sec_str:
            self._compute_2d_and_energy(args_srna, prefixs, log)
        if args_srna.nr_database is not None:
            self._blast(args_srna.nr_database, args_srna.nr_format,
                        "prot", args_srna, prefixs, args_srna.blastx,
                        "nr", args_srna.e_nr, args_srna.import_info,
                        log)
        if self.sorf_path is not None:
            for prefix in prefixs:
                if ("_".join([prefix, "sORF.gff"]) in
                        os.listdir(self.sorf_path)):
                    tmp_srna = os.path.join(
                        args_srna.out_folder,
                        "".join(["tmp_srna_sorf", prefix]))
                    tmp_sorf = os.path.join(
                        args_srna.out_folder,
                        "".join(["tmp_sorf_srna", prefix]))
                    log.write("Running compare_sRNA_sORF.py to compare "
                              "sRNAs and sORFs.\n")
                    srna_sorf_comparison(
                        "_".join([self.prefixs["basic"], prefix]),
                        os.path.join(self.sorf_path,
                                     "_".join([prefix, "sORF.gff"])),
                        tmp_srna, tmp_sorf)
                    os.remove(tmp_sorf)
                    shutil.move(tmp_srna,
"_".join([self.prefixs["basic"], prefix])) log.write("_".join([self.prefixs["basic"], prefix]) + " is updated.\n") if args_srna.srna_database is not None: self._blast(args_srna.srna_database, args_srna.srna_format, "nucl", args_srna, prefixs, args_srna.blastn, "sRNA", args_srna.e_srna, args_srna.import_info, log) def _import_info_format(self, import_info): new_info = [] for info in import_info: info = info.lower() new_info.append(info) return new_info def _gen_table(self, prefixs, args_srna, log): log.write("Running gen_srna_output.py to generate sRNA table.\n") log.write("The following files are generated.\n") for prefix in prefixs: print("Generating table for " + prefix) out_table = os.path.join(self.all_best["all_table"], "_".join([prefix, "sRNA.csv"])) gen_srna_table(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])), "_".join([self.prefixs["merge_table"], prefix]), "_".join([self.tmps["nr"], prefix + ".csv"]), "_".join([self.tmps["srna"], prefix + ".csv"]), args_srna, out_table, self.term_path) log.write("\t" + out_table + "\n") def _print_rank_all(self, prefixs, log): log.write("Running print_rank_all.py for ranking the sRNA candidates.\n") log.write("The following files are updated:\n") for prefix in prefixs: all_table = os.path.join(self.all_best["all_table"], "_".join([prefix, "sRNA.csv"])) best_table = os.path.join(self.all_best["best_table"], "_".join([prefix, "sRNA.csv"])) print_rank_all(all_table, best_table) log.write("\t" + all_table + "\n") log.write("\t" + best_table + "\n") def _filter_min_utr(self, prefixs, min_utr): '''filter out the low expressed UTR-derived sRNA''' for prefix in prefixs: filter_utr(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])), os.path.join(self.all_best["all_table"], "_".join([prefix, "sRNA.csv"])), min_utr) def _antisense(self, gffs, prefixs): '''detection of antisense''' for prefix in prefixs: all_table = os.path.join(self.all_best["all_table"], "_".join([prefix, "sRNA.csv"])) best_table = os.path.join(self.all_best["best_table"], "_".join([prefix, "sRNA.csv"])) all_gff = os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])) best_gff = os.path.join(self.all_best["best_gff"], "_".join([prefix, "sRNA.gff"])) srna_antisense(all_gff, all_table, os.path.join(gffs, prefix + ".gff")) srna_antisense(best_gff, best_table, os.path.join(gffs, prefix + ".gff")) def _blast_stat(self, stat_path, srna_tables, log): '''do statistics for blast result''' log.write("Running blast_class.py to do statistics for BLAST results.\n") for srna_table in os.listdir(os.path.join(srna_tables, "best_candidates")): out_srna_blast = os.path.join( stat_path, "stat_" + srna_table.replace(".csv", "_blast.csv")) blast_class(os.path.join(srna_tables, "best_candidates", srna_table), out_srna_blast) log.write("\t" + out_srna_blast + " is generated.\n") def _compare_term_promoter(self, out_table, prefix, args_srna, log): '''compare sRNA with terminator and promoter''' if self.term_path is not None: log.write("Running compare_srna_term.py to compare sRNAs with " "terminators.\n") compare_srna_term(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])), out_table, os.path.join(self.term_path, "_".join([prefix, "term.gff"])), args_srna.fuzzy_b, args_srna.fuzzy_a) log.write(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])) + " is updated.\n") log.write(out_table + " is updated.\n") if (args_srna.promoter_table is not None): log.write("Running compare_srna_term.py to compare sRNAs with " 
"promoters.\n") compare_srna_promoter(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])), out_table, args_srna) log.write(os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])) + " is updated.\n") log.write(out_table + " is updated.\n") def _get_poly_u(self, prefixs, args_srna, log): print("Searching poly U tail ...") log.write("Running get_srna_poly_u.py to seach the poly U " "tails of sRNAs.\n") for prefix in prefixs: get_srna_poly_u("_".join([self.prefixs["basic"], prefix]), os.path.join(self.fasta_path, prefix + ".fa"), "_".join([self.prefixs["merge_table"], prefix]), args_srna) def _re_table(self, args_srna, prefixs, log): log.write("Running re_table.py to generate coverage information.\n") log.write("The following files are updated:\n") for type_ in ["all_candidates", "best_candidates"]: for prefix in prefixs: srna_table = os.path.join(args_srna.out_folder, "tables", type_, "_".join([ prefix, "sRNA.csv"])) reorganize_table(args_srna.libs, args_srna.merge_wigs, "Track/Coverage", srna_table) log.write("\t" + srna_table + "\n") for c_table in os.listdir(os.path.join(args_srna.out_folder, "tables", "for_classes", prefix)): for prefix in prefixs: srna_table_c = os.path.join(args_srna.out_folder, "tables", "for_classes", prefix, c_table) reorganize_table(args_srna.libs, args_srna.merge_wigs, "Track/Coverage", srna_table_c) log.write("\t" + srna_table_c + "\n") def _check_overlap_cds(self, args_srna, prefixs, log): log.write("Running check_srna_overlap.py to compare sRNAs with " "genome annotations.\n") log.write("The following files are updated:\n") for type_ in ["all_candidates", "best_candidates"]: for prefix in prefixs: srna_table = os.path.join(args_srna.out_folder, "tables", type_, "_".join([ prefix, "sRNA.csv"])) gff_file = os.path.join(args_srna.gffs, prefix + ".gff") check_overlap(srna_table, gff_file) log.write("\t" + srna_table + "\n") for c_table in os.listdir(os.path.join(args_srna.out_folder, "tables", "for_classes", prefix)): for prefix in prefixs: gff_file = os.path.join(args_srna.gffs, prefix + ".gff") srna_table_c = os.path.join(args_srna.out_folder, "tables", "for_classes", prefix, c_table) check_overlap(srna_table_c, gff_file) log.write("\t" + srna_table_c + "\n") def run_srna_detection(self, args_srna, log): self._check_necessary_file(args_srna, log) self.multiparser.parser_gff(args_srna.trans, "transcript") self.multiparser.combine_gff(args_srna.gffs, self.tran_path, None, "transcript") self.multiparser.parser_fasta(args_srna.fastas) self.multiparser.combine_fasta(args_srna.gffs, self.fasta_path, None) if args_srna.import_info is not None: args_srna.import_info = self._import_info_format(args_srna.import_info) prefixs = self._run_program(args_srna, log) self._get_poly_u(prefixs, args_srna, log) self._filter_srna(args_srna, prefixs, log) for prefix in prefixs: shutil.copyfile("_".join([self.prefixs["basic"], prefix]), os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"]))) log.write("\t" + os.path.join(self.all_best["all_gff"], "_".join([prefix, "sRNA.gff"])) + " is generated, and " "_".join([self.prefixs["basic"], prefix]) + " is deleted.\n") self._compare_term_promoter("_".join([self.prefixs["merge_table"], prefix]), prefix, args_srna, log) self._gen_table(prefixs, args_srna, log) self._class_srna(prefixs, args_srna, log) self._get_best_result(prefixs, args_srna, log) self._print_rank_all(prefixs, log) if args_srna.srna_database is not None: if "blast_srna" in args_srna.import_info: self._blast_stat(self.stat_path, 
self.table_output, log) self._check_overlap_cds(args_srna, prefixs, log) self._re_table(args_srna, prefixs, log) self._remove_file(args_srna)
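# --- Illustrative sketch (not part of the original module) ------------------
# _run_para_blast() above splits the sRNA FASTA file into roughly equal
# subsets so several BLAST+ processes can run in parallel. The self-contained
# demo below mirrors only that splitting idea on a plain dict; the sequence
# names, sequences and subset count are made-up examples.
if __name__ == "__main__":
    demo_srnas = {">srna_{0}".format(i): "ACGT" * 10 for i in range(10)}
    paras = 3
    per_subset = max(1, int(len(demo_srnas) / paras))
    subsets = []
    current = {}
    for name, seq in demo_srnas.items():
        current[name] = seq
        if (len(current) >= per_subset) and (len(subsets) < paras - 1):
            subsets.append(current)
            current = {}
    if current:
        subsets.append(current)
    for index, subset in enumerate(subsets):
        print("subset {0} holds {1} sequences".format(index, len(subset)))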
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/srna.py
srna.py
import itertools
import numpy as np
from annogesiclib.gff3 import Gff3Parser
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')


def plot(pri, sec, anti, inter, orph, total, total_more, name,
         feature_name, file_type):
    tsss = [pri, sec, anti, inter, orph]
    ind = np.arange(5)
    width = 0.5
    fig, ax = plt.subplots()
    if feature_name == "processing site":
        plt.text(0.85, 0.95, "Total processing sites", ha='center',
                 va='center', transform=ax.transAxes)
        plt.text(0.85, 0.9, str(total), ha='center', va='center',
                 transform=ax.transAxes)
    elif feature_name == "TSS":
        plt.text(0.9, 0.95, "Total TSSs", ha='center', va='center',
                 transform=ax.transAxes)
        plt.text(0.9, 0.9, str(total), ha='center', va='center',
                 transform=ax.transAxes)
    rects = ax.bar(ind, tsss, width, color='#9999FF')
    ax.set_ylabel("The number of " + feature_name)
    ax.set_xticks(ind + width / 2)
    ax.set_xticklabels(('Primary', 'Secondary', 'Antisense',
                        'Internal', 'Orphan'))
    ax.set_xlabel("The type of " + feature_name)
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                 '%d' % int(height), ha='center', va='bottom')
    plt.savefig(file_type + "_class_" + name + ".png")


def stat(tsss, strain, feature_name, out_stat, file_type, out_lib):
    tss_type = {"Primary": [], "Secondary": [], "Internal": [],
                "Antisense": [], "Orphan": []}
    tss_libs = {}
    num_tss = 0
    num_tss_more = 0
    for entry in tsss:
        num_tss += 1
        if entry.attributes["type"].find("Primary") != -1:
            tss_type["Primary"].append(num_tss)
        if entry.attributes["type"].find("Secondary") != -1:
            tss_type["Secondary"].append(num_tss)
        if entry.attributes["type"].find("Antisense") != -1:
            tss_type["Antisense"].append(num_tss)
        if entry.attributes["type"].find("Internal") != -1:
            tss_type["Internal"].append(num_tss)
        if entry.attributes["type"].find("Orphan") != -1:
            tss_type["Orphan"].append(num_tss)
        if "libs" in entry.attributes.keys():
            libs = entry.attributes["libs"].split("&")
            for lib in libs:
                if lib not in tss_libs.keys():
                    tss_libs[lib] = 1
                else:
                    tss_libs[lib] += 1
    for key in tss_type.keys():
        num_tss_more = num_tss_more + len(tss_type[key])
    plot(len(tss_type["Primary"]), len(tss_type["Secondary"]),
         len(tss_type["Antisense"]), len(tss_type["Internal"]),
         len(tss_type["Orphan"]), num_tss, num_tss_more, strain,
         feature_name, file_type)
    out_stat.write(strain + ":\n")
    out_lib.write(strain + ":\n")
    out_lib.write("total number of TSSs = {0}\n".format(num_tss))
    for tss_lib, lib_num in tss_libs.items():
        out_lib.write(": ".join([tss_lib, str(lib_num)]))
        out_lib.write(" ({0})\n".format(lib_num / num_tss))
    out_stat.write("total number of {0} (if one {1} belongs to two classes, "
                   "it is counted twice) = {2}\n".format(
                       feature_name, feature_name, num_tss_more))
    out_stat.write("total number of unique {0} (if one {1} belongs to "
                   "two classes, it is counted only once) = {2}\n".format(
                       feature_name, feature_name, num_tss))
    for it in range(1, 5):
        for tss in itertools.combinations(tss_type.keys(), it):
            union = []
            for key in tss:
                union = list(set(tss_type[key]) | set(union))
            out_stat.write("{0} = {1} ({2})\n".format(
                '-'.join(tss), len(union),
                float(len(union)) / float(num_tss)))
    out_stat.write("\n")


def stat_tsspredator(tss_file, file_type, stat_file, lib_file):
    if file_type == "processing":
        feature_name = "processing site"
    else:
        feature_name = "TSS"
    tsss = []
    tsss_strain = {}
    pre_seq_id = ""
    out_stat = open(stat_file, "w")
    out_lib = open(lib_file, "w")
    gff_parser = Gff3Parser()
    fh = open(tss_file)
    for entry in gff_parser.entries(fh):
        if entry.seq_id != pre_seq_id:
            pre_seq_id = entry.seq_id
            tsss_strain[entry.seq_id] = []
        tsss_strain[entry.seq_id].append(entry)
        tsss.append(entry)
    tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    if len(tsss_strain) > 1:
        stat(tsss, "All_genomes", feature_name, out_stat, file_type, out_lib)
    for strain in tsss_strain.keys():
        stat(tsss_strain[strain], strain, feature_name, out_stat,
             file_type, out_lib)
    out_stat.close()
    out_lib.close()
    fh.close()
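# --- Illustrative usage (not part of the original module) -------------------
# stat_tsspredator() classifies each TSS in a TSSpredator GFF3 file by type
# (Primary/Secondary/Internal/Antisense/Orphan), plots the class counts and
# writes one statistics file plus one per-library count file. All file names
# below are hypothetical placeholders.
if __name__ == "__main__":
    stat_tsspredator("example_TSS.gff", "TSS",
                     "stat_TSS_class_example.csv",
                     "stat_TSS_libs_example.csv")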
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/stat_TSSpredator.py
stat_TSSpredator.py
import sys
from annogesiclib.gff3 import Gff3Parser


def import_psortb(seq_name, psortbs, local_name, local_score, type_, results):
    seq_datas = seq_name.split("__")
    seq_id = seq_datas[0]
    features = seq_datas[1].split("_")
    prot_id = "_".join(features[:-3])
    if type_ == "multi":
        psortbs.append({"seq_id": seq_id,
                        "protein_id": prot_id,
                        "strand": features[-3],
                        "start": int(features[-2]),
                        "end": int(features[-1]),
                        "local": "/".join(local_name),
                        "score": "/".join(local_score)})
    else:
        psortbs.append({"seq_id": seq_id,
                        "protein_id": prot_id,
                        "strand": features[-3],
                        "start": int(features[-2]),
                        "end": int(features[-1]),
                        "local": results[0],
                        "score": results[-1]})
    return {"datas": seq_datas, "features": features, "prot_id": prot_id}


def get_results(line, scores, psortbs, out_p, seq_name, fuzzy):
    '''print the results of psortb'''
    local_name = []
    local_score = []
    if len(line) == 0:
        pass
    elif "(This protein may have multiple localization sites.)" in line:
        results = line.split(" ")
        sort_scores = sorted(scores, key=lambda x: (x["score"]),
                             reverse=True)
        first = True
        high_scores = []
        for score in sort_scores:
            if first:
                high_scores.append(score)
                first = False
                best_score = score
            else:
                if score["local"] != results[0]:
                    if score["score"] < (best_score["score"] - fuzzy):
                        break
                    else:
                        high_scores.append(score)
        for high_score in high_scores:
            local_name.append(high_score["local"])
            local_score.append(str(high_score["score"]))
        seq_datas = import_psortb(seq_name, psortbs, local_name,
                                  local_score, "multi", results)
        out_p.write("\t".join([seq_datas["datas"][0], seq_datas["prot_id"],
                               "\t".join(seq_datas["features"][-3:]),
                               "/".join(local_name),
                               "/".join(local_score)]) + "\n")
    else:
        results = line.split(" ")
        seq_datas = import_psortb(seq_name, psortbs, None, None,
                                  "unique", results)
        out_p.write("\t".join([seq_datas["datas"][0], seq_datas["prot_id"],
                               "\t".join(seq_datas["features"][-3:]),
                               results[0], results[-1]]) + "\n")
    return local_name, local_score


def get_information(psortb_table, out_p, fuzzy):
    '''get the information of psortb'''
    scores = []
    psortbs = []
    seq_name = None
    detects = {"score": False, "result": False}
    with open(psortb_table, "r") as p_h:
        for line in p_h:
            line = line.strip()
            if (line.startswith("--")) or \
                    (line.startswith("Secondary localization(s):")):
                detects["result"] = False
            if detects["score"]:
                if "Final Prediction:" not in line:
                    datas = line.split(" ")
                    scores.append({"local": datas[0],
                                   "score": float(datas[-1])})
            if detects["result"]:
                local_name, local_score = get_results(
                    line, scores, psortbs, out_p, seq_name, fuzzy)
            if line.startswith("Final Prediction:"):
                detects["score"] = False
                detects["result"] = True
            if line.startswith("SeqID:"):
                seq_name = line.replace("SeqID: ", "")
                scores = []
            if line.startswith("Localization Scores:"):
                detects["score"] = True
    return psortbs


def print_gff(gffs, psortbs, out_m):
    for gff in gffs:
        detect = False
        for psortb in psortbs:
            if (gff.feature == "CDS") and \
                    (gff.start == psortb["start"]) and \
                    (gff.end == psortb["end"]) and \
                    (gff.strand == psortb["strand"]):
                if "protein_id" in gff.attributes.keys():
                    if gff.attributes["protein_id"] == psortb["protein_id"]:
                        detect = True
                        break
                elif "locus_tag" in gff.attributes.keys():
                    if gff.attributes["locus_tag"] == psortb["protein_id"]:
                        detect = True
                        break
                else:
                    if gff.attributes["ID"] == psortb["protein_id"]:
                        detect = True
                        break
        if detect:
            gff.attribute_string = gff.attribute_string + \
                ";subcellular_localization=" + psortb["local"]
            out_m.write("\t".join([gff.info_without_attributes,
                                   gff.attribute_string + "\n"]))
        else:
            out_m.write(gff.info + "\n")


def extract_psortb(psortb_table, out_psortb, merge_gff, out_merge, fuzzy):
    '''extract and re-generate the output information of psortb'''
    gffs = []
    if merge_gff:
        if out_merge is None:
            print("Error: Assign a name of output merged annotation file.")
            sys.exit()
        out_m = open(out_merge, "w")
        for entry in Gff3Parser().entries(open(merge_gff)):
            gffs.append(entry)
        gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start,
                                           k.end, k.strand))
    else:
        out_m = None
    out_p = open(out_psortb, "w")
    out_p.write("#Genome\tProtein\tStrand\tStart\tEnd\tLocation\tScore\n")
    psortbs = get_information(psortb_table, out_p, fuzzy)
    if merge_gff:
        print_gff(gffs, psortbs, out_m)
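# --- Illustrative usage (not part of the original module) -------------------
# extract_psortb() parses a PSORTb result table, writes a tab-separated
# summary and, when a GFF3 file is given, appends the predicted subcellular
# localization to the matching CDS entries. All file names below are
# hypothetical; fuzzy is the score distance within which several candidate
# localizations are kept.
if __name__ == "__main__":
    extract_psortb("example_psortb.txt", "example_psortb_table.csv",
                   "example_annotation.gff", "example_merged.gff", 0.5)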
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/extract_psortb.py
extract_psortb.py
import os from annogesiclib.helper import Helper from annogesiclib.gff3 import Gff3Parser from annogesiclib.parser_wig import WigParser def read_gff(gff_file, features): gffs = [] if not os.path.isfile(gff_file): filename = gff_file.split(".") gff_file = ".".join(filename[0:-2]) + ".gff" g_f = open(gff_file, "r") for entry in Gff3Parser().entries(g_f): if entry.feature in features: gffs.append(entry) gffs = sorted(gffs, key=lambda k: (k.seq_id, k.start)) return gffs def is_primary(cds_start, cds_end, tss_pos, strand): '''check primary TSS''' if strand == "+": if is_utr(cds_start, tss_pos, 300) and (cds_start >= tss_pos): return True else: if is_utr(tss_pos, cds_end, 300) and (cds_end <= tss_pos): return True def is_internal(cds_start, cds_end, tss_pos, strand): '''check internal TSS''' if ((cds_start < tss_pos) and (cds_end > tss_pos)) or ( (strand == "+") and (tss_pos == cds_end)) or ( (strand == "-") and (tss_pos == cds_start)): return True def is_antisense(cds_start, cds_end, tss_pos, strand): '''check antisense TSS''' if ((is_utr(cds_start, tss_pos, 100)) and (cds_start >= tss_pos)) or ( (is_utr(tss_pos, cds_end, 100)) and (cds_end <= tss_pos)) or ( is_internal(cds_start, cds_end, tss_pos, strand)): return True def is_utr(pos1, pos2, length): '''check the utr''' if pos1 - pos2 <= length: return True def get_attributes(tss, cds): if tss.attributes["associated_gene"] == "orphan": if "locus_tag" in cds.attributes.keys(): tss.attributes["associated_gene"] = cds.attributes["locus_tag"] else: strand = Helper().get_strand_name(cds.strand) tss.attributes["associated_gene"] = cds.feature + ":" + \ str(cds.start) + "-" + str(cds.end) + "_" + strand else: if "locus_tag" in cds.attributes.keys(): tss.attributes["associated_gene"] = "&".join([ tss.attributes["associated_gene"], cds.attributes["locus_tag"]]) else: strand = Helper().get_strand_name(cds.strand) tss.attributes["associated_gene"] = "&".join([ tss.attributes["associated_gene"], cds.feature + ":" + str(cds.start) + "-" + str(cds.end) + "_" + strand]) def detect_coverage(wigs, tss, ref): '''compare the coverage of TSS in order to get proper primary TSS''' tss_cover = -1 ref_cover = -1 for strain, tracks in wigs.items(): if strain == tss.seq_id: tss_cover = 0 ref_cover = 0 for wig in tracks.values(): if ((tss.start + 1) <= len(wig)) and ( (ref.start + 1) <= len(wig)): if tss.strand == "+": diff_t = (wig[tss.start - 1]["coverage"] - wig[tss.start - 2]["coverage"]) diff_r = (wig[ref.start - 1]["coverage"] - wig[ref.start - 2]["coverage"]) else: diff_t = (wig[tss.start - 1]["coverage"] - wig[tss.start]["coverage"]) diff_r = (wig[ref.start - 1]["coverage"] - wig[ref.start]["coverage"]) tss_cover = tss_cover + diff_t ref_cover = ref_cover + diff_r return tss_cover, ref_cover def del_repeat(tsss): '''delete redundant assigned types of TSS''' for tss in tsss: types = tss.attributes["type"].split("&") utrs = tss.attributes["UTR_length"].split("&") genes = tss.attributes["associated_gene"].split("&") detect_pri = False detect_sec = False index = 0 final_types = [] final_utrs = [] final_genes = [] for type_ in types: if (type_ == "Primary") and (not detect_pri): detect_pri = True pri_utr = int(utrs[index].split("_")[1]) real_index = index elif (type_ == "Primary") and (detect_pri): compare_utr = int(utrs[index].split("_")[1]) if compare_utr < pri_utr: pri_utr = compare_utr real_index = index elif (type_ == "Secondary") and (not detect_sec): detect_sec = True sec_utr = int(utrs[index].split("_")[1]) real_index2 = index elif (type_ == "Secondary") and 
(detect_sec): compare_utr = int(utrs[index].split("_")[1]) if compare_utr < sec_utr: sec_utr = compare_utr real_index2 = index elif (type_ == "Antisense") or (type_ == "Internal") or ( type_ == "Orphan"): final_types.append(types[index]) final_utrs.append(utrs[index]) final_genes.append(genes[index]) index += 1 if detect_pri: final_types.append(types[real_index]) final_utrs.append(utrs[real_index]) final_genes.append(genes[real_index]) else: if detect_sec: final_types.append(types[real_index2]) final_utrs.append(utrs[real_index2]) final_genes.append(genes[real_index2]) tss.attributes["type"] = "&".join(final_types) tss.attributes["UTR_length"] = "&".join(final_utrs) tss.attributes["associated_gene"] = "&".join(final_genes) def fix_attributes(tss, tss_entry): '''change primary TSS to secondary TSS''' index = 0 genes = tss.attributes["associated_gene"].split("&") utrs = tss.attributes["UTR_length"].split("&") types = tss.attributes["type"].split("&") for gene in genes: if gene == tss_entry["locus"]: utrs[index] = utrs[index].replace("Primary", "Secondary") types[index] = types[index].replace("Primary", "Secondary") index += 1 tss.attributes["UTR_length"] = "&".join(utrs) tss.attributes["type"] = "&".join(types) def get_primary_locus_tag(tss): tsss = [] tss_types = tss.attributes["type"].split("&") tss_locus_tags = tss.attributes["associated_gene"].split("&") tss_utr_lengths = tss.attributes["UTR_length"].split("&") index = 0 for tss_type in tss_types: if "Primary" in tss_type: tsss.append({"locus": tss_locus_tags[index], "utr": int(tss_utr_lengths[index].split("_")[1]), "type": tss_type}) index += 1 return tsss def fix_primary_type(tsss, wigs_f, wigs_r): '''Deal with the multiple primary TSS of one gene. change the low expressed one to be secondary TSS''' for tss in tsss: if "Primary" in tss.attributes["type"]: tss_entrys = get_primary_locus_tag(tss) for ref in tsss: if (ref.seq_id == tss.seq_id) and ( ref.strand == tss.strand) and ( ref.start == tss.start): pass else: if "Primary" in ref.attributes["type"]: ref_entrys = get_primary_locus_tag(ref) for tss_entry in tss_entrys: for ref_entry in ref_entrys: if (tss_entry["locus"] == ref_entry["locus"]) and ( tss_entry["type"] == "Primary") and ( ref_entry["type"] == "Primary") and ( tss.seq_id == ref.seq_id): if tss.strand == "+": tss_cover, ref_cover = detect_coverage( wigs_f, tss, ref) else: tss_cover, ref_cover = detect_coverage( wigs_r, tss, ref) if tss_cover < ref_cover: fix_attributes(tss, tss_entry) elif tss_cover > ref_cover: fix_attributes(ref, ref_entry) elif tss_cover == ref_cover: if tss_entry["utr"] < ref_entry["utr"]: fix_attributes(ref, ref_entry) elif (tss_entry["utr"] > ref_entry["utr"]): fix_attributes(tss, tss_entry) del_repeat(tsss) return tsss def read_wig(filename, strand): wigs = {} wig_parser = WigParser() if filename: wig_fh = open(filename) for entry in wig_parser.parser(wig_fh, strand): if entry.strain not in wigs.keys(): strain = entry.strain wigs[strain] = {} if entry.track not in wigs[strain].keys(): wigs[strain][entry.track] = [] wigs[strain][entry.track].append({ "pos": entry.pos, "coverage": entry.coverage, "strand": entry.strand}) wig_fh.close() return wigs def get_attributes_int_anti(tss, cds, type_): '''import useful information to attributes''' if tss.attributes["type"] != "Orphan": tss.attributes["type"] = "&".join( [tss.attributes["type"], type_]) tss.attributes["UTR_length"] = "&".join( [tss.attributes["UTR_length"], type_ + "_NA"]) else: tss.attributes["type"] = type_ tss.attributes["UTR_length"] = type_ 
+ "_NA" get_attributes(tss, cds) def compare_cds_check_orphan(tsss, cdss): '''main part of checking all orphan TSS''' for tss in tsss: if tss.attributes["type"] == "Orphan": for cds in cdss: if (tss.seq_id == cds.seq_id) and \ (tss.strand == cds.strand): if is_primary(cds.start, cds.end, tss.start, tss.strand): if tss.attributes["type"] != "Orphan": tss.attributes["type"] = "&".join( [tss.attributes["type"], "Primary"]) if tss.strand == "+": tss.attributes["UTR_length"] = "&".join([ tss.attributes["UTR_length"], "Primary_" + str(cds.start - tss.start)]) else: tss.attributes["UTR_length"] = "&".join([ tss.attributes["UTR_length"], "Primary_" + str(tss.start - cds.end)]) else: tss.attributes["type"] = "Primary" if tss.strand == "+": tss.attributes["UTR_length"] = ( "Primary_" + str(cds.start - tss.start)) else: tss.attributes["UTR_length"] = ( "Primary_" + str(tss.start - cds.end)) get_attributes(tss, cds) if is_internal(cds.start, cds.end, tss.start, tss.strand): if "locus_tag" in cds.attributes.keys(): if (cds.attributes["locus_tag"] not in tss.attributes["associated_gene"]): get_attributes_int_anti(tss, cds, "Internal") else: strand = Helper().get_strand_name(cds.strand) if ("".join([cds.feature, ":", str(cds.start), "-", str(cds.end), "_", strand]) not in tss.attributes["associated_gene"]): get_attributes_int_anti(tss, cds, "Internal") if is_antisense(cds.start, cds.end, tss.start, tss.strand): if "locus_tag" in cds.attributes.keys(): if (cds.attributes["locus_tag"] not in tss.attributes["associated_gene"]): get_attributes_int_anti(tss, cds, "Antisense") else: strand = Helper().get_strand_name(cds.strand) if ("".join([cds.feature, ":", str(cds.start), "-", str(cds.end), "_", strand]) not in tss.attributes["associated_gene"]): get_attributes_int_anti(tss, cds, "Antisense") def check_orphan(tss_file, gff_file, wig_f_file, wig_r_file, out_gff): '''If the genome annotation gff file has no locus tag, TSSpredator will classify all TSS into orphan. It is for fixing this mistake. It will compare the TSS and gene to classify the TSS.''' cdss = read_gff(gff_file, ["CDS", "tRNA", "rRNA"]) tsss = read_gff(tss_file, ["TSS"]) wigs_f = read_wig(wig_f_file, "+") wigs_r = read_wig(wig_r_file, "-") out = open(out_gff, "w") out.write("##gff-version 3\n") compare_cds_check_orphan(tsss, cdss) final_tsss = fix_primary_type(tsss, wigs_f, wigs_r) for tss in final_tsss: tss.attribute_string = ";".join( ["=".join(items) for items in tss.attributes.items()]) out.write("\t".join([str(field) for field in [ tss.seq_id, tss.source, tss.feature, tss.start, tss.end, tss.score, tss.strand, tss.phase, tss.attribute_string]]) + "\n")
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/check_orphan.py
check_orphan.py
import os
import shutil
import sys
import csv
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from annogesiclib.seqmodifier import SeqModifier


class SeqEditer(object):
    '''Edit the sequence if it is needed'''

    def _row_to_location(self, row, out_name):
        return {"ref_id": row[0],
                "target_id": "_".join([out_name, row[0]]),
                "datas": [{"ref_nt": row[3], "tar_nt": row[4],
                           "position": row[1]}]}

    def _import_data(self, mod_table_file, out_name):
        datas = []
        first = True
        num_index = 0
        fh = open(mod_table_file)
        for row in csv.reader(fh, delimiter="\t"):
            if row[0].startswith("#"):
                continue
            else:
                if first:
                    datas.append(self._row_to_location(row, out_name))
                    pre_ref_id = row[0].strip()
                    first = False
                else:
                    if (row[0] == pre_ref_id):
                        datas[num_index]["datas"].append(
                            {"ref_nt": row[3].strip(),
                             "tar_nt": row[4].strip(),
                             "position": row[1].strip()})
                    else:
                        datas.append(self._row_to_location(row, out_name))
                        num_index += 1
                    pre_ref_id = row[0].strip()
        fh.close()
        return datas

    def modify_seq(self, fasta_folder, mod_table_file, output_folder,
                   out_name):
        datas = self._import_data(mod_table_file, out_name)
        for data in datas:
            seq = ""
            if (data["ref_id"] + ".fa") in os.listdir(fasta_folder):
                filename = os.path.join(fasta_folder, data["ref_id"] + ".fa")
                with open(filename, "r") as fasta:
                    for line in fasta:
                        line = line.strip()
                        if len(line) != 0:
                            if line[0] != ">":
                                seq = seq + line
                seq_modifier = SeqModifier(seq)
                for change in data["datas"]:
                    if change["ref_nt"] == "-":
                        seq_modifier.insert(
                            int(change["position"]), change["tar_nt"])
                    elif change["tar_nt"] == "-":
                        seq_modifier.remove(int(change["position"]),
                                            len(change["ref_nt"]))
                    else:
                        seq_modifier.replace(
                            int(change["position"]), change["tar_nt"])
                record = SeqRecord(Seq(seq_modifier.seq()))
                record.id = data["target_id"]
                record.description = ""
                SeqIO.write(record, os.path.join(
                    output_folder, record.id + ".fa"), "fasta")

    def modify_header(self, input_file):
        first = True
        tmp_file_path = input_file + "_TMP"
        output_fh = open(input_file + "_TMP", "w")
        with open(input_file, "r") as s_h:
            for line in s_h:
                line = line.strip()
                if first:
                    first = False
                    if (line[0] != ">"):
                        print("Error: No proper header!!")
                        sys.exit()
                if line.startswith(">"):
                    mod = line.split("|")
                    folder = input_file.split("/")
                    folder = "/".join(folder[:-1])
                    if (len(mod) == 5) and (line[0] == ">"):
                        new_header = ">%s" % (mod[3])
                    elif (len(mod) != 5) and (line[0] == ">"):
                        new_header = line.split(" ")[0]
                    elif (line[0] != ">"):
                        print("Error: No proper header!!")
                        sys.exit()
                    line = new_header
                output_fh.write(line + "\n")
        output_fh.close()
        shutil.move(tmp_file_path, input_file)
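# --- Illustrative usage (not part of the original module) -------------------
# modify_seq() reads a tab-separated mutation table (reference id, position,
# ..., reference nt, target nt) and writes the edited FASTA files; "-" in the
# reference column means insertion and "-" in the target column means
# deletion. The folder and file names below are hypothetical.
if __name__ == "__main__":
    SeqEditer().modify_seq("example_fasta_folder", "example_mutations.csv",
                           "example_output_folder", "edited")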
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/seq_editer.py
seq_editer.py
from annogesiclib.gff3 import Gff3Parser from annogesiclib.helper import Helper def uni(tas, genes, out): '''This is for the transcript which is not overlap with annotation''' start_tmp = 0 stop_tmp = 0 strand_tmp = "" detect = False for ta in tas: for gene in genes: if (ta.strand == gene.strand) and ( ta.seq_id == gene.seq_id): if ((ta.start < gene.start) and ( ta.end > gene.start) and ( ta.end < gene.end)) or ( (ta.start > gene.start) and ( ta.end < gene.end)) or ( (ta.start > gene.start) and ( ta.start < gene.end) and ( ta.end > gene.end)): detect = True if ((not detect) and (start_tmp != ta.start) and ( stop_tmp != ta.end)) or ( (not detect) and (((start_tmp == ta.start) or ( stop_tmp == ta.end)) and ( strand_tmp != ta.strand))): out.write(ta.info + "\n") start_tmp = ta.start stop_tmp = ta.end strand_tmp = ta.strand detect = False def check_modify(start_tmp, stop_tmp, gene, ta, modify): if (ta.start >= gene.start) and ( ta.end <= gene.end): if "within_extend_ends" in modify: start_tmp = gene.start stop_tmp = gene.end else: start_tmp = ta.start stop_tmp = ta.end elif ((ta.start <= gene.start) and ( ta.end >= gene.start) and ( ta.end <= gene.end)): if (ta.strand == "+") and ("extend_3end" in modify): start_tmp = ta.start stop_tmp = gene.end elif (ta.strand == "-") and ("extend_5end" in modify): start_tmp = ta.start stop_tmp = gene.end else: start_tmp = ta.start stop_tmp = ta.end elif ((ta.start >= gene.start) and ( ta.start <= gene.end) and ( ta.end >= gene.end)): if (ta.strand == "+") and ("extend_5end" in modify): start_tmp = gene.start stop_tmp = ta.end elif (ta.strand == "-") and ("extend_3end" in modify): start_tmp = gene.start stop_tmp = ta.end else: start_tmp = ta.start stop_tmp = ta.end return start_tmp, stop_tmp def overlap(tas, genes, out, modify): '''Check the overlap of annotation and transcript''' check = False for gene in genes: start_tmp = 0 stop_tmp = 0 start = 0 stop = 0 for ta in tas: if (ta.strand == gene.strand) and ( ta.seq_id == gene.seq_id): if ((ta.start <= gene.start) and ( ta.end >= gene.start) and ( ta.end <= gene.end)) or ( (ta.start >= gene.start) and ( ta.end <= gene.end)) or ( (ta.start >= gene.start) and ( ta.start <= gene.end) and ( ta.end >= gene.end)): check = True tmp_ta = ta if start_tmp == 0: start_tmp, stop_tmp = check_modify( start_tmp, stop_tmp, gene, ta, modify) start = start_tmp stop = stop_tmp else: start_tmp, stop_tmp = check_modify( start_tmp, stop_tmp, gene, ta, modify) if "merge_overlap" in modify: if stop < stop_tmp: stop = stop_tmp else: if (start_tmp != 0): out.write("\t".join([str(field) for field in [ tmp_ta.seq_id, tmp_ta.source, tmp_ta.feature, start, stop, tmp_ta.score, tmp_ta.strand, tmp_ta.phase, tmp_ta.attribute_string]]) + "\n") start = start_tmp stop = stop_tmp if (ta.start > gene.end) and (start != 0) and (check): check = False out.write("\t".join([str(field) for field in [ tmp_ta.seq_id, tmp_ta.source, tmp_ta.feature, start, stop, tmp_ta.score, tmp_ta.strand, tmp_ta.phase, tmp_ta.attribute_string]]) + "\n") break if (start != 0) and (check): out.write('\t'.join([str(field) for field in [ tmp_ta.seq_id, tmp_ta.source, tmp_ta.feature, start, stop, tmp_ta.score, tmp_ta.strand, tmp_ta.phase, tmp_ta.attribute_string]]) + "\n") def fill_gap(gff_file, ta_file, type_, output, modify): '''compare transcript with genome annotation to modify the transcript''' tas = [] genes = [] ta_f = open(ta_file, "r") gff_f = open(gff_file, "r") for entry in Gff3Parser().entries(ta_f): tas.append(entry) ta_f.close() tas = sorted(tas, key=lambda k: 
(k.seq_id, k.start, k.end, k.strand)) for entry in Gff3Parser().entries(gff_f): if (entry.feature != "source") and ( entry.feature != "region") and ( entry.feature != "repeat_region") and ( entry.feature != "STS") and ( entry.feature != "remark"): genes.append(entry) gff_f.close() genes = sorted(genes, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) out = open(output, "w") out.write("##gff-version 3\n") if type_ == "overlap": overlap(tas, genes, out, modify) elif type_ == "uni": uni(tas, genes, out) def print_file(ta, num, out): ta.attributes["ID"] = ta.seq_id + "_transcript" + str(num) ta.attributes["Name"] = "transcript_" + ('%0*d' % (5, num)) attribute_string = ";".join( ["=".join(items) for items in ta.attributes.items()]) out.write("\t".join([str(field) for field in [ ta.seq_id, ta.source, ta.feature, ta.start, ta.end, ta.score, ta.strand, ta.phase, attribute_string]]) + "\n") def longer_ta(ta_file, length, out_file): '''merge overlaped transcript to for a complete transcript''' tas = [] for entry in Gff3Parser().entries(open(ta_file)): tas.append(entry) tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) for ta_1 in tas: for ta_2 in tas: if (ta_1.seq_id == ta_2.seq_id) and ( ta_1.strand == ta_2.strand): if (ta_1.start <= ta_2.start) and ( ta_1.end >= ta_2.start) and ( ta_1.end <= ta_2.end): ta_1.end = ta_2.end elif (ta_1.start >= ta_2.start) and ( ta_1.start <= ta_2.end) and ( ta_1.end >= ta_2.end): ta_1.start = ta_2.start elif (ta_1.start <= ta_2.start) and ( ta_1.end >= ta_2.end): pass elif (ta_1.start >= ta_2.start) and ( ta_1.end <= ta_2.end): ta_1.start = ta_2.start ta_1.end = ta_2.end first = True out = open(out_file, "w") out.write("##gff-version 3\n") num = 0 pre_ta = None tas = sorted(tas, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) for ta in tas: if (ta.end - ta.start) >= length: if first: first = False print_file(ta, num, out) num += 1 else: if (ta.seq_id == pre_ta.seq_id) and ( ta.strand == pre_ta.strand) and ( ta.start == pre_ta.start) and ( ta.end == pre_ta.end): pass else: print_file(ta, num, out) num += 1 pre_ta = ta out.close()
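# --- Illustrative usage (not part of the original module) -------------------
# fill_gap() adjusts transcripts against the genome annotation ("overlap"
# mode) or keeps the ones without any overlapping gene ("uni" mode), while
# longer_ta() merges overlapping transcripts above a length cutoff. File
# names are hypothetical; modify holds keywords such as "merge_overlap",
# "extend_5end", "extend_3end" or "within_extend_ends".
if __name__ == "__main__":
    fill_gap("example_annotation.gff", "example_transcripts.gff",
             "overlap", "example_overlap.gff", ["merge_overlap"])
    longer_ta("example_overlap.gff", 20, "example_merged_transcripts.gff")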
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/fill_gap.py
fill_gap.py
from annogesiclib.gff3 import Gff3Parser from annogesiclib.helper import Helper def get_feature(cds): if "locus_tag" in cds.attributes.keys(): feature = cds.attributes["locus_tag"] elif "protein_id" in cds.attributes.keys(): feature = cds.attributes["protein_id"] else: feature = cds.attributes["ID"] return feature def import_data(seq, cds, start, end): feature = get_feature(cds) return {"seq": seq, "strain": cds.seq_id, "strand": cds.strand, "protein": feature, "start": start, "end": end} def detect_site(inters, args_ribo): '''Detection of ribosome binding site''' rbss = [] for inter in inters: if args_ribo.without_rbs: rbss.append(inter) else: for ribo_seq in args_ribo.rbs_seq: detect = False for nts in range(0, (len(inter["seq"]) - len(ribo_seq))): miss = 0 for index in range(len(ribo_seq)): if miss > args_ribo.fuzzy_rbs: break else: if inter["seq"][nts:(nts + len(ribo_seq))][index] != ribo_seq[index]: miss += 1 if (miss <= args_ribo.fuzzy_rbs) and ( len(inter["seq"][nts:(nts + len(ribo_seq))]) >= len(ribo_seq)): rbss.append(inter) detect = True break if detect: break return rbss def read_file(seq_file, gff_file, tss_file, tran_file): cdss = [] tsss = [] trans = [] seq = {} with open(seq_file, "r") as f_h: for line in f_h: line = line.strip() if line.startswith(">"): strain = line[1:] seq[strain] = "" else: seq[strain] = seq[strain] + line g_h = open(gff_file) for entry in Gff3Parser().entries(g_h): if (entry.feature == "CDS"): cdss.append(entry) if tss_file is not None: t_h = open(tss_file) for entry in Gff3Parser().entries(t_h): tsss.append(entry) tsss = sorted(tsss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) t_h.close() a_h = open(tran_file) for entry in Gff3Parser().entries(a_h): trans.append(entry) cdss = sorted(cdss, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) trans = sorted(trans, key=lambda k: (k.seq_id, k.start, k.end, k.strand)) g_h.close() a_h.close() return cdss, seq, tsss, trans def extract_inter_seq(inter, cds, seq, fuzzy, inters): '''extract the sequence of pre-CDS region''' helper = Helper() start = inter["start"] - fuzzy end = inter["end"] + fuzzy if inter["start"] - fuzzy <= 0: start = 1 if inter["end"] + fuzzy >= len(seq[cds.seq_id]): end = len(seq) if cds.strand == "+": inter_seq = helper.extract_gene(seq[cds.seq_id], start, end, "+") else: inter_seq = helper.extract_gene(seq[cds.seq_id], start, end, "-") inters.append(import_data(inter_seq, cds, inter["start"], inter["end"])) def compare_tss(tsss, cds, inters, fuzzy, seq, utr): '''Compare CDS with TSS to get the pre-CDS region''' for tss in tsss: if (cds.seq_id == tss.seq_id) and ( cds.strand == tss.strand): if tss.strand == "+": if ((cds.start - tss.start) <= utr) and ( (cds.start - tss.start) > 0): inter = {"start": tss.start, "end": cds.start, "strain": cds.seq_id, "strand": cds.strand} extract_inter_seq(inter, cds, seq, fuzzy, inters) else: if ((tss.end - cds.end) <= utr) and ( (tss.end - cds.end) > 0): inter = {"start": cds.end, "end": tss.end, "strain": cds.seq_id, "strand": cds.strand} extract_inter_seq(inter, cds, seq, fuzzy, inters) def compare_pre_cds(first, cdss, cds, seq): '''Search the front position CDS of the query one to get the pre-CDS region''' detect_cds = False start = None end = None for pre_cds in cdss: if (pre_cds.seq_id == cds.seq_id) and ( pre_cds.strand == cds.strand): if pre_cds.strand == "+": if first: start = 1 end = cds.start detect_cds = True first = False break elif pre_cds.end < cds.start: start = pre_cds.end end = cds.start detect_cds = True elif pre_cds.end >= 
cds.start: break else: if pre_cds.start > cds.end: start = cds.end end = pre_cds.start detect_cds = True break if (not detect_cds) and (cds.strand == "-"): start = cds.end end = len(seq[cds.seq_id]) return first, start, end def compare_tran(cds, trans, seq, inters, fuzzy, start, end): '''For detect the expressed region of candidates''' detect = False for tran in trans: if (tran.seq_id == cds.seq_id) and ( tran.strand == cds.strand): if tran.strand == "+": if (cds.start > tran.start) and ( cds.start <= tran.end): if start < tran.start: start = tran.start detect = True elif (tran.start > cds.start): break else: if (cds.end > tran.start) and ( cds.end <= tran.end): if end > tran.start: end = tran.start detect = True elif (tran.start > cds.end): break if detect: inter = {"start": tran.start, "end": cds.start, "strain": cds.seq_id, "strand": cds.strand} extract_inter_seq(inter, cds, seq, fuzzy, inters) else: for tran in trans: if (tran.seq_id == cds.seq_id) and ( tran.strand == cds.strand): if ((start <= tran.start) and ( end >= tran.end)) or ( (start >= tran.start) and ( end <= tran.end)) or ( (start <= tran.start) and ( end <= tran.end) and ( end >= tran.start)) or ( (start >= tran.start) and ( start <= tran.end) and ( end >= tran.end)): inter = {"start": tran.start, "end": cds.start, "strain": cds.seq_id, "strand": cds.strand} extract_inter_seq(inter, cds, seq, fuzzy, inters) break def extract_seq(cdss, seq, tsss, trans, fuzzy, utr): '''extract the sequence for searching the riboswitch or RNA thermometer by comparing with TSS, transcript and CDS''' first = True inters = [] for cds in cdss: if len(tsss) != 0: compare_tss(tsss, cds, inters, fuzzy, seq, utr) first, start, end = compare_pre_cds(first, cdss, cds, seq) if (start is not None) and (end is not None): compare_tran(cds, trans, seq, inters, fuzzy, start, end) return inters def extract_potential_rbs(seq_file, gff_file, tss_file, tran_file, out_file, args_ribo, feature): '''Get the potential riboswitch or RNA-thermometer''' out = open(out_file, "w") cdss, seq, tsss, trans = read_file(seq_file, gff_file, tss_file, tran_file) inters = extract_seq(cdss, seq, tsss, trans, args_ribo.fuzzy, args_ribo.utr) rbss = detect_site(inters, args_ribo) num = 0 for rbs in rbss: out.write(">" + feature + "_{0}\n".format( "|".join([str(num), rbs["strain"], rbs["strand"], rbs["protein"], str(rbs["start"]), str(rbs["end"])]))) out.write(rbs["seq"] + "\n") num += 1 out.close()
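# --- Illustrative sketch (not part of the original module) ------------------
# detect_site() above slides every ribosome-binding-site motif along the
# extracted sequence and keeps a candidate when some window matches the motif
# with at most fuzzy_rbs mismatches. The self-contained demo below reproduces
# that mismatch test for a single, made-up window.
if __name__ == "__main__":
    motif = "AGGAGG"
    window = "AGGTGG"
    fuzzy_rbs = 1
    miss = sum(1 for nt, ref in zip(window, motif) if nt != ref)
    print("mismatches: {0}, accepted: {1}".format(miss, miss <= fuzzy_rbs))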
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/extract_RBS.py
extract_RBS.py
import csv import copy import numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt plt.style.use('ggplot') def plot_bar(cutoffs, strain, out_snp, type_): name = [] for index in range(0, len(cutoffs) + 1): name.append(index * 10) if len(name) > len(cutoffs): name = name[:len(cutoffs)] ind = np.arange(len(cutoffs)) width = 0.5 plt.figure(figsize=(20, 15)) plt.bar(ind, cutoffs, width, color='#FF9999') plt.ylabel('the number of SNPs', fontsize=20) plt.xlabel('QUAL of SNP in transcripts', fontsize=20) plt.xlim([0, len(cutoffs) + 1]) plt.xticks(ind+width-0.75, name, fontsize=18, rotation=40) plt.yticks(fontsize=18) plt.savefig("_".join([out_snp, strain, "SNP_QUAL", type_])) plt.clf() def row_in_list(row): info = "" filt = "." snps = {"strain": None, "pos": -1, "id": "", "ref": "", "alt": "", "qual": -1, "filter": "", "info": "", "depth": -1, "all_info": "", "indel": -1, "frac": -1, "dp4_sum": -1, "dp4_frac": -1} if len(row) >= 8: snps = {"strain": row[0], "pos": int(row[1]), "id": row[2], "ref": row[3], "alt": row[4], "qual": float(row[5]), "filter": filt, "info": "", "depth": -1, "all_info": "\t".join(row), "indel": -1, "frac": -1} infos = row[7].split(";") snps["info"] = infos for info in infos: datas = info.split("=") if len(datas) > 1: if datas[0] == "DP": snps["depth"] = int(datas[1]) if datas[0] == "IDV": snps["indel"] = int(datas[1]) if datas[0] == "IMF": snps["frac"] = float(datas[1]) if datas[0] == "DP4": dp4s = datas[1].split(",") snps["dp4_sum"] = int(dp4s[2]) + int(dp4s[3]) total = (int(dp4s[0]) + int(dp4s[1]) + int(dp4s[2]) + int(dp4s[3])) snps["dp4_frac"] = float(snps["dp4_sum"] / total) return snps else: return snps def gen_ref(snps, pos, refs, num): if num == 1: for snp in snps: refs.append(":".join([str(pos), snp["alt"]])) else: new_refs = [] for snp in snps: for ref in refs: new_refs.append(ref + "_" + str(pos) + ":" + snp["alt"]) refs = copy.deepcopy(new_refs) return refs def change(snp, seq): start_point = snp["pos"] - 1 + seq["num_mod"] end_point = snp["pos"] - 1 + len(snp["ref"]) + seq["num_mod"] if len(snp["ref"]) == len(snp["alt"]): if seq["seq"][start_point: end_point].upper() == snp["ref"].upper(): seq["seq"] = (seq["seq"][:start_point] + snp["alt"].lower() + seq["seq"][end_point:]) if len(snp["ref"]) > len(snp["alt"]): if seq["seq"][start_point: end_point].upper() == snp["ref"].upper(): seq["seq"] = (seq["seq"][:start_point] + snp["alt"].lower() + seq["seq"][end_point:]) seq["num_mod"] = seq["num_mod"] - ( len(snp["ref"]) - len(snp["alt"])) if len(snp["ref"]) < len(snp["alt"]): if seq["seq"][start_point: end_point].upper() == snp["ref"].upper(): seq["seq"] = (seq["seq"][:start_point] + snp["alt"].lower() + seq["seq"][end_point:]) seq["num_mod"] = seq["num_mod"] - ( len(snp["ref"]) - len(snp["alt"])) def get_n_a_value(para, depth_file, min_sample, type_): '''get the corresponding number of cutoff''' if (para.lower() == "none") and type_ == "b_dp": value = None else: tag = para.split("_")[0] try: value = float(para.split("_")[-1]) except ValueError: print("Error: The format of input cutoff is wrong! 
Please check " "--read_depth_range, --dp4_cutoff, --indel_fraction again.") sys.exit() if tag == "a": fh = open(depth_file, "r") total = 0 num = 0 for row in csv.reader(fh, delimiter='\t'): total = total + int(row[-1]) num += 1 avg_dep = float(total / num) fh.close() value = avg_dep * value elif tag == "n": value = (min_sample * value) elif tag == "r": pass return value def apply_filter(filters, snp, snps): '''apply the filter to remove the false positives''' exclude = False for filt in filters: tag = filt.split("_")[0] cutoff = filt.split("_")[-1] for info in snp["info"]: if tag in info: value = float(info.split("=")[-1]) if cutoff[0] == "b": if value < float(cutoff[1:]): exclude = True if cutoff[0] == "s": if value > float(cutoff[1:]): exclude = True if not exclude: snps.append(snp) def import_data(snp_file, args_snp, bam_number, depth_file, min_sample): snps = [] raw_snps = [] dess = [] max_quals = {} max_quals["All_genome"] = 0 pre_strain = "" cutoff_sum = get_n_a_value(args_snp.dp4_sum, depth_file, min_sample, "dp4") cutoff_frac = float(args_snp.dp4_frac) depth_s = get_n_a_value(args_snp.depth_s, depth_file, min_sample, "s_dp") depth_b = get_n_a_value(args_snp.depth_b, depth_file, min_sample, "b_dp") idv = get_n_a_value(args_snp.idv, depth_file, min_sample, "idv") imf = float(args_snp.imf) fh = open(snp_file, "r") for row in csv.reader(fh, delimiter="\t"): if row[0].startswith("##"): dess.append(row[0]) if row[0].startswith("#"): continue else: snp = row_in_list(row) raw_snps.append(snp) if snp["strain"]: if snp["strain"] != pre_strain: pre_strain = snp["strain"] max_quals[snp["strain"]] = 0 if snp["qual"] > max_quals[snp["strain"]]: max_quals[snp["strain"]] = snp["qual"] if snp["qual"] > max_quals["All_genome"]: max_quals["All_genome"] = snp["qual"] compute = False if (depth_b is None): if (snp["depth"] >= depth_s) and ( snp["dp4_sum"] >= cutoff_sum) and ( snp["dp4_frac"] >= cutoff_frac): compute = True else: if (snp["depth"] >= depth_s) and ( snp["depth"] <= depth_b) and ( snp["dp4_sum"] >= cutoff_sum) and ( snp["dp4_frac"] >= cutoff_frac): compute = True if compute: if snp["indel"] == -1: apply_filter(args_snp.filters, snp, snps) else: if (snp["frac"] >= imf) and ( snp["indel"] >= idv): apply_filter(args_snp.filters, snp, snps) fh.close() return max_quals, snps, dess, raw_snps def check_overlap(new_snps, overlaps): count = 0 element = 0 first_overlap = True printeds = [] count_overlap = len(overlaps) for key, value in new_snps.items(): if first_overlap: for overlap in overlaps: if "print" in overlap.keys(): element += 1 printeds.append(overlap) else: break first_overlap = False if ("print" not in overlaps[element].keys()): if len(printeds) == 0: value.append(overlaps[element]) count += 1 else: for printed in printeds: if printed not in value: value.append(overlaps[element]) count += 1 if count_overlap != 0: if count == (len(new_snps.keys()) / count_overlap): overlaps[element]["print"] = True element += 1 if element >= len(overlaps): break count = 0 def overlap_position(qual_snps): '''deal with the conflict position of SNPs''' first = True qual_nooverlap_snps = {} num_overlap = 1 qual_nooverlap_snps[num_overlap] = [] conflicts = [] for snp1 in qual_snps: overlaps = [] overlaps.append(snp1) for snp2 in qual_snps: if (snp1 != snp2) and (snp1["strain"] == snp2["strain"]) and ( (snp2["pos"] - snp1["pos"] < len(snp1["ref"]))) and ( "print" not in snp2.keys()): overlaps.append(snp2) if len(overlaps) != 1: conflicts.append(overlaps) if first: for overlap in overlaps: 
                    qual_nooverlap_snps[num_overlap] = []
                    qual_nooverlap_snps[num_overlap].append(overlap)
                    num_overlap += 1
                    overlap["print"] = True
                num_overlap = 1
                first = False
            else:
                new_snps = qual_nooverlap_snps.copy()
                index = len(qual_nooverlap_snps.keys())
                repeat = 0
                for overlap in overlaps:
                    if "print" in overlap.keys():
                        repeat += 1
                for times in range(1, len(overlaps) - repeat):
                    for key, value in qual_nooverlap_snps.items():
                        new_snps[key + index * times] = list(value)
                check_overlap(new_snps, overlaps)
                qual_nooverlap_snps = new_snps.copy()
        else:
            if "print" not in snp1.keys():
                if first:
                    qual_nooverlap_snps[num_overlap].append(snp1)
                    first = False
                else:
                    for key, value in qual_nooverlap_snps.items():
                        value.append(snp1)
                snp1["print"] = True
        if first:
            first = False
    return conflicts, qual_nooverlap_snps


def print_file(refs, out_ref, conflicts, key, values, mod_seq_init,
               mod_seqs, out_seq, strain):
    '''print the reference-sequence table and write the modified genome
    sequences to FASTA files (wrapped at 60 nt per line)'''
    num_seq = 1
    num_nt = 0
    paths = []
    if len(conflicts) == 0:
        paths.append("NA")
    else:
        for conflict in conflicts:
            for path in conflict:
                for value in values:
                    if path == value:
                        paths.append(str(path["pos"]))
    if len(refs) != 0:
        num_ref = 1
        for ref in refs:
            out_ref.write("\t".join([str(key), "_".join(paths),
                                     str(num_ref), ref, strain]) + "\n")
            num_ref += 1
    else:
        out_ref.write("\t".join([str(key), "_".join(paths), "1",
                                 "NA", mod_seq_init["genome"]]) + "\n")
    if len(mod_seqs) == 0:
        out_fasta = open("_".join([out_seq, mod_seq_init["genome"],
                                   str(key), "1.fa"]), "w")
        out_fasta.write(">{0}\n".format(mod_seq_init["genome"]))
        for nt in mod_seq_init["seq"]:
            num_nt += 1
            out_fasta.write("{0}".format(nt))
            if num_nt % 60 == 0:
                out_fasta.write("\n")
        out_fasta.close()
    else:
        for seq in mod_seqs:
            num_nt = 0
            out_fasta = open("_".join([out_seq, seq["genome"], str(key),
                                       str(num_seq)]) + ".fa", "w")
            out_fasta.write(">{0}\n".format(seq["genome"]))
            for nt in seq["seq"]:
                num_nt += 1
                out_fasta.write("{0}".format(nt))
                if num_nt % 60 == 0:
                    out_fasta.write("\n")
            num_seq += 1
            out_fasta.close()


def stat(max_quals, trans_snps, bam_number, stat_prefix, out_snp,
         args_snp, type_):
    '''compute the distribution of QUAL values (bins of 10) per genome
    and plot it as a bar chart'''
    out_stat = open("_".join([stat_prefix, type_]), "w")
    printed = False
    for strain, max_qual in max_quals.items():
        max_qual = int(((max_qual / 10) + 1) * 10)
        cutoffs = []
        if (strain == "All_genome") and (len(max_quals) > 2):
            printed = True
        elif (strain != "All_genome"):
            printed = True
        if printed:
            for cutoff in range(0, max_qual, 10):
                cutoffs.append(0)
            for snp in trans_snps:
                if (snp["strain"] == strain) or (strain == "All_genome"):
                    index = int(snp["qual"] / 10)
                    cutoffs[index] += 1
            num_cutoff = 10
            num_quality = 0
            out_stat.write(strain + ":\n")
            best_cutoffs = []
            for cutoff in cutoffs:
                if ((args_snp.quality <= (num_cutoff - 10)) and (
                        "best" in type_)) or ("raw" in type_):
                    num_quality = num_quality + cutoff
                if (num_cutoff < args_snp.quality) and ("best" in type_):
                    num_quality = 0
                    best_cutoffs.append(0)
                else:
                    best_cutoffs.append(cutoff)
                    out_stat.write("the number of QUAL which is between "
                                   "{0} and {1} = {2}\n".format(
                                       num_cutoff - 10, num_cutoff,
                                       cutoff))
                num_cutoff = num_cutoff + 10
            out_stat.write("the total numbers of QUAL are {0}\n".format(
                num_quality))
            plot_bar(best_cutoffs, strain, out_snp,
                     type_.replace(".csv", ".png"))
        printed = False
    out_stat.close()


def read_fasta(fasta_file):
    '''read the genome FASTA file into a list of {header: sequence} dicts'''
    seqs = []
    first = True
    num_index = 0
    seq_name = ""
    with open(fasta_file, "r") as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(">"):
                if first:
                    seqs.append({line[1:]: ""})
                    first = False
                else:
                    seqs.append({line[1:]: ""})
                    num_index += 1
                seq_name = line[1:]
            else:
                seqs[num_index][seq_name] = (
                    seqs[num_index][seq_name] + line)
    return seqs


def gen_new_fasta(qual_nooverlap_snps, seqs, out_ref, conflicts, out_seq):
    '''apply each conflict-free set of SNPs to the reference genome and
    print the resulting potential sequences'''
    refs = {}
    for key, values in qual_nooverlap_snps.items():
        for seq in seqs:
            for strain, fasta in seq.items():
                refs[strain] = []
                num_var = 0
                mod_seq_init = {"genome": strain, "seq": fasta,
                                "num_mod": 0}
                mod_seqs = []
                for snp in values:
                    if snp["strain"] == strain:
                        if "," in snp["alt"]:
                            # multiple alternatives: fork one modified
                            # sequence per alternative
                            num_var += 1
                            tmps = []
                            tmp_snps = []
                            alts = snp["alt"].split(",")
                            for alt in alts:
                                tmp_snp = snp.copy()
                                tmp_snp["alt"] = alt
                                tmp_snps.append(tmp_snp)
                                if len(mod_seqs) == 0:
                                    num_mod_seqs = len(mod_seqs)
                                    tmps.append(mod_seq_init.copy())
                                else:
                                    num_mod_seqs = len(mod_seqs)
                                    for mod_seq in mod_seqs:
                                        tmps.append(mod_seq.copy())
                            mod_seqs = list(tmps)
                            num_mod = 0
                            num = 1
                            refs[strain] = gen_ref(tmp_snps, snp["pos"],
                                                   refs[strain], num_var)
                            for mod_seq in mod_seqs:
                                change(tmp_snps[num_mod], mod_seq)
                                if num >= num_mod_seqs:
                                    num_mod += 1
                                    num = 0
                                num += 1
                        else:
                            if len(mod_seqs) == 0:
                                change(snp, mod_seq_init)
                            else:
                                for mod_seq in mod_seqs:
                                    change(snp, mod_seq)
                print_file(refs[strain], out_ref, conflicts, key, values,
                           mod_seq_init, mod_seqs, out_seq, strain)


def snp_detect(fasta_file, snp_file, depth_file, out_snp, out_seq,
               bam_number, stat_prefix, args_snp, min_sample):
    '''entry point: import and filter the SNPs, print the statistics,
    and generate the potential sequences'''
    max_quals, snps, dess, raw_snps = import_data(
        snp_file, args_snp, bam_number, depth_file, min_sample)
    out_best = open(out_snp + "_best.vcf", "w")
    out_ref = open(out_snp + "_seq_reference.csv", "w")
    out_ref.write("Pos_Conflict_ID\tSelected_Pos\tMutation_Conflict_ID"
                  "\tSelected_Pos:NT\tStrain\n")
    out_best.write("\n".join(dess) + "\n")
    out_best.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL"
                   "\tFILTER\tINFO\tFORMAT\tBAM\n")
    best_snps = []
    for snp in snps:
        if snp["qual"] >= args_snp.quality:
            out_best.write(snp["all_info"] + "\n")
            best_snps.append(snp)
    conflicts, qual_nooverlap_snps = overlap_position(best_snps)
    stat(max_quals, raw_snps, bam_number, stat_prefix, out_snp,
         args_snp, "raw.csv")
    stat(max_quals, best_snps, bam_number, stat_prefix, out_snp,
         args_snp, "best.csv")
    seqs = read_fasta(fasta_file)
    gen_new_fasta(qual_nooverlap_snps, seqs, out_ref, conflicts, out_seq)
    out_best.close()
    out_ref.close()
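

# A minimal usage sketch, not part of the original module: every path and
# every value on the args namespace below is an illustrative assumption
# (in ANNOgesic these come from the command-line parser), and the output
# directory "out" is assumed to exist. snp_detect() is the entry point;
# helpers such as row_in_list(), get_n_a_value() and plot_bar() are defined
# earlier in this module.
if __name__ == "__main__":
    from argparse import Namespace

    args_snp = Namespace(
        dp4_sum="n_2",      # DP4 sum cutoff; "n_"/"a_"/"r_" select the mode
        dp4_frac="0.5",     # minimum fraction of reads supporting the call
        depth_s="n_10",     # smaller read-depth bound (min_sample * 10)
        depth_b="a_3",      # bigger read-depth bound (3 * average depth)
        idv="n_2",          # minimum number of indel-supporting reads
        imf="0.5",          # minimum indel fraction
        quality=40,         # QUAL cutoff for the "best" SNP set
        filters=[])         # e.g. ["VDB_b0.05"] keeps calls with VDB >= 0.05
    snp_detect("genome.fa", "variants.vcf", "depth.txt", "out/snp",
               "out/seq", bam_number=1, stat_prefix="out/stat",
               args_snp=args_snp, min_sample=1)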
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/transcript_SNP.py
transcript_SNP.py
import csv


class Gff3Parser(object):
    """
    A format description can be found at:
    http://genome.ucsc.edu/FAQ/FAQformat.html#format3
    http://www.sequenceontology.org/gff3.shtml

    a validator can be found here:
    http://modencode.oicr.on.ca/cgi-bin/validate_gff3_online

    WARNING: Currently this class is not strict enough and will also
    parse files that do not follow the standard.
    """

    def entries(self, input_gff_fh):
        """Yield a Gff3Entry for each non-comment line of the given
        file handle."""
        for entry_dict in csv.DictReader(
                input_gff_fh, delimiter="\t",
                fieldnames=["seq_id", "source", "feature", "start",
                            "end", "score", "strand", "phase",
                            "attributes"]):
            if entry_dict["seq_id"].startswith("#"):
                continue
            yield self._dict_to_entry(entry_dict)

    def _dict_to_entry(self, entry_dict):
        return Gff3Entry(entry_dict)


class Gff3Entry(object):
    """
    Example:

    start, end = sorted([int(pos) for pos in [start, end]])
    Gff3Entry({
        "seq_id": seq_id,
        "source": "MyLab",
        "feature": "sRNA",
        "start": start,
        "end": end,
        "strand": strand,
        "score": ".",
        "phase": ".",
        "attributes": "name=%s;locus_tag=%s" % (name, locus_tag)})
    """

    def __init__(self, entry_dict):
        self.seq_id = entry_dict["seq_id"]
        self.source = entry_dict["source"]
        self.feature = entry_dict["feature"]
        # 1-based coordinates
        # Make sure that start <= end
        start, end = sorted([int(entry_dict["start"]),
                             int(entry_dict["end"])])
        self.start = start
        self.end = end
        self.score = entry_dict["score"]
        self.strand = entry_dict["strand"]
        self.phase = entry_dict["phase"]
        self.attributes = self._attributes(entry_dict["attributes"])
        self.attribute_string = entry_dict["attributes"]
        self.info = "\t".join([str(field) for field in [
            self.seq_id, self.source, self.feature, self.start,
            self.end, self.score, self.strand, self.phase,
            self.attribute_string]])
        self.info_without_attributes = "\t".join([str(field) for field in [
            self.seq_id, self.source, self.feature, self.start, self.end,
            self.score, self.strand, self.phase]])

    def _attributes(self, attributes_string):
        """Translate the attribute string to a dictionary"""
        attributes = {}
        if len(attributes_string) > 0:
            for attribute in attributes_string.split(";"):
                key_value_pair = attribute.split("=")
                key = key_value_pair[0]
                if len(key_value_pair) > 2:
                    # the value itself contains "=" characters
                    value = "=".join(key_value_pair[1:])
                elif len(key_value_pair) == 2:
                    value = key_value_pair[1]
                else:
                    value = ""
                attributes[key] = value
        return attributes

    def add_attribute(self, key, value):
        self.attributes[key] = value
        self.attribute_string = ";".join(
            ["=".join(items) for items in self.attributes.items()])

    def __str__(self):
        return "\t".join([str(field) for field in [
            self.seq_id, self.source, self.feature, self.start,
            self.end, self.score, self.strand, self.phase,
            self.attribute_string]])
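

# Usage sketch (not part of the original module); "annotation.gff" is a
# placeholder path for any GFF3 annotation file.
if __name__ == "__main__":
    with open("annotation.gff") as in_fh:
        for entry in Gff3Parser().entries(in_fh):
            # entry.attributes holds column 9 parsed into a key/value dict
            print(entry.seq_id, entry.feature, entry.start, entry.end,
                  entry.strand, entry.attributes.get("locus_tag", "NA"))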
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/gff3.py
gff3.py
import os import itertools from annogesiclib.gff3 import Gff3Parser def print_intersection(datas, keys, num_srna, gff_name, type_, out_stat): num = 0 datas_merge = [] if type_ == "total": out = open(gff_name, "w") out.write("##gff-version 3\n") for data in datas[keys[0]]: check_same = [] for key in keys[1:]: if data in datas[key]: check_same.append("True") if len(check_same) == (len(keys) - 1): if len(keys) <= 5: if type_ == "total": out.write(data.info + "\t" + "\n") if "best_avg_coverage" in data.attributes.keys(): datas_merge.append({ "data": data, "wig": data.attributes["best_avg_coverage"]}) num += 1 datas_sort = sorted(datas_merge, key=lambda k: float(k['wig']), reverse=True) for data in datas_sort: if type_ == "total": out.write(data["data"].info + "\n") if num_srna == 0: out_stat.write("\t{0} = {1}({2})\n".format(" and ".join(keys), str(num), str(0))) else: out_stat.write("\t{0} = {1}({2})\n".format(" and ".join(keys), str(num), str(float(num)/float(num_srna)))) if type_ == "total": out.close() def initiate(key, key_list, class_name, class_num, index, out, content): if key in key_list: class_num += 1 index[class_name] = class_num out.write(str(class_num) + content + "\n") return class_num def create_class(data, energy, datas_srna, index, type_, nr_hits_num): if "2d_energy" in data.attributes.keys(): if float(data.attributes["2d_energy"]) < energy: datas_srna["class_" + str(index["2d_energy"])].append(data) if "with_TSS" in data.attributes.keys(): if data.attributes["with_TSS"] != "NA": datas_srna["class_" + str(index["with_TSS"])].append(data) elif ((type_ == "UTR_derived") or (type_ == "total")) and ( ("5utr" in data.attributes["sRNA_type"]) or ( "3utr" in data.attributes["sRNA_type"]) or ( "interCDS" in data.attributes["sRNA_type"])): if (data.attributes["start_cleavage"] != "NA") and ( ("3utr" in data.attributes["sRNA_type"]) or ( "interCDS" in data.attributes["sRNA_type"])): datas_srna["class_" + str(index["with_TSS"])].append(data) if "nr_hit" in data.attributes.keys(): if ((data.attributes["nr_hit"] != "NA") and ( int(data.attributes["nr_hit"]) <= nr_hits_num)) or ( data.attributes["nr_hit"] == "NA"): datas_srna["class_" + str(index["nr_no_hit"])].append(data) if "sORF" in data.attributes.keys(): if (data.attributes["sORF"] == "NA"): datas_srna["class_" + str(index["sORF"])].append(data) if "sRNA_hit" in data.attributes.keys(): if data.attributes["sRNA_hit"] != "NA": datas_srna["class_" + str(index["sRNA_hit"])].append(data) else: datas_srna["class_" + str(index["sRNA_no_hit"])].append(data) if "with_term" in data.attributes.keys(): if (data.attributes["with_term"] != "NA"): datas_srna["class_" + str(index["with_term"])].append(data) elif ("end_cleavage" in data.attributes.keys()): if data.attributes["end_cleavage"] != "NA": datas_srna["class_" + str(index["with_term"])].append(data) if "promoter" in data.attributes.keys(): if (data.attributes["promoter"] != "NA"): datas_srna["class_" + str(index["promoter"])].append(data) def import_class(class_num, datas_srna, datas, index, num_srna, strain, type_, srna_type, energy, nr_hits_num): for num in range(1, class_num + 1): datas_srna["class_" + str(num)] = [] for data in datas[strain]: detect = False if (srna_type in data.attributes["sRNA_type"]) or (type_ == "total"): if type_ == "UTR_derived": if srna_type in data.attributes["sRNA_type"]: detect = True else: detect = True if detect: num_srna += 1 create_class(data, energy, datas_srna, index, type_, nr_hits_num) return num_srna def import_data(class_num, datas, index, num_srna, 
strain, checks, energy, nr_hits_num): datas_srna = {} if checks["utr"]: datas_srna["5'UTR_derived"] = {} num_srna["5'UTR_derived"] = import_class( class_num, datas_srna["5'UTR_derived"], datas, index, num_srna["5'UTR_derived"], strain, "UTR_derived", "5utr", energy, nr_hits_num) datas_srna["3'UTR_derived"] = {} num_srna["3'UTR_derived"] = import_class( class_num, datas_srna["3'UTR_derived"], datas, index, num_srna["3'UTR_derived"], strain, "UTR_derived", "3utr", energy, nr_hits_num) datas_srna["interCDS"] = {} num_srna["interCDS"] = import_class( class_num, datas_srna["interCDS"], datas, index, num_srna["interCDS"], strain, "UTR_derived", "interCDS", energy, nr_hits_num) if checks["inter"]: datas_srna["intergenic"] = {} num_srna["intergenic"] = import_class( class_num, datas_srna["intergenic"], datas, index, num_srna["intergenic"], strain, "intergenic", "intergenic", energy, nr_hits_num) if checks["in_CDS"]: datas_srna["in_CDS"] = {} num_srna["in_CDS"] = import_class( class_num, datas_srna["in_CDS"], datas, index, num_srna["in_CDS"], strain, "in_CDS", "in_CDS", energy, nr_hits_num) if checks["antisense"]: datas_srna["antisense"] = {} num_srna["antisense"] = import_class( class_num, datas_srna["antisense"], datas, index, num_srna["antisense"], strain, "antisense", "antisense", energy, nr_hits_num) datas_srna["total"] = {} num_srna["total"] = import_class( class_num, datas_srna["total"], datas, index, num_srna["total"], strain, "total", "total", energy, nr_hits_num) return datas_srna def sort_keys(keys): nums = [] final_keys = [] for key in keys: nums.append(int(key.split("_")[1])) nums = sorted(nums) for num in nums: final_keys.append("_".join(["class", str(num)])) return final_keys def print_stat_title(checks, out_stat, strain, srna_datas, num_strain, args_srna): class_num = 0 index = {} if checks["first"]: checks["first"] = False class_num = initiate( "2d_energy", srna_datas[strain][0].attributes.keys(), "2d_energy", class_num, index, out_stat, " - Normalized(by length of sRNA) free energy " "change of the secondary structure is below to " + str(args_srna.energy)) name = " ".join([ " - sRNA candidates start with TSS", "(3'UTR derived and interCDS sRNA also includes the sRNA " "candidates which start with processing site.)"]) class_num = initiate( "with_TSS", srna_datas[strain][0].attributes.keys(), "with_TSS", class_num, index, out_stat, name) class_num = initiate( "nr_hit", srna_datas[strain][0].attributes.keys(), "nr_no_hit", class_num, index, out_stat, "".join([" - Running BLAST can not find the homology in nr " "database (the cutoff is ", str(args_srna.nr_hits_num), ")."])) class_num = initiate( "with_term", srna_datas[strain][0].attributes.keys(), "with_term", class_num, index, out_stat, " - sRNA candidates end with terminator (including the " "candidates ends with processing site).") class_num = initiate( "sORF", srna_datas[strain][0].attributes.keys(), "sORF", class_num, index, out_stat, " - sRNA candidates have no conflict with sORF candidates.") class_num = initiate( "sRNA_hit", srna_datas[strain][0].attributes.keys(), "sRNA_no_hit", class_num, index, out_stat, " - Running BLAST can not find the homology in sRNA database.") class_num = initiate( "sRNA_hit", srna_datas[strain][0].attributes.keys(), "sRNA_hit", class_num, index, out_stat, " - Running BLAST can find the homology in sRNA database.") class_num = initiate( "promoter", srna_datas[strain][0].attributes.keys(), "promoter", class_num, index, out_stat, " - sRNA candidates are associated with promoter.") else: 
out_stat.write("\n") if num_strain <= 2: out_stat.write("All genomes:\n") checks["limit"] = True else: if strain == "all": out_stat.write("All genomes:\n") else: out_stat.write(strain + ":\n") return class_num, index def read_file(srna_file): strains = [] checks = {"limit": False, "first": True, "utr": False, "inter": False, "in_CDS": False, "antisense": False} srna_datas = {} srna_datas["all"] = [] strains.append("all") pre_seq_id = "" fh = open(srna_file) for entry in Gff3Parser().entries(fh): if ("5utr" in entry.attributes["sRNA_type"]) or ( "3utr" in entry.attributes["sRNA_type"]) or ( "interCDS" in entry.attributes["sRNA_type"]): checks["utr"] = True elif "intergenic" in entry.attributes["sRNA_type"]: checks["inter"] = True elif entry.attributes["sRNA_type"] == "in_CDS": checks["in_CDS"] = True elif "antisense" in entry.attributes["sRNA_type"]: checks["antisense"] = True if entry.seq_id != pre_seq_id: srna_datas[entry.seq_id] = [] strains.append(entry.seq_id) pre_seq_id = entry.seq_id srna_datas[entry.seq_id].append(entry) srna_datas["all"].append(entry) for strain in srna_datas.keys(): srna_datas[strain] = sorted( srna_datas[strain], key=lambda k: (k.seq_id, k.start, k.end, k.strand)) fh.close() return srna_datas, strains, checks def set_num(num_srna, types): for type_ in types: num_srna[type_] = 0 def check_and_set_num(checks): num_srna = {"total": 0} if checks["utr"]: set_num(num_srna, ["5'UTR_derived", "3'UTR_derived", "interCDS"]) if checks["antisense"]: set_num(num_srna, ["antisense"]) if checks["inter"]: set_num(num_srna, ["intergenic"]) if checks["in_CDS"]: set_num(num_srna, ["in_CDS"]) return num_srna def classify_srna(srna_file, out_folder, out_stat_file, args_srna): '''classify the sRNA based on the filters''' srna_datas, strains, checks = read_file(srna_file) out_stat = open(out_stat_file, "w") out = None for strain in strains: checks["first"] = True if checks["limit"] is True: break class_num = 0 num_srna = check_and_set_num(checks) if args_srna.in_cds: num_srna["in_CDS"] = 0 class_num, index = print_stat_title( checks, out_stat, strain, srna_datas, len(strains), args_srna) srna_class = import_data( class_num, srna_datas, index, num_srna, strain, checks, args_srna.energy, args_srna.nr_hits_num) for type_, srna in num_srna.items(): out_stat.write("sRNA type - {0}:\n".format(type_)) out_stat.write("\ttotal sRNA candidates = {0}\n".format(srna)) for num in range(1, class_num + 1): if srna != 0: out_stat.write("\tclass {0} = {1}({2})\n".format( num, len(srna_class[type_]["class_" + str(num)]), float(len(srna_class[type_]["class_" + str(num)])) / float(srna))) elif srna == 0: out_stat.write("\tclass {0} = {1}({2})\n".format( num, len(srna_class[type_]["class_" + str(num)]), 0)) if type_ == "total": out = open(os.path.join( out_folder, "_".join(["class", str(num), strain + ".gff"])), "w") out.write("##gff-version 3\n") for data in ( srna_class[type_]["_".join(["class", str(num)])]): out.write(data.info + "\n") if class_num >= 2: for comb in range(2, class_num): for keys in itertools.combinations( srna_class[type_].keys(), comb): if ("sRNA_hit" in index.keys()) or ( "sRNA_no_hit" in index.keys()): if (("class_" + str(index["sRNA_hit"])) in keys) and ( ("class_" + str(index["sRNA_no_hit"])) in keys): continue keys = sort_keys(list(keys)) gff_name = os.path.join( out_folder, "_".join(sorted(list(keys)) + [strain]) + ".gff") print_intersection( srna_class[type_], keys, srna, gff_name, type_, out_stat) out_stat.close() if out is not None: out.close()
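

# Minimal usage sketch, not part of the original module. The paths and the
# cutoff values on the args namespace are illustrative assumptions (the real
# ones come from ANNOgesic's argument parser), and the output folder
# "sRNA_classes" is assumed to exist.
if __name__ == "__main__":
    from argparse import Namespace

    args_srna = Namespace(
        energy=-0.5,       # cutoff for the normalized 2D free-energy class
        nr_hits_num=0,     # maximum accepted number of nr BLAST hits
        in_cds=False)      # whether in-CDS candidates are classified too
    classify_srna("sRNA.gff", "sRNA_classes", "stat_sRNA_class.csv",
                  args_srna)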
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sRNA_class.py
sRNA_class.py
import os
import csv
import shutil
from annogesiclib.gff3 import Gff3Parser


def filter_frag(srna_table, srna_gff):
    '''remove 5'UTR-derived and interCDS sRNA candidates which are not
    supported by fragmented libraries; both input files are rewritten
    in place'''
    out = open("tmp_srna.gff", "w")
    out_ta = open("tmp_srna.csv", "w")
    out.write("##gff-version 3\n")
    gffs = []
    tables = []
    gff_parser = Gff3Parser()
    g_f = open(srna_gff, "r")
    for entry in gff_parser.entries(g_f):
        gffs.append(entry)
    fh = open(srna_table, "r")
    for row in csv.reader(fh, delimiter='\t'):
        tables.append(row)
    new_gffs = []
    for gff in gffs:
        if ("UTR_type" in gff.attributes.keys()):
            if ("5utr" in gff.attributes["UTR_type"]) or (
                    "interCDS" in gff.attributes["UTR_type"]):
                # 5'UTR-derived and interCDS candidates are only kept if
                # the matching table row (column 6) marks fragmented
                # library support
                for table in tables:
                    if (gff.seq_id == table[0]) and (
                            gff.start == int(table[2])) and (
                            gff.end == int(table[3])) and (
                            gff.strand == table[4]):
                        if "frag" in table[5]:
                            new_gffs.append(gff)
            elif "3utr" in gff.attributes["UTR_type"]:
                new_gffs.append(gff)
        else:
            new_gffs.append(gff)
    new_tables = []
    # keep only the table rows which correspond to a retained candidate
    for table in tables:
        for gff in new_gffs:
            if (gff.seq_id == table[0]) and (
                    gff.start == int(table[2])) and (
                    gff.end == int(table[3])) and (
                    gff.strand == table[4]):
                new_tables.append(table)
                out_ta.write("\t".join(table) + "\n")
    for gff in new_gffs:
        for table in new_tables:
            if (gff.seq_id == table[0]) and (
                    gff.start == int(table[2])) and (
                    gff.end == int(table[3])) and (
                    gff.strand == table[4]):
                out.write(gff.info + "\n")
    g_f.close()
    fh.close()
    out.close()
    out_ta.close()
    # replace the original files with the filtered versions
    os.remove(srna_gff)
    os.remove(srna_table)
    shutil.move("tmp_srna.gff", srna_gff)
    shutil.move("tmp_srna.csv", srna_table)
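

# Usage sketch (not part of the original module); both paths are
# placeholders. Note that filter_frag() rewrites its two inputs in place.
if __name__ == "__main__":
    filter_frag("sRNA_table.csv", "sRNA.gff")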
ANNOgesic
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/sRNA_filter_frag.py
sRNA_filter_frag.py