Dataset columns (the records below follow this order, one field per line):
- body: string, lengths 26 to 98.2k
- body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, lengths 1 to 16.8k
- path: string, lengths 5 to 230
- name: string, lengths 1 to 96
- repository_name: string, lengths 7 to 89
- lang: categorical string, 1 value
- body_without_docstring: string, lengths 20 to 98.2k
def to_memory_units(memory_bytes, round_up): 'Convert from bytes -> memory units.' value = (memory_bytes / MEMORY_RESOURCE_UNIT_BYTES) if (value < 1): raise ValueError('The minimum amount of memory that can be requested is {} bytes, however {} bytes was asked.'.format(MEMORY_RESOURCE_UNIT_BYTES, memory_bytes)) if (isinstance(value, float) and (not value.is_integer())): if round_up: value = int(math.ceil(value)) else: value = int(math.floor(value)) return int(value)
-8,472,874,613,192,096,000
Convert from bytes -> memory units.
python/ray/ray_constants.py
to_memory_units
stephanie-wang/ray
python
def to_memory_units(memory_bytes, round_up): value = (memory_bytes / MEMORY_RESOURCE_UNIT_BYTES) if (value < 1): raise ValueError('The minimum amount of memory that can be requested is {} bytes, however {} bytes was asked.'.format(MEMORY_RESOURCE_UNIT_BYTES, memory_bytes)) if (isinstance(value, float) and (not value.is_integer())): if round_up: value = int(math.ceil(value)) else: value = int(math.floor(value)) return int(value)
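The first record is Ray's byte-to-memory-unit helper, flattened onto one line above. Below is a minimal, runnable sketch of the same rounding behaviour; the real MEMORY_RESOURCE_UNIT_BYTES constant is defined elsewhere in python/ray/ray_constants.py, so the 50 MiB value here is only an assumption for illustration.

```python
import math

# Assumed unit size for illustration only; the actual constant is defined
# elsewhere in python/ray/ray_constants.py.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 ** 2

def to_memory_units(memory_bytes, round_up):
    """Convert from bytes -> memory units."""
    value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
    if value < 1:
        raise ValueError(
            'The minimum amount of memory that can be requested is {} bytes, '
            'however {} bytes was asked.'.format(
                MEMORY_RESOURCE_UNIT_BYTES, memory_bytes))
    if isinstance(value, float) and not value.is_integer():
        # A fractional unit count is rounded in the requested direction.
        value = math.ceil(value) if round_up else math.floor(value)
    return int(value)

# 75 MiB is 1.5 units under the assumed unit size:
print(to_memory_units(75 * 1024 ** 2, round_up=True))   # 2
print(to_memory_units(75 * 1024 ** 2, round_up=False))  # 1
```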
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
-7,717,226,299,015,962,000
Args: train/dev/test (list[list] or str): Filenames of the train/dev/test datasets. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. update_steps (int): Gradient accumulation steps. Default: 1. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating training configs.
supar/parsers/dep.py
train
LiBinNLP/HOSDP
python
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
534,450,388,935,096,700
Args: data (str): The data for evaluation, both list of instances and filename are allowed. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating evaluation configs. Returns: The loss scalar and evaluation results.
supar/parsers/dep.py
evaluate
LiBinNLP/HOSDP
python
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=False, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
7,355,355,497,957,411,000
Args: data (list[list] or str): The data for prediction, both a list of instances and filename are allowed. pred (str): If specified, the predicted results will be saved to the file. Default: ``None``. lang (str): Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize. ``None`` if tokenization is not required. Default: ``None``. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. prob (bool): If ``True``, outputs the probabilities. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating prediction configs. Returns: A :class:`~supar.utils.Dataset` object that stores the predicted results.
supar/parsers/dep.py
predict
LiBinNLP/HOSDP
python
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=False, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'biaffine-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('biaffine-dep-en')\n >>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
5,227,721,562,502,721,000
Loads a parser with data fields and pretrained model parameters. Args: path (str): - a string with the shortcut name of a pretrained model defined in ``supar.MODEL`` to load from cache or download, e.g., ``'biaffine-dep-en'``. - a local path to a pretrained model, e.g., ``./<path>/model``. reload (bool): Whether to discard the existing cache and force a fresh download. Default: ``False``. src (str): Specifies where to download the model. ``'github'``: github release page. ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8). Default: None. kwargs (dict): A dict holding unconsumed arguments for updating training configs and initializing the model. Examples: >>> from supar import Parser >>> parser = Parser.load('biaffine-dep-en') >>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')
supar/parsers/dep.py
load
LiBinNLP/HOSDP
python
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'biaffine-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('biaffine-dep-en')\n >>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
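The train, evaluate, predict, and load records above are thin wrappers on supar's biaffine dependency parser: each collects its keyword arguments into a Config and defers to the parent class. The following is a hedged sketch of the usual call sequence, assuming the supar package is installed and the 'biaffine-dep-en' shortcut quoted in the docstring is downloadable; the file paths are placeholders.

```python
from supar import Parser

# Load a pretrained biaffine dependency parser by the shortcut named in the
# docstring above (downloads on first use).
parser = Parser.load('biaffine-dep-en')

# Predict on pre-tokenized text; the result is a supar Dataset.
dataset = parser.predict([['She', 'enjoys', 'playing', 'tennis', '.']],
                         prob=True, verbose=False)
print(dataset.sentences[0])

# Evaluate on a CoNLL-style file (placeholder path).
# loss, metric = parser.evaluate('data/ptb/test.conllx', verbose=False)

# Train a fresh model from CoNLL-style files (placeholder paths).
# parser.train(train='data/ptb/train.conllx',
#              dev='data/ptb/dev.conllx',
#              test='data/ptb/test.conllx',
#              buckets=32, batch_size=5000, update_steps=1)
```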
@classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): '\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary.\n Required if taking words as encoder input.\n Default: 2.\n fix_len (int):\n The max length of all subword pieces. The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n ' args = Config(**locals()) args.device = ('cuda' if torch.cuda.is_available() else 'cpu') os.makedirs((os.path.dirname(path) or './'), exist_ok=True) if (os.path.exists(path) and (not args.build)): parser = cls.load(**args) parser.model = cls.MODEL(**parser.args) parser.model.load_pretrained(parser.WORD.embed).to(args.device) return parser logger.info('Building the fields') (TAG, CHAR, ELMO, BERT) = (None, None, None, None) if (args.encoder != 'lstm'): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) WORD = SubwordField('words', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) WORD.vocab = t.get_vocab() else: WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True) if ('tag' in args.feat): TAG = Field('tags', bos=BOS) if ('char' in args.feat): CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len) if ('elmo' in args.feat): from allennlp.modules.elmo import batch_to_ids ELMO = RawField('elmo') ELMO.compose = (lambda x: batch_to_ids(x).to(WORD.device)) if ('bert' in args.feat): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) BERT = SubwordField('bert', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) BERT.vocab = t.get_vocab() TEXT = RawField('texts') ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs) REL = Field('rels', bos=BOS) transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL) train = Dataset(transform, args.train) if (args.encoder == 'lstm'): WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None)) if (TAG is not None): TAG.build(train) if (CHAR is not None): CHAR.build(train) REL.build(train) args.update({'n_words': (len(WORD.vocab) if (args.encoder != 'lstm') else WORD.vocab.n_init), 'n_rels': len(REL.vocab), 'n_tags': (len(TAG.vocab) if (TAG is not None) else None), 'n_chars': (len(CHAR.vocab) if (CHAR is not None) else None), 'char_pad_index': (CHAR.pad_index if (CHAR is not None) else None), 'bert_pad_index': (BERT.pad_index if (BERT is not None) else None), 'pad_index': WORD.pad_index, 'unk_index': WORD.unk_index, 'bos_index': WORD.bos_index}) logger.info(f'{transform}') logger.info('Building the model') model = cls.MODEL(**args).load_pretrained((WORD.embed if hasattr(WORD, 'embed') else None)).to(args.device) logger.info(f'''{model} ''') return cls(args, model, transform)
8,270,736,086,687,907,000
Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Required if taking words as encoder input. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments.
supar/parsers/dep.py
build
LiBinNLP/HOSDP
python
@classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): '\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary.\n Required if taking words as encoder input.\n Default: 2.\n fix_len (int):\n The max length of all subword pieces. The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n ' args = Config(**locals()) args.device = ('cuda' if torch.cuda.is_available() else 'cpu') os.makedirs((os.path.dirname(path) or './'), exist_ok=True) if (os.path.exists(path) and (not args.build)): parser = cls.load(**args) parser.model = cls.MODEL(**parser.args) parser.model.load_pretrained(parser.WORD.embed).to(args.device) return parser logger.info('Building the fields') (TAG, CHAR, ELMO, BERT) = (None, None, None, None) if (args.encoder != 'lstm'): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) WORD = SubwordField('words', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) WORD.vocab = t.get_vocab() else: WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True) if ('tag' in args.feat): TAG = Field('tags', bos=BOS) if ('char' in args.feat): CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len) if ('elmo' in args.feat): from allennlp.modules.elmo import batch_to_ids ELMO = RawField('elmo') ELMO.compose = (lambda x: batch_to_ids(x).to(WORD.device)) if ('bert' in args.feat): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) BERT = SubwordField('bert', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) BERT.vocab = t.get_vocab() TEXT = RawField('texts') ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs) REL = Field('rels', bos=BOS) transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL) train = Dataset(transform, args.train) if (args.encoder == 'lstm'): WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None)) if (TAG is not None): TAG.build(train) if (CHAR is not None): CHAR.build(train) REL.build(train) args.update({'n_words': (len(WORD.vocab) if (args.encoder != 'lstm') else WORD.vocab.n_init), 'n_rels': len(REL.vocab), 'n_tags': (len(TAG.vocab) if (TAG is not None) else None), 'n_chars': (len(CHAR.vocab) if (CHAR is not None) else None), 'char_pad_index': (CHAR.pad_index if (CHAR is not None) else None), 'bert_pad_index': (BERT.pad_index if (BERT is not None) else None), 'pad_index': WORD.pad_index, 'unk_index': WORD.unk_index, 'bos_index': WORD.bos_index}) logger.info(f'{transform}') logger.info('Building the model') model = cls.MODEL(**args).load_pretrained((WORD.embed if hasattr(WORD, 'embed') else None)).to(args.device) logger.info(f'{model} ') return cls(args, model, transform)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
4,529,348,555,688,951,000
Args: train/dev/test (list[list] or str): Filenames of the train/dev/test datasets. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. update_steps (int): Gradient accumulation steps. Default: 1. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating training configs.
supar/parsers/dep.py
train
LiBinNLP/HOSDP
python
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
-7,514,356,962,041,115,000
Args: data (str): The data for evaluation, both list of instances and filename are allowed. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating evaluation configs. Returns: The loss scalar and evaluation results.
supar/parsers/dep.py
evaluate
LiBinNLP/HOSDP
python
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
3,519,811,644,094,867,000
Args: data (list[list] or str): The data for prediction, both a list of instances and filename are allowed. pred (str): If specified, the predicted results will be saved to the file. Default: ``None``. lang (str): Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize. ``None`` if tokenization is not required. Default: ``None``. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. prob (bool): If ``True``, outputs the probabilities. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating prediction configs. Returns: A :class:`~supar.utils.Dataset` object that stores the predicted results.
supar/parsers/dep.py
predict
LiBinNLP/HOSDP
python
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'crf-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf-dep-en')\n >>> parser = Parser.load('./ptb.crf.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
-8,849,996,489,005,211,000
Loads a parser with data fields and pretrained model parameters. Args: path (str): - a string with the shortcut name of a pretrained model defined in ``supar.MODEL`` to load from cache or download, e.g., ``'crf-dep-en'``. - a local path to a pretrained model, e.g., ``./<path>/model``. reload (bool): Whether to discard the existing cache and force a fresh download. Default: ``False``. src (str): Specifies where to download the model. ``'github'``: github release page. ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8). Default: None. kwargs (dict): A dict holding unconsumed arguments for updating training configs and initializing the model. Examples: >>> from supar import Parser >>> parser = Parser.load('crf-dep-en') >>> parser = Parser.load('./ptb.crf.dep.lstm.char')
supar/parsers/dep.py
load
LiBinNLP/HOSDP
python
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'crf-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf-dep-en')\n >>> parser = Parser.load('./ptb.crf.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
4,529,348,555,688,951,000
Args: train/dev/test (list[list] or str): Filenames of the train/dev/test datasets. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. update_steps (int): Gradient accumulation steps. Default: 1. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating training configs.
supar/parsers/dep.py
train
LiBinNLP/HOSDP
python
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
-7,514,356,962,041,115,000
Args: data (str): The data for evaluation, both list of instances and filename are allowed. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating evaluation configs. Returns: The loss scalar and evaluation results.
supar/parsers/dep.py
evaluate
LiBinNLP/HOSDP
python
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
3,519,811,644,094,867,000
Args: data (list[list] or str): The data for prediction, both a list of instances and filename are allowed. pred (str): If specified, the predicted results will be saved to the file. Default: ``None``. lang (str): Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize. ``None`` if tokenization is not required. Default: ``None``. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. prob (bool): If ``True``, outputs the probabilities. Default: ``False``. mbr (bool): If ``True``, performs MBR decoding. Default: ``True``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating prediction configs. Returns: A :class:`~supar.utils.Dataset` object that stores the predicted results.
supar/parsers/dep.py
predict
LiBinNLP/HOSDP
python
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'crf2o-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf2o-dep-en')\n >>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
-8,496,512,337,751,790,000
Loads a parser with data fields and pretrained model parameters. Args: path (str): - a string with the shortcut name of a pretrained model defined in ``supar.MODEL`` to load from cache or download, e.g., ``'crf2o-dep-en'``. - a local path to a pretrained model, e.g., ``./<path>/model``. reload (bool): Whether to discard the existing cache and force a fresh download. Default: ``False``. src (str): Specifies where to download the model. ``'github'``: github release page. ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8). Default: None. kwargs (dict): A dict holding unconsumed arguments for updating training configs and initializing the model. Examples: >>> from supar import Parser >>> parser = Parser.load('crf2o-dep-en') >>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')
supar/parsers/dep.py
load
LiBinNLP/HOSDP
python
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'crf2o-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf2o-dep-en')\n >>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
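The CRF and second-order CRF records repeat the same wrapper pattern but add an mbr flag (MBR decoding, default True) and default proj=True for evaluation and prediction. A hedged sketch follows, assuming the 'crf2o-dep-en' shortcut quoted in the docstring above resolves.

```python
from supar import Parser

# Second-order CRF dependency parser, loaded by the shortcut named in the
# docstring above.
parser = Parser.load('crf2o-dep-en')

# mbr=True selects MBR decoding; tree/proj constrain the output to
# well-formed projective trees, matching the defaults in the records.
dataset = parser.predict([['She', 'enjoys', 'playing', 'tennis', '.']],
                         mbr=True, tree=True, proj=True, verbose=False)
print(dataset.sentences[0])
```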
@classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): '\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary. Default: 2.\n fix_len (int):\n The max length of all subword pieces. The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n ' args = Config(**locals()) args.device = ('cuda' if torch.cuda.is_available() else 'cpu') os.makedirs((os.path.dirname(path) or './'), exist_ok=True) if (os.path.exists(path) and (not args.build)): parser = cls.load(**args) parser.model = cls.MODEL(**parser.args) parser.model.load_pretrained(parser.WORD.embed).to(args.device) return parser logger.info('Building the fields') (TAG, CHAR, ELMO, BERT) = (None, None, None, None) if (args.encoder != 'lstm'): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) WORD = SubwordField('words', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) WORD.vocab = t.get_vocab() else: WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True) if ('tag' in args.feat): TAG = Field('tags', bos=BOS) if ('char' in args.feat): CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len) if ('elmo' in args.feat): from allennlp.modules.elmo import batch_to_ids ELMO = RawField('elmo') ELMO.compose = (lambda x: batch_to_ids(x).to(WORD.device)) if ('bert' in args.feat): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) BERT = SubwordField('bert', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) BERT.vocab = t.get_vocab() TEXT = RawField('texts') ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs) SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs) REL = Field('rels', bos=BOS) transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL) train = Dataset(transform, args.train) if (args.encoder == 'lstm'): WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None)) if (TAG is not None): TAG.build(train) if (CHAR is not None): CHAR.build(train) REL.build(train) args.update({'n_words': (len(WORD.vocab) if (args.encoder != 'lstm') else WORD.vocab.n_init), 'n_rels': len(REL.vocab), 'n_tags': (len(TAG.vocab) if (TAG is not None) else None), 'n_chars': (len(CHAR.vocab) if (CHAR is not None) else None), 'char_pad_index': (CHAR.pad_index if (CHAR is not None) else None), 'bert_pad_index': (BERT.pad_index if (BERT is not None) else None), 'pad_index': WORD.pad_index, 'unk_index': WORD.unk_index, 'bos_index': WORD.bos_index}) logger.info(f'{transform}') logger.info('Building the model') model = cls.MODEL(**args).load_pretrained((WORD.embed if hasattr(WORD, 'embed') else None)).to(args.device) logger.info(f'''{model} ''') return cls(args, model, transform)
1,164,278,246,627,036,400
Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments.
supar/parsers/dep.py
build
LiBinNLP/HOSDP
python
@classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): '\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary. Default: 2.\n fix_len (int):\n The max length of all subword pieces. The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n ' args = Config(**locals()) args.device = ('cuda' if torch.cuda.is_available() else 'cpu') os.makedirs((os.path.dirname(path) or './'), exist_ok=True) if (os.path.exists(path) and (not args.build)): parser = cls.load(**args) parser.model = cls.MODEL(**parser.args) parser.model.load_pretrained(parser.WORD.embed).to(args.device) return parser logger.info('Building the fields') (TAG, CHAR, ELMO, BERT) = (None, None, None, None) if (args.encoder != 'lstm'): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) WORD = SubwordField('words', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) WORD.vocab = t.get_vocab() else: WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True) if ('tag' in args.feat): TAG = Field('tags', bos=BOS) if ('char' in args.feat): CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len) if ('elmo' in args.feat): from allennlp.modules.elmo import batch_to_ids ELMO = RawField('elmo') ELMO.compose = (lambda x: batch_to_ids(x).to(WORD.device)) if ('bert' in args.feat): from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast t = AutoTokenizer.from_pretrained(args.bert) BERT = SubwordField('bert', pad=t.pad_token, unk=t.unk_token, bos=(t.bos_token or t.cls_token), fix_len=args.fix_len, tokenize=t.tokenize, fn=(None if (not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast))) else (lambda x: (' ' + x)))) BERT.vocab = t.get_vocab() TEXT = RawField('texts') ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs) SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs) REL = Field('rels', bos=BOS) transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL) train = Dataset(transform, args.train) if (args.encoder == 'lstm'): WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None)) if (TAG is not None): TAG.build(train) if (CHAR is not None): CHAR.build(train) REL.build(train) args.update({'n_words': (len(WORD.vocab) if (args.encoder != 'lstm') else WORD.vocab.n_init), 'n_rels': len(REL.vocab), 'n_tags': (len(TAG.vocab) if (TAG is not None) else None), 'n_chars': (len(CHAR.vocab) if (CHAR is not None) else None), 'char_pad_index': (CHAR.pad_index if (CHAR is not None) else None), 'bert_pad_index': (BERT.pad_index if (BERT is not None) else None), 'pad_index': WORD.pad_index, 'unk_index': WORD.unk_index, 'bos_index': WORD.bos_index}) logger.info(f'{transform}') logger.info('Building the model') model = cls.MODEL(**args).load_pretrained((WORD.embed if hasattr(WORD, 'embed') else None)).to(args.device) logger.info(f'{model} ') return cls(args, model, transform)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
-7,717,226,299,015,962,000
Args: train/dev/test (list[list] or str): Filenames of the train/dev/test datasets. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. update_steps (int): Gradient accumulation steps. Default: 1. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating training configs.
supar/parsers/dep.py
train
LiBinNLP/HOSDP
python
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs): '\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n ' return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
-1,705,285,004,826,690,300
Args: data (str): The data for evaluation, both list of instances and filename are allowed. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. punct (bool): If ``False``, ignores the punctuation during evaluation. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. partial (bool): ``True`` denotes the trees are partially annotated. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating evaluation configs. Returns: The loss scalar and evaluation results.
supar/parsers/dep.py
evaluate
LiBinNLP/HOSDP
python
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=True, partial=False, verbose=True, **kwargs): '\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n ' return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
606,635,800,211,581,400
Args: data (list[list] or str): The data for prediction, both a list of instances and filename are allowed. pred (str): If specified, the predicted results will be saved to the file. Default: ``None``. lang (str): Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize. ``None`` if tokenization is not required. Default: ``None``. buckets (int): The number of buckets that sentences are assigned to. Default: 32. batch_size (int): The number of tokens in each batch. Default: 5000. prob (bool): If ``True``, outputs the probabilities. Default: ``False``. tree (bool): If ``True``, ensures to output well-formed trees. Default: ``False``. proj (bool): If ``True``, ensures to output projective trees. Default: ``False``. verbose (bool): If ``True``, increases the output verbosity. Default: ``True``. kwargs (dict): A dict holding unconsumed arguments for updating prediction configs. Returns: A :class:`~supar.utils.Dataset` object that stores the predicted results.
supar/parsers/dep.py
predict
LiBinNLP/HOSDP
python
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=True, verbose=True, **kwargs): '\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n ' return super().predict(**Config().update(locals()))
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'vi-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('vi-dep-en')\n >>> parser = Parser.load('./ptb.vi.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
-6,535,850,461,008,373,000
Loads a parser with data fields and pretrained model parameters. Args: path (str): - a string with the shortcut name of a pretrained model defined in ``supar.MODEL`` to load from cache or download, e.g., ``'vi-dep-en'``. - a local path to a pretrained model, e.g., ``./<path>/model``. reload (bool): Whether to discard the existing cache and force a fresh download. Default: ``False``. src (str): Specifies where to download the model. ``'github'``: github release page. ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8). Default: None. kwargs (dict): A dict holding unconsumed arguments for updating training configs and initializing the model. Examples: >>> from supar import Parser >>> parser = Parser.load('vi-dep-en') >>> parser = Parser.load('./ptb.vi.dep.lstm.char')
supar/parsers/dep.py
load
LiBinNLP/HOSDP
python
@classmethod def load(cls, path, reload=False, src=None, **kwargs): "\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'vi-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('vi-dep-en')\n >>> parser = Parser.load('./ptb.vi.dep.lstm.char')\n " return super().load(path, reload, src, **kwargs)
def parse_arguments(): 'Argument parsing for the script' parser = argparse.ArgumentParser(description='Liftbridge sub script.') parser.add_argument('subject', metavar='subject') parser.add_argument('stream', metavar='stream') parser.add_argument('-s', '--server', metavar='s', nargs='?', default='127.0.0.1:9292', help='(default: %(default)s)') parser.add_argument('-t', '--timestamp', action='store_true', help='Display timestamps') parser.add_argument('-c', '--create', action='store_true', help="Creates the stream in case it doesn't exist") parser.add_argument('-d', '--debug', action='store_true', help='Shows debug logs') return parser.parse_args()
2,814,873,715,566,704,000
Argument parsing for the script
examples/lift-sub.py
parse_arguments
LaPetiteSouris/python-liftbridge
python
def parse_arguments(): parser = argparse.ArgumentParser(description='Liftbridge sub script.') parser.add_argument('subject', metavar='subject') parser.add_argument('stream', metavar='stream') parser.add_argument('-s', '--server', metavar='s', nargs='?', default='127.0.0.1:9292', help='(default: %(default)s)') parser.add_argument('-t', '--timestamp', action='store_true', help='Display timestamps') parser.add_argument('-c', '--create', action='store_true', help="Creates the stream in case it doesn't exist") parser.add_argument('-d', '--debug', action='store_true', help='Shows debug logs') return parser.parse_args()
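Illustrative usage sketch for parse_arguments above (not part of the source record): the subject, stream, and flags passed via sys.argv are made-up values chosen only to show what the returned namespace contains.

import sys

# Simulate a command line for this sketch; a real run would simply call parse_arguments().
sys.argv = ['lift-sub.py', 'my-subject', 'my-stream', '--server', '127.0.0.1:9292', '--timestamp']
args = parse_arguments()
print(args.subject, args.stream, args.server, args.timestamp)
# -> my-subject my-stream 127.0.0.1:9292 True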
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None): '\n Provides a proxy protocol policy, which allows an ELB to carry a client connection information to a backend.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] instance_ports: List of instance ports to which the policy\n should be applied. This can be specified if the protocol is SSL or TCP.\n :param pulumi.Input[str] load_balancer: The load balancer to which the policy\n should be attached.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (not resource_name): raise TypeError('Missing resource name argument (for URN creation)') if (not isinstance(resource_name, str)): raise TypeError('Expected resource name to be a string') if (opts and (not isinstance(opts, pulumi.ResourceOptions))): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if (instance_ports is None): raise TypeError('Missing required property instance_ports') __props__['instance_ports'] = instance_ports if (load_balancer is None): raise TypeError('Missing required property load_balancer') __props__['load_balancer'] = load_balancer super(ProxyProtocolPolicy, __self__).__init__('aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy', resource_name, __props__, opts)
6,222,031,625,073,402,000
Provides a proxy protocol policy, which allows an ELB to carry a client connection information to a backend. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] instance_ports: List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. :param pulumi.Input[str] load_balancer: The load balancer to which the policy should be attached.
sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py
__init__
lemonade-hq/pulumi-aws
python
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None): '\n Provides a proxy protocol policy, which allows an ELB to carry a client connection information to a backend.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] instance_ports: List of instance ports to which the policy\n should be applied. This can be specified if the protocol is SSL or TCP.\n :param pulumi.Input[str] load_balancer: The load balancer to which the policy\n should be attached.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (not resource_name): raise TypeError('Missing resource name argument (for URN creation)') if (not isinstance(resource_name, str)): raise TypeError('Expected resource name to be a string') if (opts and (not isinstance(opts, pulumi.ResourceOptions))): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if (instance_ports is None): raise TypeError('Missing required property instance_ports') __props__['instance_ports'] = instance_ports if (load_balancer is None): raise TypeError('Missing required property load_balancer') __props__['load_balancer'] = load_balancer super(ProxyProtocolPolicy, __self__).__init__('aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy', resource_name, __props__, opts)
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001): 'Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.' shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1) shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift) interpolated_curves = np.zeros((n_variants, len(q_values_prediction))) for i in range(n_variants): interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity) return (interpolated_curves, shift)
-2,858,009,355,907,476,500
Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.
mlreflect/curve_fitter/minimizer.py
q_shift_variants
schreiber-lab/mlreflect
python
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001): shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1) shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift) interpolated_curves = np.zeros((n_variants, len(q_values_prediction))) for i in range(n_variants): interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity) return (interpolated_curves, shift)
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1): 'Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.' scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1) scaled_curves = np.zeros((n_variants, len(corrected_reflectivity))) for i in range(n_variants): scaled_curves[i] = (corrected_reflectivity.copy() * scalings[i]) return (scaled_curves, scalings)
-7,762,106,858,442,012,000
Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.
mlreflect/curve_fitter/minimizer.py
curve_scaling_variants
schreiber-lab/mlreflect
python
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1): scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1) scaled_curves = np.zeros((n_variants, len(corrected_reflectivity))) for i in range(n_variants): scaled_curves[i] = (corrected_reflectivity.copy() * scalings[i]) return (scaled_curves, scalings)
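A minimal usage sketch for curve_scaling_variants above, assuming only numpy; the reflectivity values and the fixed seed are illustration-only assumptions.

import numpy as np

np.random.seed(0)  # fixed seed so the sketch is reproducible
reflectivity = np.array([1.0, 0.5, 0.1, 0.01])  # made-up corrected reflectivity curve
scaled_curves, scalings = curve_scaling_variants(reflectivity, n_variants=3, scale=0.1)
print(scaled_curves.shape)  # (3, 4): one scaled copy of the curve per variant
print(scalings.shape)       # (3, 1): the scaling factor drawn for each variant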
def curve_variant_log_mse(curve, variant_curves): 'Calculate the log MSE of a curve and a :class:`ndarray` of curves' errors = (np.log10(curve) - np.log10(variant_curves)) return np.mean((errors ** 2), axis=1)
8,469,554,744,767,416,000
Calculate the log MSE of a curve and a :class:`ndarray` of curves
mlreflect/curve_fitter/minimizer.py
curve_variant_log_mse
schreiber-lab/mlreflect
python
def curve_variant_log_mse(curve, variant_curves): errors = (np.log10(curve) - np.log10(variant_curves)) return np.mean((errors ** 2), axis=1)
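A small worked example for curve_variant_log_mse above, using hand-picked powers of ten (illustration data, not from the source) so the log-space errors can be checked by eye.

import numpy as np

curve = np.array([1e0, 1e-1, 1e-2])
variants = np.array([[1e0, 1e-1, 1e-2],   # identical curve -> log MSE 0
                     [1e1, 1e0, 1e-1]])   # each point off by one decade -> log MSE 1
print(curve_variant_log_mse(curve, variants))  # [0. 1.]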
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)): 'Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.' prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0] start_values = np.array(prep_labels)[0] bounds = ([(val - (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)], [(val + (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)]) fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data), p0=start_values, bounds=bounds) return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))
-8,441,526,859,497,473,000
Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.
mlreflect/curve_fitter/minimizer.py
least_log_mean_squares_fit
schreiber-lab/mlreflect
python
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)): prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0] start_values = np.array(prep_labels)[0] bounds = ([(val - (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)], [(val + (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)]) fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data), p0=start_values, bounds=bounds) return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))
def log_mse_loss(prep_labels, data, generator, output_preprocessor): 'MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.' restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels)) model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0] loss = mean_squared_error(np.log10(data), np.log10(model)) return loss
2,051,504,903,990,957,800
MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.
mlreflect/curve_fitter/minimizer.py
log_mse_loss
schreiber-lab/mlreflect
python
def log_mse_loss(prep_labels, data, generator, output_preprocessor): restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels)) model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0] loss = mean_squared_error(np.log10(data), np.log10(model)) return loss
def mean_squared_error(array1, array2): 'Returns element-wise mean squared error between two arrays.' if (len(array1) != len(array2)): raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})') else: error = (np.asarray(array1) - np.asarray(array2)) return np.mean(np.atleast_2d((error ** 2)), axis=1)
2,385,236,979,828,822,000
Returns element-wise mean squared error between two arrays.
mlreflect/curve_fitter/minimizer.py
mean_squared_error
schreiber-lab/mlreflect
python
def mean_squared_error(array1, array2): if (len(array1) != len(array2)): raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})') else: error = (np.asarray(array1) - np.asarray(array2)) return np.mean(np.atleast_2d((error ** 2)), axis=1)
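A worked example for mean_squared_error above; the two short arrays are made up for illustration, and the expected value follows directly from the definition.

import numpy as np

a = np.array([1.0, 0.5, 0.25])
b = np.array([1.1, 0.45, 0.30])
# element-wise errors are [-0.1, 0.05, -0.05]; mean of squares = (0.01 + 0.0025 + 0.0025) / 3
print(mean_squared_error(a, b))  # [0.005]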
@hapic.with_api_doc() @hapic.output_body(AboutSchema()) def about(self): '\n This endpoint allow to check that the API is running. This description\n is generated from the docstring of the method.\n ' return {'version': '1.2.3', 'datetime': datetime.now()}
5,390,935,259,571,241,000
This endpoint allow to check that the API is running. This description is generated from the docstring of the method.
example/usermanagement/serve_flask_marshmallow.py
about
algoo/hapic
python
@hapic.with_api_doc() @hapic.output_body(AboutSchema()) def about(self): '\n This endpoint allow to check that the API is running. This description\n is generated from the docstring of the method.\n ' return {'version': '1.2.3', 'datetime': datetime.now()}
@hapic.with_api_doc() @hapic.output_body(UserDigestSchema(many=True)) def get_users(self): '\n Obtain users list.\n ' return UserLib().get_users()
-3,739,341,336,160,205,000
Obtain users list.
example/usermanagement/serve_flask_marshmallow.py
get_users
algoo/hapic
python
@hapic.with_api_doc() @hapic.output_body(UserDigestSchema(many=True)) def get_users(self): '\n \n ' return UserLib().get_users()
@hapic.with_api_doc() @hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND) @hapic.input_path(UserIdPathSchema()) @hapic.output_body(UserSchema()) def get_user(self, id, hapic_data: HapicData): '\n Return a user taken from the list or return a 404\n ' return UserLib().get_user(int(hapic_data.path['id']))
-8,173,223,262,207,807,000
Return a user taken from the list or return a 404
example/usermanagement/serve_flask_marshmallow.py
get_user
algoo/hapic
python
@hapic.with_api_doc() @hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND) @hapic.input_path(UserIdPathSchema()) @hapic.output_body(UserSchema()) def get_user(self, id, hapic_data: HapicData): '\n \n ' return UserLib().get_user(int(hapic_data.path['id']))
@hapic.with_api_doc() @hapic.input_body(UserSchema(exclude=('id',))) @hapic.output_body(UserSchema()) def add_user(self, hapic_data: HapicData): '\n Add a user to the list\n ' new_user = User(**hapic_data.body) return UserLib().add_user(new_user)
2,054,484,460,010,922,200
Add a user to the list
example/usermanagement/serve_flask_marshmallow.py
add_user
algoo/hapic
python
@hapic.with_api_doc() @hapic.input_body(UserSchema(exclude=('id',))) @hapic.output_body(UserSchema()) def add_user(self, hapic_data: HapicData): '\n \n ' new_user = User(**hapic_data.body) return UserLib().add_user(new_user)
def transform_audio(self, audio_segment): 'Add background noise audio.\n\n Note that this is an in-place transformation.\n\n :param audio_segment: Audio segment to add effects to.\n :type audio_segment: AudioSegmenet|SpeechSegment\n ' noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0] if (noise_json['duration'] < audio_segment.duration): raise RuntimeError('The duration of sampled noise audio is smaller than the audio segment to add effects to.') diff_duration = (noise_json['duration'] - audio_segment.duration) start = self._rng.uniform(0, diff_duration) end = (start + audio_segment.duration) noise_segment = AudioSegment.slice_from_file(noise_json['audio_filepath'], start=start, end=end) snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB) audio_segment.add_noise(noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
515,606,146,558,555,200
Add background noise audio. Note that this is an in-place transformation. :param audio_segment: Audio segment to add effects to. :type audio_segment: AudioSegmenet|SpeechSegment
deepspeech/frontend/augmentor/noise_perturb.py
transform_audio
qq1440837150/DeepSpeech
python
def transform_audio(self, audio_segment): 'Add background noise audio.\n\n Note that this is an in-place transformation.\n\n :param audio_segment: Audio segment to add effects to.\n :type audio_segment: AudioSegmenet|SpeechSegment\n ' noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0] if (noise_json['duration'] < audio_segment.duration): raise RuntimeError('The duration of sampled noise audio is smaller than the audio segment to add effects to.') diff_duration = (noise_json['duration'] - audio_segment.duration) start = self._rng.uniform(0, diff_duration) end = (start + audio_segment.duration) noise_segment = AudioSegment.slice_from_file(noise_json['audio_filepath'], start=start, end=end) snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB) audio_segment.add_noise(noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
def test_oc_get_ocp_server_version(): '\n This method get ocp server version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_ocp_server_version()
1,846,085,871,210,349,300
This method get ocp server version :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_ocp_server_version
RobertKrawitz/benchmark-runner
python
def test_oc_get_ocp_server_version(): '\n This method get ocp server version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_ocp_server_version()
def test_oc_get_kata_version(): '\n This method gets the sandboxed containers (kata) version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_kata_version()
6,685,231,822,196,794,000
This method gets the sandboxed containers (kata) version :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_kata_version
RobertKrawitz/benchmark-runner
python
def test_oc_get_kata_version(): '\n This method gets the sandboxed containers (kata) version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_kata_version()
def test_oc_get_cnv_version(): '\n This method get cnv version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_cnv_version()
924,310,108,413,792,000
This method get cnv version :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_cnv_version
RobertKrawitz/benchmark-runner
python
def test_oc_get_cnv_version(): '\n This method get cnv version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_cnv_version()
def test_oc_get_ocs_version(): '\n This method get ocs version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_ocs_version()
8,321,662,595,867,105,000
This method get ocs version :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_ocs_version
RobertKrawitz/benchmark-runner
python
def test_oc_get_ocs_version(): '\n This method get ocs version\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_ocs_version()
def test_oc_get_master_nodes(): '\n This method test get master nodes\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_master_nodes()
-2,892,055,765,142,323,700
This method test get master nodes :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_master_nodes
RobertKrawitz/benchmark-runner
python
def test_oc_get_master_nodes(): '\n This method test get master nodes\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_master_nodes()
def test_login(): '\n This method test login\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert oc.login()
8,158,043,458,917,190,000
This method test login :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_login
RobertKrawitz/benchmark-runner
python
def test_login(): '\n This method test login\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert oc.login()
def test_oc_get_pod_name(): '\n This test run oc get pod by name\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert (oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == '')
-593,645,872,348,187,500
This test run oc get pod by name :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_pod_name
RobertKrawitz/benchmark-runner
python
def test_oc_get_pod_name(): '\n This test run oc get pod by name\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert (oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == '')
def test_oc_get_pods(): '\n This test run oc get pods\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert oc.get_pods()
-7,074,968,122,280,273,000
This test run oc get pods :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_get_pods
RobertKrawitz/benchmark-runner
python
def test_oc_get_pods(): '\n This test run oc get pods\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) assert oc.get_pods()
def test_get_prom_token(): '\n This method return prom token from cluster\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_prom_token()
4,325,391,949,915,039,000
This method return prom token from cluster :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_get_prom_token
RobertKrawitz/benchmark-runner
python
def test_get_prom_token(): '\n This method return prom token from cluster\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.get_prom_token()
def test_is_cnv_installed(): '\n This method check if cnv operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_cnv_installed()
1,317,381,338,846,734,600
This method check if cnv operator is installed :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_is_cnv_installed
RobertKrawitz/benchmark-runner
python
def test_is_cnv_installed(): '\n This method check if cnv operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_cnv_installed()
def test_is_kata_installed(): '\n This method checks if the sandboxed containers (kata) operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_kata_installed()
-48,029,872,576,008,216
This method checks if the sandboxed containers (kata) operator is installed :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_is_kata_installed
RobertKrawitz/benchmark-runner
python
def test_is_kata_installed(): '\n This method checks if the sandboxed containers (kata) operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_kata_installed()
def test_is_ocs_installed(): '\n This method check if ocs operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_ocs_installed()
-5,860,578,108,085,043,000
This method check if ocs operator is installed :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_is_ocs_installed
RobertKrawitz/benchmark-runner
python
def test_is_ocs_installed(): '\n This method check if ocs operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_ocs_installed()
def test_is_kata_installed(): '\n This method check if kata operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_kata_installed()
-612,831,646,245,680,800
This method check if kata operator is installed :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_is_kata_installed
RobertKrawitz/benchmark-runner
python
def test_is_kata_installed(): '\n This method check if kata operator is installed\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() assert oc.is_kata_installed()
def test_oc_exec(): '\n Test that oc exec works\n :return:\n ' test_message = 'I am here' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() answer = oc.exec(pod_name='prometheus-k8s-0', namespace='openshift-monitoring', container='prometheus', command=f'echo "{test_message}"') assert (answer == test_message)
3,037,614,774,960,477,700
Test that oc exec works :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_oc_exec
RobertKrawitz/benchmark-runner
python
def test_oc_exec(): '\n Test that oc exec works\n :return:\n ' test_message = 'I am here' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() answer = oc.exec(pod_name='prometheus-k8s-0', namespace='openshift-monitoring', container='prometheus', command=f'echo "{test_message}"') assert (answer == test_message)
def test_collect_prometheus(): '\n Test that Prometheus data can be collected. TBD test that data is valid.\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() with tempfile.TemporaryDirectory() as dirname: snapshot = PrometheusSnapshot(oc=oc, artifacts_path=dirname, verbose=True) snapshot.prepare_for_snapshot(pre_wait_time=1) time.sleep(10) tarball = snapshot.retrieve_snapshot(post_wait_time=1) assert tarfile.is_tarfile(tarball)
-6,243,749,812,123,490,000
Test that Prometheus data can be collected. TBD test that data is valid. :return:
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
test_collect_prometheus
RobertKrawitz/benchmark-runner
python
def test_collect_prometheus(): '\n Test that Prometheus data can be collected. TBD test that data is valid.\n :return:\n ' oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password']) oc.login() with tempfile.TemporaryDirectory() as dirname: snapshot = PrometheusSnapshot(oc=oc, artifacts_path=dirname, verbose=True) snapshot.prepare_for_snapshot(pre_wait_time=1) time.sleep(10) tarball = snapshot.retrieve_snapshot(post_wait_time=1) assert tarfile.is_tarfile(tarball)
@property def splits(self): 'Dictionary of split names and probabilities. Must sum to one.' raise NotImplementedError()
401,169,447,897,842,200
Dictionary of split names and probabilities. Must sum to one.
magenta/models/score2perf/score2perf.py
splits
flyingleafe/magenta
python
@property def splits(self): raise NotImplementedError()
@property def min_hop_size_seconds(self): 'Minimum hop size in seconds at which to split input performances.' raise NotImplementedError()
-1,182,965,727,413,683,200
Minimum hop size in seconds at which to split input performances.
magenta/models/score2perf/score2perf.py
min_hop_size_seconds
flyingleafe/magenta
python
@property def min_hop_size_seconds(self): raise NotImplementedError()
@property def max_hop_size_seconds(self): 'Maximum hop size in seconds at which to split input performances.' raise NotImplementedError()
-7,320,718,132,117,424,000
Maximum hop size in seconds at which to split input performances.
magenta/models/score2perf/score2perf.py
max_hop_size_seconds
flyingleafe/magenta
python
@property def max_hop_size_seconds(self): raise NotImplementedError()
@property def num_replications(self): 'Number of times entire input performances will be split.' return 1
6,038,184,881,289,907,000
Number of times entire input performances will be split.
magenta/models/score2perf/score2perf.py
num_replications
flyingleafe/magenta
python
@property def num_replications(self): return 1
@property def add_eos_symbol(self): 'Whether to append EOS to encoded performances.' raise NotImplementedError()
-1,922,712,463,153,412,000
Whether to append EOS to encoded performances.
magenta/models/score2perf/score2perf.py
add_eos_symbol
flyingleafe/magenta
python
@property def add_eos_symbol(self): raise NotImplementedError()
@property def absolute_timing(self): 'Whether or not score should use absolute (vs. tempo-relative) timing.' return False
8,370,973,809,132,255,000
Whether or not score should use absolute (vs. tempo-relative) timing.
magenta/models/score2perf/score2perf.py
absolute_timing
flyingleafe/magenta
python
@property def absolute_timing(self): return False
@property def stretch_factors(self): 'Temporal stretch factors for data augmentation (in datagen).' return [1.0]
2,906,986,062,144,383,000
Temporal stretch factors for data augmentation (in datagen).
magenta/models/score2perf/score2perf.py
stretch_factors
flyingleafe/magenta
python
@property def stretch_factors(self): return [1.0]
@property def transpose_amounts(self): 'Pitch transposition amounts for data augmentation (in datagen).' return [0]
-979,399,267,056,224,400
Pitch transposition amounts for data augmentation (in datagen).
magenta/models/score2perf/score2perf.py
transpose_amounts
flyingleafe/magenta
python
@property def transpose_amounts(self): return [0]
@property def random_crop_length_in_datagen(self): 'Randomly crop targets to this length in datagen.' return None
9,185,185,018,205,633,000
Randomly crop targets to this length in datagen.
magenta/models/score2perf/score2perf.py
random_crop_length_in_datagen
flyingleafe/magenta
python
@property def random_crop_length_in_datagen(self): return None
@property def random_crop_in_train(self): 'Whether to randomly crop each training example when preprocessing.' return False
-3,151,171,822,926,777,300
Whether to randomly crop each training example when preprocessing.
magenta/models/score2perf/score2perf.py
random_crop_in_train
flyingleafe/magenta
python
@property def random_crop_in_train(self): return False
@property def split_in_eval(self): 'Whether to split each eval example when preprocessing.' return False
-2,600,506,686,284,337,000
Whether to split each eval example when preprocessing.
magenta/models/score2perf/score2perf.py
split_in_eval
flyingleafe/magenta
python
@property def split_in_eval(self): return False
def performances_input_transform(self, tmp_dir): 'Input performances beam transform (or dictionary thereof) for datagen.' raise NotImplementedError()
-5,446,088,655,176,826,000
Input performances beam transform (or dictionary thereof) for datagen.
magenta/models/score2perf/score2perf.py
performances_input_transform
flyingleafe/magenta
python
def performances_input_transform(self, tmp_dir): raise NotImplementedError()
def performance_encoder(self): 'Encoder for target performances.' return music_encoders.MidiPerformanceEncoder(steps_per_second=STEPS_PER_SECOND, num_velocity_bins=NUM_VELOCITY_BINS, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH, add_eos=self.add_eos_symbol)
7,870,267,202,908,675,000
Encoder for target performances.
magenta/models/score2perf/score2perf.py
performance_encoder
flyingleafe/magenta
python
def performance_encoder(self): return music_encoders.MidiPerformanceEncoder(steps_per_second=STEPS_PER_SECOND, num_velocity_bins=NUM_VELOCITY_BINS, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH, add_eos=self.add_eos_symbol)
def score_encoders(self): 'List of (name, encoder) tuples for input score components.' return []
5,118,624,544,231,853,000
List of (name, encoder) tuples for input score components.
magenta/models/score2perf/score2perf.py
score_encoders
flyingleafe/magenta
python
def score_encoders(self): return []
def augment_note_sequence(ns, stretch_factor, transpose_amount): 'Augment a NoteSequence by time stretch and pitch transposition.' augmented_ns = sequences_lib.stretch_note_sequence(ns, stretch_factor, in_place=False) try: (_, num_deleted_notes) = sequences_lib.transpose_note_sequence(augmented_ns, transpose_amount, min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH, in_place=True) except chord_symbols_lib.ChordSymbolError: raise datagen_beam.DataAugmentationError('Transposition of chord symbol(s) failed.') if num_deleted_notes: raise datagen_beam.DataAugmentationError('Transposition caused out-of-range pitch(es).') return augmented_ns
2,368,470,625,032,840,000
Augment a NoteSequence by time stretch and pitch transposition.
magenta/models/score2perf/score2perf.py
augment_note_sequence
flyingleafe/magenta
python
def augment_note_sequence(ns, stretch_factor, transpose_amount): augmented_ns = sequences_lib.stretch_note_sequence(ns, stretch_factor, in_place=False) try: (_, num_deleted_notes) = sequences_lib.transpose_note_sequence(augmented_ns, transpose_amount, min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH, in_place=True) except chord_symbols_lib.ChordSymbolError: raise datagen_beam.DataAugmentationError('Transposition of chord symbol(s) failed.') if num_deleted_notes: raise datagen_beam.DataAugmentationError('Transposition caused out-of-range pitch(es).') return augmented_ns
def __init__(self, trainer): '\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n ' self.trainer = trainer self.config = self.trainer.config self.save_dir = self.config.training_parameters.save_dir self.model_name = self.config.model self.ckpt_foldername = ckpt_name_from_core_args(self.config) self.ckpt_foldername += foldername_from_config_override(self.trainer.args) self.device = registry.get('current_device') self.ckpt_prefix = '' if hasattr(self.trainer.model, 'get_ckpt_name'): self.ckpt_prefix = (self.trainer.model.get_ckpt_name() + '_') self.config['log_foldername'] = self.ckpt_foldername self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername) self.pth_filepath = os.path.join(self.ckpt_foldername, (((self.ckpt_prefix + self.model_name) + getattr(self.config.model_attributes, self.model_name).code_name) + '_final.pth')) self.models_foldername = os.path.join(self.ckpt_foldername, 'models') if (not os.path.exists(self.models_foldername)): os.makedirs(self.models_foldername) self.save_config() self.repo_path = updir(os.path.abspath(__file__), n=3)
1,764,111,408,306,437,600
Generates a path for saving model which can also be used for resuming from a checkpoint.
pythia/utils/checkpoint.py
__init__
likenneth/mmgnn_textvqa
python
def __init__(self, trainer): '\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n ' self.trainer = trainer self.config = self.trainer.config self.save_dir = self.config.training_parameters.save_dir self.model_name = self.config.model self.ckpt_foldername = ckpt_name_from_core_args(self.config) self.ckpt_foldername += foldername_from_config_override(self.trainer.args) self.device = registry.get('current_device') self.ckpt_prefix = '' if hasattr(self.trainer.model, 'get_ckpt_name'): self.ckpt_prefix = (self.trainer.model.get_ckpt_name() + '_') self.config['log_foldername'] = self.ckpt_foldername self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername) self.pth_filepath = os.path.join(self.ckpt_foldername, (((self.ckpt_prefix + self.model_name) + getattr(self.config.model_attributes, self.model_name).code_name) + '_final.pth')) self.models_foldername = os.path.join(self.ckpt_foldername, 'models') if (not os.path.exists(self.models_foldername)): os.makedirs(self.models_foldername) self.save_config() self.repo_path = updir(os.path.abspath(__file__), n=3)
def create_user(self, email, password=None, **extra_fields): 'Create and saves a new user' if (not email): raise ValueError('Users must have email address') user = self.model(email=self.normalize_email(email), **extra_fields) user.set_password(password) user.save(using=self._db) return user
-6,611,066,487,681,690,000
Create and saves a new user
app/core/models.py
create_user
shadow-smoke/recipe-app-api
python
def create_user(self, email, password=None, **extra_fields): if (not email): raise ValueError('Users must have email address') user = self.model(email=self.normalize_email(email), **extra_fields) user.set_password(password) user.save(using=self._db) return user
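Hypothetical usage sketch for the create_user manager method above, assuming the standard Django custom-user wiring (the manager registered as objects on the project's user model, with settings configured); the email and password are placeholder values.

from django.contrib.auth import get_user_model

# Assumes this manager is attached as `objects` on the active custom user model.
user = get_user_model().objects.create_user(
    email='test@example.com',
    password='testpass123',
)
print(user.email)  # test@example.com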
def transaction_exists(self, pkglist): '\n checks the package list to see if any packages are\n involved in an incomplete transaction\n ' conflicts = [] if (not transaction_helpers): return conflicts pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist) unfinished_transactions = find_unfinished_transactions() for trans in unfinished_transactions: steps = find_ts_remaining(trans) for step in steps: (action, step_spec) = step (n, v, r, e, a) = splitFilename(step_spec) for pkg in pkglist_nvreas: label = ('%s-%s' % (n, a)) if ((n == pkg[0]) and (a == pkg[4])): if (label not in conflicts): conflicts.append(('%s-%s' % (n, a))) break return conflicts
3,814,851,130,299,122,000
checks the package list to see if any packages are involved in an incomplete transaction
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
transaction_exists
aburan28/ansible-devops-pipeline
python
def transaction_exists(self, pkglist): '\n checks the package list to see if any packages are\n involved in an incomplete transaction\n ' conflicts = [] if (not transaction_helpers): return conflicts pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist) unfinished_transactions = find_unfinished_transactions() for trans in unfinished_transactions: steps = find_ts_remaining(trans) for step in steps: (action, step_spec) = step (n, v, r, e, a) = splitFilename(step_spec) for pkg in pkglist_nvreas: label = ('%s-%s' % (n, a)) if ((n == pkg[0]) and (a == pkg[4])): if (label not in conflicts): conflicts.append(('%s-%s' % (n, a))) break return conflicts
def local_envra(self, path): 'return envra of a local rpm passed in' ts = rpm.TransactionSet() ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) fd = os.open(path, os.O_RDONLY) try: header = ts.hdrFromFdno(fd) except rpm.error as e: return None finally: os.close(fd) return ('%s:%s-%s-%s.%s' % ((header[rpm.RPMTAG_EPOCH] or '0'), header[rpm.RPMTAG_NAME], header[rpm.RPMTAG_VERSION], header[rpm.RPMTAG_RELEASE], header[rpm.RPMTAG_ARCH]))
-6,192,923,276,369,877,000
return envra of a local rpm passed in
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
local_envra
aburan28/ansible-devops-pipeline
python
def local_envra(self, path): ts = rpm.TransactionSet() ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) fd = os.open(path, os.O_RDONLY) try: header = ts.hdrFromFdno(fd) except rpm.error as e: return None finally: os.close(fd) return ('%s:%s-%s-%s.%s' % ((header[rpm.RPMTAG_EPOCH] or '0'), header[rpm.RPMTAG_NAME], header[rpm.RPMTAG_VERSION], header[rpm.RPMTAG_RELEASE], header[rpm.RPMTAG_ARCH]))
def run(self): '\n actually execute the module code backend\n ' error_msgs = [] if (not HAS_RPM_PYTHON): error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.') if (not HAS_YUM_PYTHON): error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.') if (self.disable_excludes and (yum.__version_info__ < (3, 4))): self.module.fail_json(msg="'disable_includes' is available in yum version 3.4 and onwards.") if error_msgs: self.module.fail_json(msg='. '.join(error_msgs)) if self.module.get_bin_path('yum-deprecated'): yumbin = self.module.get_bin_path('yum-deprecated') else: yumbin = self.module.get_bin_path('yum') self.yum_basecmd = [yumbin, '-d', '2', '-y'] repoquerybin = self.module.get_bin_path('repoquery', required=False) if (self.install_repoquery and (not repoquerybin) and (not self.module.check_mode)): yum_path = self.module.get_bin_path('yum') if yum_path: self.module.run_command(('%s -y install yum-utils' % yum_path)) repoquerybin = self.module.get_bin_path('repoquery', required=False) if self.list: if (not repoquerybin): self.module.fail_json(msg='repoquery is required to use list= with this module. Please install the yum-utils package.') results = {'results': self.list_stuff(repoquerybin, self.list)} else: my = self.yum_base() my.conf repoquery = None try: yum_plugins = my.plugins._plugins except AttributeError: pass else: if ('rhnplugin' in yum_plugins): if repoquerybin: repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] if (self.installroot != '/'): repoquery.extend(['--installroot', self.installroot]) results = self.ensure(repoquery) if repoquery: results['msg'] = ('%s %s' % (results.get('msg', ''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.')) self.module.exit_json(**results)
6,903,917,648,374,279,000
actually execute the module code backend
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
run
aburan28/ansible-devops-pipeline
python
def run(self): '\n \n ' error_msgs = [] if (not HAS_RPM_PYTHON): error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.') if (not HAS_YUM_PYTHON): error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.') if (self.disable_excludes and (yum.__version_info__ < (3, 4))): self.module.fail_json(msg="'disable_includes' is available in yum version 3.4 and onwards.") if error_msgs: self.module.fail_json(msg='. '.join(error_msgs)) if self.module.get_bin_path('yum-deprecated'): yumbin = self.module.get_bin_path('yum-deprecated') else: yumbin = self.module.get_bin_path('yum') self.yum_basecmd = [yumbin, '-d', '2', '-y'] repoquerybin = self.module.get_bin_path('repoquery', required=False) if (self.install_repoquery and (not repoquerybin) and (not self.module.check_mode)): yum_path = self.module.get_bin_path('yum') if yum_path: self.module.run_command(('%s -y install yum-utils' % yum_path)) repoquerybin = self.module.get_bin_path('repoquery', required=False) if self.list: if (not repoquerybin): self.module.fail_json(msg='repoquery is required to use list= with this module. Please install the yum-utils package.') results = {'results': self.list_stuff(repoquerybin, self.list)} else: my = self.yum_base() my.conf repoquery = None try: yum_plugins = my.plugins._plugins except AttributeError: pass else: if ('rhnplugin' in yum_plugins): if repoquerybin: repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] if (self.installroot != '/'): repoquery.extend(['--installroot', self.installroot]) results = self.ensure(repoquery) if repoquery: results['msg'] = ('%s %s' % (results.get('msg', ''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.')) self.module.exit_json(**results)
def graph_degree(A): '\n Returns the degree for the nodes (rows) of a symmetric \n graph in sparse CSR or CSC format, or a qobj.\n \n Parameters\n ----------\n A : qobj, csr_matrix, csc_matrix\n Input quantum object or csr_matrix.\n \n Returns\n -------\n degree : array\n Array of integers giving the degree for each node (row).\n \n ' if (A.__class__.__name__ == 'Qobj'): return _node_degrees(A.data.indices, A.data.indptr, A.shape[0]) else: return _node_degrees(A.indices, A.indptr, A.shape[0])
8,779,110,006,112,680,000
Returns the degree for the nodes (rows) of a symmetric graph in sparse CSR or CSC format, or a qobj. Parameters ---------- A : qobj, csr_matrix, csc_matrix Input quantum object or csr_matrix. Returns ------- degree : array Array of integers giving the degree for each node (row).
qutip/graph.py
graph_degree
trxw/qutip
python
def graph_degree(A): '\n Returns the degree for the nodes (rows) of a symmetric \n graph in sparse CSR or CSC format, or a qobj.\n \n Parameters\n ----------\n A : qobj, csr_matrix, csc_matrix\n Input quantum object or csr_matrix.\n \n Returns\n -------\n degree : array\n Array of integers giving the degree for each node (row).\n \n ' if (A.__class__.__name__ == 'Qobj'): return _node_degrees(A.data.indices, A.data.indptr, A.shape[0]) else: return _node_degrees(A.indices, A.indptr, A.shape[0])
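A usage sketch for graph_degree above on a plain scipy CSR matrix; the 3-node symmetric adjacency matrix is illustration data, and the expected output simply counts the nonzeros in each row.

import numpy as np
from scipy.sparse import csr_matrix

# Symmetric adjacency matrix: node 0 is connected to nodes 1 and 2.
A = csr_matrix(np.array([[0, 1, 1],
                         [1, 0, 0],
                         [1, 0, 0]]))
print(graph_degree(A))  # expected: [2 1 1]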
def breadth_first_search(A, start): '\n Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting\n from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.\n \n This function requires a matrix with symmetric structure.\n Use A+trans(A) if original matrix is not symmetric or not sure.\n \n Parameters\n ----------\n A : qobj, csr_matrix\n Input graph in CSR matrix form\n \n start : int\n Staring node for BFS traversal.\n \n Returns\n -------\n order : array\n Order in which nodes are traversed from starting node.\n \n levels : array\n Level of the nodes in the order that they are traversed.\n \n ' if (A.__class__.__name__ == 'Qobj'): A = A.data num_rows = A.shape[0] start = int(start) (order, levels) = _breadth_first_search(A.indices, A.indptr, num_rows, start) return (order[(order != (- 1))], levels[(levels != (- 1))])
-5,681,492,159,195,273,000
Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs. This function requires a matrix with symmetric structure. Use A+trans(A) if original matrix is not symmetric or not sure. Parameters ---------- A : qobj, csr_matrix Input graph in CSR matrix form start : int Starting node for BFS traversal. Returns ------- order : array Order in which nodes are traversed from starting node. levels : array Level of the nodes in the order that they are traversed.
qutip/graph.py
breadth_first_search
trxw/qutip
python
def breadth_first_search(A, start): '\n Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting\n from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.\n \n This function requires a matrix with symmetric structure.\n Use A+trans(A) if original matrix is not symmetric or not sure.\n \n Parameters\n ----------\n A : qobj, csr_matrix\n Input graph in CSR matrix form\n \n start : int\n Staring node for BFS traversal.\n \n Returns\n -------\n order : array\n Order in which nodes are traversed from starting node.\n \n levels : array\n Level of the nodes in the order that they are traversed.\n \n ' if (A.__class__.__name__ == 'Qobj'): A = A.data num_rows = A.shape[0] start = int(start) (order, levels) = _breadth_first_search(A.indices, A.indptr, num_rows, start) return (order[(order != (- 1))], levels[(levels != (- 1))])
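A usage sketch for breadth_first_search under the same assumption; the path graph below is illustrative only.

from qutip.graph import breadth_first_search
import numpy as np
import scipy.sparse as sp

# path graph 0-1-2-3 with symmetric structure
A = sp.csr_matrix(np.array([[0, 1, 0, 0],
                            [1, 0, 1, 0],
                            [0, 1, 0, 1],
                            [0, 0, 1, 0]]))
order, levels = breadth_first_search(A, 0)
# starting from node 0 the traversal visits 0, 1, 2, 3 at levels 0, 1, 2, 3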
def symrcm(A, sym=False): '\n Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj\n in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,\n this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).\n \n It is assumed by default (*sym=False*) that the input matrix is not symmetric. This\n is because it is faster to do A+Trans(A) than it is to check for symmetry for \n a generic matrix. If you are guaranteed that the matrix is symmetric in structure\n (values of matrix element do not matter) then set *sym=True*\n \n Parameters\n ----------\n A : csr_matrix, qobj\n Input sparse csr_matrix or Qobj.\n \n sym : bool {False, True}\n Flag to set whether input matrix is symmetric.\n \n Returns\n -------\n perm : array\n Array of permuted row and column indices.\n \n Notes\n -----\n This routine is used primarily for internal reordering of Lindblad super-operators\n for use in iterative solver routines.\n \n References\n ----------\n E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices",\n ACM \'69 Proceedings of the 1969 24th national conference, (1969).\n \n ' nrows = A.shape[0] if (A.__class__.__name__ == 'Qobj'): if (not sym): A = (A.data + A.data.transpose()) return _rcm(A.indices, A.indptr, nrows) else: return _rcm(A.data.indices, A.data.indptr, nrows) else: if (not sym): A = (A + A.transpose()) return _rcm(A.indices, A.indptr, nrows)
-2,374,158,014,856,256,000
Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric, this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default). It is assumed by default (*sym=False*) that the input matrix is not symmetric. This is because it is faster to do A+Trans(A) than it is to check for symmetry for a generic matrix. If you are guaranteed that the matrix is symmetric in structure (values of matrix element do not matter) then set *sym=True* Parameters ---------- A : csr_matrix, qobj Input sparse csr_matrix or Qobj. sym : bool {False, True} Flag to set whether input matrix is symmetric. Returns ------- perm : array Array of permuted row and column indices. Notes ----- This routine is used primarily for internal reordering of Lindblad super-operators for use in iterative solver routines. References ---------- E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices", ACM '69 Proceedings of the 1969 24th national conference, (1969).
qutip/graph.py
symrcm
trxw/qutip
python
def symrcm(A, sym=False): '\n Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj\n in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,\n this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).\n \n It is assumed by default (*sym=False*) that the input matrix is not symmetric. This\n is because it is faster to do A+Trans(A) than it is to check for symmetry for \n a generic matrix. If you are guaranteed that the matrix is symmetric in structure\n (values of matrix element do not matter) then set *sym=True*\n \n Parameters\n ----------\n A : csr_matrix, qobj\n Input sparse csr_matrix or Qobj.\n \n sym : bool {False, True}\n Flag to set whether input matrix is symmetric.\n \n Returns\n -------\n perm : array\n Array of permuted row and column indices.\n \n Notes\n -----\n This routine is used primarily for internal reordering of Lindblad super-operators\n for use in iterative solver routines.\n \n References\n ----------\n E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices",\n ACM \'69 Proceedings of the 1969 24th national conference, (1969).\n \n ' nrows = A.shape[0] if (A.__class__.__name__ == 'Qobj'): if (not sym): A = (A.data + A.data.transpose()) return _rcm(A.indices, A.indptr, nrows) else: return _rcm(A.data.indices, A.data.indptr, nrows) else: if (not sym): A = (A + A.transpose()) return _rcm(A.indices, A.indptr, nrows)
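A usage sketch for symrcm, again assuming this qutip snapshot; the arrow-shaped matrix is an illustrative assumption, and the returned permutation is applied with standard SciPy fancy indexing.

from qutip.graph import symrcm
import scipy.sparse as sp

# arrow-shaped sparsity pattern whose bandwidth an RCM ordering typically reduces
A = sp.csr_matrix([[4.0, 1.0, 1.0, 1.0],
                   [1.0, 2.0, 0.0, 0.0],
                   [1.0, 0.0, 2.0, 0.0],
                   [1.0, 0.0, 0.0, 2.0]])
perm = symrcm(A, sym=True)   # structure is already symmetric, so skip A + A.T
B = A[perm, :][:, perm]      # symmetrically permuted matrix with reduced bandwidth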
def bfs_matching(A): '\n Returns an array of row permutations that removes nonzero elements\n from the diagonal of a nonsingular square CSC sparse matrix. Such\n a permutation is always possible provided that the matrix is \n nonsingular.\n \n This function looks at the structure of the matrix only.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function relies on a maximum cardinality bipartite matching algorithm\n based on a breadth-first search (BFS) of the underlying graph[1]_.\n \n References\n ----------\n .. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and \n Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.\n 38, no. 2, (2011).\n \n ' nrows = A.shape[0] if (A.shape[0] != A.shape[1]): raise ValueError('bfs_matching requires a square matrix.') if (A.__class__.__name__ == 'Qobj'): A = A.data.tocsc() elif (not sp.isspmatrix_csc(A)): A = sp.csc_matrix(A) warn('bfs_matching requires CSC matrix format.', sp.SparseEfficiencyWarning) perm = _bfs_matching(A.indices, A.indptr, nrows) if np.any((perm == (- 1))): raise Exception('Possibly singular input matrix.') return perm
3,940,556,777,090,186,000
Returns an array of row permutations that removes nonzero elements from the diagonal of a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at the structure of the matrix only. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function relies on a maximum cardinality bipartite matching algorithm based on a breadth-first search (BFS) of the underlying graph[1]_. References ---------- .. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw. 38, no. 2, (2011).
qutip/graph.py
bfs_matching
trxw/qutip
python
def bfs_matching(A): '\n Returns an array of row permutations that removes nonzero elements\n from the diagonal of a nonsingular square CSC sparse matrix. Such\n a permutation is always possible provided that the matrix is \n nonsingular.\n \n This function looks at the structure of the matrix only.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function relies on a maximum cardinality bipartite matching algorithm\n based on a breadth-first search (BFS) of the underlying graph[1]_.\n \n References\n ----------\n .. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and \n Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.\n 38, no. 2, (2011).\n \n ' nrows = A.shape[0] if (A.shape[0] != A.shape[1]): raise ValueError('bfs_matching requires a square matrix.') if (A.__class__.__name__ == 'Qobj'): A = A.data.tocsc() elif (not sp.isspmatrix_csc(A)): A = sp.csc_matrix(A) warn('bfs_matching requires CSC matrix format.', sp.SparseEfficiencyWarning) perm = _bfs_matching(A.indices, A.indptr, nrows) if np.any((perm == (- 1))): raise Exception('Possibly singular input matrix.') return perm
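A usage sketch for bfs_matching, assuming this qutip snapshot; the 2x2 matrix is an illustrative structurally nonsingular example with zeros on the diagonal.

from qutip.graph import bfs_matching
import scipy.sparse as sp

# zeros on the diagonal, but a row swap makes the diagonal zero-free
A = sp.csc_matrix([[0.0, 1.0],
                   [1.0, 0.0]])
perm = bfs_matching(A)   # row permutation computed from the structure only
B = A[perm, :]           # permuted matrix with nonzero diagonal entries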
def weighted_bfs_matching(A): '\n Returns an array of row permutations that attempts to maximize\n the product of the ABS values of the diagonal elements in \n a nonsingular square CSC sparse matrix. Such a permutation is \n always possible provided that the matrix is nonsingular.\n \n This function looks at both the structure and ABS values of the \n underlying matrix.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function uses a weighted maximum cardinality bipartite matching \n algorithm based on breadth-first search (BFS). The columns are weighted\n according to the element of max ABS value in the associated rows and \n are traversed in descending order by weight. When performing the BFS \n traversal, the row associated to a given column is the one with maximum \n weight. Unlike other techniques[1]_, this algorithm does not guarantee the \n product of the diagonal is maximized. However, this limitation is offset\n by the substantially faster runtime of this method. \n \n References\n ----------\n .. [1] I. S. Duff and J. Koster, "The design and use of algorithms for \n permuting large entries to the diagonal of sparse matrices", SIAM J. \n Matrix Anal. and Applics. 20, no. 4, 889 (1997).\n \n ' nrows = A.shape[0] if (A.shape[0] != A.shape[1]): raise ValueError('weighted_bfs_matching requires a square matrix.') if (A.__class__.__name__ == 'Qobj'): A = A.data.tocsc() elif (not sp.isspmatrix_csc(A)): A = sp.csc_matrix(A) warn('weighted_bfs_matching requires CSC matrix format', sp.SparseEfficiencyWarning) perm = _weighted_bfs_matching(np.asarray(np.abs(A.data), dtype=float), A.indices, A.indptr, nrows) if np.any((perm == (- 1))): raise Exception('Possibly singular input matrix.') return perm
-5,521,932,354,056,884,000
Returns an array of row permutations that attempts to maximize the product of the ABS values of the diagonal elements in a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at both the structure and ABS values of the underlying matrix. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function uses a weighted maximum cardinality bipartite matching algorithm based on breadth-first search (BFS). The columns are weighted according to the element of max ABS value in the associated rows and are traversed in descending order by weight. When performing the BFS traversal, the row associated to a given column is the one with maximum weight. Unlike other techniques[1]_, this algorithm does not guarantee the product of the diagonal is maximized. However, this limitation is offset by the substantially faster runtime of this method. References ---------- .. [1] I. S. Duff and J. Koster, "The design and use of algorithms for permuting large entries to the diagonal of sparse matrices", SIAM J. Matrix Anal. and Applics. 20, no. 4, 889 (1997).
qutip/graph.py
weighted_bfs_matching
trxw/qutip
python
def weighted_bfs_matching(A): '\n Returns an array of row permutations that attempts to maximize\n the product of the ABS values of the diagonal elements in \n a nonsingular square CSC sparse matrix. Such a permutation is \n always possible provided that the matrix is nonsingular.\n \n This function looks at both the structure and ABS values of the \n underlying matrix.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function uses a weighted maximum cardinality bipartite matching \n algorithm based on breadth-first search (BFS). The columns are weighted\n according to the element of max ABS value in the associated rows and \n are traversed in descending order by weight. When performing the BFS \n traversal, the row associated to a given column is the one with maximum \n weight. Unlike other techniques[1]_, this algorithm does not guarantee the \n product of the diagonal is maximized. However, this limitation is offset\n by the substantially faster runtime of this method. \n \n References\n ----------\n .. [1] I. S. Duff and J. Koster, "The design and use of algorithms for \n permuting large entries to the diagonal of sparse matrices", SIAM J. \n Matrix Anal. and Applics. 20, no. 4, 889 (1997).\n \n ' nrows = A.shape[0] if (A.shape[0] != A.shape[1]): raise ValueError('weighted_bfs_matching requires a square matrix.') if (A.__class__.__name__ == 'Qobj'): A = A.data.tocsc() elif (not sp.isspmatrix_csc(A)): A = sp.csc_matrix(A) warn('weighted_bfs_matching requires CSC matrix format', sp.SparseEfficiencyWarning) perm = _weighted_bfs_matching(np.asarray(np.abs(A.data), dtype=float), A.indices, A.indptr, nrows) if np.any((perm == (- 1))): raise Exception('Possibly singular input matrix.') return perm
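A companion sketch for weighted_bfs_matching; unlike the purely structural variant above, it also looks at magnitudes, so the illustrative matrix below gives it a choice between small and large candidate diagonal entries.

from qutip.graph import weighted_bfs_matching
import scipy.sparse as sp

# both diagonal assignments are structurally valid; the weighted matching
# favours the one built from the larger-magnitude entries (4.0 and 5.0)
A = sp.csc_matrix([[0.1, 5.0],
                   [4.0, 0.2]])
perm = weighted_bfs_matching(A)
B = A[perm, :]   # rows permuted toward a large-magnitude diagonal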
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): 'A better wrapper over request for deferred signing' if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body'])
-5,809,463,524,355,869,000
A better wrapper over request for deferred signing
python/ccxt/base/exchange.py
fetch2
newdime/ccxt
python
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body'])
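The fetch2 record above is ccxt's deferred-signing hook: the request is signed only after the rate limiter has run, so nonces and timestamps reflect the actual send time. The sketch below is a simplified, hypothetical illustration of that ordering, not ccxt's real classes.

import time

class TinyClient:
    """Toy illustration of throttle-then-sign-then-fetch ordering."""
    rateLimit = 200        # milliseconds between requests
    last_request = 0.0

    def throttle(self):
        elapsed_ms = (time.time() - self.last_request) * 1000
        if elapsed_ms < self.rateLimit:
            time.sleep((self.rateLimit - elapsed_ms) / 1000)

    def sign(self, path, params):
        # signing happens after throttling, so the nonce matches the send time
        return {'url': '/' + path, 'nonce': int(time.time() * 1000), 'params': params}

    def fetch2(self, path, params=None):
        self.throttle()
        self.last_request = time.time()
        request = self.sign(path, params or {})
        return request   # a real client would now perform the HTTP call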
def request(self, path, api='public', method='GET', params={}, headers=None, body=None): 'Exchange.request is the entry point for all generated methods' return self.fetch2(path, api, method, params, headers, body)
6,673,804,092,993,897,000
Exchange.request is the entry point for all generated methods
python/ccxt/base/exchange.py
request
newdime/ccxt
python
def request(self, path, api='public', method='GET', params={}, headers=None, body=None): return self.fetch2(path, api, method, params, headers, body)
def find_broadly_matched_key(self, broad, string): 'A helper method for matching error strings exactly vs broadly' keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if (string.find(key) >= 0): return key return None
1,118,882,194,763,658,900
A helper method for matching error strings exactly vs broadly
python/ccxt/base/exchange.py
find_broadly_matched_key
newdime/ccxt
python
def find_broadly_matched_key(self, broad, string): keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if (string.find(key) >= 0): return key return None
def fetch(self, url, method='GET', headers=None, body=None): 'Perform a HTTP request and return decoded JSON data' request_headers = self.prepare_request_headers(headers) url = (self.proxy + url) if self.verbose: print('\nRequest:', method, url, request_headers, body) self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None json_response = None try: response = self.session.request(method, url, data=body, headers=request_headers, timeout=int((self.timeout / 1000)), proxies=self.proxies) http_response = response.text json_response = (self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None) headers = response.headers if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastJsonResponse: self.last_json_response = json_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print('\nResponse:', method, url, response.status_code, headers, http_response) self.logger.debug('%s %s, Response: %s %s %s', method, url, response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: error_string = str(e) if (('ECONNRESET' in error_string) or ('Connection aborted.' in error_string)): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_response(http_response, json_response, url, method, headers, body) if (json_response is not None): return json_response return http_response
-7,195,045,384,639,707,000
Perform a HTTP request and return decoded JSON data
python/ccxt/base/exchange.py
fetch
newdime/ccxt
python
def fetch(self, url, method='GET', headers=None, body=None): request_headers = self.prepare_request_headers(headers) url = (self.proxy + url) if self.verbose: print('\nRequest:', method, url, request_headers, body) self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None json_response = None try: response = self.session.request(method, url, data=body, headers=request_headers, timeout=int((self.timeout / 1000)), proxies=self.proxies) http_response = response.text json_response = (self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None) headers = response.headers if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastJsonResponse: self.last_json_response = json_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print('\nResponse:', method, url, response.status_code, headers, http_response) self.logger.debug('%s %s, Response: %s %s %s', method, url, response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: error_string = str(e) if (('ECONNRESET' in error_string) or ('Connection aborted.' in error_string)): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_response(http_response, json_response, url, method, headers, body) if (json_response is not None): return json_response return http_response
@staticmethod def safe_either(method, dictionary, key1, key2, default_value=None): 'A helper-wrapper for the safe_value_2() family.' value = method(dictionary, key1) return (value if (value is not None) else method(dictionary, key2, default_value))
-2,371,737,021,285,098,500
A helper-wrapper for the safe_value_2() family.
python/ccxt/base/exchange.py
safe_either
newdime/ccxt
python
@staticmethod def safe_either(method, dictionary, key1, key2, default_value=None): value = method(dictionary, key1) return (value if (value is not None) else method(dictionary, key2, default_value))
@staticmethod def truncate(num, precision=0): 'Deprecated, use decimal_to_precision instead' if (precision > 0): decimal_precision = math.pow(10, precision) return (math.trunc((num * decimal_precision)) / decimal_precision) return int(Exchange.truncate_to_string(num, precision))
5,881,430,384,757,220,000
Deprecated, use decimal_to_precision instead
python/ccxt/base/exchange.py
truncate
newdime/ccxt
python
@staticmethod def truncate(num, precision=0): if (precision > 0): decimal_precision = math.pow(10, precision) return (math.trunc((num * decimal_precision)) / decimal_precision) return int(Exchange.truncate_to_string(num, precision))
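A short worked example of the (deprecated) Exchange.truncate helper above: for precision > 0 it multiplies by 10**precision, truncates toward zero, and divides back. The numeric values are illustrative.

from ccxt.base.exchange import Exchange

# 1.23456 * 100 = 123.456 -> math.trunc(...) = 123 -> 123 / 100 = 1.23
Exchange.truncate(1.23456, 2)    # 1.23
# truncation goes toward zero, so negatives are not floored
Exchange.truncate(-1.23956, 2)   # -1.23
# precision = 0 falls through to truncate_to_string and returns an int
Exchange.truncate(7.9)           # 7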
@staticmethod def truncate_to_string(num, precision=0): 'Deprecated, todo: remove references from subclasses' if (precision > 0): parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = (decimal_digits if len(decimal_digits) else '0') return ((parts[0] + '.') + decimal_digits) return ('%d' % num)
-3,156,627,279,850,857,000
Deprecated, todo: remove references from subclasses
python/ccxt/base/exchange.py
truncate_to_string
newdime/ccxt
python
@staticmethod def truncate_to_string(num, precision=0): if (precision > 0): parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = (decimal_digits if len(decimal_digits) else '0') return ((parts[0] + '.') + decimal_digits) return ('%d' % num)
def check_address(self, address): 'Checks an address is not the same character repeated or an empty sequence' if (address is None): self.raise_error(InvalidAddress, details='address is None') if (all(((letter == address[0]) for letter in address)) or (len(address) < self.minFundingAddressLength) or (' ' in address)): self.raise_error(InvalidAddress, details=(((('address is invalid or has less than ' + str(self.minFundingAddressLength)) + ' characters: "') + str(address)) + '"')) return address
-2,909,175,738,945,414,700
Checks an address is not the same character repeated or an empty sequence
python/ccxt/base/exchange.py
check_address
newdime/ccxt
python
def check_address(self, address): if (address is None): self.raise_error(InvalidAddress, details='address is None') if (all(((letter == address[0]) for letter in address)) or (len(address) < self.minFundingAddressLength) or (' ' in address)): self.raise_error(InvalidAddress, details=(((('address is invalid or has less than ' + str(self.minFundingAddressLength)) + ' characters: "') + str(address)) + '"')) return address
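A usage sketch for check_address; any instantiated ccxt exchange object will do, and the address strings below are made-up placeholders, not real deposit addresses.

import ccxt
from ccxt.base.errors import InvalidAddress

exchange = ccxt.binance()   # any exchange instance works; check_address is generic
exchange.check_address('exampledepositaddress123')   # plausible string, returned unchanged
try:
    exchange.check_address('xxxxxxxxxxxx')            # one repeated character -> rejected
except InvalidAddress:
    pass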
@functools.wraps(entry) def inner(_self, params=None): '\n Inner is called when a generated method (publicGetX) is called.\n _self is a reference to self created by function.__get__(exchange, type(exchange))\n https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial\n ' inner_kwargs = dict(outer_kwargs) if (params is not None): inner_kwargs['params'] = params return entry(_self, **inner_kwargs)
3,173,901,515,913,682,400
Inner is called when a generated method (publicGetX) is called. _self is a reference to self created by function.__get__(exchange, type(exchange)) https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
python/ccxt/base/exchange.py
inner
newdime/ccxt
python
@functools.wraps(entry) def inner(_self, params=None): '\n Inner is called when a generated method (publicGetX) is called.\n _self is a reference to self created by function.__get__(exchange, type(exchange))\n https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial\n ' inner_kwargs = dict(outer_kwargs) if (params is not None): inner_kwargs['params'] = params return entry(_self, **inner_kwargs)
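The inner closure above is what turns ccxt's generic request() into named endpoint methods such as publicGetTicker. Below is a simplified, hypothetical sketch of that wiring; MiniExchange and make_endpoint are illustrative stand-ins, and the surrounding generator that ccxt uses to build these methods from its API description is not shown.

import functools

class MiniExchange:
    # stand-in for ccxt's generic request(); returns its inputs for illustration
    def request(self, path, api='public', method='GET', params=None):
        return (method, api, path, params or {})

def make_endpoint(entry, **outer_kwargs):
    # mirrors the closure in the record: outer_kwargs is fixed per endpoint,
    # params is the only argument callers supply at runtime
    @functools.wraps(entry)
    def inner(_self, params=None):
        inner_kwargs = dict(outer_kwargs)
        if params is not None:
            inner_kwargs['params'] = params
        return entry(_self, **inner_kwargs)
    return inner

MiniExchange.publicGetTicker = make_endpoint(
    MiniExchange.request, path='ticker', api='public', method='GET')

print(MiniExchange().publicGetTicker({'symbol': 'BTC/USD'}))
# ('GET', 'public', 'ticker', {'symbol': 'BTC/USD'})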
def get_core_directory(paths: Optional[Union[(Text, List[Text])]]) -> Text: 'Recursively collects all Core training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to temporary directory containing all found Core training files.\n ' (core_files, _) = get_core_nlu_files(paths) return _copy_files_to_new_dir(core_files)
-2,413,637,754,033,914,000
Recursively collects all Core training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found Core training files.
rasa/data.py
get_core_directory
Amirali-Shirkh/rasa-for-botfront
python
def get_core_directory(paths: Optional[Union[(Text, List[Text])]]) -> Text: 'Recursively collects all Core training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to temporary directory containing all found Core training files.\n ' (core_files, _) = get_core_nlu_files(paths) return _copy_files_to_new_dir(core_files)
def get_nlu_directory(paths: Optional[Union[(Text, List[Text])]]) -> Text: 'Recursively collects all NLU training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to temporary directory containing all found NLU training files.\n ' (_, nlu_files) = get_core_nlu_files(paths) return _copy_files_to_new_dir(nlu_files)
871,682,756,566,041,500
Recursively collects all NLU training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found NLU training files.
rasa/data.py
get_nlu_directory
Amirali-Shirkh/rasa-for-botfront
python
def get_nlu_directory(paths: Optional[Union[(Text, List[Text])]]) -> Text: 'Recursively collects all NLU training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to temporary directory containing all found NLU training files.\n ' (_, nlu_files) = get_core_nlu_files(paths) return _copy_files_to_new_dir(nlu_files)
def get_core_nlu_directories(paths: Optional[Union[(Text, List[Text])]]) -> Tuple[(Text, Text)]: 'Recursively collects all training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to directory containing the Core files and path to directory\n containing the NLU training files.\n ' (story_files, nlu_data_files) = get_core_nlu_files(paths) story_directory = _copy_files_to_new_dir(story_files) nlu_directory = _copy_files_to_new_dir(nlu_data_files) return (story_directory, nlu_directory)
4,776,967,156,037,344,000
Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to directory containing the Core files and path to directory containing the NLU training files.
rasa/data.py
get_core_nlu_directories
Amirali-Shirkh/rasa-for-botfront
python
def get_core_nlu_directories(paths: Optional[Union[(Text, List[Text])]]) -> Tuple[(Text, Text)]: 'Recursively collects all training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Path to directory containing the Core files and path to directory\n containing the NLU training files.\n ' (story_files, nlu_data_files) = get_core_nlu_files(paths) story_directory = _copy_files_to_new_dir(story_files) nlu_directory = _copy_files_to_new_dir(nlu_data_files) return (story_directory, nlu_directory)
def get_core_nlu_files(paths: Optional[Union[(Text, List[Text])]]) -> Tuple[(List[Text], List[Text])]: 'Recursively collects all training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Tuple of paths to story and NLU files.\n ' story_files = set() nlu_data_files = set() if (paths is None): paths = [] elif isinstance(paths, str): paths = [paths] for path in set(paths): if (not path): continue if _is_valid_filetype(path): if is_nlu_file(path): nlu_data_files.add(os.path.abspath(path)) elif is_story_file(path): story_files.add(os.path.abspath(path)) else: (new_story_files, new_nlu_data_files) = _find_core_nlu_files_in_directory(path) story_files.update(new_story_files) nlu_data_files.update(new_nlu_data_files) return (sorted(story_files), sorted(nlu_data_files))
3,364,048,093,809,867,300
Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Tuple of paths to story and NLU files.
rasa/data.py
get_core_nlu_files
Amirali-Shirkh/rasa-for-botfront
python
def get_core_nlu_files(paths: Optional[Union[(Text, List[Text])]]) -> Tuple[(List[Text], List[Text])]: 'Recursively collects all training files from a list of paths.\n\n Args:\n paths: List of paths to training files or folders containing them.\n\n Returns:\n Tuple of paths to story and NLU files.\n ' story_files = set() nlu_data_files = set() if (paths is None): paths = [] elif isinstance(paths, str): paths = [paths] for path in set(paths): if (not path): continue if _is_valid_filetype(path): if is_nlu_file(path): nlu_data_files.add(os.path.abspath(path)) elif is_story_file(path): story_files.add(os.path.abspath(path)) else: (new_story_files, new_nlu_data_files) = _find_core_nlu_files_in_directory(path) story_files.update(new_story_files) nlu_data_files.update(new_nlu_data_files) return (sorted(story_files), sorted(nlu_data_files))
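A usage sketch for get_core_nlu_files, assuming the Rasa version catalogued here is installed; the "data/" directory layout is hypothetical.

from rasa.data import get_core_nlu_files

# "data/" is a hypothetical project directory containing stories and NLU files
story_files, nlu_files = get_core_nlu_files(["data/"])
# story_files: sorted absolute paths of story (Markdown) training files
# nlu_files:   sorted absolute paths of NLU training files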
def is_nlu_file(file_path: Text) -> bool: "Checks if a file is a Rasa compatible nlu file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a nlu file, otherwise `False`.\n " return (loading.guess_format(file_path) != loading.UNK)
-8,459,099,074,937,874,000
Checks if a file is a Rasa compatible nlu file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a nlu file, otherwise `False`.
rasa/data.py
is_nlu_file
Amirali-Shirkh/rasa-for-botfront
python
def is_nlu_file(file_path: Text) -> bool: "Checks if a file is a Rasa compatible nlu file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a nlu file, otherwise `False`.\n " return (loading.guess_format(file_path) != loading.UNK)
def is_story_file(file_path: Text) -> bool: "Checks if a file is a Rasa story file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a story file, otherwise `False`.\n " if (not file_path.endswith('.md')): return False try: with open(file_path, encoding=DEFAULT_ENCODING, errors='surrogateescape') as lines: return any((_contains_story_pattern(line) for line in lines)) except Exception as e: logger.error(f"Tried to check if '{file_path}' is a story file, but failed to read it. If this file contains story data, you should investigate this error, otherwise it is probably best to move the file to a different location. Error: {e}") return False
-1,701,745,109,258,489,300
Checks if a file is a Rasa story file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a story file, otherwise `False`.
rasa/data.py
is_story_file
Amirali-Shirkh/rasa-for-botfront
python
def is_story_file(file_path: Text) -> bool: "Checks if a file is a Rasa story file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a story file, otherwise `False`.\n " if (not file_path.endswith('.md')): return False try: with open(file_path, encoding=DEFAULT_ENCODING, errors='surrogateescape') as lines: return any((_contains_story_pattern(line) for line in lines)) except Exception as e: logger.error(f"Tried to check if '{file_path}' is a story file, but failed to read it. If this file contains story data, you should investigate this error, otherwise it is probably best to move the file to a different location. Error: {e}") return False
def is_domain_file(file_path: Text) -> bool: "Checks whether the given file path is a Rasa domain file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a domain file, otherwise `False`.\n " file_name = os.path.basename(file_path) return (file_name in ['domain.yml', 'domain.yaml'])
-5,027,564,738,567,654,000
Checks whether the given file path is a Rasa domain file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a domain file, otherwise `False`.
rasa/data.py
is_domain_file
Amirali-Shirkh/rasa-for-botfront
python
def is_domain_file(file_path: Text) -> bool: "Checks whether the given file path is a Rasa domain file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a domain file, otherwise `False`.\n " file_name = os.path.basename(file_path) return (file_name in ['domain.yml', 'domain.yaml'])
def is_config_file(file_path: Text) -> bool: "Checks whether the given file path is a Rasa config file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a Rasa config file, otherwise `False`.\n " file_name = os.path.basename(file_path) return (file_name in ['config.yml', 'config.yaml'])
-499,820,486,625,838,900
Checks whether the given file path is a Rasa config file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a Rasa config file, otherwise `False`.
rasa/data.py
is_config_file
Amirali-Shirkh/rasa-for-botfront
python
def is_config_file(file_path: Text) -> bool: "Checks whether the given file path is a Rasa config file.\n\n Args:\n file_path: Path of the file which should be checked.\n\n Returns:\n `True` if it's a Rasa config file, otherwise `False`.\n " file_name = os.path.basename(file_path) return (file_name in ['config.yml', 'config.yaml'])
def login(self, vmanage_ip, username, password): 'Login to vmanage' base_url_str = ('https://%s:8443/' % vmanage_ip) login_action = 'j_security_check' login_data = {'j_username': username, 'j_password': password} login_url = (base_url_str + login_action) url = (base_url_str + login_url) sess = requests.session() login_response = sess.post(url=login_url, data=login_data, verify=False) if (b'<html>' in login_response.content): print('Login Failed') sys.exit(0) self.session[vmanage_ip] = sess
8,070,638,483,843,328,000
Login to vmanage
app/Http/Controllers/Dashboard/Wan_edge_Health.py
login
victornguyen98/luanvan2020
python
def login(self, vmanage_ip, username, password): base_url_str = ('https://%s:8443/' % vmanage_ip) login_action = 'j_security_check' login_data = {'j_username': username, 'j_password': password} login_url = (base_url_str + login_action) url = (base_url_str + login_url) sess = requests.session() login_response = sess.post(url=login_url, data=login_data, verify=False) if (b'<html>' in login_response.content): print('Login Failed') sys.exit(0) self.session[vmanage_ip] = sess
@distributed_trace def list(self, **kwargs: Any) -> AsyncIterable['_models.OperationListResult']: 'Lists all of the available Microsoft.Resources REST API operations.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either OperationListResult or the result of cls(response)\n :rtype:\n ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.OperationListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_operations_list_request(api_version=api_version, template_url=self.list.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_operations_list_request(template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request async def extract_data(pipeline_response): deserialized = self._deserialize('OperationListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
-4,370,259,222,565,901,300
Lists all of the available Microsoft.Resources REST API operations. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either OperationListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.OperationListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
list
AikoBB/azure-sdk-for-python
python
@distributed_trace def list(self, **kwargs: Any) -> AsyncIterable['_models.OperationListResult']: 'Lists all of the available Microsoft.Resources REST API operations.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either OperationListResult or the result of cls(response)\n :rtype:\n ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.OperationListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_operations_list_request(api_version=api_version, template_url=self.list.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_operations_list_request(template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request async def extract_data(pipeline_response): deserialized = self._deserialize('OperationListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
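A usage sketch for the paged operations listing above, assuming the multi-API azure-mgmt-resource package that ships this versioned async client plus azure-identity; subscription_id is supplied by the caller.

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient

async def show_operations(subscription_id):
    credential = DefaultAzureCredential()
    async with ResourceManagementClient(credential, subscription_id) as client:
        # AsyncItemPaged transparently follows next_link between pages
        async for operation in client.operations.list():
            print(operation.name)
    await credential.close()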
@distributed_trace_async async def begin_delete_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]: 'Deletes a deployment from the deployment history.\n\n A template deployment that is currently running cannot be deleted. Deleting a template\n deployment removes the associated deployment operations. This is an asynchronous operation that\n returns a status of 202 until the template deployment is successfully deleted. The Location\n response header contains the URI that is used to obtain the status of the process. While the\n process is running, a call to the URI in the Location header returns a status of 202. When the\n process finishes, the URI in the Location header returns a status of 204 on success. If the\n asynchronous request failed, the URI in the Location header returns an error-level status code.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for\n this operation to not poll, or pass in your own initialized polling object for a personal\n polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_at_scope_initial(scope=scope, deployment_name=deployment_name, api_version=api_version, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
4,929,680,912,637,127,000
Deletes a deployment from the deployment history. A template deployment that is currently running cannot be deleted. Deleting a template deployment removes the associated deployment operations. This is an asynchronous operation that returns a status of 202 until the template deployment is successfully deleted. The Location response header contains the URI that is used to obtain the status of the process. While the process is running, a call to the URI in the Location header returns a status of 202. When the process finishes, the URI in the Location header returns a status of 204 on success. If the asynchronous request failed, the URI in the Location header returns an error-level status code. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
begin_delete_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def begin_delete_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]: 'Deletes a deployment from the deployment history.\n\n A template deployment that is currently running cannot be deleted. Deleting a template\n deployment removes the associated deployment operations. This is an asynchronous operation that\n returns a status of 202 until the template deployment is successfully deleted. The Location\n response header contains the URI that is used to obtain the status of the process. While the\n process is running, a call to the URI in the Location header returns a status of 202. When the\n process finishes, the URI in the Location header returns a status of 204 on success. If the\n asynchronous request failed, the URI in the Location header returns an error-level status code.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for\n this operation to not poll, or pass in your own initialized polling object for a personal\n polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_at_scope_initial(scope=scope, deployment_name=deployment_name, api_version=api_version, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace_async async def check_existence_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> bool: 'Checks whether the deployment exists.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: bool, or the result of cls(response)\n :rtype: bool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_check_existence_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.check_existence_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [204, 404]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) return (200 <= response.status_code <= 299)
-7,118,669,282,943,952,000
Checks whether the deployment exists. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: bool, or the result of cls(response) :rtype: bool :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
check_existence_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def check_existence_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> bool: 'Checks whether the deployment exists.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: bool, or the result of cls(response)\n :rtype: bool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_check_existence_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.check_existence_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [204, 404]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) return (200 <= response.status_code <= 299)
@distributed_trace_async async def begin_create_or_update_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> AsyncLROPoller['_models.DeploymentExtended']: 'Deploys resources at a given scope.\n\n You can provide the template and parameters directly in the request or link to JSON files.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :param parameters: Additional parameters supplied to the operation.\n :type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for\n this operation to not poll, or pass in your own initialized polling object for a personal\n polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of\n cls(response)\n :rtype:\n ~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') content_type = kwargs.pop('content_type', 'application/json') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_at_scope_initial(scope=scope, deployment_name=deployment_name, parameters=parameters, api_version=api_version, content_type=content_type, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('DeploymentExtended', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
3,362,381,667,887,774,700
Deploys resources at a given scope. You can provide the template and parameters directly in the request or link to JSON files. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :param parameters: Additional parameters supplied to the operation. :type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended] :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
begin_create_or_update_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def begin_create_or_update_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> AsyncLROPoller['_models.DeploymentExtended']: 'Deploys resources at a given scope.\n\n You can provide the template and parameters directly in the request or link to JSON files.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :param parameters: Additional parameters supplied to the operation.\n :type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for\n this operation to not poll, or pass in your own initialized polling object for a personal\n polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of\n cls(response)\n :rtype:\n ~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' api_version = kwargs.pop('api_version', '2019-08-01') content_type = kwargs.pop('content_type', 'application/json') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_at_scope_initial(scope=scope, deployment_name=deployment_name, parameters=parameters, api_version=api_version, content_type=content_type, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('DeploymentExtended', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
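A sketch of driving the long-running deployment operation above through its AsyncLROPoller; `client` is the async ResourceManagementClient from the previous sketch, and the scope, deployment name, and template body are placeholders.

from azure.mgmt.resource.resources.v2019_08_01.models import Deployment, DeploymentProperties

async def deploy_at_scope(client, scope, template_body):
    deployment = Deployment(
        properties=DeploymentProperties(mode="Incremental", template=template_body))
    poller = await client.deployments.begin_create_or_update_at_scope(
        scope, "example-deployment", deployment)
    result = await poller.result()   # waits for the LRO to finish; a DeploymentExtended
    return result.properties.provisioning_state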
@distributed_trace_async async def get_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> '_models.DeploymentExtended': 'Gets a deployment.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DeploymentExtended, or the result of cls(response)\n :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_get_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.get_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('DeploymentExtended', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
-2,818,057,004,809,382,000
Gets a deployment. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: DeploymentExtended, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
get_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def get_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> '_models.DeploymentExtended': cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_get_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.get_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('DeploymentExtended', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
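A hedged sketch of reading a deployment back at the same scope; it reuses a client constructed as in the sketch above, and the attribute names on DeploymentExtended.properties (provisioning_state, timestamp, outputs) are the ones this models package is expected to expose.

from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient

async def show_deployment(client: ResourceManagementClient, scope: str, name: str) -> None:
    # HTTP 200 deserializes into DeploymentExtended; anything else raises HttpResponseError.
    deployment = await client.deployments.get_at_scope(scope, name)
    props = deployment.properties
    print(deployment.name, props.provisioning_state, props.timestamp)
    if props.outputs:
        print(props.outputs)  # template outputs, if the deployment declared any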
@distributed_trace_async async def cancel_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> None: 'Cancels a currently running template deployment.\n\n You can cancel a deployment only if the provisioningState is Accepted or Running. After the\n deployment is canceled, the provisioningState is set to Canceled. Canceling a template\n deployment stops the currently running template deployment and leaves the resources partially\n deployed.\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_cancel_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.cancel_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [204]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
5,202,299,697,922,900,000
Cancels a currently running template deployment. You can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running template deployment and leaves the resources partially deployed. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
cancel_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def cancel_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> None: cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') request = build_deployments_cancel_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, template_url=self.cancel_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [204]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
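A sketch of cancelling an in-flight deployment. As the docstring notes, cancel only succeeds while provisioningState is Accepted or Running, so this version checks the current state first and treats a failed cancel as a race rather than a fatal error; the check-then-cancel pattern is an assumption, not part of the source.

from azure.core.exceptions import HttpResponseError
from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient

async def cancel_if_running(client: ResourceManagementClient, scope: str, name: str) -> None:
    current = await client.deployments.get_at_scope(scope, name)
    if current.properties.provisioning_state in ("Accepted", "Running"):
        try:
            # Returns None on HTTP 204; already-created resources are left behind.
            await client.deployments.cancel_at_scope(scope, name)
        except HttpResponseError as exc:
            # The deployment may have finished between the status check and the cancel call.
            print(f"cancel failed: {exc.message}")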
@distributed_trace_async async def validate_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> '_models.DeploymentValidateResult': 'Validates whether the specified template is syntactically correct and will be accepted by Azure\n Resource Manager..\n\n :param scope: The scope of a deployment.\n :type scope: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :param parameters: Parameters to validate.\n :type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DeploymentValidateResult, or the result of cls(response)\n :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') content_type = kwargs.pop('content_type', 'application/json') _json = self._serialize.body(parameters, 'Deployment') request = build_deployments_validate_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, content_type=content_type, json=_json, template_url=self.validate_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200, 400]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if (response.status_code == 200): deserialized = self._deserialize('DeploymentValidateResult', pipeline_response) if (response.status_code == 400): deserialized = self._deserialize('DeploymentValidateResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
-5,833,968,429,805,891,000
Validates whether the specified template is syntactically correct and will be accepted by Azure Resource Manager. :param scope: The scope of a deployment. :type scope: str :param deployment_name: The name of the deployment. :type deployment_name: str :param parameters: Parameters to validate. :type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment :keyword callable cls: A custom type or function that will be passed the direct response :return: DeploymentValidateResult, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult :raises: ~azure.core.exceptions.HttpResponseError
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
validate_at_scope
AikoBB/azure-sdk-for-python
python
@distributed_trace_async async def validate_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> '_models.DeploymentValidateResult': cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', '2019-08-01') content_type = kwargs.pop('content_type', 'application/json') _json = self._serialize.body(parameters, 'Deployment') request = build_deployments_validate_at_scope_request(scope=scope, deployment_name=deployment_name, api_version=api_version, content_type=content_type, json=_json, template_url=self.validate_at_scope.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200, 400]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if (response.status_code == 200): deserialized = self._deserialize('DeploymentValidateResult', pipeline_response) if (response.status_code == 400): deserialized = self._deserialize('DeploymentValidateResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
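A sketch of pre-flight validation using the same Deployment payload that would be sent to begin_create_or_update_at_scope. The operation above deserializes a DeploymentValidateResult for both the 200 and 400 responses instead of raising on 400, so the sketch distinguishes the two by checking the result's error attribute (an assumption about the model's shape).

from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient
from azure.mgmt.resource.resources.v2019_08_01.models import Deployment

async def validate_first(client: ResourceManagementClient, scope: str, name: str, deployment: Deployment) -> bool:
    result = await client.deployments.validate_at_scope(scope, name, deployment)
    if result.error is not None:
        # 400 path: ARM rejected the template; surface the error details.
        print(result.error.code, result.error.message)
        return False
    return True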