Dataset schema (column name, type, value or length range):

    Unnamed: 0                  int64           0 to 10k
    repository_name             stringlengths   7 to 54
    func_path_in_repository     stringlengths   5 to 223
    func_name                   stringlengths   1 to 134
    whole_func_string           stringlengths   100 to 30.3k
    language                    stringclasses   1 value
    func_code_string            stringlengths   100 to 30.3k
    func_code_tokens            stringlengths   138 to 33.2k
    func_documentation_string   stringlengths   1 to 15k
    func_documentation_tokens   stringlengths   5 to 5.14k
    split_name                  stringclasses   1 value
    func_code_url               stringlengths   91 to 315
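A minimal sketch of reading rows of this shape, assuming the data is available as JSON Lines with the column names above as keys (the file name is a placeholder, not the real source):

    import json

    # Placeholder file name; any file with one JSON object per row works.
    with open('code_docstring_rows.jsonl') as handle:
        for line in handle:
            row = json.loads(line)
            # Each row pairs a function's source with its documentation.
            print(row['repository_name'], row['func_name'])
            print(row['func_documentation_string'][:80])
            break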
4,800
crypto101/merlyn
merlyn/auth.py
UserMixin.user
def user(self):
    """The current user.

    This property is cached in the ``_user`` attribute.
    """
    if self._user is not None:
        return self._user

    cert = self.transport.getPeerCertificate()
    self._user = user = userForCert(self.store, cert)
    return user
python
['def', 'user', '(', 'self', ')', ':', 'if', 'self', '.', '_user', 'is', 'not', 'None', ':', 'return', 'self', '.', '_user', 'cert', '=', 'self', '.', 'transport', '.', 'getPeerCertificate', '(', ')', 'self', '.', '_user', '=', 'user', '=', 'userForCert', '(', 'self', '.', 'store', ',', 'cert', ')', 'return', 'user']
The current user. This property is cached in the ``_user`` attribute.
['The', 'current', 'user', '.']
train
https://github.com/crypto101/merlyn/blob/0f313210b9ea5385cc2e5b725dc766df9dc3284d/merlyn/auth.py#L30-L40
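The function above is a standard cache-on-first-access accessor. A minimal standalone sketch of the same pattern (the names are illustrative, not merlyn's API):

    class CachedUser:
        def __init__(self, lookup):
            self._lookup = lookup
            self._user = None              # cache slot, empty until first access

        @property
        def user(self):
            if self._user is not None:     # cached: skip the expensive lookup
                return self._user
            self._user = self._lookup()    # compute once, then reuse
            return self._user

    holder = CachedUser(lambda: 'alice')
    print(holder.user, holder.user)        # the lookup runs only the first time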
4,801
sdispater/poetry
poetry/mixology/version_solver.py
VersionSolver._resolve_conflict
def _resolve_conflict(
    self, incompatibility
):  # type: (Incompatibility) -> Incompatibility
    """
    Given an incompatibility that's satisfied by _solution,
    The `conflict resolution`_ constructs a new incompatibility that encapsulates the root
    cause of the conflict and backtracks _solution until the new
    incompatibility will allow _propagate() to deduce new assignments.

    Adds the new incompatibility to _incompatibilities and returns it.

    .. _conflict resolution: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
    """
    self._log("conflict: {}".format(incompatibility))

    new_incompatibility = False
    while not incompatibility.is_failure():
        # The term in incompatibility.terms that was most recently satisfied by
        # _solution.
        most_recent_term = None

        # The earliest assignment in _solution such that incompatibility is
        # satisfied by _solution up to and including this assignment.
        most_recent_satisfier = None

        # The difference between most_recent_satisfier and most_recent_term;
        # that is, the versions that are allowed by most_recent_satisfier and not
        # by most_recent_term. This is None if most_recent_satisfier totally
        # satisfies most_recent_term.
        difference = None

        # The decision level of the earliest assignment in _solution *before*
        # most_recent_satisfier such that incompatibility is satisfied by
        # _solution up to and including this assignment plus
        # most_recent_satisfier.
        #
        # Decision level 1 is the level where the root package was selected. It's
        # safe to go back to decision level 0, but stopping at 1 tends to produce
        # better error messages, because references to the root package end up
        # closer to the final conclusion that no solution exists.
        previous_satisfier_level = 1

        for term in incompatibility.terms:
            satisfier = self._solution.satisfier(term)

            if most_recent_satisfier is None:
                most_recent_term = term
                most_recent_satisfier = satisfier
            elif most_recent_satisfier.index < satisfier.index:
                previous_satisfier_level = max(
                    previous_satisfier_level, most_recent_satisfier.decision_level
                )
                most_recent_term = term
                most_recent_satisfier = satisfier
                difference = None
            else:
                previous_satisfier_level = max(
                    previous_satisfier_level, satisfier.decision_level
                )

            if most_recent_term == term:
                # If most_recent_satisfier doesn't satisfy most_recent_term on its
                # own, then the next-most-recent satisfier may be the one that
                # satisfies the remainder.
                difference = most_recent_satisfier.difference(most_recent_term)
                if difference is not None:
                    previous_satisfier_level = max(
                        previous_satisfier_level,
                        self._solution.satisfier(difference.inverse).decision_level,
                    )

        # If most_recent_identifier is the only satisfier left at its decision
        # level, or if it has no cause (indicating that it's a decision rather
        # than a derivation), then incompatibility is the root cause. We then
        # backjump to previous_satisfier_level, where incompatibility is
        # guaranteed to allow _propagate to produce more assignments.
        if (
            previous_satisfier_level < most_recent_satisfier.decision_level
            or most_recent_satisfier.cause is None
        ):
            self._solution.backtrack(previous_satisfier_level)
            if new_incompatibility:
                self._add_incompatibility(incompatibility)

            return incompatibility

        # Create a new incompatibility by combining incompatibility with the
        # incompatibility that caused most_recent_satisfier to be assigned. Doing
        # this iteratively constructs an incompatibility that's guaranteed to be
        # true (that is, we know for sure no solution will satisfy the
        # incompatibility) while also approximating the intuitive notion of the
        # "root cause" of the conflict.
        new_terms = []
        for term in incompatibility.terms:
            if term != most_recent_term:
                new_terms.append(term)

        for term in most_recent_satisfier.cause.terms:
            if term.dependency != most_recent_satisfier.dependency:
                new_terms.append(term)

        # The most_recent_satisfier may not satisfy most_recent_term on its own
        # if there are a collection of constraints on most_recent_term that
        # only satisfy it together. For example, if most_recent_term is
        # `foo ^1.0.0` and _solution contains `[foo >=1.0.0,
        # foo <2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even
        # though it doesn't totally satisfy `foo ^1.0.0`.
        #
        # In this case, we add `not (most_recent_satisfier \ most_recent_term)` to
        # the incompatibility as well, See the `algorithm documentation`_ for
        # details.
        #
        # .. _algorithm documentation: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
        if difference is not None:
            new_terms.append(difference.inverse)

        incompatibility = Incompatibility(
            new_terms, ConflictCause(incompatibility, most_recent_satisfier.cause)
        )
        new_incompatibility = True

        partially = "" if difference is None else " partially"
        bang = "!"
        self._log(
            "{} {} is{} satisfied by {}".format(
                bang, most_recent_term, partially, most_recent_satisfier
            )
        )
        self._log(
            '{} which is caused by "{}"'.format(bang, most_recent_satisfier.cause)
        )
        self._log("{} thus: {}".format(bang, incompatibility))

    raise SolveFailure(incompatibility)
python
['def', '_resolve_conflict', '(', 'self', ',', 'incompatibility', ')', ':', '# type: (Incompatibility) -> Incompatibility', 'self', '.', '_log', '(', '"conflict: {}"', '.', 'format', '(', 'incompatibility', ')', ')', 'new_incompatibility', '=', 'False', 'while', 'not', 'incompatibility', '.', 'is_failure', '(', ')', ':', '# The term in incompatibility.terms that was most recently satisfied by', '# _solution.', 'most_recent_term', '=', 'None', '# The earliest assignment in _solution such that incompatibility is', '# satisfied by _solution up to and including this assignment.', 'most_recent_satisfier', '=', 'None', '# The difference between most_recent_satisfier and most_recent_term;', '# that is, the versions that are allowed by most_recent_satisfier and not', '# by most_recent_term. This is None if most_recent_satisfier totally', '# satisfies most_recent_term.', 'difference', '=', 'None', '# The decision level of the earliest assignment in _solution *before*', '# most_recent_satisfier such that incompatibility is satisfied by', '# _solution up to and including this assignment plus', '# most_recent_satisfier.', '#', "# Decision level 1 is the level where the root package was selected. It's", '# safe to go back to decision level 0, but stopping at 1 tends to produce', '# better error messages, because references to the root package end up', '# closer to the final conclusion that no solution exists.', 'previous_satisfier_level', '=', '1', 'for', 'term', 'in', 'incompatibility', '.', 'terms', ':', 'satisfier', '=', 'self', '.', '_solution', '.', 'satisfier', '(', 'term', ')', 'if', 'most_recent_satisfier', 'is', 'None', ':', 'most_recent_term', '=', 'term', 'most_recent_satisfier', '=', 'satisfier', 'elif', 'most_recent_satisfier', '.', 'index', '<', 'satisfier', '.', 'index', ':', 'previous_satisfier_level', '=', 'max', '(', 'previous_satisfier_level', ',', 'most_recent_satisfier', '.', 'decision_level', ')', 'most_recent_term', '=', 'term', 'most_recent_satisfier', '=', 'satisfier', 'difference', '=', 'None', 'else', ':', 'previous_satisfier_level', '=', 'max', '(', 'previous_satisfier_level', ',', 'satisfier', '.', 'decision_level', ')', 'if', 'most_recent_term', '==', 'term', ':', "# If most_recent_satisfier doesn't satisfy most_recent_term on its", '# own, then the next-most-recent satisfier may be the one that', '# satisfies the remainder.', 'difference', '=', 'most_recent_satisfier', '.', 'difference', '(', 'most_recent_term', ')', 'if', 'difference', 'is', 'not', 'None', ':', 'previous_satisfier_level', '=', 'max', '(', 'previous_satisfier_level', ',', 'self', '.', '_solution', '.', 'satisfier', '(', 'difference', '.', 'inverse', ')', '.', 'decision_level', ',', ')', '# If most_recent_identifier is the only satisfier left at its decision', "# level, or if it has no cause (indicating that it's a decision rather", '# than a derivation), then incompatibility is the root cause. We then', '# backjump to previous_satisfier_level, where incompatibility is', '# guaranteed to allow _propagate to produce more assignments.', 'if', '(', 'previous_satisfier_level', '<', 'most_recent_satisfier', '.', 'decision_level', 'or', 'most_recent_satisfier', '.', 'cause', 'is', 'None', ')', ':', 'self', '.', '_solution', '.', 'backtrack', '(', 'previous_satisfier_level', ')', 'if', 'new_incompatibility', ':', 'self', '.', '_add_incompatibility', '(', 'incompatibility', ')', 'return', 'incompatibility', '# Create a new incompatibility by combining incompatibility with the', '# incompatibility that caused most_recent_satisfier to be assigned. Doing', "# this iteratively constructs an incompatibility that's guaranteed to be", '# true (that is, we know for sure no solution will satisfy the', '# incompatibility) while also approximating the intuitive notion of the', '# "root cause" of the conflict.', 'new_terms', '=', '[', ']', 'for', 'term', 'in', 'incompatibility', '.', 'terms', ':', 'if', 'term', '!=', 'most_recent_term', ':', 'new_terms', '.', 'append', '(', 'term', ')', 'for', 'term', 'in', 'most_recent_satisfier', '.', 'cause', '.', 'terms', ':', 'if', 'term', '.', 'dependency', '!=', 'most_recent_satisfier', '.', 'dependency', ':', 'new_terms', '.', 'append', '(', 'term', ')', '# The most_recent_satisfier may not satisfy most_recent_term on its own', '# if there are a collection of constraints on most_recent_term that', '# only satisfy it together. For example, if most_recent_term is', '# `foo\xa0^1.0.0` and _solution contains `[foo >=1.0.0,', '# foo\xa0<2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even', "# though it doesn't totally satisfy `foo ^1.0.0`.", '#', '# In this case, we add `not (most_recent_satisfier \\ most_recent_term)` to', '# the incompatibility as well, See the `algorithm documentation`_ for', '# details.', '#', '# .. _algorithm documentation: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution', 'if', 'difference', 'is', 'not', 'None', ':', 'new_terms', '.', 'append', '(', 'difference', '.', 'inverse', ')', 'incompatibility', '=', 'Incompatibility', '(', 'new_terms', ',', 'ConflictCause', '(', 'incompatibility', ',', 'most_recent_satisfier', '.', 'cause', ')', ')', 'new_incompatibility', '=', 'True', 'partially', '=', '""', 'if', 'difference', 'is', 'None', 'else', '" partially"', 'bang', '=', '"!"', 'self', '.', '_log', '(', '"{} {} is{} satisfied by {}"', '.', 'format', '(', 'bang', ',', 'most_recent_term', ',', 'partially', ',', 'most_recent_satisfier', ')', ')', 'self', '.', '_log', '(', '\'{} which is caused by "{}"\'', '.', 'format', '(', 'bang', ',', 'most_recent_satisfier', '.', 'cause', ')', ')', 'self', '.', '_log', '(', '"{} thus: {}"', '.', 'format', '(', 'bang', ',', 'incompatibility', ')', ')', 'raise', 'SolveFailure', '(', 'incompatibility', ')']
Given an incompatibility that's satisfied by _solution, The `conflict resolution`_ constructs a new incompatibility that encapsulates the root cause of the conflict and backtracks _solution until the new incompatibility will allow _propagate() to deduce new assignments. Adds the new incompatibility to _incompatibilities and returns it. .. _conflict resolution: https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
['Given', 'an', 'incompatibility', 'that', 's', 'satisfied', 'by', '_solution', 'The', 'conflict', 'resolution', '_', 'constructs', 'a', 'new', 'incompatibility', 'that', 'encapsulates', 'the', 'root', 'cause', 'of', 'the', 'conflict', 'and', 'backtracks', '_solution', 'until', 'the', 'new', 'incompatibility', 'will', 'allow', '_propagate', '()', 'to', 'deduce', 'new', 'assignments', '.']
train
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/mixology/version_solver.py#L183-L316
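To make the new_terms construction step concrete, here is a toy trace of that merge with (package, constraint) tuples standing in for poetry's Term objects; every value below is invented for illustration:

    # Terms of the conflicting incompatibility and of the satisfier's cause.
    incompatibility_terms = [('root', 'any'), ('foo', '^1.0.0')]
    most_recent_term = ('foo', '^1.0.0')        # most recently satisfied term
    satisfier_package = 'foo'                   # package the satisfier assigned
    cause_terms = [('foo', '>=1.0.0'), ('bar', '^2.0.0')]

    # Keep every other term of the incompatibility, plus every cause term
    # about a different package, mirroring the two loops in the code above.
    new_terms = [t for t in incompatibility_terms if t != most_recent_term]
    new_terms += [t for t in cause_terms if t[0] != satisfier_package]
    print(new_terms)   # [('root', 'any'), ('bar', '^2.0.0')]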
4,802
storax/upme
src/upme/main.py
is_outdated
def is_outdated(dist, dep=False):
    """Return a dict with outdated distributions

    If the given distribution has dependencies, they are checked as well.

    :param dist: a distribution to check
    :type dist: :class:`pkg_resources.Distribution` | str
    :param dep: If True, also return all outdated dependencies.
                If False, only check given dist.
    :type dep:
    :returns: dictionary of all distributions that are outdated and are
              either dependencies of the given distribution or the
              distribution itself. Keys are the outdated distributions and
              values are the newest parsed versions.
    :rtype: dict of :class:`pkg_resources.Distribution`
    :raises: class:`pkg_resources.DistributionNotFound`
    """
    if dep:
        required = get_required(dist)
    else:
        required = set([dist])
    ListCommand = pip.commands['list']
    lc = ListCommand()
    options, args = lc.parse_args(['--outdated'])
    outdated = {}
    for d, raw_ver, parsed_ver in lc.find_packages_latests_versions(options):
        for r in required:
            if d.project_name == r.project_name and parsed_ver > r.parsed_version:
                outdated[r] = parsed_ver
    return outdated
python
['def', 'is_outdated', '(', 'dist', ',', 'dep', '=', 'False', ')', ':', 'if', 'dep', ':', 'required', '=', 'get_required', '(', 'dist', ')', 'else', ':', 'required', '=', 'set', '(', '[', 'dist', ']', ')', 'ListCommand', '=', 'pip', '.', 'commands', '[', "'list'", ']', 'lc', '=', 'ListCommand', '(', ')', 'options', ',', 'args', '=', 'lc', '.', 'parse_args', '(', '[', "'--outdated'", ']', ')', 'outdated', '=', '{', '}', 'for', 'd', ',', 'raw_ver', ',', 'parsed_ver', 'in', 'lc', '.', 'find_packages_latests_versions', '(', 'options', ')', ':', 'for', 'r', 'in', 'required', ':', 'if', 'd', '.', 'project_name', '==', 'r', '.', 'project_name', 'and', 'parsed_ver', '>', 'r', '.', 'parsed_version', ':', 'outdated', '[', 'r', ']', '=', 'parsed_ver', 'return', 'outdated']
Return a dict with outdated distributions If the given distribution has dependencies, they are checked as well. :param dist: a distribution to check :type dist: :class:`pkg_resources.Distribution` | str :param dep: If True, also return all outdated dependencies. If False, only check given dist. :type dep: :returns: dictionary of all distributions that are outdated and are either dependencies of the given distribution or the distribution itself. Keys are the outdated distributions and values are the newest parsed versions. :rtype: dict of :class:`pkg_resources.Distribution` :raises: class:`pkg_resources.DistributionNotFound`
['Return', 'a', 'dict', 'with', 'outdated', 'distributions']
train
https://github.com/storax/upme/blob/41c2d91f922691e31ff940f33b755d2cb64dfef8/src/upme/main.py#L32-L60
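Intended usage, per the docstring, would look like the sketch below; note the function relies on pip's old internal pip.commands API, which modern pip no longer exposes, so this is illustrative only:

    import pkg_resources

    dist = pkg_resources.get_distribution('requests')
    stale = is_outdated(dist, dep=True)          # also check dependencies
    for outdated_dist, newest in stale.items():
        print(outdated_dist.project_name, outdated_dist.version, '->', newest)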
4,803
MacHu-GWU/crawlib-project
crawlib/pipeline/rds/query_builder.py
unfinished
def unfinished(finished_status, update_interval, table, status_column, edit_at_column):
    """
    Create text sql statement query for sqlalchemy that getting all unfinished task.

    :param finished_status: int, status code that less than this
        will be considered as unfinished.
    :param update_interval: int, the record will be updated every x seconds.

    :return: sqlalchemy text sql statement.

    **中文文档**

    状态码小于某个值, 或者, 现在距离更新时间已经超过一定阈值.
    """
    sql = select([table]).where(
        or_(*[
            status_column < finished_status,
            edit_at_column < x_seconds_before_now(update_interval)
        ])
    )
    return sql
python
['def', 'unfinished', '(', 'finished_status', ',', 'update_interval', ',', 'table', ',', 'status_column', ',', 'edit_at_column', ')', ':', 'sql', '=', 'select', '(', '[', 'table', ']', ')', '.', 'where', '(', 'or_', '(', '*', '[', 'status_column', '<', 'finished_status', ',', 'edit_at_column', '<', 'x_seconds_before_now', '(', 'update_interval', ')', ']', ')', ')', 'return', 'sql']
Create text sql statement query for sqlalchemy that getting all unfinished task. :param finished_status: int, status code that less than this will be considered as unfinished. :param update_interval: int, the record will be updated every x seconds. :return: sqlalchemy text sql statement. **中文文档** 状态码小于某个值, 或者, 现在距离更新时间已经超过一定阈值.
['Create', 'text', 'sql', 'statement', 'query', 'for', 'sqlalchemy', 'that', 'getting', 'all', 'unfinished', 'task', '.']
train
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/pipeline/rds/query_builder.py#L51-L76
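A sketch of calling the builder against a SQLAlchemy Core table; the table and column names are invented, and x_seconds_before_now is assumed importable from the same crawlib module:

    from sqlalchemy import Column, DateTime, Integer, MetaData, Table

    metadata = MetaData()
    tasks = Table('tasks', metadata,
                  Column('status', Integer),
                  Column('edit_at', DateTime))

    # Rows whose status is below 50, or last edited over an hour ago.
    stmt = unfinished(finished_status=50, update_interval=3600,
                      table=tasks, status_column=tasks.c.status,
                      edit_at_column=tasks.c.edit_at)
    print(stmt)   # renders SELECT ... WHERE status < :p OR edit_at < :p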
4,804
thoughtworksarts/EmoPy
EmoPy/src/fermodel.py
FERModel._choose_model_from_target_emotions
def _choose_model_from_target_emotions(self):
    """
    Initializes pre-trained deep learning model for the set of target emotions supplied by user.
    """
    model_indices = [self.emotion_index_map[emotion] for emotion in self.target_emotions]
    sorted_indices = [str(idx) for idx in sorted(model_indices)]
    model_suffix = ''.join(sorted_indices)
    #Modify the path to choose the model file and the emotion map that you want to use
    model_file = 'models/conv_model_%s.hdf5' % model_suffix
    emotion_map_file = 'models/conv_emotion_map_%s.json' % model_suffix
    emotion_map = json.loads(open(resource_filename('EmoPy', emotion_map_file)).read())
    return load_model(resource_filename('EmoPy', model_file)), emotion_map
python
['def', '_choose_model_from_target_emotions', '(', 'self', ')', ':', 'model_indices', '=', '[', 'self', '.', 'emotion_index_map', '[', 'emotion', ']', 'for', 'emotion', 'in', 'self', '.', 'target_emotions', ']', 'sorted_indices', '=', '[', 'str', '(', 'idx', ')', 'for', 'idx', 'in', 'sorted', '(', 'model_indices', ')', ']', 'model_suffix', '=', "''", '.', 'join', '(', 'sorted_indices', ')', '#Modify the path to choose the model file and the emotion map that you want to use', 'model_file', '=', "'models/conv_model_%s.hdf5'", '%', 'model_suffix', 'emotion_map_file', '=', "'models/conv_emotion_map_%s.json'", '%', 'model_suffix', 'emotion_map', '=', 'json', '.', 'loads', '(', 'open', '(', 'resource_filename', '(', "'EmoPy'", ',', 'emotion_map_file', ')', ')', '.', 'read', '(', ')', ')', 'return', 'load_model', '(', 'resource_filename', '(', "'EmoPy'", ',', 'model_file', ')', ')', ',', 'emotion_map']
Initializes pre-trained deep learning model for the set of target emotions supplied by user.
['Initializes', 'pre', '-', 'trained', 'deep', 'learning', 'model', 'for', 'the', 'set', 'of', 'target', 'emotions', 'supplied', 'by', 'user', '.']
train
https://github.com/thoughtworksarts/EmoPy/blob/a0ab97b3719ebe0a9de9bfc5adae5e46c9b77fd7/EmoPy/src/fermodel.py#L89-L100
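To make the file-name scheme concrete, suppose the user's target emotions map to indices 0 and 2 (the mapping values here are hypothetical):

    emotion_index_map = {'anger': 0, 'fear': 2}     # hypothetical mapping
    target_emotions = ['fear', 'anger']

    indices = sorted(emotion_index_map[e] for e in target_emotions)
    suffix = ''.join(str(i) for i in indices)
    print('models/conv_model_%s.hdf5' % suffix)     # models/conv_model_02.hdf5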
4,805
evhub/coconut
coconut/compiler/compiler.py
Compiler.await_item_handle
def await_item_handle(self, original, loc, tokens):
    """Check for Python 3.5 await expression."""
    internal_assert(len(tokens) == 1, "invalid await statement tokens", tokens)
    if not self.target:
        self.make_err(
            CoconutTargetError,
            "await requires a specific target",
            original, loc,
            target="sys",
        )
    elif self.target_info >= (3, 5):
        return "await " + tokens[0]
    elif self.target_info >= (3, 3):
        return "(yield from " + tokens[0] + ")"
    else:
        return "(yield _coconut.asyncio.From(" + tokens[0] + "))"
python
['def', 'await_item_handle', '(', 'self', ',', 'original', ',', 'loc', ',', 'tokens', ')', ':', 'internal_assert', '(', 'len', '(', 'tokens', ')', '==', '1', ',', '"invalid await statement tokens"', ',', 'tokens', ')', 'if', 'not', 'self', '.', 'target', ':', 'self', '.', 'make_err', '(', 'CoconutTargetError', ',', '"await requires a specific target"', ',', 'original', ',', 'loc', ',', 'target', '=', '"sys"', ',', ')', 'elif', 'self', '.', 'target_info', '>=', '(', '3', ',', '5', ')', ':', 'return', '"await "', '+', 'tokens', '[', '0', ']', 'elif', 'self', '.', 'target_info', '>=', '(', '3', ',', '3', ')', ':', 'return', '"(yield from "', '+', 'tokens', '[', '0', ']', '+', '")"', 'else', ':', 'return', '"(yield _coconut.asyncio.From("', '+', 'tokens', '[', '0', ']', '+', '"))"']
Check for Python 3.5 await expression.
['Check', 'for', 'Python', '3', '.', '5', 'await', 'expression', '.']
train
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1771-L1786
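The three return branches pick a spelling of await for each compilation target; a direct trace of that dispatch (the expression 'x' stands in for real tokens):

    source = 'x'
    for target_info in [(3, 5), (3, 3), (2, 7)]:
        if target_info >= (3, 5):
            print('await ' + source)                          # native syntax
        elif target_info >= (3, 3):
            print('(yield from ' + source + ')')              # pre-3.5 coroutine
        else:
            print('(yield _coconut.asyncio.From(' + source + '))')  # oldest targets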
4,806
openstax/cnx-archive
cnxarchive/__init__.py
main
def main(global_config, **settings):
    """Main WSGI application factory."""
    initialize_sentry_integration()
    config = Configurator(settings=settings)
    declare_api_routes(config)
    declare_type_info(config)

    # allowing the pyramid templates to render rss and xml
    config.include('pyramid_jinja2')
    config.add_jinja2_renderer('.rss')
    config.add_jinja2_renderer('.xml')

    mandatory_settings = ['exports-directories', 'exports-allowable-types']
    for setting in mandatory_settings:
        if not settings.get(setting, None):
            raise ValueError('Missing {} config setting.'.format(setting))

    config.scan(ignore='.tests')
    config.include('cnxarchive.events.main')
    config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
    return config.make_wsgi_app()
python
['def', 'main', '(', 'global_config', ',', '*', '*', 'settings', ')', ':', 'initialize_sentry_integration', '(', ')', 'config', '=', 'Configurator', '(', 'settings', '=', 'settings', ')', 'declare_api_routes', '(', 'config', ')', 'declare_type_info', '(', 'config', ')', '# allowing the pyramid templates to render rss and xml', 'config', '.', 'include', '(', "'pyramid_jinja2'", ')', 'config', '.', 'add_jinja2_renderer', '(', "'.rss'", ')', 'config', '.', 'add_jinja2_renderer', '(', "'.xml'", ')', 'mandatory_settings', '=', '[', "'exports-directories'", ',', "'exports-allowable-types'", ']', 'for', 'setting', 'in', 'mandatory_settings', ':', 'if', 'not', 'settings', '.', 'get', '(', 'setting', ',', 'None', ')', ':', 'raise', 'ValueError', '(', "'Missing {} config setting.'", '.', 'format', '(', 'setting', ')', ')', 'config', '.', 'scan', '(', 'ignore', '=', "'.tests'", ')', 'config', '.', 'include', '(', "'cnxarchive.events.main'", ')', 'config', '.', 'add_tween', '(', "'cnxarchive.tweens.conditional_http_tween_factory'", ')', 'return', 'config', '.', 'make_wsgi_app', '(', ')']
Main WSGI application factory.
['Main', 'WSGI', 'application', 'factory', '.']
train
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L137-L159
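Since the factory validates two settings up front, a minimal in-process invocation needs at least those keys; the values below are placeholders, and a real deployment would supply the rest of the application's configuration as well:

    settings = {
        'exports-directories': '/var/lib/cnx/exports',      # placeholder
        'exports-allowable-types': 'pdf:application/pdf',   # placeholder
    }
    app = main({}, **settings)   # returns a WSGI callable on success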
4,807
pylover/khayyam
khayyam/jalali_date.py
JalaliDate.strptime
def strptime(cls, date_string, fmt):
    """
    This is opposite of the :py:meth:`khayyam.JalaliDate.strftime`,
    and used to parse date strings into date object.

    `ValueError` is raised if the date_string and format can’t be parsed by
    time.strptime() or if it returns a value which isn’t a time tuple.

    For a complete list of formatting directives, see :doc:`/directives`.

    :param date_string:
    :param fmt:
    :return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format
    :rtype: :py:class:`khayyam.JalaiDate`
    """
    # noinspection PyUnresolvedReferences
    result = cls.formatterfactory(fmt).parse(date_string)
    result = {k: v for k, v in result.items() if k in ('year', 'month', 'day')}
    return cls(**result)
python
['def', 'strptime', '(', 'cls', ',', 'date_string', ',', 'fmt', ')', ':', '# noinspection PyUnresolvedReferences', 'result', '=', 'cls', '.', 'formatterfactory', '(', 'fmt', ')', '.', 'parse', '(', 'date_string', ')', 'result', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'result', '.', 'items', '(', ')', 'if', 'k', 'in', '(', "'year'", ',', "'month'", ',', "'day'", ')', '}', 'return', 'cls', '(', '*', '*', 'result', ')']
This is opposite of the :py:meth:`khayyam.JalaliDate.strftime`, and used to parse date strings into date object. `ValueError` is raised if the date_string and format can’t be parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a complete list of formatting directives, see :doc:`/directives`. :param date_string: :param fmt: :return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format :rtype: :py:class:`khayyam.JalaiDate`
['This', 'is', 'opposite', 'of', 'the', ':', 'py', ':', 'meth', ':', 'khayyam', '.', 'JalaliDate', '.', 'strftime', 'and', 'used', 'to', 'parse', 'date', 'strings', 'into', 'date', 'object', '.']
train
https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L155-L173
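Assuming khayyam supports the usual %Y/%m/%d directives (its directives document has the full list), a parse would look like:

    from khayyam import JalaliDate

    d = JalaliDate.strptime('1394/03/24', '%Y/%m/%d')
    print(d.year, d.month, d.day)   # 1394 3 24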
4,808
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
demix1
def demix1(servo1, servo2, gain=0.5):
    '''de-mix a mixed servo output'''
    s1 = servo1 - 1500
    s2 = servo2 - 1500
    out1 = (s1+s2)*gain
    out2 = (s1-s2)*gain
    return out1+1500
python
['def', 'demix1', '(', 'servo1', ',', 'servo2', ',', 'gain', '=', '0.5', ')', ':', 's1', '=', 'servo1', '-', '1500', 's2', '=', 'servo2', '-', '1500', 'out1', '=', '(', 's1', '+', 's2', ')', '*', 'gain', 'out2', '=', '(', 's1', '-', 's2', ')', '*', 'gain', 'return', 'out1', '+', '1500']
de-mix a mixed servo output
['de', '-', 'mix', 'a', 'mixed', 'servo', 'output']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L708-L714
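A worked example: with servo1=1600 and servo2=1400, the offsets are s1=100 and s2=-100, so out1=(100-100)*0.5=0 and the call returns 1500 (only out1 is returned; out2 is computed but unused in this source):

    print(demix1(1600, 1400))   # 1500.0: the offsets cancel in the sum
    print(demix1(1600, 1600))   # 1600.0: (100+100)*0.5 + 1500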
4,809
JasonKessler/scattertext
scattertext/indexstore/IndexStoreFromDict.py
IndexStoreFromDict.build
def build(term_to_index_dict):
    '''
    Parameters
    ----------
    term_to_index_dict: term -> idx dictionary

    Returns
    -------
    IndexStore
    '''
    idxstore = IndexStore()
    idxstore._val2i = term_to_index_dict
    idxstore._next_i = len(term_to_index_dict)
    idxstore._i2val = [None for _ in range(idxstore._next_i)]
    for term, idx in idxstore._val2i.items():
        idxstore._i2val[idx] = term
    return idxstore
python
['def', 'build', '(', 'term_to_index_dict', ')', ':', 'idxstore', '=', 'IndexStore', '(', ')', 'idxstore', '.', '_val2i', '=', 'term_to_index_dict', 'idxstore', '.', '_next_i', '=', 'len', '(', 'term_to_index_dict', ')', 'idxstore', '.', '_i2val', '=', '[', 'None', 'for', '_', 'in', 'range', '(', 'idxstore', '.', '_next_i', ')', ']', 'for', 'term', ',', 'idx', 'in', 'idxstore', '.', '_val2i', '.', 'items', '(', ')', ':', 'idxstore', '.', '_i2val', '[', 'idx', ']', '=', 'term', 'return', 'idxstore']
Parameters ---------- term_to_index_dict: term -> idx dictionary Returns ------- IndexStore
['Parameters', '----------', 'term_to_index_dict', ':', 'term', '-', '>', 'idx', 'dictionary']
train
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/indexstore/IndexStoreFromDict.py#L6-L22
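Usage inverts the dictionary; note the indices must form a contiguous 0..n-1 range for the list construction to hold every term:

    store = IndexStoreFromDict.build({'cat': 0, 'dog': 1, 'fish': 2})
    # store._i2val is now ['cat', 'dog', 'fish'], so lookups go both ways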
4,810
rosenbrockc/fortpy
fortpy/elements.py
Executable.rt_update
def rt_update(self, statement, linenum, mode, xparser):
    """Uses the specified line parser to parse the given line.

    :arg statement: a string of lines that are part of a single statement.
    :arg linenum: the line number of the first line in the list relative to
      the entire module contents.
    arg mode: either 'insert', 'replace' or 'delete'
    :arg xparser: an instance of the executable parser from the real
      time update module's line parser.
    """
    section = self.find_section(self.module.charindex(linenum, 1))
    if section == "body":
        xparser.parse_line(statement, self, mode)
    elif section == "signature":
        if mode == "insert":
            xparser.parse_signature(statement, self)
python
['def', 'rt_update', '(', 'self', ',', 'statement', ',', 'linenum', ',', 'mode', ',', 'xparser', ')', ':', 'section', '=', 'self', '.', 'find_section', '(', 'self', '.', 'module', '.', 'charindex', '(', 'linenum', ',', '1', ')', ')', 'if', 'section', '==', '"body"', ':', 'xparser', '.', 'parse_line', '(', 'statement', ',', 'self', ',', 'mode', ')', 'elif', 'section', '==', '"signature"', ':', 'if', 'mode', '==', '"insert"', ':', 'xparser', '.', 'parse_signature', '(', 'statement', ',', 'self', ')']
Uses the specified line parser to parse the given line. :arg statement: a string of lines that are part of a single statement. :arg linenum: the line number of the first line in the list relative to the entire module contents. arg mode: either 'insert', 'replace' or 'delete' :arg xparser: an instance of the executable parser from the real time update module's line parser.
['Uses', 'the', 'specified', 'line', 'parser', 'to', 'parse', 'the', 'given', 'line', '.']
train
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L741-L757
4,811
DinoTools/python-overpy
overpy/__init__.py
OSMSAXHandler._handle_start_relation
def _handle_start_relation(self, attrs):
    """
    Handle opening relation element

    :param attrs: Attributes of the element
    :type attrs: Dict
    """
    self._curr = {
        'attributes': dict(attrs),
        'members': [],
        'rel_id': None,
        'tags': {}
    }
    if attrs.get('id', None) is not None:
        self._curr['rel_id'] = int(attrs['id'])
        del self._curr['attributes']['id']
python
['def', '_handle_start_relation', '(', 'self', ',', 'attrs', ')', ':', 'self', '.', '_curr', '=', '{', "'attributes'", ':', 'dict', '(', 'attrs', ')', ',', "'members'", ':', '[', ']', ',', "'rel_id'", ':', 'None', ',', "'tags'", ':', '{', '}', '}', 'if', 'attrs', '.', 'get', '(', "'id'", ',', 'None', ')', 'is', 'not', 'None', ':', 'self', '.', '_curr', '[', "'rel_id'", ']', '=', 'int', '(', 'attrs', '[', "'id'", ']', ')', 'del', 'self', '.', '_curr', '[', "'attributes'", ']', '[', "'id'", ']']
Handle opening relation element :param attrs: Attributes of the element :type attrs: Dict
['Handle', 'opening', 'relation', 'element']
train
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1567-L1582
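Hand-tracing the handler with SAX attributes {'id': '42', 'version': '3'} shows how the id is promoted out of the attribute dict:

    attrs = {'id': '42', 'version': '3'}
    # After _handle_start_relation(attrs), self._curr holds:
    expected = {
        'attributes': {'version': '3'},   # 'id' deleted after promotion
        'members': [],
        'rel_id': 42,                     # int(attrs['id'])
        'tags': {},
    }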
4,812
myint/unify
unify.py
open_with_encoding
def open_with_encoding(filename, encoding, mode='r'):
    """Return opened file with a specific encoding."""
    return io.open(filename, mode=mode, encoding=encoding,
                   newline='')
python
['def', 'open_with_encoding', '(', 'filename', ',', 'encoding', ',', 'mode', '=', "'r'", ')', ':', 'return', 'io', '.', 'open', '(', 'filename', ',', 'mode', '=', 'mode', ',', 'encoding', '=', 'encoding', ',', 'newline', '=', "''", ')']
Return opened file with a specific encoding.
['Return', 'opened', 'file', 'with', 'a', 'specific', 'encoding', '.']
train
https://github.com/myint/unify/blob/ae699f5980a715cadc4a2f07bf16d11083c59401/unify.py#L113-L116
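Typical use, mirroring how unify reads a source file (the filename is a placeholder):

    with open_with_encoding('example.py', encoding='utf-8') as source_file:
        contents = source_file.read()
    # newline='' preserves the file's original line endings on round trips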
4,813
bitesofcode/projex
projex/text.py
singularize
def singularize(word):
    """
    Converts the inputted word to the single form of it. This method works
    best if you use the inflect module, as it will just pass along the
    request to inflect.singular_noun. If you do not have that module, then a
    simpler and less impressive singularization technique will be used.

    :sa         https://pypi.python.org/pypi/inflect

    :param      word <str>

    :return     <str>
    """
    word = toUtf8(word)
    if inflect_engine:
        result = inflect_engine.singular_noun(word)
        if result is False:
            return word
        return result

    # go through the different plural expressions, searching for the
    # proper replacement
    if word.endswith('ies'):
        return word[:-3] + 'y'
    elif word.endswith('IES'):
        return word[:-3] + 'Y'
    elif word.endswith('s') or word.endswith('S'):
        return word[:-1]
    return word
python
['def', 'singularize', '(', 'word', ')', ':', 'word', '=', 'toUtf8', '(', 'word', ')', 'if', 'inflect_engine', ':', 'result', '=', 'inflect_engine', '.', 'singular_noun', '(', 'word', ')', 'if', 'result', 'is', 'False', ':', 'return', 'word', 'return', 'result', '# go through the different plural expressions, searching for the', '# proper replacement', 'if', 'word', '.', 'endswith', '(', "'ies'", ')', ':', 'return', 'word', '[', ':', '-', '3', ']', '+', "'y'", 'elif', 'word', '.', 'endswith', '(', "'IES'", ')', ':', 'return', 'word', '[', ':', '-', '3', ']', '+', "'Y'", 'elif', 'word', '.', 'endswith', '(', "'s'", ')', 'or', 'word', '.', 'endswith', '(', "'S'", ')', ':', 'return', 'word', '[', ':', '-', '1', ']', 'return', 'word']
Converts the inputted word to the single form of it. This method works best if you use the inflect module, as it will just pass along the request to inflect.singular_noun. If you do not have that module, then a simpler and less impressive singularization technique will be used. :sa https://pypi.python.org/pypi/inflect :param word <str> :return <str>
['Converts', 'the', 'inputted', 'word', 'to', 'the', 'single', 'form', 'of', 'it', '.', 'This', 'method', 'works', 'best', 'if', 'you', 'use', 'the', 'inflect', 'module', 'as', 'it', 'will', 'just', 'pass', 'along', 'the', 'request', 'to', 'inflect', '.', 'singular_noun', '.', 'If', 'you', 'do', 'not', 'have', 'that', 'module', 'then', 'a', 'simpler', 'and', 'less', 'impressive', 'singularization', 'technique', 'will', 'be', 'used', '.', ':', 'sa', 'https', ':', '//', 'pypi', '.', 'python', '.', 'org', '/', 'pypi', '/', 'inflect', ':', 'param', 'word', '<str', '>', ':', 'return', '<str', '>']
train
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/text.py#L629-L658
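Behaviour of the fallback branch (when the inflect module is not installed) on a few inputs:

    print(singularize('berries'))   # 'berry'  ('ies' -> 'y')
    print(singularize('CATS'))      # 'CAT'    (trailing 'S' dropped)
    print(singularize('bus'))       # 'bu'     (naive rule, a known limit)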
4,814
saltstack/salt
salt/modules/bigip.py
modify_node
def modify_node(hostname, username, password, name,
                connection_limit=None,
                description=None,
                dynamic_ratio=None,
                logging=None,
                monitor=None,
                rate_limit=None,
                ratio=None,
                session=None,
                state=None,
                trans_label=None):
    '''
    A function to connect to a bigip device and modify an existing node.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    name
        The name of the node to modify
    connection_limit
        [integer]
    description
        [string]
    dynamic_ratio
        [integer]
    logging
        [enabled | disabled]
    monitor
        [[name] | none | default]
    rate_limit
        [integer]
    ratio
        [integer]
    session
        [user-enabled | user-disabled]
    state
        [user-down | user-up ]
    trans_label
        The label of the transaction stored within the grain:
        ``bigip_f5_trans:<label>``

    CLI Example::

        salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
    '''
    params = {
        'connection-limit': connection_limit,
        'description': description,
        'dynamic-ratio': dynamic_ratio,
        'logging': logging,
        'monitor': monitor,
        'rate-limit': rate_limit,
        'ratio': ratio,
        'session': session,
        'state': state,
    }

    #build session
    bigip_session = _build_session(username, password, trans_label)

    #build payload
    payload = _loop_payload(params)
    payload['name'] = name

    #put to REST
    try:
        response = bigip_session.put(
            BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node/{name}'.format(name=name),
            data=salt.utils.json.dumps(payload)
        )
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)

    return _load_response(response)
python
['def', 'modify_node', '(', 'hostname', ',', 'username', ',', 'password', ',', 'name', ',', 'connection_limit', '=', 'None', ',', 'description', '=', 'None', ',', 'dynamic_ratio', '=', 'None', ',', 'logging', '=', 'None', ',', 'monitor', '=', 'None', ',', 'rate_limit', '=', 'None', ',', 'ratio', '=', 'None', ',', 'session', '=', 'None', ',', 'state', '=', 'None', ',', 'trans_label', '=', 'None', ')', ':', 'params', '=', '{', "'connection-limit'", ':', 'connection_limit', ',', "'description'", ':', 'description', ',', "'dynamic-ratio'", ':', 'dynamic_ratio', ',', "'logging'", ':', 'logging', ',', "'monitor'", ':', 'monitor', ',', "'rate-limit'", ':', 'rate_limit', ',', "'ratio'", ':', 'ratio', ',', "'session'", ':', 'session', ',', "'state'", ':', 'state', ',', '}', '#build session', 'bigip_session', '=', '_build_session', '(', 'username', ',', 'password', ',', 'trans_label', ')', '#build payload', 'payload', '=', '_loop_payload', '(', 'params', ')', 'payload', '[', "'name'", ']', '=', 'name', '#put to REST', 'try', ':', 'response', '=', 'bigip_session', '.', 'put', '(', 'BIG_IP_URL_BASE', '.', 'format', '(', 'host', '=', 'hostname', ')', '+', "'/ltm/node/{name}'", '.', 'format', '(', 'name', '=', 'name', ')', ',', 'data', '=', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'payload', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'ConnectionError', 'as', 'e', ':', 'return', '_load_connection_error', '(', 'hostname', ',', 'e', ')', 'return', '_load_response', '(', 'response', ')']
A function to connect to a bigip device and modify an existing node. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the node to modify connection_limit [integer] description [string] dynamic_ratio [integer] logging [enabled | disabled] monitor [[name] | none | default] rate_limit [integer] ratio [integer] session [user-enabled | user-disabled] state [user-down | user-up ] trans_label The label of the transaction stored within the grain: ``bigip_f5_trans:<label>`` CLI Example:: salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
['A', 'function', 'to', 'connect', 'to', 'a', 'bigip', 'device', 'and', 'modify', 'an', 'existing', 'node', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bigip.py#L472-L549
4,815
log2timeline/plaso
plaso/parsers/text_parser.py
PyParseJoinList
def PyParseJoinList(string, location, tokens):
    """Return a joined token from a list of tokens.

    This is a callback method for pyparsing setParseAction that modifies
    the returned token list to join all the elements in the list to a single
    token.

    Args:
      string (str): original string.
      location (int): location in the string where the match was made.
      tokens (list[str]): extracted tokens, where the string to be converted
          is stored.
    """
    join_list = []
    for token in tokens:
        try:
            join_list.append(str(token))
        except UnicodeDecodeError:
            join_list.append(repr(token))

    tokens[0] = ''.join(join_list)
    del tokens[1:]
python
['def', 'PyParseJoinList', '(', 'string', ',', 'location', ',', 'tokens', ')', ':', 'join_list', '=', '[', ']', 'for', 'token', 'in', 'tokens', ':', 'try', ':', 'join_list', '.', 'append', '(', 'str', '(', 'token', ')', ')', 'except', 'UnicodeDecodeError', ':', 'join_list', '.', 'append', '(', 'repr', '(', 'token', ')', ')', 'tokens', '[', '0', ']', '=', "''", '.', 'join', '(', 'join_list', ')', 'del', 'tokens', '[', '1', ':', ']']
Return a joined token from a list of tokens. This is a callback method for pyparsing setParseAction that modifies the returned token list to join all the elements in the list to a single token. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
['Return', 'a', 'joined', 'token', 'from', 'a', 'list', 'of', 'tokens', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L129-L150
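As the docstring notes, this is wired in through pyparsing's setParseAction; a minimal sketch with an invented grammar:

    import pyparsing

    words = pyparsing.OneOrMore(pyparsing.Word(pyparsing.alphas))
    words.setParseAction(PyParseJoinList)

    print(words.parseString('one two three')[0])   # 'onetwothree'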
4,816
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool.get_tagged
def get_tagged(self, tags):
    """
    Tag format.

    The format for the tags list is tuples for tags: mongos, config, shard,
    secondary tags of the form (tag, number), e.g. ('mongos', 2) which
    references the second mongos in the list. For all other tags, it is
    simply the string, e.g. 'primary'.
    """
    # if tags is a simple string, make it a list (note: tuples like
    # ('mongos', 2) must be in a surrounding list)
    if not hasattr(tags, '__iter__') and type(tags) == str:
        tags = [tags]

    nodes = set(self.cluster_tags['all'])

    for tag in tags:
        if re.match(r"\w+ \d{1,2}", tag):
            # special case for tuple tags: mongos, config, shard,
            # secondary. These can contain a number
            tag, number = tag.split()

            try:
                branch = self.cluster_tree[tag][int(number) - 1]
            except (IndexError, KeyError):
                continue

            if hasattr(branch, '__iter__'):
                subset = set(branch)
            else:
                subset = set([branch])
        else:
            # otherwise use tags dict to get the subset
            subset = set(self.cluster_tags[tag])

        nodes = nodes.intersection(subset)

    return nodes
python
['def', 'get_tagged', '(', 'self', ',', 'tags', ')', ':', '# if tags is a simple string, make it a list (note: tuples like', "# ('mongos', 2) must be in a surrounding list)", 'if', 'not', 'hasattr', '(', 'tags', ',', "'__iter__'", ')', 'and', 'type', '(', 'tags', ')', '==', 'str', ':', 'tags', '=', '[', 'tags', ']', 'nodes', '=', 'set', '(', 'self', '.', 'cluster_tags', '[', "'all'", ']', ')', 'for', 'tag', 'in', 'tags', ':', 'if', 're', '.', 'match', '(', 'r"\\w+ \\d{1,2}"', ',', 'tag', ')', ':', '# special case for tuple tags: mongos, config, shard,', '# secondary. These can contain a number', 'tag', ',', 'number', '=', 'tag', '.', 'split', '(', ')', 'try', ':', 'branch', '=', 'self', '.', 'cluster_tree', '[', 'tag', ']', '[', 'int', '(', 'number', ')', '-', '1', ']', 'except', '(', 'IndexError', ',', 'KeyError', ')', ':', 'continue', 'if', 'hasattr', '(', 'branch', ',', "'__iter__'", ')', ':', 'subset', '=', 'set', '(', 'branch', ')', 'else', ':', 'subset', '=', 'set', '(', '[', 'branch', ']', ')', 'else', ':', '# otherwise use tags dict to get the subset', 'subset', '=', 'set', '(', 'self', '.', 'cluster_tags', '[', 'tag', ']', ')', 'nodes', '=', 'nodes', '.', 'intersection', '(', 'subset', ')', 'return', 'nodes']
Tag format. The format for the tags list is tuples for tags: mongos, config, shard, secondary tags of the form (tag, number), e.g. ('mongos', 2) which references the second mongos in the list. For all other tags, it is simply the string, e.g. 'primary'.
['Tag', 'format', '.']
train
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1300-L1337
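Two details worth noting in the record above: the numbered "tuple" tags actually travel as space-separated strings such as 'mongos 2' (re.match would fail on a literal tuple), and the hasattr(tags, '__iter__') guard dates from Python 2, where strings were not iterable. A minimal standalone sketch of the intersection logic, with invented cluster data:

```python
import re

# invented stand-ins for the instance attributes used by get_tagged()
cluster_tags = {'all': {1, 2, 3, 4}, 'mongos': {1, 2}, 'primary': {3}}
cluster_tree = {'mongos': [1, 2]}

def get_tagged(tags):
    if isinstance(tags, str):                  # plain string -> one-tag list
        tags = [tags]
    nodes = set(cluster_tags['all'])
    for tag in tags:
        if re.match(r"\w+ \d{1,2}", tag):      # numbered tag, e.g. "mongos 2"
            name, number = tag.split()
            branch = cluster_tree[name][int(number) - 1]
            subset = set(branch) if hasattr(branch, '__iter__') else {branch}
        else:
            subset = set(cluster_tags[tag])
        nodes &= subset                        # intersect across all tags
    return nodes

print(get_tagged(['mongos 2']))   # {2}
print(get_tagged('primary'))      # {3}
```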
4,817
xflr6/concepts
concepts/definitions.py
Definition.add_property
def add_property(self, prop, objects=()): """Add a property to the definition and add ``objects`` as related.""" self._properties.add(prop) self._objects |= objects self._pairs.update((o, prop) for o in objects)
python
def add_property(self, prop, objects=()): """Add a property to the definition and add ``objects`` as related.""" self._properties.add(prop) self._objects |= objects self._pairs.update((o, prop) for o in objects)
['def', 'add_property', '(', 'self', ',', 'prop', ',', 'objects', '=', '(', ')', ')', ':', 'self', '.', '_properties', '.', 'add', '(', 'prop', ')', 'self', '.', '_objects', '|=', 'objects', 'self', '.', '_pairs', '.', 'update', '(', '(', 'o', ',', 'prop', ')', 'for', 'o', 'in', 'objects', ')']
Add a property to the definition and add ``objects`` as related.
['Add', 'a', 'property', 'to', 'the', 'definition', 'and', 'add', 'objects', 'as', 'related', '.']
train
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L356-L360
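A runnable sketch of the bookkeeping this method performs; the real Definition uses concepts' own ordered containers (which accept |= with a plain tuple), so the plain sets and the set() coercion here are simplifications:

```python
class Definition:
    def __init__(self):
        self._objects, self._properties, self._pairs = set(), set(), set()

    def add_property(self, prop, objects=()):
        """Add a property and relate it to the given objects."""
        self._properties.add(prop)
        self._objects |= set(objects)
        self._pairs.update((o, prop) for o in objects)

d = Definition()
d.add_property('red', objects=('apple', 'brick'))
print(sorted(d._pairs))   # [('apple', 'red'), ('brick', 'red')]
```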
4,818
PMEAL/OpenPNM
openpnm/algorithms/ReactiveTransport.py
ReactiveTransport.set_source
def set_source(self, propname, pores): r""" Applies a given source term to the specified pores Parameters ---------- propname : string The property name of the source term model to be applied pores : array_like The pore indices where the source term should be applied Notes ----- Source terms cannot be applied in pores where boundary conditions have already been set. Attempting to do so will result in an error being raised. """ locs = self.tomask(pores=pores) if (not np.all(np.isnan(self['pore.bc_value'][locs]))) or \ (not np.all(np.isnan(self['pore.bc_rate'][locs]))): raise Exception('Boundary conditions already present in given ' + 'pores, cannot also assign source terms') self[propname] = locs self.settings['sources'].append(propname)
python
def set_source(self, propname, pores): r""" Applies a given source term to the specified pores Parameters ---------- propname : string The property name of the source term model to be applied pores : array_like The pore indices where the source term should be applied Notes ----- Source terms cannot be applied in pores where boundary conditions have already been set. Attempting to do so will result in an error being raised. """ locs = self.tomask(pores=pores) if (not np.all(np.isnan(self['pore.bc_value'][locs]))) or \ (not np.all(np.isnan(self['pore.bc_rate'][locs]))): raise Exception('Boundary conditions already present in given ' + 'pores, cannot also assign source terms') self[propname] = locs self.settings['sources'].append(propname)
['def', 'set_source', '(', 'self', ',', 'propname', ',', 'pores', ')', ':', 'locs', '=', 'self', '.', 'tomask', '(', 'pores', '=', 'pores', ')', 'if', '(', 'not', 'np', '.', 'all', '(', 'np', '.', 'isnan', '(', 'self', '[', "'pore.bc_value'", ']', '[', 'locs', ']', ')', ')', ')', 'or', '(', 'not', 'np', '.', 'all', '(', 'np', '.', 'isnan', '(', 'self', '[', "'pore.bc_rate'", ']', '[', 'locs', ']', ')', ')', ')', ':', 'raise', 'Exception', '(', "'Boundary conditions already present in given '", '+', "'pores, cannot also assign source terms'", ')', 'self', '[', 'propname', ']', '=', 'locs', 'self', '.', 'settings', '[', "'sources'", ']', '.', 'append', '(', 'propname', ')']
r""" Applies a given source term to the specified pores Parameters ---------- propname : string The property name of the source term model to be applied pores : array_like The pore indices where the source term should be applied Notes ----- Source terms cannot be applied in pores where boundary conditions have already been set. Attempting to do so will result in an error being raised.
['Applies', 'a', 'given', 'source', 'term', 'to', 'the', 'specified', 'pores']
train
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/ReactiveTransport.py#L122-L147
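The guard at the heart of set_source, isolated with a plain NumPy array standing in for the 'pore.bc_value' entry (values invented):

```python
import numpy as np

bc_value = np.full(5, np.nan)
bc_value[2] = 1.0                      # a boundary condition already set in pore 2

def can_assign_source(pores, n_pores=5):
    locs = np.zeros(n_pores, dtype=bool)
    locs[pores] = True
    # sources are rejected wherever a BC value is already present
    return bool(np.all(np.isnan(bc_value[locs])))

print(can_assign_source([0, 1]))   # True
print(can_assign_source([2, 3]))   # False -> set_source() would raise here
```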
4,819
JoelBender/bacpypes
py25/bacpypes/tcp.py
StreamToPacket.confirmation
def confirmation(self, pdu): """Message going upstream.""" if _debug: StreamToPacket._debug("StreamToPacket.confirmation %r", pdu) # hack it up into chunks for packet in self.packetize(pdu, self.upstreamBuffer): self.response(packet)
python
def confirmation(self, pdu): """Message going upstream.""" if _debug: StreamToPacket._debug("StreamToPacket.confirmation %r", pdu) # hack it up into chunks for packet in self.packetize(pdu, self.upstreamBuffer): self.response(packet)
['def', 'confirmation', '(', 'self', ',', 'pdu', ')', ':', 'if', '_debug', ':', 'StreamToPacket', '.', '_debug', '(', '"StreamToPacket.confirmation %r"', ',', 'pdu', ')', '# hack it up into chunks', 'for', 'packet', 'in', 'self', '.', 'packetize', '(', 'pdu', ',', 'self', '.', 'upstreamBuffer', ')', ':', 'self', '.', 'response', '(', 'packet', ')']
Message going upstream.
['Message', 'going', 'upstream', '.']
train
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/tcp.py#L855-L861
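packetize() itself lives elsewhere in the module; a generic sketch of the "hack it up into chunks" idea, using a fixed packet size in place of whatever framing the real packetizer applies:

```python
def packetize(data, buffer, size=4):
    # split a byte stream into fixed-size packets; the real class keeps the
    # unconsumed remainder on self.upstreamBuffer between PDUs
    buffer += data
    while len(buffer) >= size:
        yield buffer[:size]
        buffer = buffer[size:]

for packet in packetize(b'abcdefghij', b''):
    print(packet)   # b'abcd', then b'efgh'; b'ij' would stay buffered
```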
4,820
apache/incubator-mxnet
example/ssd/detect/detector.py
Detector.detect_iter
def detect_iter(self, det_iter, show_timer=False): """ detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results """ num_images = det_iter._size if not isinstance(det_iter, mx.io.PrefetchingIter): det_iter = mx.io.PrefetchingIter(det_iter) start = timer() detections = self.mod.predict(det_iter).asnumpy() time_elapsed = timer() - start if show_timer: logging.info("Detection time for {} images: {:.4f} sec".format( num_images, time_elapsed)) result = Detector.filter_positive_detections(detections) return result
python
def detect_iter(self, det_iter, show_timer=False): """ detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results """ num_images = det_iter._size if not isinstance(det_iter, mx.io.PrefetchingIter): det_iter = mx.io.PrefetchingIter(det_iter) start = timer() detections = self.mod.predict(det_iter).asnumpy() time_elapsed = timer() - start if show_timer: logging.info("Detection time for {} images: {:.4f} sec".format( num_images, time_elapsed)) result = Detector.filter_positive_detections(detections) return result
['def', 'detect_iter', '(', 'self', ',', 'det_iter', ',', 'show_timer', '=', 'False', ')', ':', 'num_images', '=', 'det_iter', '.', '_size', 'if', 'not', 'isinstance', '(', 'det_iter', ',', 'mx', '.', 'io', '.', 'PrefetchingIter', ')', ':', 'det_iter', '=', 'mx', '.', 'io', '.', 'PrefetchingIter', '(', 'det_iter', ')', 'start', '=', 'timer', '(', ')', 'detections', '=', 'self', '.', 'mod', '.', 'predict', '(', 'det_iter', ')', '.', 'asnumpy', '(', ')', 'time_elapsed', '=', 'timer', '(', ')', '-', 'start', 'if', 'show_timer', ':', 'logging', '.', 'info', '(', '"Detection time for {} images: {:.4f} sec"', '.', 'format', '(', 'num_images', ',', 'time_elapsed', ')', ')', 'result', '=', 'Detector', '.', 'filter_positive_detections', '(', 'detections', ')', 'return', 'result']
detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results
['detect', 'all', 'images', 'in', 'iterator']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/detect/detector.py#L82-L107
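The timing idiom in isolation; timer in code like this is usually timeit.default_timer (an assumption, since the alias is defined elsewhere in the module), and the predict stand-in below is invented:

```python
from timeit import default_timer as timer
import logging

def run_with_timer(predict, batches, show_timer=True):
    start = timer()
    results = [predict(b) for b in batches]
    if show_timer:
        logging.info("Detection time for %d batches: %.4f sec",
                     len(batches), timer() - start)
    return results

print(run_with_timer(lambda b: b * 2, [1, 2, 3], show_timer=False))   # [2, 4, 6]
```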
4,821
baruwa-enterprise/BaruwaAPI
BaruwaAPI/resource.py
BaruwaAPIClient.create_org_smarthost
def create_org_smarthost(self, orgid, data): """Create an organization smarthost""" return self.api_call( ENDPOINTS['orgsmarthosts']['new'], dict(orgid=orgid), body=data)
python
def create_org_smarthost(self, orgid, data): """Create an organization smarthost""" return self.api_call( ENDPOINTS['orgsmarthosts']['new'], dict(orgid=orgid), body=data)
['def', 'create_org_smarthost', '(', 'self', ',', 'orgid', ',', 'data', ')', ':', 'return', 'self', '.', 'api_call', '(', 'ENDPOINTS', '[', "'orgsmarthosts'", ']', '[', "'new'", ']', ',', 'dict', '(', 'orgid', '=', 'orgid', ')', ',', 'body', '=', 'data', ')']
Create an organization smarthost
['Create', 'an', 'organization', 'smarthost']
train
https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L477-L482
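A hypothetical call; the constructor arguments and payload fields below are guesses for illustration, not taken from the record:

```python
from BaruwaAPI.resource import BaruwaAPIClient

api = BaruwaAPIClient('api-token', 'https://baruwa.example.com')  # args are a guess
smarthost = api.create_org_smarthost(
    orgid=9,
    data={'address': 'smtp.example.com', 'port': 25})             # invented payload
```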
4,822
anayjoshi/platypus
platypus/cfg/cfg.py
BasicBlock.add_instruction
def add_instruction (self, instr): """ Adds the argument instruction in the list of instructions of this basic block. Also updates the variable lists (used_variables, defined_variables) """ assert(isinstance(instr, Instruction)) self.instruction_list.append(instr) if instr.lhs not in self.defined_variables: if isinstance(instr.lhs, Variable): self.defined_variables.append(instr.lhs) if isinstance(instr, EqInstruction): if isinstance(instr.rhs, Variable): if instr.rhs not in self.used_variables: self.used_variables.append(instr.rhs) else: if isinstance(instr.rhs_1, Variable): if instr.rhs_1 not in self.used_variables: self.used_variables.append(instr.rhs_1) if isinstance(instr.rhs_2, Variable): if instr.rhs_2 not in self.used_variables: self.used_variables.append(instr.rhs_2)
python
def add_instruction (self, instr): """ Adds the argument instruction in the list of instructions of this basic block. Also updates the variable lists (used_variables, defined_variables) """ assert(isinstance(instr, Instruction)) self.instruction_list.append(instr) if instr.lhs not in self.defined_variables: if isinstance(instr.lhs, Variable): self.defined_variables.append(instr.lhs) if isinstance(instr, EqInstruction): if isinstance(instr.rhs, Variable): if instr.rhs not in self.used_variables: self.used_variables.append(instr.rhs) else: if isinstance(instr.rhs_1, Variable): if instr.rhs_1 not in self.used_variables: self.used_variables.append(instr.rhs_1) if isinstance(instr.rhs_2, Variable): if instr.rhs_2 not in self.used_variables: self.used_variables.append(instr.rhs_2)
['def', 'add_instruction', '(', 'self', ',', 'instr', ')', ':', 'assert', '(', 'isinstance', '(', 'instr', ',', 'Instruction', ')', ')', 'self', '.', 'instruction_list', '.', 'append', '(', 'instr', ')', 'if', 'instr', '.', 'lhs', 'not', 'in', 'self', '.', 'defined_variables', ':', 'if', 'isinstance', '(', 'instr', '.', 'lhs', ',', 'Variable', ')', ':', 'self', '.', 'defined_variables', '.', 'append', '(', 'instr', '.', 'lhs', ')', 'if', 'isinstance', '(', 'instr', ',', 'EqInstruction', ')', ':', 'if', 'isinstance', '(', 'instr', '.', 'rhs', ',', 'Variable', ')', ':', 'if', 'instr', '.', 'rhs', 'not', 'in', 'self', '.', 'used_variables', ':', 'self', '.', 'used_variables', '.', 'append', '(', 'instr', '.', 'rhs', ')', 'else', ':', 'if', 'isinstance', '(', 'instr', '.', 'rhs_1', ',', 'Variable', ')', ':', 'if', 'instr', '.', 'rhs_1', 'not', 'in', 'self', '.', 'used_variables', ':', 'self', '.', 'used_variables', '.', 'append', '(', 'instr', '.', 'rhs_1', ')', 'if', 'isinstance', '(', 'instr', '.', 'rhs_2', ',', 'Variable', ')', ':', 'if', 'instr', '.', 'rhs_2', 'not', 'in', 'self', '.', 'used_variables', ':', 'self', '.', 'used_variables', '.', 'append', '(', 'instr', '.', 'rhs_2', ')']
Adds the argument instruction in the list of instructions of this basic block. Also updates the variable lists (used_variables, defined_variables)
['Adds', 'the', 'argument', 'instruction', 'in', 'the', 'list', 'of', 'instructions', 'of', 'this', 'basic', 'block', '.']
train
https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/cfg.py#L169-L190
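A compact re-creation of the used/defined tracking with invented stand-ins for Variable and the instruction classes:

```python
class Variable(str):
    pass

class BinInstruction:              # x = y op z, standing in for Instruction
    def __init__(self, lhs, rhs_1, rhs_2):
        self.lhs, self.rhs_1, self.rhs_2 = lhs, rhs_1, rhs_2

defined_variables, used_variables = [], []
instr = BinInstruction(Variable('x'), Variable('y'), Variable('z'))

if isinstance(instr.lhs, Variable) and instr.lhs not in defined_variables:
    defined_variables.append(instr.lhs)
for operand in (instr.rhs_1, instr.rhs_2):
    if isinstance(operand, Variable) and operand not in used_variables:
        used_variables.append(operand)

print(defined_variables, used_variables)   # ['x'] ['y', 'z']
```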
4,823
Atomistica/atomistica
src/python/atomistica/join_calculators.py
JoinCalculators.set_atoms
def set_atoms(self, a): """Assign an atoms object.""" for c in self.calcs: if hasattr(c, "set_atoms"): c.set_atoms(a)
python
def set_atoms(self, a): """Assign an atoms object.""" for c in self.calcs: if hasattr(c, "set_atoms"): c.set_atoms(a)
['def', 'set_atoms', '(', 'self', ',', 'a', ')', ':', 'for', 'c', 'in', 'self', '.', 'calcs', ':', 'if', 'hasattr', '(', 'c', ',', '"set_atoms"', ')', ':', 'c', '.', 'set_atoms', '(', 'a', ')']
Assign an atoms object.
['Assign', 'an', 'atoms', 'object', '.']
train
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/join_calculators.py#L79-L83
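The hasattr fan-out in isolation; the Recorder stand-in is invented:

```python
class JoinCalculators:
    def __init__(self, calcs):
        self.calcs = calcs
    def set_atoms(self, a):
        for c in self.calcs:
            if hasattr(c, "set_atoms"):   # skip members without the hook
                c.set_atoms(a)

class Recorder:
    def set_atoms(self, a):
        print("got", a)

JoinCalculators([Recorder(), object()]).set_atoms("H2O")   # prints once
```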
4,824
sernst/cauldron
cauldron/docgen/parsing.py
parse_function
def parse_function( name: str, target: typing.Callable ) -> typing.Union[None, dict]: """ Parses the documentation for a function, which is specified by the name of the function and the function itself. :param name: Name of the function to parse :param target: The function to parse into documentation :return: A dictionary containing documentation for the specified function, or None if the target was not a function. """ if not hasattr(target, '__code__'): return None lines = get_doc_entries(target) docs = ' '.join(filter(lambda line: not line.startswith(':'), lines)) params = parse_params(target, lines) returns = parse_returns(target, lines) return dict( name=getattr(target, '__name__'), doc=docs, params=params, returns=returns )
python
def parse_function( name: str, target: typing.Callable ) -> typing.Union[None, dict]: """ Parses the documentation for a function, which is specified by the name of the function and the function itself. :param name: Name of the function to parse :param target: The function to parse into documentation :return: A dictionary containing documentation for the specified function, or None if the target was not a function. """ if not hasattr(target, '__code__'): return None lines = get_doc_entries(target) docs = ' '.join(filter(lambda line: not line.startswith(':'), lines)) params = parse_params(target, lines) returns = parse_returns(target, lines) return dict( name=getattr(target, '__name__'), doc=docs, params=params, returns=returns )
['def', 'parse_function', '(', 'name', ':', 'str', ',', 'target', ':', 'typing', '.', 'Callable', ')', '->', 'typing', '.', 'Union', '[', 'None', ',', 'dict', ']', ':', 'if', 'not', 'hasattr', '(', 'target', ',', "'__code__'", ')', ':', 'return', 'None', 'lines', '=', 'get_doc_entries', '(', 'target', ')', 'docs', '=', "' '", '.', 'join', '(', 'filter', '(', 'lambda', 'line', ':', 'not', 'line', '.', 'startswith', '(', "':'", ')', ',', 'lines', ')', ')', 'params', '=', 'parse_params', '(', 'target', ',', 'lines', ')', 'returns', '=', 'parse_returns', '(', 'target', ',', 'lines', ')', 'return', 'dict', '(', 'name', '=', 'getattr', '(', 'target', ',', "'__name__'", ')', ',', 'doc', '=', 'docs', ',', 'params', '=', 'params', ',', 'returns', '=', 'returns', ')']
Parses the documentation for a function, which is specified by the name of the function and the function itself. :param name: Name of the function to parse :param target: The function to parse into documentation :return: A dictionary containing documentation for the specified function, or None if the target was not a function.
['Parses', 'the', 'documentation', 'for', 'a', 'function', 'which', 'is', 'specified', 'by', 'the', 'name', 'of', 'the', 'function', 'and', 'the', 'function', 'itself', '.']
train
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/docgen/parsing.py#L68-L98
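Hypothetical usage; the printed result assumes the helper functions split and strip docstring lines the way the ':'-prefix filter above implies. Note that the name argument is accepted but the returned name comes from target.__name__:

```python
from cauldron.docgen import parsing

def add(a: int, b: int) -> int:
    """Adds two numbers.

    :param a: first operand
    :param b: second operand
    :return: the sum
    """
    return a + b

doc = parsing.parse_function('add', add)
print(doc['name'], '-', doc['doc'])   # expected: add - Adds two numbers.
```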
4,825
Fantomas42/django-blog-zinnia
zinnia/sitemaps.py
TagSitemap.cache_infos
def cache_infos(self, queryset): """ Cache the number of entries published and the last modification date under each tag. """ self.cache = {} for item in queryset: # If the sitemap is too slow, don't hesitate to do this : # self.cache[item.pk] = (item.count, None) self.cache[item.pk] = ( item.count, TaggedItem.objects.get_by_model( self.entries_qs, item)[0].last_update)
python
def cache_infos(self, queryset): """ Cache the number of entries published and the last modification date under each tag. """ self.cache = {} for item in queryset: # If the sitemap is too slow, don't hesitate to do this : # self.cache[item.pk] = (item.count, None) self.cache[item.pk] = ( item.count, TaggedItem.objects.get_by_model( self.entries_qs, item)[0].last_update)
['def', 'cache_infos', '(', 'self', ',', 'queryset', ')', ':', 'self', '.', 'cache', '=', '{', '}', 'for', 'item', 'in', 'queryset', ':', "# If the sitemap is too slow, don't hesitate to do this :", '# self.cache[item.pk] = (item.count, None)', 'self', '.', 'cache', '[', 'item', '.', 'pk', ']', '=', '(', 'item', '.', 'count', ',', 'TaggedItem', '.', 'objects', '.', 'get_by_model', '(', 'self', '.', 'entries_qs', ',', 'item', ')', '[', '0', ']', '.', 'last_update', ')']
Cache the number of entries published and the last modification date under each tag.
['Cache', 'the', 'number', 'of', 'entries', 'published', 'and', 'the', 'last', 'modification', 'date', 'under', 'each', 'tag', '.']
train
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L131-L142
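The shape of the cache this builds, sketched with invented values; the commented "fast" variant would store None in place of the date:

```python
import datetime

# tag primary key -> (published entry count, last modification date)
cache = {
    7: (12, datetime.datetime(2024, 1, 5)),
    9: (3, datetime.datetime(2023, 11, 30)),
}
count, last_update = cache[7]
```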
4,826
f3at/feat
src/feat/common/fiber.py
break_fiber
def break_fiber(depth=0): """After calling break_fiber, get_state() will return None.""" set_stack_var(SECTION_BOUNDARY_TAG, True, depth=depth+1) set_stack_var(SECTION_STATE_TAG, None, depth=depth+1)
python
def break_fiber(depth=0): """After calling break_fiber, get_state() will return None.""" set_stack_var(SECTION_BOUNDARY_TAG, True, depth=depth+1) set_stack_var(SECTION_STATE_TAG, None, depth=depth+1)
['def', 'break_fiber', '(', 'depth', '=', '0', ')', ':', 'set_stack_var', '(', 'SECTION_BOUNDARY_TAG', ',', 'True', ',', 'depth', '=', 'depth', '+', '1', ')', 'set_stack_var', '(', 'SECTION_STATE_TAG', ',', 'None', ',', 'depth', '=', 'depth', '+', '1', ')']
After calling break_fiber, get_state() will return None.
['After', 'calling', 'break_fiber', 'get_state', '()', 'will', 'return', 'None', '.']
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/fiber.py#L265-L268
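The contract from the docstring, as a hypothetical session:

```python
from feat.common import fiber

fiber.break_fiber()
assert fiber.get_state() is None   # the state the docstring promises
```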
4,827
UDST/urbansim
urbansim/models/regression.py
_FakeRegressionResults.predict
def predict(self, data): """ Predict new values by running data through the fit model. Parameters ---------- data : pandas.DataFrame Table with columns corresponding to the RHS of `model_expression`. Returns ------- predicted : ndarray Array of predicted values. """ with log_start_finish('_FakeRegressionResults prediction', logger): model_design = dmatrix( self._rhs, data=data, return_type='dataframe') return model_design.dot(self.params).values
python
def predict(self, data): """ Predict new values by running data through the fit model. Parameters ---------- data : pandas.DataFrame Table with columns corresponding to the RHS of `model_expression`. Returns ------- predicted : ndarray Array of predicted values. """ with log_start_finish('_FakeRegressionResults prediction', logger): model_design = dmatrix( self._rhs, data=data, return_type='dataframe') return model_design.dot(self.params).values
['def', 'predict', '(', 'self', ',', 'data', ')', ':', 'with', 'log_start_finish', '(', "'_FakeRegressionResults prediction'", ',', 'logger', ')', ':', 'model_design', '=', 'dmatrix', '(', 'self', '.', '_rhs', ',', 'data', '=', 'data', ',', 'return_type', '=', "'dataframe'", ')', 'return', 'model_design', '.', 'dot', '(', 'self', '.', 'params', ')', '.', 'values']
Predict new values by running data through the fit model. Parameters ---------- data : pandas.DataFrame Table with columns corresponding to the RHS of `model_expression`. Returns ------- predicted : ndarray Array of predicted values.
['Predict', 'new', 'values', 'by', 'running', 'data', 'through', 'the', 'fit', 'model', '.']
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L157-L175
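The dmatrix-then-dot mechanism in isolation (requires patsy and pandas); data and coefficients are invented:

```python
import pandas as pd
from patsy import dmatrix

data = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
design = dmatrix('x', data=data, return_type='dataframe')   # adds an Intercept column
params = pd.Series({'Intercept': 0.5, 'x': 2.0})
print(design.dot(params).values)   # [2.5 4.5 6.5]
```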
4,828
spacetelescope/drizzlepac
drizzlepac/hlautils/astrometric_utils.py
generate_sky_catalog
def generate_sky_catalog(image, refwcs, **kwargs): """Build source catalog from input image using photutils. This script borrows heavily from build_source_catalog. The catalog returned by this function includes sources found in all chips of the input image with the positions translated to the coordinate frame defined by the reference WCS `refwcs`. The sources will be - identified using photutils segmentation-based source finding code - ignore any input pixel which has been flagged as 'bad' in the DQ array, should a DQ array be found in the input HDUList. - classified as probable cosmic-rays (if enabled) using central_moments properties of each source, with these sources being removed from the catalog. Parameters ---------- image : ~astropy.io.fits.HDUList` Input image. refwcs : `~stwcs.wcsutils.HSTWCS` Definition of the reference frame WCS. dqname : str EXTNAME for the DQ array, if present, in the input image. output : bool Specify whether or not to write out a separate catalog file for all the sources found in each chip. Default: None (False) threshold : float, optional This parameter controls the S/N threshold used for identifying sources in the image relative to the background RMS in much the same way that the 'threshold' parameter in 'tweakreg' works. fwhm : float, optional FWHM (in pixels) of the expected sources from the image, comparable to the 'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to this value will be identified as sources in the catalog. Returns -------- master_cat : `~astropy.table.Table` Source catalog for all 'valid' sources identified from all chips of the input image with positions translated to the reference WCS coordinate frame. """ # Extract source catalogs for each chip source_cats = generate_source_catalog(image, **kwargs) # Build source catalog for entire image master_cat = None numSci = countExtn(image, extname='SCI') # if no refwcs specified, build one now... if refwcs is None: refwcs = build_reference_wcs([image]) for chip in range(numSci): chip += 1 # work with sources identified from this specific chip seg_tab_phot = source_cats[chip] if seg_tab_phot is None: continue # Convert pixel coordinates from this chip to sky coordinates chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip)) seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1) # Convert sky positions to pixel positions in the reference WCS frame seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1) seg_tab_phot['xcentroid'] = seg_xy_out[0] seg_tab_phot['ycentroid'] = seg_xy_out[1] if master_cat is None: master_cat = seg_tab_phot else: master_cat = vstack([master_cat, seg_tab_phot]) return master_cat
python
def generate_sky_catalog(image, refwcs, **kwargs): """Build source catalog from input image using photutils. This script borrows heavily from build_source_catalog. The catalog returned by this function includes sources found in all chips of the input image with the positions translated to the coordinate frame defined by the reference WCS `refwcs`. The sources will be - identified using photutils segmentation-based source finding code - ignore any input pixel which has been flagged as 'bad' in the DQ array, should a DQ array be found in the input HDUList. - classified as probable cosmic-rays (if enabled) using central_moments properties of each source, with these sources being removed from the catalog. Parameters ---------- image : ~astropy.io.fits.HDUList` Input image. refwcs : `~stwcs.wcsutils.HSTWCS` Definition of the reference frame WCS. dqname : str EXTNAME for the DQ array, if present, in the input image. output : bool Specify whether or not to write out a separate catalog file for all the sources found in each chip. Default: None (False) threshold : float, optional This parameter controls the S/N threshold used for identifying sources in the image relative to the background RMS in much the same way that the 'threshold' parameter in 'tweakreg' works. fwhm : float, optional FWHM (in pixels) of the expected sources from the image, comparable to the 'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to this value will be identified as sources in the catalog. Returns -------- master_cat : `~astropy.table.Table` Source catalog for all 'valid' sources identified from all chips of the input image with positions translated to the reference WCS coordinate frame. """ # Extract source catalogs for each chip source_cats = generate_source_catalog(image, **kwargs) # Build source catalog for entire image master_cat = None numSci = countExtn(image, extname='SCI') # if no refwcs specified, build one now... if refwcs is None: refwcs = build_reference_wcs([image]) for chip in range(numSci): chip += 1 # work with sources identified from this specific chip seg_tab_phot = source_cats[chip] if seg_tab_phot is None: continue # Convert pixel coordinates from this chip to sky coordinates chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip)) seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1) # Convert sky positions to pixel positions in the reference WCS frame seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1) seg_tab_phot['xcentroid'] = seg_xy_out[0] seg_tab_phot['ycentroid'] = seg_xy_out[1] if master_cat is None: master_cat = seg_tab_phot else: master_cat = vstack([master_cat, seg_tab_phot]) return master_cat
['def', 'generate_sky_catalog', '(', 'image', ',', 'refwcs', ',', '*', '*', 'kwargs', ')', ':', '# Extract source catalogs for each chip', 'source_cats', '=', 'generate_source_catalog', '(', 'image', ',', '*', '*', 'kwargs', ')', '# Build source catalog for entire image', 'master_cat', '=', 'None', 'numSci', '=', 'countExtn', '(', 'image', ',', 'extname', '=', "'SCI'", ')', '# if no refwcs specified, build one now...', 'if', 'refwcs', 'is', 'None', ':', 'refwcs', '=', 'build_reference_wcs', '(', '[', 'image', ']', ')', 'for', 'chip', 'in', 'range', '(', 'numSci', ')', ':', 'chip', '+=', '1', '# work with sources identified from this specific chip', 'seg_tab_phot', '=', 'source_cats', '[', 'chip', ']', 'if', 'seg_tab_phot', 'is', 'None', ':', 'continue', '# Convert pixel coordinates from this chip to sky coordinates', 'chip_wcs', '=', 'wcsutil', '.', 'HSTWCS', '(', 'image', ',', 'ext', '=', '(', "'sci'", ',', 'chip', ')', ')', 'seg_ra', ',', 'seg_dec', '=', 'chip_wcs', '.', 'all_pix2world', '(', 'seg_tab_phot', '[', "'xcentroid'", ']', ',', 'seg_tab_phot', '[', "'ycentroid'", ']', ',', '1', ')', '# Convert sky positions to pixel positions in the reference WCS frame', 'seg_xy_out', '=', 'refwcs', '.', 'all_world2pix', '(', 'seg_ra', ',', 'seg_dec', ',', '1', ')', 'seg_tab_phot', '[', "'xcentroid'", ']', '=', 'seg_xy_out', '[', '0', ']', 'seg_tab_phot', '[', "'ycentroid'", ']', '=', 'seg_xy_out', '[', '1', ']', 'if', 'master_cat', 'is', 'None', ':', 'master_cat', '=', 'seg_tab_phot', 'else', ':', 'master_cat', '=', 'vstack', '(', '[', 'master_cat', ',', 'seg_tab_phot', ']', ')', 'return', 'master_cat']
Build source catalog from input image using photutils. This script borrows heavily from build_source_catalog. The catalog returned by this function includes sources found in all chips of the input image with the positions translated to the coordinate frame defined by the reference WCS `refwcs`. The sources will be - identified using photutils segmentation-based source finding code - ignore any input pixel which has been flagged as 'bad' in the DQ array, should a DQ array be found in the input HDUList. - classified as probable cosmic-rays (if enabled) using central_moments properties of each source, with these sources being removed from the catalog. Parameters ---------- image : ~astropy.io.fits.HDUList` Input image. refwcs : `~stwcs.wcsutils.HSTWCS` Definition of the reference frame WCS. dqname : str EXTNAME for the DQ array, if present, in the input image. output : bool Specify whether or not to write out a separate catalog file for all the sources found in each chip. Default: None (False) threshold : float, optional This parameter controls the S/N threshold used for identifying sources in the image relative to the background RMS in much the same way that the 'threshold' parameter in 'tweakreg' works. fwhm : float, optional FWHM (in pixels) of the expected sources from the image, comparable to the 'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to this value will be identified as sources in the catalog. Returns -------- master_cat : `~astropy.table.Table` Source catalog for all 'valid' sources identified from all chips of the input image with positions translated to the reference WCS coordinate frame.
['Build', 'source', 'catalog', 'from', 'input', 'image', 'using', 'photutils', '.']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/hlautils/astrometric_utils.py#L648-L723
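The per-chip accumulation reduced to its table mechanics (astropy only; the WCS transforms are omitted and the data invented):

```python
from astropy.table import Table, vstack

master_cat = None
for chip_cat in (Table({'xcentroid': [1.0]}), Table({'xcentroid': [2.0]})):
    master_cat = chip_cat if master_cat is None else vstack([master_cat, chip_cat])
print(len(master_cat))   # 2
```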
4,829
mapmyfitness/jtime
jtime/jtime.py
main
def main(): """ Set up the context and connectors """ try: init() except custom_exceptions.NotConfigured: configure() init() # Adding this in case users are trying to run without adding a jira url. # I would like to take this out in a release or two. # TODO: REMOVE except (AttributeError, ConfigParser.NoOptionError): logging.error('It appears that your configuration is invalid, please reconfigure the app and try again.') configure() init() parser = argparse.ArgumentParser() # Now simply auto-discovering the methods listed in this module current_module = sys.modules[__name__] module_methods = [getattr(current_module, a, None) for a in dir(current_module) if isinstance(getattr(current_module, a, None), types.FunctionType) and a != 'main'] argh.add_commands(parser, module_methods) # Putting the error logging after the app is initialized because # we want to adhere to the user's preferences try: argh.dispatch(parser) # We don't want to report keyboard interrupts to rollbar except (KeyboardInterrupt, SystemExit): raise except Exception as e: if isinstance(e, jira.exceptions.JIRAError) and "HTTP 400" in e: logging.warning('It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'.format(configuration.load_config['jira'].get('url'))) elif configured.get('jira').get('error_reporting', True): # Configure rollbar so that we report errors import rollbar from . import __version__ as version root_path = os.path.dirname(os.path.realpath(__file__)) rollbar.init('7541b8e188044831b6728fa8475eab9f', 'v%s' % version, root=root_path) logging.error('Sorry. It appears that there was an error when handling your command. ' 'This error has been reported to our error tracking system. To disable ' 'this reporting, please re-configure the app: `jtime config`.') extra_data = { # grab the command that we're running 'cmd': sys.argv[1], # we really don't want to see jtime in the args 'args': sys.argv[2:], # lets grab anything useful, python version? 'python': str(sys.version), } # We really shouldn't thit this line of code when running tests, so let's not cover it. rollbar.report_exc_info(extra_data=extra_data) # pragma: no cover else: logging.error('It appears that there was an error when handling your command.') raise
python
def main(): """ Set up the context and connectors """ try: init() except custom_exceptions.NotConfigured: configure() init() # Adding this in case users are trying to run without adding a jira url. # I would like to take this out in a release or two. # TODO: REMOVE except (AttributeError, ConfigParser.NoOptionError): logging.error('It appears that your configuration is invalid, please reconfigure the app and try again.') configure() init() parser = argparse.ArgumentParser() # Now simply auto-discovering the methods listed in this module current_module = sys.modules[__name__] module_methods = [getattr(current_module, a, None) for a in dir(current_module) if isinstance(getattr(current_module, a, None), types.FunctionType) and a != 'main'] argh.add_commands(parser, module_methods) # Putting the error logging after the app is initialized because # we want to adhere to the user's preferences try: argh.dispatch(parser) # We don't want to report keyboard interrupts to rollbar except (KeyboardInterrupt, SystemExit): raise except Exception as e: if isinstance(e, jira.exceptions.JIRAError) and "HTTP 400" in e: logging.warning('It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'.format(configuration.load_config['jira'].get('url'))) elif configured.get('jira').get('error_reporting', True): # Configure rollbar so that we report errors import rollbar from . import __version__ as version root_path = os.path.dirname(os.path.realpath(__file__)) rollbar.init('7541b8e188044831b6728fa8475eab9f', 'v%s' % version, root=root_path) logging.error('Sorry. It appears that there was an error when handling your command. ' 'This error has been reported to our error tracking system. To disable ' 'this reporting, please re-configure the app: `jtime config`.') extra_data = { # grab the command that we're running 'cmd': sys.argv[1], # we really don't want to see jtime in the args 'args': sys.argv[2:], # lets grab anything useful, python version? 'python': str(sys.version), } # We really shouldn't thit this line of code when running tests, so let's not cover it. rollbar.report_exc_info(extra_data=extra_data) # pragma: no cover else: logging.error('It appears that there was an error when handling your command.') raise
['def', 'main', '(', ')', ':', 'try', ':', 'init', '(', ')', 'except', 'custom_exceptions', '.', 'NotConfigured', ':', 'configure', '(', ')', 'init', '(', ')', '# Adding this in case users are trying to run without adding a jira url.', '# I would like to take this out in a release or two.', '# TODO: REMOVE', 'except', '(', 'AttributeError', ',', 'ConfigParser', '.', 'NoOptionError', ')', ':', 'logging', '.', 'error', '(', "'It appears that your configuration is invalid, please reconfigure the app and try again.'", ')', 'configure', '(', ')', 'init', '(', ')', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', ')', '# Now simply auto-discovering the methods listed in this module', 'current_module', '=', 'sys', '.', 'modules', '[', '__name__', ']', 'module_methods', '=', '[', 'getattr', '(', 'current_module', ',', 'a', ',', 'None', ')', 'for', 'a', 'in', 'dir', '(', 'current_module', ')', 'if', 'isinstance', '(', 'getattr', '(', 'current_module', ',', 'a', ',', 'None', ')', ',', 'types', '.', 'FunctionType', ')', 'and', 'a', '!=', "'main'", ']', 'argh', '.', 'add_commands', '(', 'parser', ',', 'module_methods', ')', '# Putting the error logging after the app is initialized because', "# we want to adhere to the user's preferences", 'try', ':', 'argh', '.', 'dispatch', '(', 'parser', ')', "# We don't want to report keyboard interrupts to rollbar", 'except', '(', 'KeyboardInterrupt', ',', 'SystemExit', ')', ':', 'raise', 'except', 'Exception', 'as', 'e', ':', 'if', 'isinstance', '(', 'e', ',', 'jira', '.', 'exceptions', '.', 'JIRAError', ')', 'and', '"HTTP 400"', 'in', 'e', ':', 'logging', '.', 'warning', '(', "'It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'", '.', 'format', '(', 'configuration', '.', 'load_config', '[', "'jira'", ']', '.', 'get', '(', "'url'", ')', ')', ')', 'elif', 'configured', '.', 'get', '(', "'jira'", ')', '.', 'get', '(', "'error_reporting'", ',', 'True', ')', ':', '# Configure rollbar so that we report errors', 'import', 'rollbar', 'from', '.', 'import', '__version__', 'as', 'version', 'root_path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', ')', 'rollbar', '.', 'init', '(', "'7541b8e188044831b6728fa8475eab9f'", ',', "'v%s'", '%', 'version', ',', 'root', '=', 'root_path', ')', 'logging', '.', 'error', '(', "'Sorry. It appears that there was an error when handling your command. '", "'This error has been reported to our error tracking system. To disable '", "'this reporting, please re-configure the app: `jtime config`.'", ')', 'extra_data', '=', '{', "# grab the command that we're running", "'cmd'", ':', 'sys', '.', 'argv', '[', '1', ']', ',', "# we really don't want to see jtime in the args", "'args'", ':', 'sys', '.', 'argv', '[', '2', ':', ']', ',', '# lets grab anything useful, python version?', "'python'", ':', 'str', '(', 'sys', '.', 'version', ')', ',', '}', "# We really shouldn't thit this line of code when running tests, so let's not cover it.", 'rollbar', '.', 'report_exc_info', '(', 'extra_data', '=', 'extra_data', ')', '# pragma: no cover', 'else', ':', 'logging', '.', 'error', '(', "'It appears that there was an error when handling your command.'", ')', 'raise']
Set up the context and connectors
['Set', 'up', 'the', 'context', 'and', 'connectors']
train
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jtime.py#L264-L321
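The command auto-discovery idiom from main(), in isolation. (Separately, the "HTTP 400" in e membership test above appears to assume the exception behaves like a string; that is worth double-checking against JIRAError.)

```python
import sys
import types

def discover_commands(module, skip=('main',)):
    # collect every plain function defined on the module, minus the entry point
    return [getattr(module, a) for a in dir(module)
            if isinstance(getattr(module, a, None), types.FunctionType)
            and a not in skip]

print([f.__name__ for f in discover_commands(sys.modules[__name__])])
```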
4,830
saltstack/salt
salt/modules/win_file.py
set_mode
def set_mode(path, mode): ''' Set the mode of a file This just calls get_mode, which returns None because we don't use mode on Windows Args: path: The path to the file or directory mode: The mode (not used) Returns: None CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644 ''' func_name = '{0}.set_mode'.format(__virtualname__) if __opts__.get('fun', '') == func_name: log.info('The function %s should not be used on Windows systems; ' 'see function docs for details. The value returned is ' 'always None. Use set_perms instead.', func_name) return get_mode(path)
python
def set_mode(path, mode): ''' Set the mode of a file This just calls get_mode, which returns None because we don't use mode on Windows Args: path: The path to the file or directory mode: The mode (not used) Returns: None CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644 ''' func_name = '{0}.set_mode'.format(__virtualname__) if __opts__.get('fun', '') == func_name: log.info('The function %s should not be used on Windows systems; ' 'see function docs for details. The value returned is ' 'always None. Use set_perms instead.', func_name) return get_mode(path)
['def', 'set_mode', '(', 'path', ',', 'mode', ')', ':', 'func_name', '=', "'{0}.set_mode'", '.', 'format', '(', '__virtualname__', ')', 'if', '__opts__', '.', 'get', '(', "'fun'", ',', "''", ')', '==', 'func_name', ':', 'log', '.', 'info', '(', "'The function %s should not be used on Windows systems; '", "'see function docs for details. The value returned is '", "'always None. Use set_perms instead.'", ',', 'func_name', ')', 'return', 'get_mode', '(', 'path', ')']
Set the mode of a file This just calls get_mode, which returns None because we don't use mode on Windows Args: path: The path to the file or directory mode: The mode (not used) Returns: None CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644
['Set', 'the', 'mode', 'of', 'a', 'file']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L1019-L1045
4,831
dswah/pyGAM
pygam/pygam.py
GAM.fit
def fit(self, X, y, weights=None): """Fit the generalized additive model. Parameters ---------- X : array-like, shape (n_samples, m_features) Training vectors. y : array-like, shape (n_samples,) Target values, ie integers in classification, real numbers in regression) weights : array-like shape (n_samples,) or None, optional Sample weights. if None, defaults to array of ones Returns ------- self : object Returns fitted GAM object """ # validate parameters self._validate_params() # validate data y = check_y(y, self.link, self.distribution, verbose=self.verbose) X = check_X(X, verbose=self.verbose) check_X_y(X, y) if weights is not None: weights = np.array(weights).astype('f').ravel() weights = check_array(weights, name='sample weights', ndim=1, verbose=self.verbose) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') # validate data-dependent parameters self._validate_data_dep_params(X) # set up logging if not hasattr(self, 'logs_'): self.logs_ = defaultdict(list) # begin capturing statistics self.statistics_ = {} self.statistics_['n_samples'] = len(y) self.statistics_['m_features'] = X.shape[1] # optimize self._pirls(X, y, weights) # if self._opt == 0: # self._pirls(X, y, weights) # if self._opt == 1: # self._pirls_naive(X, y) return self
python
def fit(self, X, y, weights=None): """Fit the generalized additive model. Parameters ---------- X : array-like, shape (n_samples, m_features) Training vectors. y : array-like, shape (n_samples,) Target values, ie integers in classification, real numbers in regression) weights : array-like shape (n_samples,) or None, optional Sample weights. if None, defaults to array of ones Returns ------- self : object Returns fitted GAM object """ # validate parameters self._validate_params() # validate data y = check_y(y, self.link, self.distribution, verbose=self.verbose) X = check_X(X, verbose=self.verbose) check_X_y(X, y) if weights is not None: weights = np.array(weights).astype('f').ravel() weights = check_array(weights, name='sample weights', ndim=1, verbose=self.verbose) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') # validate data-dependent parameters self._validate_data_dep_params(X) # set up logging if not hasattr(self, 'logs_'): self.logs_ = defaultdict(list) # begin capturing statistics self.statistics_ = {} self.statistics_['n_samples'] = len(y) self.statistics_['m_features'] = X.shape[1] # optimize self._pirls(X, y, weights) # if self._opt == 0: # self._pirls(X, y, weights) # if self._opt == 1: # self._pirls_naive(X, y) return self
['def', 'fit', '(', 'self', ',', 'X', ',', 'y', ',', 'weights', '=', 'None', ')', ':', '# validate parameters', 'self', '.', '_validate_params', '(', ')', '# validate data', 'y', '=', 'check_y', '(', 'y', ',', 'self', '.', 'link', ',', 'self', '.', 'distribution', ',', 'verbose', '=', 'self', '.', 'verbose', ')', 'X', '=', 'check_X', '(', 'X', ',', 'verbose', '=', 'self', '.', 'verbose', ')', 'check_X_y', '(', 'X', ',', 'y', ')', 'if', 'weights', 'is', 'not', 'None', ':', 'weights', '=', 'np', '.', 'array', '(', 'weights', ')', '.', 'astype', '(', "'f'", ')', '.', 'ravel', '(', ')', 'weights', '=', 'check_array', '(', 'weights', ',', 'name', '=', "'sample weights'", ',', 'ndim', '=', '1', ',', 'verbose', '=', 'self', '.', 'verbose', ')', 'check_lengths', '(', 'y', ',', 'weights', ')', 'else', ':', 'weights', '=', 'np', '.', 'ones_like', '(', 'y', ')', '.', 'astype', '(', "'float64'", ')', '# validate data-dependent parameters', 'self', '.', '_validate_data_dep_params', '(', 'X', ')', '# set up logging', 'if', 'not', 'hasattr', '(', 'self', ',', "'logs_'", ')', ':', 'self', '.', 'logs_', '=', 'defaultdict', '(', 'list', ')', '# begin capturing statistics', 'self', '.', 'statistics_', '=', '{', '}', 'self', '.', 'statistics_', '[', "'n_samples'", ']', '=', 'len', '(', 'y', ')', 'self', '.', 'statistics_', '[', "'m_features'", ']', '=', 'X', '.', 'shape', '[', '1', ']', '# optimize', 'self', '.', '_pirls', '(', 'X', ',', 'y', ',', 'weights', ')', '# if self._opt == 0:', '# self._pirls(X, y, weights)', '# if self._opt == 1:', '# self._pirls_naive(X, y)', 'return', 'self']
Fit the generalized additive model. Parameters ---------- X : array-like, shape (n_samples, m_features) Training vectors. y : array-like, shape (n_samples,) Target values, i.e. integers in classification, real numbers in regression weights : array-like shape (n_samples,) or None, optional Sample weights. if None, defaults to array of ones Returns ------- self : object Returns fitted GAM object
['Fit', 'the', 'generalized', 'additive', 'model', '.']
train
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L870-L925
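A typical round trip with the concrete LinearGAM subclass, assuming the fit signature above carries through to it; the data are synthetic:

```python
import numpy as np
from pygam import LinearGAM

X = np.random.rand(100, 2)
y = 3 * X[:, 0] + np.sin(2 * np.pi * X[:, 1]) + np.random.normal(0, 0.1, 100)

gam = LinearGAM().fit(X, y, weights=np.ones(100))
print(gam.statistics_['n_samples'], gam.statistics_['m_features'])   # 100 2
```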
4,832
pandas-dev/pandas
pandas/io/html.py
_HtmlFrameParser.parse_tables
def parse_tables(self): """ Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables. """ tables = self._parse_tables(self._build_doc(), self.match, self.attrs) return (self._parse_thead_tbody_tfoot(table) for table in tables)
python
def parse_tables(self): """ Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables. """ tables = self._parse_tables(self._build_doc(), self.match, self.attrs) return (self._parse_thead_tbody_tfoot(table) for table in tables)
['def', 'parse_tables', '(', 'self', ')', ':', 'tables', '=', 'self', '.', '_parse_tables', '(', 'self', '.', '_build_doc', '(', ')', ',', 'self', '.', 'match', ',', 'self', '.', 'attrs', ')', 'return', '(', 'self', '.', '_parse_thead_tbody_tfoot', '(', 'table', ')', 'for', 'table', 'in', 'tables', ')']
Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables.
['Parse', 'and', 'return', 'all', 'tables', 'from', 'the', 'DOM', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L208-L217
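This machinery sits behind the public pd.read_html entry point, which is the usual way to exercise it:

```python
import io
import pandas as pd

html = ("<table><thead><tr><th>a</th><th>b</th></tr></thead>"
        "<tbody><tr><td>1</td><td>2</td></tr></tbody></table>")
tables = pd.read_html(io.StringIO(html))   # each (header, body, footer) becomes a DataFrame
print(tables[0])
#    a  b
# 0  1  2
```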
4,833
senseobservationsystems/commonsense-python-lib
senseapi.py
SenseAPI.ServicesSetMetod
def ServicesSetMetod (self, sensor_id, service_id, method, parameters): """ Set expression for the math service. @param sensor_id (int) - Sensor id of the sensor the service is connected to. @param service_id (int) - Service id of the service for which to set the expression. @param method (string) - The set method name. @param parameters (dictionary) - Parameters to set the expression of the math service. @return (bool) - Boolean indicating whether ServicesSetMethod was successful. """ if self.__SenseApiCall__('/sensors/{0}/services/{1}/{2}.json'.format(sensor_id, service_id, method), 'POST', parameters = parameters): return True else: self.__error__ = "api call unsuccessful" return False
python
def ServicesSetMetod (self, sensor_id, service_id, method, parameters): """ Set expression for the math service. @param sensor_id (int) - Sensor id of the sensor the service is connected to. @param service_id (int) - Service id of the service for which to set the expression. @param method (string) - The set method name. @param parameters (dictionary) - Parameters to set the expression of the math service. @return (bool) - Boolean indicating whether ServicesSetMethod was successful. """ if self.__SenseApiCall__('/sensors/{0}/services/{1}/{2}.json'.format(sensor_id, service_id, method), 'POST', parameters = parameters): return True else: self.__error__ = "api call unsuccessful" return False
['def', 'ServicesSetMetod', '(', 'self', ',', 'sensor_id', ',', 'service_id', ',', 'method', ',', 'parameters', ')', ':', 'if', 'self', '.', '__SenseApiCall__', '(', "'/sensors/{0}/services/{1}/{2}.json'", '.', 'format', '(', 'sensor_id', ',', 'service_id', ',', 'method', ')', ',', "'POST'", ',', 'parameters', '=', 'parameters', ')', ':', 'return', 'True', 'else', ':', 'self', '.', '__error__', '=', '"api call unsuccessful"', 'return', 'False']
Set expression for the math service. @param sensor_id (int) - Sensor id of the sensor the service is connected to. @param service_id (int) - Service id of the service for which to set the expression. @param method (string) - The set method name. @param parameters (dictionary) - Parameters to set the expression of the math service. @return (bool) - Boolean indicating whether ServicesSetMethod was successful.
['Set', 'expression', 'for', 'the', 'math', 'service', '.']
train
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L953-L968
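A hypothetical call, given an already configured SenseAPI instance; the ids, method name, and payload are invented (and note the 'Metod' spelling is how the method is actually named):

```python
ok = api.ServicesSetMetod(sensor_id=123, service_id=456,
                          method='SetExpression',             # invented method name
                          parameters={'parameters': ['_123 * 9 / 5 + 32']})
if not ok:
    print(api.__error__)   # "api call unsuccessful"
```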
4,834
rwl/pylon
pyreto/rlopf.py
CaseEnvironment.reset
def reset(self): """ Re-initialises the environment. """ logger.info("Reseting environment.") self._step = 0 # Reset the set-point of each generator to its original value. gs = [g for g in self.case.online_generators if g.bus.type !=REFERENCE] for i, g in enumerate(gs): g.p = self._Pg0[i] # Apply load profile to the original demand at each bus. for i, b in enumerate([b for b in self.case.buses if b.type == PQ]): b.p_demand = self._Pd0[i] * self.profile[self._step] # Initialise the record of generator set-points. self._Pg = zeros((len(self.case.online_generators), len(self.profile))) # Apply the first load profile value. # self.step() self.case.reset()
python
def reset(self): """ Re-initialises the environment. """ logger.info("Reseting environment.") self._step = 0 # Reset the set-point of each generator to its original value. gs = [g for g in self.case.online_generators if g.bus.type !=REFERENCE] for i, g in enumerate(gs): g.p = self._Pg0[i] # Apply load profile to the original demand at each bus. for i, b in enumerate([b for b in self.case.buses if b.type == PQ]): b.p_demand = self._Pd0[i] * self.profile[self._step] # Initialise the record of generator set-points. self._Pg = zeros((len(self.case.online_generators), len(self.profile))) # Apply the first load profile value. # self.step() self.case.reset()
['def', 'reset', '(', 'self', ')', ':', 'logger', '.', 'info', '(', '"Reseting environment."', ')', 'self', '.', '_step', '=', '0', '# Reset the set-point of each generator to its original value.', 'gs', '=', '[', 'g', 'for', 'g', 'in', 'self', '.', 'case', '.', 'online_generators', 'if', 'g', '.', 'bus', '.', 'type', '!=', 'REFERENCE', ']', 'for', 'i', ',', 'g', 'in', 'enumerate', '(', 'gs', ')', ':', 'g', '.', 'p', '=', 'self', '.', '_Pg0', '[', 'i', ']', '# Apply load profile to the original demand at each bus.', 'for', 'i', ',', 'b', 'in', 'enumerate', '(', '[', 'b', 'for', 'b', 'in', 'self', '.', 'case', '.', 'buses', 'if', 'b', '.', 'type', '==', 'PQ', ']', ')', ':', 'b', '.', 'p_demand', '=', 'self', '.', '_Pd0', '[', 'i', ']', '*', 'self', '.', 'profile', '[', 'self', '.', '_step', ']', '# Initialise the record of generator set-points.', 'self', '.', '_Pg', '=', 'zeros', '(', '(', 'len', '(', 'self', '.', 'case', '.', 'online_generators', ')', ',', 'len', '(', 'self', '.', 'profile', ')', ')', ')', '# Apply the first load profile value.', '# self.step()', 'self', '.', 'case', '.', 'reset', '(', ')']
Re-initialises the environment.
['Re', '-', 'initialises', 'the', 'environment', '.']
train
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/rlopf.py#L140-L162
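The load-profile arithmetic from the reset loop, isolated with invented numbers:

```python
Pd0 = [100.0, 80.0]          # original bus demands
profile = [1.0, 1.5, 0.9]    # per-step load multipliers

step = 0                     # reset() re-applies step 0
demands = [pd * profile[step] for pd in Pd0]
print(demands)               # [100.0, 80.0]; step 1 would give [150.0, 120.0]
```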
4,835
awslabs/sockeye
sockeye/data_io.py
are_token_parallel
def are_token_parallel(sequences: Sequence[Sized]) -> bool: """ Returns True if all sequences in the list have the same length. """ if not sequences or len(sequences) == 1: return True return all(len(s) == len(sequences[0]) for s in sequences)
python
def are_token_parallel(sequences: Sequence[Sized]) -> bool: """ Returns True if all sequences in the list have the same length. """ if not sequences or len(sequences) == 1: return True return all(len(s) == len(sequences[0]) for s in sequences)
['def', 'are_token_parallel', '(', 'sequences', ':', 'Sequence', '[', 'Sized', ']', ')', '->', 'bool', ':', 'if', 'not', 'sequences', 'or', 'len', '(', 'sequences', ')', '==', '1', ':', 'return', 'True', 'return', 'all', '(', 'len', '(', 's', ')', '==', 'len', '(', 'sequences', '[', '0', ']', ')', 'for', 's', 'in', 'sequences', ')']
Returns True if all sequences in the list have the same length.
['Returns', 'True', 'if', 'all', 'sequences', 'in', 'the', 'list', 'have', 'the', 'same', 'length', '.']
train
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/data_io.py#L264-L270
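The contract, spelled out (assuming sockeye is importable):

```python
from sockeye.data_io import are_token_parallel

assert are_token_parallel([])                       # empty: vacuously parallel
assert are_token_parallel([[1, 2]])                 # single sequence
assert are_token_parallel([[1, 2], [3, 4]])
assert not are_token_parallel([[1, 2], [3, 4, 5]])
```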
4,836
juicer/juicer
juicer/common/Connectors.py
Connectors.check_api_version
def check_api_version(self): """ Self check that the client expects the api version used by the server. /status/ is available without authentication so it will not interfere with hello. """ url = self.base_url + "/status/" juicer.utils.Log.log_debug("[REST:GET:%s]", url) _r = requests.get(url, auth=self.auth, headers=self.headers, verify=False) if _r.status_code == Constants.PULP_GET_OK: # server is up, cool. version = juicer.utils.load_json_str(_r.content)['api_version'].strip() if version != Constants.EXPECTED_SERVER_VERSION: # we done goofed raise JuicerPulpError("Client expects %s and got %s -- you should probably update!" \ % (Constants.EXPECTED_SERVER_VERSION, version)) return True
python
def check_api_version(self): """ Self check that the client expects the api version used by the server. /status/ is available without authentication so it will not interfere with hello. """ url = self.base_url + "/status/" juicer.utils.Log.log_debug("[REST:GET:%s]", url) _r = requests.get(url, auth=self.auth, headers=self.headers, verify=False) if _r.status_code == Constants.PULP_GET_OK: # server is up, cool. version = juicer.utils.load_json_str(_r.content)['api_version'].strip() if version != Constants.EXPECTED_SERVER_VERSION: # we done goofed raise JuicerPulpError("Client expects %s and got %s -- you should probably update!" \ % (Constants.EXPECTED_SERVER_VERSION, version)) return True
['def', 'check_api_version', '(', 'self', ')', ':', 'url', '=', 'self', '.', 'base_url', '+', '"/status/"', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_debug', '(', '"[REST:GET:%s]"', ',', 'url', ')', '_r', '=', 'requests', '.', 'get', '(', 'url', ',', 'auth', '=', 'self', '.', 'auth', ',', 'headers', '=', 'self', '.', 'headers', ',', 'verify', '=', 'False', ')', 'if', '_r', '.', 'status_code', '==', 'Constants', '.', 'PULP_GET_OK', ':', '# server is up, cool.', 'version', '=', 'juicer', '.', 'utils', '.', 'load_json_str', '(', '_r', '.', 'content', ')', '[', "'api_version'", ']', '.', 'strip', '(', ')', 'if', 'version', '!=', 'Constants', '.', 'EXPECTED_SERVER_VERSION', ':', '# we done goofed', 'raise', 'JuicerPulpError', '(', '"Client expects %s and got %s -- you should probably update!"', '%', '(', 'Constants', '.', 'EXPECTED_SERVER_VERSION', ',', 'version', ')', ')', 'return', 'True']
Self check that the client expects the api version used by the server. /status/ is available without authentication so it will not interfere with hello.
['Self', 'check', 'that', 'the', 'client', 'expects', 'the', 'api', 'version', 'used', 'by', 'the', 'server', '.', '/', 'status', '/', 'is', 'available', 'without', 'authentication', 'so', 'it', 'will', 'not', 'interfere', 'with', 'hello', '.']
train
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/common/Connectors.py#L75-L92
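The version gate in isolation; the payload mirrors what the code reads from /status/, with invented values:

```python
import json

EXPECTED_SERVER_VERSION = '2'                    # invented constant value
payload = json.loads('{"api_version": " 2 "}')   # shape per the code above
version = payload['api_version'].strip()
if version != EXPECTED_SERVER_VERSION:
    raise RuntimeError('Client expects %s and got %s -- you should probably update!'
                       % (EXPECTED_SERVER_VERSION, version))
```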
4,837
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
brocade_vswitch.get_vnetwork_vms_input_datacenter
def get_vnetwork_vms_input_datacenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_vms = ET.Element("get_vnetwork_vms") config = get_vnetwork_vms input = ET.SubElement(get_vnetwork_vms, "input") datacenter = ET.SubElement(input, "datacenter") datacenter.text = kwargs.pop('datacenter') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_vnetwork_vms_input_datacenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_vms = ET.Element("get_vnetwork_vms") config = get_vnetwork_vms input = ET.SubElement(get_vnetwork_vms, "input") datacenter = ET.SubElement(input, "datacenter") datacenter.text = kwargs.pop('datacenter') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_vnetwork_vms_input_datacenter', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_vnetwork_vms', '=', 'ET', '.', 'Element', '(', '"get_vnetwork_vms"', ')', 'config', '=', 'get_vnetwork_vms', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_vnetwork_vms', ',', '"input"', ')', 'datacenter', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"datacenter"', ')', 'datacenter', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'datacenter'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L214-L225
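What the ElementTree construction serializes to, with an invented datacenter name:

```python
import xml.etree.ElementTree as ET

get_vnetwork_vms = ET.Element("get_vnetwork_vms")
inp = ET.SubElement(get_vnetwork_vms, "input")
ET.SubElement(inp, "datacenter").text = "dc1"
print(ET.tostring(get_vnetwork_vms).decode())
# <get_vnetwork_vms><input><datacenter>dc1</datacenter></input></get_vnetwork_vms>
```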
4,838
fedora-infra/fedora-messaging
fedora_messaging/twisted/protocol.py
FedoraMessagingProtocol.stopProducing
def stopProducing(self): """ Stop producing messages and disconnect from the server. Returns: Deferred: fired when the production is stopped. """ _legacy_twisted_log.msg("Disconnecting from the AMQP broker") yield self.pauseProducing() yield self.close() self._consumers = {} self._channel = None
python
def stopProducing(self): """ Stop producing messages and disconnect from the server. Returns: Deferred: fired when the production is stopped. """ _legacy_twisted_log.msg("Disconnecting from the AMQP broker") yield self.pauseProducing() yield self.close() self._consumers = {} self._channel = None
['def', 'stopProducing', '(', 'self', ')', ':', '_legacy_twisted_log', '.', 'msg', '(', '"Disconnecting from the AMQP broker"', ')', 'yield', 'self', '.', 'pauseProducing', '(', ')', 'yield', 'self', '.', 'close', '(', ')', 'self', '.', '_consumers', '=', '{', '}', 'self', '.', '_channel', '=', 'None']
Stop producing messages and disconnect from the server. Returns: Deferred: fired when the production is stopped.
['Stop', 'producing', 'messages', 'and', 'disconnect', 'from', 'the', 'server', '.', 'Returns', ':', 'Deferred', ':', 'fired', 'when', 'the', 'production', 'is', 'stopped', '.']
train
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/twisted/protocol.py#L934-L944
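stopProducing is an inlineCallbacks-style generator (it yields Deferreds), so callers wait on the Deferred it returns; a sketch from another Twisted coroutine, with the connected protocol instance assumed:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def shutdown(protocol):
        # protocol: a connected FedoraMessagingProtocol instance (assumed).
        yield protocol.stopProducing()
        print('disconnected from the AMQP broker')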
4,839
kolypto/py-good
good/schema/util.py
register_type_name
def register_type_name(t, name): """ Register a human-friendly name for the given type. This will be used in Invalid errors :param t: The type to register :type t: type :param name: Name for the type :type name: unicode """ assert isinstance(t, type) assert isinstance(name, unicode) __type_names[t] = name
python
def register_type_name(t, name): """ Register a human-friendly name for the given type. This will be used in Invalid errors :param t: The type to register :type t: type :param name: Name for the type :type name: unicode """ assert isinstance(t, type) assert isinstance(name, unicode) __type_names[t] = name
['def', 'register_type_name', '(', 't', ',', 'name', ')', ':', 'assert', 'isinstance', '(', 't', ',', 'type', ')', 'assert', 'isinstance', '(', 'name', ',', 'unicode', ')', '__type_names', '[', 't', ']', '=', 'name']
Register a human-friendly name for the given type. This will be used in Invalid errors :param t: The type to register :type t: type :param name: Name for the type :type name: unicode
['Register', 'a', 'human', '-', 'friendly', 'name', 'for', 'the', 'given', 'type', '.', 'This', 'will', 'be', 'used', 'in', 'Invalid', 'errors']
train
https://github.com/kolypto/py-good/blob/192ef19e79f6fd95c1cbd7c378a3074c7ad7a6d4/good/schema/util.py#L61-L71
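Usage is a single call; note the isinstance(name, unicode) assertion means this code targets Python 2, so the name must be a unicode literal:

    class Money(object):
        pass

    register_type_name(Money, u'Money amount')
    # Invalid errors raised by a schema will now describe Money values
    # as 'Money amount' instead of the raw class name.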
4,840
cozy/python_cozy_management
cozy_management/ssl.py
generate_certificate
def generate_certificate(common_name, size=DEFAULT_KEY_SIZE): ''' Generate private key and certificate for https ''' private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name) certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name) if not os.path.isfile(certificate_path): print 'Create {}'.format(certificate_path) cmd = 'openssl req -x509 -nodes -newkey rsa:{size} -keyout {private_key_path} -out {certificate_path} -days 3650 -subj "/CN={common_name}"'.format( size=size, private_key_path=private_key_path, certificate_path=certificate_path, common_name=common_name) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True) p.communicate() helpers.file_rights(private_key_path, mode=0400, uid=0, gid=0) helpers.file_rights(certificate_path, mode=0444, uid=0, gid=0) else: print 'Already exist: {}'.format(certificate_path) clean_links() make_links(common_name)
python
def generate_certificate(common_name, size=DEFAULT_KEY_SIZE): ''' Generate private key and certificate for https ''' private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name) certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name) if not os.path.isfile(certificate_path): print 'Create {}'.format(certificate_path) cmd = 'openssl req -x509 -nodes -newkey rsa:{size} -keyout {private_key_path} -out {certificate_path} -days 3650 -subj "/CN={common_name}"'.format( size=size, private_key_path=private_key_path, certificate_path=certificate_path, common_name=common_name) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True) p.communicate() helpers.file_rights(private_key_path, mode=0400, uid=0, gid=0) helpers.file_rights(certificate_path, mode=0444, uid=0, gid=0) else: print 'Already exist: {}'.format(certificate_path) clean_links() make_links(common_name)
['def', 'generate_certificate', '(', 'common_name', ',', 'size', '=', 'DEFAULT_KEY_SIZE', ')', ':', 'private_key_path', '=', "'{}/{}.key'", '.', 'format', '(', 'CERTIFICATES_PATH', ',', 'common_name', ')', 'certificate_path', '=', "'{}/{}.crt'", '.', 'format', '(', 'CERTIFICATES_PATH', ',', 'common_name', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'certificate_path', ')', ':', 'print', "'Create {}'", '.', 'format', '(', 'certificate_path', ')', 'cmd', '=', '\'openssl req -x509 -nodes -newkey rsa:{size} -keyout {private_key_path} -out {certificate_path} -days 3650 -subj "/CN={common_name}"\'', '.', 'format', '(', 'size', '=', 'size', ',', 'private_key_path', '=', 'private_key_path', ',', 'certificate_path', '=', 'certificate_path', ',', 'common_name', '=', 'common_name', ')', 'p', '=', 'subprocess', '.', 'Popen', '(', 'cmd', ',', 'shell', '=', 'True', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'close_fds', '=', 'True', ')', 'p', '.', 'communicate', '(', ')', 'helpers', '.', 'file_rights', '(', 'private_key_path', ',', 'mode', '=', '0400', ',', 'uid', '=', '0', ',', 'gid', '=', '0', ')', 'helpers', '.', 'file_rights', '(', 'certificate_path', ',', 'mode', '=', '0444', ',', 'uid', '=', '0', ',', 'gid', '=', '0', ')', 'else', ':', 'print', "'Already exist: {}'", '.', 'format', '(', 'certificate_path', ')', 'clean_links', '(', ')', 'make_links', '(', 'common_name', ')']
Generate private key and certificate for https
['Generate', 'private', 'key', 'and', 'certificate', 'for', 'https']
train
https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L31-L54
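The print statements and octal literals mark this as Python 2 code; a call sketch with an illustrative hostname (output paths depend on CERTIFICATES_PATH):

    generate_certificate('example.cozycloud.cc')
    # Writes example.cozycloud.cc.key (mode 0400) and .crt (mode 0444)
    # under CERTIFICATES_PATH if absent, then refreshes the symlinks.
    generate_certificate('example.cozycloud.cc', size=4096)  # explicit key size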
4,841
CybOXProject/mixbox
mixbox/namespaces.py
NamespaceSet.remove_prefix
def remove_prefix(self, prefix): """Removes prefix from this set. This is a no-op if the prefix doesn't exist in it. """ if prefix not in self.__prefix_map: return ni = self.__lookup_prefix(prefix) ni.prefixes.discard(prefix) del self.__prefix_map[prefix] # If we removed the preferred prefix, find a new one. if ni.preferred_prefix == prefix: ni.preferred_prefix = next(iter(ni.prefixes), None)
python
def remove_prefix(self, prefix): """Removes prefix from this set. This is a no-op if the prefix doesn't exist in it. """ if prefix not in self.__prefix_map: return ni = self.__lookup_prefix(prefix) ni.prefixes.discard(prefix) del self.__prefix_map[prefix] # If we removed the preferred prefix, find a new one. if ni.preferred_prefix == prefix: ni.preferred_prefix = next(iter(ni.prefixes), None)
['def', 'remove_prefix', '(', 'self', ',', 'prefix', ')', ':', 'if', 'prefix', 'not', 'in', 'self', '.', '__prefix_map', ':', 'return', 'ni', '=', 'self', '.', '__lookup_prefix', '(', 'prefix', ')', 'ni', '.', 'prefixes', '.', 'discard', '(', 'prefix', ')', 'del', 'self', '.', '__prefix_map', '[', 'prefix', ']', '# If we removed the preferred prefix, find a new one.', 'if', 'ni', '.', 'preferred_prefix', '==', 'prefix', ':', 'ni', '.', 'preferred_prefix', '=', 'next', '(', 'iter', '(', 'ni', '.', 'prefixes', ')', ',', 'None', ')']
Removes prefix from this set. This is a no-op if the prefix doesn't exist in it.
['Removes', 'prefix', 'from', 'this', 'set', '.', 'This', 'is', 'a', 'no', '-', 'op', 'if', 'the', 'prefix', 'doesn', 't', 'exist', 'in', 'it', '.']
train
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/namespaces.py#L453-L466
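A behavioural sketch; the registration side of NamespaceSet is not shown in this record (and a no-argument constructor is assumed), so only the documented no-op behaviour of remove_prefix is relied on:

    ns_set = NamespaceSet()
    # ... prefixes would be registered through the set's add/registration
    # API, which is not part of this record ...
    ns_set.remove_prefix('cybox')   # drops the mapping if it exists
    ns_set.remove_prefix('cybox')   # second call is a no-op, as documented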
4,842
apache/incubator-mxnet
example/svrg_module/api_usage_example/example_inference.py
create_network
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization """ head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) data = np.random.randint(1, 5, [1000, 2]) #Test_Train data split n_train = int(data.shape[0] * 0.8) weights = np.array([1.0, 2.0]) label = data.dot(weights) di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") mod = SVRGModule( symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) return di, val_iter, mod
python
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization """ head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) data = np.random.randint(1, 5, [1000, 2]) #Test_Train data split n_train = int(data.shape[0] * 0.8) weights = np.array([1.0, 2.0]) label = data.dot(weights) di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") mod = SVRGModule( symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) return di, val_iter, mod
['def', 'create_network', '(', 'batch_size', ',', 'update_freq', ')', ':', 'head', '=', "'%(asctime)-15s %(message)s'", 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ',', 'format', '=', 'head', ')', 'data', '=', 'np', '.', 'random', '.', 'randint', '(', '1', ',', '5', ',', '[', '1000', ',', '2', ']', ')', '#Test_Train data split', 'n_train', '=', 'int', '(', 'data', '.', 'shape', '[', '0', ']', '*', '0.8', ')', 'weights', '=', 'np', '.', 'array', '(', '[', '1.0', ',', '2.0', ']', ')', 'label', '=', 'data', '.', 'dot', '(', 'weights', ')', 'di', '=', 'mx', '.', 'io', '.', 'NDArrayIter', '(', 'data', '[', ':', 'n_train', ',', ':', ']', ',', 'label', '[', ':', 'n_train', ']', ',', 'batch_size', '=', 'batch_size', ',', 'shuffle', '=', 'True', ',', 'label_name', '=', "'lin_reg_label'", ')', 'val_iter', '=', 'mx', '.', 'io', '.', 'NDArrayIter', '(', 'data', '[', 'n_train', ':', ',', ':', ']', ',', 'label', '[', 'n_train', ':', ']', ',', 'batch_size', '=', 'batch_size', ')', 'X', '=', 'mx', '.', 'sym', '.', 'Variable', '(', "'data'", ')', 'Y', '=', 'mx', '.', 'symbol', '.', 'Variable', '(', "'lin_reg_label'", ')', 'fully_connected_layer', '=', 'mx', '.', 'sym', '.', 'FullyConnected', '(', 'data', '=', 'X', ',', 'name', '=', "'fc1'", ',', 'num_hidden', '=', '1', ')', 'lro', '=', 'mx', '.', 'sym', '.', 'LinearRegressionOutput', '(', 'data', '=', 'fully_connected_layer', ',', 'label', '=', 'Y', ',', 'name', '=', '"lro"', ')', 'mod', '=', 'SVRGModule', '(', 'symbol', '=', 'lro', ',', 'data_names', '=', '[', "'data'", ']', ',', 'label_names', '=', '[', "'lin_reg_label'", ']', ',', 'update_freq', '=', 'update_freq', ',', 'logger', '=', 'logging', ')', 'return', 'di', ',', 'val_iter', ',', 'mod']
Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization
['Create', 'a', 'linear', 'regression', 'network', 'for', 'performing', 'SVRG', 'optimization', '.', ':', 'return', ':', 'an', 'instance', 'of', 'mx', '.', 'io', '.', 'NDArrayIter', ':', 'return', ':', 'an', 'instance', 'of', 'mx', '.', 'mod', '.', 'svrgmodule', 'for', 'performing', 'SVRG', 'optimization']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/svrg_module/api_usage_example/example_inference.py#L64-L91
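A sketch of consuming the returned triple; the bind/init/optimizer steps follow the standard mx.mod.Module flow, which SVRGModule is assumed to inherit:

    di, val_iter, mod = create_network(batch_size=32, update_freq=2)
    mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
    mod.init_params()
    mod.init_optimizer(optimizer='sgd',
                       optimizer_params=(('learning_rate', 0.01),))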
4,843
e-dard/flask-s3
flask_s3.py
_bp_static_url
def _bp_static_url(blueprint): """ builds the absolute url path for a blueprint's static folder """ u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or '')) return u
python
def _bp_static_url(blueprint): """ builds the absolute url path for a blueprint's static folder """ u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or '')) return u
['def', '_bp_static_url', '(', 'blueprint', ')', ':', 'u', '=', 'six', '.', 'u', '(', "'%s%s'", '%', '(', 'blueprint', '.', 'url_prefix', 'or', "''", ',', 'blueprint', '.', 'static_url_path', 'or', "''", ')', ')', 'return', 'u']
builds the absolute url path for a blueprint's static folder
['builds', 'the', 'absolute', 'url', 'path', 'for', 'a', 'blueprint', 's', 'static', 'folder']
train
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L168-L171
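What the helper yields for a typical blueprint; the blueprint values are illustrative:

    from flask import Blueprint

    bp = Blueprint('admin', __name__,
                   url_prefix='/admin',
                   static_folder='static',
                   static_url_path='/static')
    print(_bp_static_url(bp))   # -> '/admin/static'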
4,844
malinoff/structures
structures/core.py
Construct.parse_stream
def parse_stream(self, stream: BytesIO, context=None): """ Parse some python object from the stream. :param stream: Stream from which the data is read and parsed. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: return self._parse_stream(stream, context) except Error: raise except Exception as exc: raise ParsingError(str(exc))
python
def parse_stream(self, stream: BytesIO, context=None): """ Parse some python object from the stream. :param stream: Stream from which the data is read and parsed. :param context: Optional context dictionary. """ if context is None: context = Context() if not isinstance(context, Context): context = Context(context) try: return self._parse_stream(stream, context) except Error: raise except Exception as exc: raise ParsingError(str(exc))
['def', 'parse_stream', '(', 'self', ',', 'stream', ':', 'BytesIO', ',', 'context', '=', 'None', ')', ':', 'if', 'context', 'is', 'None', ':', 'context', '=', 'Context', '(', ')', 'if', 'not', 'isinstance', '(', 'context', ',', 'Context', ')', ':', 'context', '=', 'Context', '(', 'context', ')', 'try', ':', 'return', 'self', '.', '_parse_stream', '(', 'stream', ',', 'context', ')', 'except', 'Error', ':', 'raise', 'except', 'Exception', 'as', 'exc', ':', 'raise', 'ParsingError', '(', 'str', '(', 'exc', ')', ')']
Parse some python object from the stream. :param stream: Stream from which the data is read and parsed. :param context: Optional context dictionary.
['Parse', 'some', 'python', 'object', 'from', 'the', 'stream', '.']
train
https://github.com/malinoff/structures/blob/36b1d641d399cd0b2a824704da53d8b5c8bd4f10/structures/core.py#L81-L97
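A hedged sketch, assuming a Construct subclass only needs to supply _parse_stream (as the dispatch above suggests) and that Construct can be imported from structures.core per the record path; the one-byte construct is illustrative:

    from io import BytesIO
    from structures.core import Construct   # import path inferred from the record

    class OneByte(Construct):
        # Minimal illustrative construct: read exactly one byte.
        def _parse_stream(self, stream, context):
            data = stream.read(1)
            if not data:
                raise ValueError('stream exhausted')  # surfaces as ParsingError
            return data[0]

    print(OneByte().parse_stream(BytesIO(b'\x2a')))   # 42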
4,845
fermiPy/fermipy
fermipy/utils.py
make_gaussian_kernel
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None): """Make kernel for a 2D gaussian. Parameters ---------- sigma : float Standard deviation in degrees. """ sigma /= cdelt def fn(t, s): return 1. / (2 * np.pi * s ** 2) * np.exp( -t ** 2 / (s ** 2 * 2.0)) dxy = make_pixel_distance(npix, xpix, ypix) k = fn(dxy, sigma) k /= (np.sum(k) * np.radians(cdelt) ** 2) return k
python
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None): """Make kernel for a 2D gaussian. Parameters ---------- sigma : float Standard deviation in degrees. """ sigma /= cdelt def fn(t, s): return 1. / (2 * np.pi * s ** 2) * np.exp( -t ** 2 / (s ** 2 * 2.0)) dxy = make_pixel_distance(npix, xpix, ypix) k = fn(dxy, sigma) k /= (np.sum(k) * np.radians(cdelt) ** 2) return k
['def', 'make_gaussian_kernel', '(', 'sigma', ',', 'npix', '=', '501', ',', 'cdelt', '=', '0.01', ',', 'xpix', '=', 'None', ',', 'ypix', '=', 'None', ')', ':', 'sigma', '/=', 'cdelt', 'def', 'fn', '(', 't', ',', 's', ')', ':', 'return', '1.', '/', '(', '2', '*', 'np', '.', 'pi', '*', 's', '**', '2', ')', '*', 'np', '.', 'exp', '(', '-', 't', '**', '2', '/', '(', 's', '**', '2', '*', '2.0', ')', ')', 'dxy', '=', 'make_pixel_distance', '(', 'npix', ',', 'xpix', ',', 'ypix', ')', 'k', '=', 'fn', '(', 'dxy', ',', 'sigma', ')', 'k', '/=', '(', 'np', '.', 'sum', '(', 'k', ')', '*', 'np', '.', 'radians', '(', 'cdelt', ')', '**', '2', ')', 'return', 'k']
Make kernel for a 2D gaussian. Parameters ---------- sigma : float Standard deviation in degrees.
['Make', 'kernel', 'for', 'a', '2D', 'gaussian', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/utils.py#L1542-L1560
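The kernel is normalised to integrate to one over solid angle, which is easy to check: summing the kernel times the pixel area in steradians should give ~1.0 (the output grid is assumed square, npix on a side):

    import numpy as np

    k = make_gaussian_kernel(0.2, npix=301, cdelt=0.01)
    pix_area_sr = np.radians(0.01) ** 2
    print(k.shape)                   # (301, 301), assuming a square grid
    print(np.sum(k) * pix_area_sr)   # ~1.0 by construction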
4,846
ansible/ansible-runner
ansible_runner/output.py
configure
def configure(): ''' Configures the logging facility This function will setup an initial logging facility for handling display and debug outputs. The default facility will send display messages to stdout and the default debug facility will do nothing. :returns: None ''' root_logger = logging.getLogger() root_logger.addHandler(logging.NullHandler()) root_logger.setLevel(99) _display_logger.setLevel(70) _debug_logger.setLevel(10) display_handlers = [h.get_name() for h in _display_logger.handlers] if 'stdout' not in display_handlers: stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.set_name('stdout') formatter = logging.Formatter('%(message)s') stdout_handler.setFormatter(formatter) _display_logger.addHandler(stdout_handler)
python
def configure(): ''' Configures the logging facility This function will setup an initial logging facility for handling display and debug outputs. The default facility will send display messages to stdout and the default debug facility will do nothing. :returns: None ''' root_logger = logging.getLogger() root_logger.addHandler(logging.NullHandler()) root_logger.setLevel(99) _display_logger.setLevel(70) _debug_logger.setLevel(10) display_handlers = [h.get_name() for h in _display_logger.handlers] if 'stdout' not in display_handlers: stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.set_name('stdout') formatter = logging.Formatter('%(message)s') stdout_handler.setFormatter(formatter) _display_logger.addHandler(stdout_handler)
['def', 'configure', '(', ')', ':', 'root_logger', '=', 'logging', '.', 'getLogger', '(', ')', 'root_logger', '.', 'addHandler', '(', 'logging', '.', 'NullHandler', '(', ')', ')', 'root_logger', '.', 'setLevel', '(', '99', ')', '_display_logger', '.', 'setLevel', '(', '70', ')', '_debug_logger', '.', 'setLevel', '(', '10', ')', 'display_handlers', '=', '[', 'h', '.', 'get_name', '(', ')', 'for', 'h', 'in', '_display_logger', '.', 'handlers', ']', 'if', "'stdout'", 'not', 'in', 'display_handlers', ':', 'stdout_handler', '=', 'logging', '.', 'StreamHandler', '(', 'sys', '.', 'stdout', ')', 'stdout_handler', '.', 'set_name', '(', "'stdout'", ')', 'formatter', '=', 'logging', '.', 'Formatter', '(', "'%(message)s'", ')', 'stdout_handler', '.', 'setFormatter', '(', 'formatter', ')', '_display_logger', '.', 'addHandler', '(', 'stdout_handler', ')']
Configures the logging facility This function will set up an initial logging facility for handling display and debug outputs. The default facility will send display messages to stdout and the default debug facility will do nothing. :returns: None
['Configures', 'the', 'logging', 'facility']
train
https://github.com/ansible/ansible-runner/blob/8ce485480a5d0b602428d9d64a752e06fb46cdb8/ansible_runner/output.py#L67-L91
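After configure() runs, messages at or above the custom level 70 reach stdout through the display logger, while the debug logger stays silent by default; a sketch using the module-level logger names from the code above:

    configure()
    _display_logger.log(70, 'visible on stdout')
    _debug_logger.debug('goes nowhere until a debug handler is attached')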
4,847
pypa/pipenv
pipenv/vendor/click/core.py
Context.scope
def scope(self, cleanup=True): """This helper method can be used with the context object to promote it to the current thread local (see :func:`get_current_context`). The default behavior of this is to invoke the cleanup functions which can be disabled by setting `cleanup` to `False`. The cleanup functions are typically used for things such as closing file handles. If the cleanup is intended the context object can also be directly used as a context manager. Example usage:: with ctx.scope(): assert get_current_context() is ctx This is equivalent:: with ctx: assert get_current_context() is ctx .. versionadded:: 5.0 :param cleanup: controls if the cleanup functions should be run or not. The default is to run these functions. In some situations the context only wants to be temporarily pushed in which case this can be disabled. Nested pushes automatically defer the cleanup. """ if not cleanup: self._depth += 1 try: with self as rv: yield rv finally: if not cleanup: self._depth -= 1
python
def scope(self, cleanup=True): """This helper method can be used with the context object to promote it to the current thread local (see :func:`get_current_context`). The default behavior of this is to invoke the cleanup functions which can be disabled by setting `cleanup` to `False`. The cleanup functions are typically used for things such as closing file handles. If the cleanup is intended the context object can also be directly used as a context manager. Example usage:: with ctx.scope(): assert get_current_context() is ctx This is equivalent:: with ctx: assert get_current_context() is ctx .. versionadded:: 5.0 :param cleanup: controls if the cleanup functions should be run or not. The default is to run these functions. In some situations the context only wants to be temporarily pushed in which case this can be disabled. Nested pushes automatically defer the cleanup. """ if not cleanup: self._depth += 1 try: with self as rv: yield rv finally: if not cleanup: self._depth -= 1
['def', 'scope', '(', 'self', ',', 'cleanup', '=', 'True', ')', ':', 'if', 'not', 'cleanup', ':', 'self', '.', '_depth', '+=', '1', 'try', ':', 'with', 'self', 'as', 'rv', ':', 'yield', 'rv', 'finally', ':', 'if', 'not', 'cleanup', ':', 'self', '.', '_depth', '-=', '1']
This helper method can be used with the context object to promote it to the current thread local (see :func:`get_current_context`). The default behavior of this is to invoke the cleanup functions which can be disabled by setting `cleanup` to `False`. The cleanup functions are typically used for things such as closing file handles. If the cleanup is intended the context object can also be directly used as a context manager. Example usage:: with ctx.scope(): assert get_current_context() is ctx This is equivalent:: with ctx: assert get_current_context() is ctx .. versionadded:: 5.0 :param cleanup: controls if the cleanup functions should be run or not. The default is to run these functions. In some situations the context only wants to be temporarily pushed in which case this can be disabled. Nested pushes automatically defer the cleanup.
['This', 'helper', 'method', 'can', 'be', 'used', 'with', 'the', 'context', 'object', 'to', 'promote', 'it', 'to', 'the', 'current', 'thread', 'local', '(', 'see', ':', 'func', ':', 'get_current_context', ')', '.', 'The', 'default', 'behavior', 'of', 'this', 'is', 'to', 'invoke', 'the', 'cleanup', 'functions', 'which', 'can', 'be', 'disabled', 'by', 'setting', 'cleanup', 'to', 'False', '.', 'The', 'cleanup', 'functions', 'are', 'typically', 'used', 'for', 'things', 'such', 'as', 'closing', 'file', 'handles', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/core.py#L355-L390
4,848
CellProfiler/centrosome
centrosome/cpmorphology.py
convex_hull_image
def convex_hull_image(image): '''Given a binary image, return an image of the convex hull''' labels = image.astype(int) points, counts = convex_hull(labels, np.array([1])) output = np.zeros(image.shape, int) for i in range(counts[0]): inext = (i+1) % counts[0] draw_line(output, points[i,1:], points[inext,1:],1) output = fill_labeled_holes(output) return output == 1
python
def convex_hull_image(image): '''Given a binary image, return an image of the convex hull''' labels = image.astype(int) points, counts = convex_hull(labels, np.array([1])) output = np.zeros(image.shape, int) for i in range(counts[0]): inext = (i+1) % counts[0] draw_line(output, points[i,1:], points[inext,1:],1) output = fill_labeled_holes(output) return output == 1
['def', 'convex_hull_image', '(', 'image', ')', ':', 'labels', '=', 'image', '.', 'astype', '(', 'int', ')', 'points', ',', 'counts', '=', 'convex_hull', '(', 'labels', ',', 'np', '.', 'array', '(', '[', '1', ']', ')', ')', 'output', '=', 'np', '.', 'zeros', '(', 'image', '.', 'shape', ',', 'int', ')', 'for', 'i', 'in', 'range', '(', 'counts', '[', '0', ']', ')', ':', 'inext', '=', '(', 'i', '+', '1', ')', '%', 'counts', '[', '0', ']', 'draw_line', '(', 'output', ',', 'points', '[', 'i', ',', '1', ':', ']', ',', 'points', '[', 'inext', ',', '1', ':', ']', ',', '1', ')', 'output', '=', 'fill_labeled_holes', '(', 'output', ')', 'return', 'output', '==', '1']
Given a binary image, return an image of the convex hull
['Given', 'a', 'binary', 'image', 'return', 'an', 'image', 'of', 'the', 'convex', 'hull']
train
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L598-L607
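A small sanity sketch on a binary mask; three points span a triangle and the result is the filled hull:

    import numpy as np

    mask = np.zeros((20, 20), bool)
    mask[5, 5] = mask[5, 15] = mask[15, 10] = True
    hull = convex_hull_image(mask)
    # hull is a boolean image covering the filled triangle, so it is at
    # least as large as the input point set.
    print(hull.sum() >= mask.sum())   # True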
4,849
FNNDSC/med2image
med2image/systemMisc.py
find
def find(pattern, root=os.curdir): '''Helper around 'locate' ''' hits = '' for F in locate(pattern, root): hits = hits + F + '\n' l = hits.split('\n') if(not len(l[-1])): l.pop() if len(l) == 1 and not len(l[0]): return None else: return l
python
def find(pattern, root=os.curdir): '''Helper around 'locate' ''' hits = '' for F in locate(pattern, root): hits = hits + F + '\n' l = hits.split('\n') if(not len(l[-1])): l.pop() if len(l) == 1 and not len(l[0]): return None else: return l
['def', 'find', '(', 'pattern', ',', 'root', '=', 'os', '.', 'curdir', ')', ':', 'hits', '=', "''", 'for', 'F', 'in', 'locate', '(', 'pattern', ',', 'root', ')', ':', 'hits', '=', 'hits', '+', 'F', '+', "'\\n'", 'l', '=', 'hits', '.', 'split', '(', "'\\n'", ')', 'if', '(', 'not', 'len', '(', 'l', '[', '-', '1', ']', ')', ')', ':', 'l', '.', 'pop', '(', ')', 'if', 'len', '(', 'l', ')', '==', '1', 'and', 'not', 'len', '(', 'l', '[', '0', ']', ')', ':', 'return', 'None', 'else', ':', 'return', 'l']
Helper around 'locate'
['Helper', 'around', 'locate']
train
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/systemMisc.py#L1136-L1146
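A usage sketch; the pattern and root are illustrative, and the None return distinguishes "no hits" from an empty-string artifact:

    matches = find('*.dcm', root='/tmp/dicom')
    if matches is None:
        print('no matching files')
    else:
        for path in matches:
            print(path)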
4,850
gbowerman/azurerm
azurerm/amsrp.py
create_media_assetfile
def create_media_assetfile(access_token, parent_asset_id, name, is_primary="false", \ is_encrypted="false", encryption_scheme="None", encryptionkey_id="None"): '''Create Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): Media Service Parent Asset ID. name (str): Media Service Asset Name. is_primary (str): Media Service Primary Flag. is_encrypted (str): Media Service Encryption Flag. encryption_scheme (str): Media Service Encryption Scheme. encryptionkey_id (str): Media Service Encryption Key ID. Returns: HTTP response. JSON body. ''' path = '/Files' endpoint = ''.join([ams_rest_endpoint, path]) if encryption_scheme == "StorageEncryption": body = '{ \ "IsEncrypted": "' + is_encrypted + '", \ "EncryptionScheme": "' + encryption_scheme + '", \ "EncryptionVersion": "' + "1.0" + '", \ "EncryptionKeyId": "' + encryptionkey_id + '", \ "IsPrimary": "' + is_primary + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' else: body = '{ \ "IsPrimary": "' + is_primary + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' return do_ams_post(endpoint, path, body, access_token)
python
def create_media_assetfile(access_token, parent_asset_id, name, is_primary="false", \ is_encrypted="false", encryption_scheme="None", encryptionkey_id="None"): '''Create Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): Media Service Parent Asset ID. name (str): Media Service Asset Name. is_primary (str): Media Service Primary Flag. is_encrypted (str): Media Service Encryption Flag. encryption_scheme (str): Media Service Encryption Scheme. encryptionkey_id (str): Media Service Encryption Key ID. Returns: HTTP response. JSON body. ''' path = '/Files' endpoint = ''.join([ams_rest_endpoint, path]) if encryption_scheme == "StorageEncryption": body = '{ \ "IsEncrypted": "' + is_encrypted + '", \ "EncryptionScheme": "' + encryption_scheme + '", \ "EncryptionVersion": "' + "1.0" + '", \ "EncryptionKeyId": "' + encryptionkey_id + '", \ "IsPrimary": "' + is_primary + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' else: body = '{ \ "IsPrimary": "' + is_primary + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' return do_ams_post(endpoint, path, body, access_token)
['def', 'create_media_assetfile', '(', 'access_token', ',', 'parent_asset_id', ',', 'name', ',', 'is_primary', '=', '"false"', ',', 'is_encrypted', '=', '"false"', ',', 'encryption_scheme', '=', '"None"', ',', 'encryptionkey_id', '=', '"None"', ')', ':', 'path', '=', "'/Files'", 'endpoint', '=', "''", '.', 'join', '(', '[', 'ams_rest_endpoint', ',', 'path', ']', ')', 'if', 'encryption_scheme', '==', '"StorageEncryption"', ':', 'body', '=', '\'{ \\\n\t\t\t"IsEncrypted": "\'', '+', 'is_encrypted', '+', '\'", \\\n\t\t\t"EncryptionScheme": "\'', '+', 'encryption_scheme', '+', '\'", \\\n\t\t\t"EncryptionVersion": "\'', '+', '"1.0"', '+', '\'", \\\n\t\t\t"EncryptionKeyId": "\'', '+', 'encryptionkey_id', '+', '\'", \\\n\t\t\t"IsPrimary": "\'', '+', 'is_primary', '+', '\'", \\\n\t\t\t"MimeType": "video/mp4", \\\n\t\t\t"Name": "\'', '+', 'name', '+', '\'", \\\n\t\t\t"ParentAssetId": "\'', '+', 'parent_asset_id', '+', '\'" \\\n\t\t}\'', 'else', ':', 'body', '=', '\'{ \\\n\t\t\t"IsPrimary": "\'', '+', 'is_primary', '+', '\'", \\\n\t\t\t"MimeType": "video/mp4", \\\n\t\t\t"Name": "\'', '+', 'name', '+', '\'", \\\n\t\t\t"ParentAssetId": "\'', '+', 'parent_asset_id', '+', '\'" \\\n\t\t}\'', 'return', 'do_ams_post', '(', 'endpoint', ',', 'path', ',', 'body', ',', 'access_token', ')']
Create Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): Media Service Parent Asset ID. name (str): Media Service Asset Name. is_primary (str): Media Service Primary Flag. is_encrypted (str): Media Service Encryption Flag. encryption_scheme (str): Media Service Encryption Scheme. encryptionkey_id (str): Media Service Encryption Key ID. Returns: HTTP response. JSON body.
['Create', 'Media', 'Service', 'Asset', 'File', '.']
train
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L421-L457
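A hedged call sketch; the token and parent asset ID are placeholders, and the return value is expected to be an HTTP response carrying a JSON body, per the docstring:

    access_token = 'PLACEHOLDER-AMS-TOKEN'   # normally obtained via the auth helpers
    response = create_media_assetfile(
        access_token,
        'nb:cid:UUID:00000000-0000-0000-0000-000000000000',  # placeholder asset id
        'movie.mp4',
        is_primary='true')
    print(response.status_code)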
4,851
ejeschke/ginga
ginga/Bindings.py
ImageViewBindings.pi_zoom_origin
def pi_zoom_origin(self, viewer, event, msg=True): """Like pi_zoom(), but pans the image as well to keep the coordinate under the cursor in that same position relative to the window. """ origin = (event.data_x, event.data_y) return self._pinch_zoom_rotate(viewer, event.state, event.rot_deg, event.scale, msg=msg, origin=origin)
python
def pi_zoom_origin(self, viewer, event, msg=True): """Like pi_zoom(), but pans the image as well to keep the coordinate under the cursor in that same position relative to the window. """ origin = (event.data_x, event.data_y) return self._pinch_zoom_rotate(viewer, event.state, event.rot_deg, event.scale, msg=msg, origin=origin)
['def', 'pi_zoom_origin', '(', 'self', ',', 'viewer', ',', 'event', ',', 'msg', '=', 'True', ')', ':', 'origin', '=', '(', 'event', '.', 'data_x', ',', 'event', '.', 'data_y', ')', 'return', 'self', '.', '_pinch_zoom_rotate', '(', 'viewer', ',', 'event', '.', 'state', ',', 'event', '.', 'rot_deg', ',', 'event', '.', 'scale', ',', 'msg', '=', 'msg', ',', 'origin', '=', 'origin', ')']
Like pi_zoom(), but pans the image as well to keep the coordinate under the cursor in that same position relative to the window.
['Like', 'pi_zoom', '()', 'but', 'pans', 'the', 'image', 'as', 'well', 'to', 'keep', 'the', 'coordinate', 'under', 'the', 'cursor', 'in', 'that', 'same', 'position', 'relative', 'to', 'the', 'window', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L2208-L2215
4,852
Galarzaa90/tibia.py
tibiapy/world.py
World._parse_tables
def _parse_tables(cls, parsed_content): """ Parses the information tables found in a world's information page. Parameters ---------- parsed_content: :class:`bs4.BeautifulSoup` A :class:`BeautifulSoup` object containing all the content. Returns ------- :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]] A dictionary containing all the table rows, with the table headers as keys. """ tables = parsed_content.find_all('div', attrs={'class': 'TableContainer'}) output = OrderedDict() for table in tables: title = table.find("div", attrs={'class': 'Text'}).text title = title.split("[")[0].strip() inner_table = table.find("div", attrs={'class': 'InnerTableContainer'}) output[title] = inner_table.find_all("tr") return output
python
def _parse_tables(cls, parsed_content): """ Parses the information tables found in a world's information page. Parameters ---------- parsed_content: :class:`bs4.BeautifulSoup` A :class:`BeautifulSoup` object containing all the content. Returns ------- :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]] A dictionary containing all the table rows, with the table headers as keys. """ tables = parsed_content.find_all('div', attrs={'class': 'TableContainer'}) output = OrderedDict() for table in tables: title = table.find("div", attrs={'class': 'Text'}).text title = title.split("[")[0].strip() inner_table = table.find("div", attrs={'class': 'InnerTableContainer'}) output[title] = inner_table.find_all("tr") return output
['def', '_parse_tables', '(', 'cls', ',', 'parsed_content', ')', ':', 'tables', '=', 'parsed_content', '.', 'find_all', '(', "'div'", ',', 'attrs', '=', '{', "'class'", ':', "'TableContainer'", '}', ')', 'output', '=', 'OrderedDict', '(', ')', 'for', 'table', 'in', 'tables', ':', 'title', '=', 'table', '.', 'find', '(', '"div"', ',', 'attrs', '=', '{', "'class'", ':', "'Text'", '}', ')', '.', 'text', 'title', '=', 'title', '.', 'split', '(', '"["', ')', '[', '0', ']', '.', 'strip', '(', ')', 'inner_table', '=', 'table', '.', 'find', '(', '"div"', ',', 'attrs', '=', '{', "'class'", ':', "'InnerTableContainer'", '}', ')', 'output', '[', 'title', ']', '=', 'inner_table', '.', 'find_all', '(', '"tr"', ')', 'return', 'output']
Parses the information tables found in a world's information page. Parameters ---------- parsed_content: :class:`bs4.BeautifulSoup` A :class:`BeautifulSoup` object containing all the content. Returns ------- :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]] A dictionary containing all the table rows, with the table headers as keys.
['Parses', 'the', 'information', 'tables', 'found', 'in', 'a', 'world', 's', 'information', 'page', '.']
train
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/world.py#L371-L392
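A shape sketch with illustrative markup mirroring the CSS classes the parser looks for; the cls first parameter suggests a classmethod, which is assumed here:

    from bs4 import BeautifulSoup

    html = '''<div class="TableContainer">
      <div class="Text">Online Players [247]</div>
      <div class="InnerTableContainer">
        <table><tr><td>Galarzaa</td></tr></table>
      </div>
    </div>'''
    tables = World._parse_tables(BeautifulSoup(html, 'html.parser'))
    print(list(tables))                    # ['Online Players']
    print(len(tables['Online Players']))   # 1 row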
4,853
dmirecki/pyMorfologik
pymorfologik/morfologik.py
Morfologik.stem
def stem(self, words, parser, **kwargs): """ Get stems for the words using a given parser Example: from .parsing import ListParser parser = ListParser() stemmer = Morfologik() stemmer.stem(['ja tańczę a ona śpi], parser) [ ('ja': ['ja']), ('tańczę': ['tańczyć']), ('a': ['a']), ('ona': ['on']), ('śpi': ['spać']) ] """ output = self._run_morfologik(words) return parser.parse(output, **kwargs)
python
def stem(self, words, parser, **kwargs): """ Get stems for the words using a given parser Example: from .parsing import ListParser parser = ListParser() stemmer = Morfologik() stemmer.stem(['ja tańczę a ona śpi], parser) [ ('ja': ['ja']), ('tańczę': ['tańczyć']), ('a': ['a']), ('ona': ['on']), ('śpi': ['spać']) ] """ output = self._run_morfologik(words) return parser.parse(output, **kwargs)
['def', 'stem', '(', 'self', ',', 'words', ',', 'parser', ',', '*', '*', 'kwargs', ')', ':', 'output', '=', 'self', '.', '_run_morfologik', '(', 'words', ')', 'return', 'parser', '.', 'parse', '(', 'output', ',', '*', '*', 'kwargs', ')']
Get stems for the words using a given parser Example: from .parsing import ListParser parser = ListParser() stemmer = Morfologik() stemmer.stem(['ja tańczę a ona śpi'], parser) [ ('ja': ['ja']), ('tańczę': ['tańczyć']), ('a': ['a']), ('ona': ['on']), ('śpi': ['spać']) ]
['Get', 'stems', 'for', 'the', 'words', 'using', 'a', 'given', 'parser']
train
https://github.com/dmirecki/pyMorfologik/blob/e4d93a82e8b4c7a108f01e0456fbeb8024df0259/pymorfologik/morfologik.py#L29-L49
4,854
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
FakeFilesystem.makedir
def makedir(self, dir_name, mode=PERM_DEF): """Create a leaf Fake directory. Args: dir_name: (str) Name of directory to create. Relative paths are assumed to be relative to '/'. mode: (int) Mode to create directory with. This argument defaults to 0o777. The umask is applied to this mode. Raises: OSError: if the directory name is invalid or parent directory is read only or as per :py:meth:`add_object`. """ dir_name = make_string_path(dir_name) ends_with_sep = self.ends_with_path_separator(dir_name) dir_name = self._path_without_trailing_separators(dir_name) if not dir_name: self.raise_os_error(errno.ENOENT, '') if self.is_windows_fs: dir_name = self.absnormpath(dir_name) parent_dir, _ = self.splitpath(dir_name) if parent_dir: base_dir = self.normpath(parent_dir) ellipsis = self._matching_string( parent_dir, self.path_separator + '..') if parent_dir.endswith(ellipsis) and not self.is_windows_fs: base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis) if not self.exists(base_dir): self.raise_os_error(errno.ENOENT, base_dir) dir_name = self.absnormpath(dir_name) if self.exists(dir_name, check_link=True): if self.is_windows_fs and dir_name == self.path_separator: error_nr = errno.EACCES else: error_nr = errno.EEXIST if ends_with_sep and self.is_macos and not self.exists(dir_name): # to avoid EEXIST exception, remove the link self.remove_object(dir_name) else: self.raise_os_error(error_nr, dir_name) head, tail = self.splitpath(dir_name) self.add_object( head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
python
def makedir(self, dir_name, mode=PERM_DEF): """Create a leaf Fake directory. Args: dir_name: (str) Name of directory to create. Relative paths are assumed to be relative to '/'. mode: (int) Mode to create directory with. This argument defaults to 0o777. The umask is applied to this mode. Raises: OSError: if the directory name is invalid or parent directory is read only or as per :py:meth:`add_object`. """ dir_name = make_string_path(dir_name) ends_with_sep = self.ends_with_path_separator(dir_name) dir_name = self._path_without_trailing_separators(dir_name) if not dir_name: self.raise_os_error(errno.ENOENT, '') if self.is_windows_fs: dir_name = self.absnormpath(dir_name) parent_dir, _ = self.splitpath(dir_name) if parent_dir: base_dir = self.normpath(parent_dir) ellipsis = self._matching_string( parent_dir, self.path_separator + '..') if parent_dir.endswith(ellipsis) and not self.is_windows_fs: base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis) if not self.exists(base_dir): self.raise_os_error(errno.ENOENT, base_dir) dir_name = self.absnormpath(dir_name) if self.exists(dir_name, check_link=True): if self.is_windows_fs and dir_name == self.path_separator: error_nr = errno.EACCES else: error_nr = errno.EEXIST if ends_with_sep and self.is_macos and not self.exists(dir_name): # to avoid EEXIST exception, remove the link self.remove_object(dir_name) else: self.raise_os_error(error_nr, dir_name) head, tail = self.splitpath(dir_name) self.add_object( head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
['def', 'makedir', '(', 'self', ',', 'dir_name', ',', 'mode', '=', 'PERM_DEF', ')', ':', 'dir_name', '=', 'make_string_path', '(', 'dir_name', ')', 'ends_with_sep', '=', 'self', '.', 'ends_with_path_separator', '(', 'dir_name', ')', 'dir_name', '=', 'self', '.', '_path_without_trailing_separators', '(', 'dir_name', ')', 'if', 'not', 'dir_name', ':', 'self', '.', 'raise_os_error', '(', 'errno', '.', 'ENOENT', ',', "''", ')', 'if', 'self', '.', 'is_windows_fs', ':', 'dir_name', '=', 'self', '.', 'absnormpath', '(', 'dir_name', ')', 'parent_dir', ',', '_', '=', 'self', '.', 'splitpath', '(', 'dir_name', ')', 'if', 'parent_dir', ':', 'base_dir', '=', 'self', '.', 'normpath', '(', 'parent_dir', ')', 'ellipsis', '=', 'self', '.', '_matching_string', '(', 'parent_dir', ',', 'self', '.', 'path_separator', '+', "'..'", ')', 'if', 'parent_dir', '.', 'endswith', '(', 'ellipsis', ')', 'and', 'not', 'self', '.', 'is_windows_fs', ':', 'base_dir', ',', 'dummy_dotdot', ',', '_', '=', 'parent_dir', '.', 'partition', '(', 'ellipsis', ')', 'if', 'not', 'self', '.', 'exists', '(', 'base_dir', ')', ':', 'self', '.', 'raise_os_error', '(', 'errno', '.', 'ENOENT', ',', 'base_dir', ')', 'dir_name', '=', 'self', '.', 'absnormpath', '(', 'dir_name', ')', 'if', 'self', '.', 'exists', '(', 'dir_name', ',', 'check_link', '=', 'True', ')', ':', 'if', 'self', '.', 'is_windows_fs', 'and', 'dir_name', '==', 'self', '.', 'path_separator', ':', 'error_nr', '=', 'errno', '.', 'EACCES', 'else', ':', 'error_nr', '=', 'errno', '.', 'EEXIST', 'if', 'ends_with_sep', 'and', 'self', '.', 'is_macos', 'and', 'not', 'self', '.', 'exists', '(', 'dir_name', ')', ':', '# to avoid EEXIST exception, remove the link', 'self', '.', 'remove_object', '(', 'dir_name', ')', 'else', ':', 'self', '.', 'raise_os_error', '(', 'error_nr', ',', 'dir_name', ')', 'head', ',', 'tail', '=', 'self', '.', 'splitpath', '(', 'dir_name', ')', 'self', '.', 'add_object', '(', 'head', ',', 'FakeDirectory', '(', 'tail', ',', 'mode', '&', '~', 'self', '.', 'umask', ',', 'filesystem', '=', 'self', ')', ')']
Create a leaf Fake directory. Args: dir_name: (str) Name of directory to create. Relative paths are assumed to be relative to '/'. mode: (int) Mode to create directory with. This argument defaults to 0o777. The umask is applied to this mode. Raises: OSError: if the directory name is invalid or parent directory is read only or as per :py:meth:`add_object`.
['Create', 'a', 'leaf', 'Fake', 'directory', '.']
train
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L2748-L2793
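makedir creates only the leaf, so parents must already exist; a sketch against pyfakefs's classic API:

    from pyfakefs import fake_filesystem

    fs = fake_filesystem.FakeFilesystem()
    fake_os = fake_filesystem.FakeOsModule(fs)
    fs.makedir('/data')                      # parent '/' exists
    fs.makedir('/data/sub')                  # fine: '/data' now exists
    print(fake_os.path.isdir('/data/sub'))   # True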
4,855
collectiveacuity/labPack
labpack/storage/appdata.py
appdataClient._delete
def _delete(self, _file_path, _method_title, _record_key): ''' a helper method for non-blocking deletion of files :param _file_path: string with path to file to remove :param _method_title: string with name of method calling _delete :param _record_key: string with name of record key to delete :return: None ''' import os from time import sleep current_dir = os.path.split(_file_path)[0] count = 0 retry_count = 10 while True: try: os.remove(_file_path) while current_dir != self.collection_folder: if not os.listdir(current_dir): os.rmdir(current_dir) current_dir = os.path.split(current_dir)[0] else: break break except PermissionError: sleep(.05) count += 1 if count > retry_count: raise Exception('%s failed to delete %s' % (_method_title, _record_key)) os._exit(0)
python
def _delete(self, _file_path, _method_title, _record_key): ''' a helper method for non-blocking deletion of files :param _file_path: string with path to file to remove :param _method_title: string with name of method calling _delete :param _record_key: string with name of record key to delete :return: None ''' import os from time import sleep current_dir = os.path.split(_file_path)[0] count = 0 retry_count = 10 while True: try: os.remove(_file_path) while current_dir != self.collection_folder: if not os.listdir(current_dir): os.rmdir(current_dir) current_dir = os.path.split(current_dir)[0] else: break break except PermissionError: sleep(.05) count += 1 if count > retry_count: raise Exception('%s failed to delete %s' % (_method_title, _record_key)) os._exit(0)
['def', '_delete', '(', 'self', ',', '_file_path', ',', '_method_title', ',', '_record_key', ')', ':', 'import', 'os', 'from', 'time', 'import', 'sleep', 'current_dir', '=', 'os', '.', 'path', '.', 'split', '(', '_file_path', ')', '[', '0', ']', 'count', '=', '0', 'retry_count', '=', '10', 'while', 'True', ':', 'try', ':', 'os', '.', 'remove', '(', '_file_path', ')', 'while', 'current_dir', '!=', 'self', '.', 'collection_folder', ':', 'if', 'not', 'os', '.', 'listdir', '(', 'current_dir', ')', ':', 'os', '.', 'rmdir', '(', 'current_dir', ')', 'current_dir', '=', 'os', '.', 'path', '.', 'split', '(', 'current_dir', ')', '[', '0', ']', 'else', ':', 'break', 'break', 'except', 'PermissionError', ':', 'sleep', '(', '.05', ')', 'count', '+=', '1', 'if', 'count', '>', 'retry_count', ':', 'raise', 'Exception', '(', "'%s failed to delete %s'", '%', '(', '_method_title', ',', '_record_key', ')', ')', 'os', '.', '_exit', '(', '0', ')']
a helper method for non-blocking deletion of files :param _file_path: string with path to file to remove :param _method_title: string with name of method calling _delete :param _record_key: string with name of record key to delete :return: None
['a', 'helper', 'method', 'for', 'non', '-', 'blocking', 'deletion', 'of', 'files', ':', 'param', '_file_path', ':', 'string', 'with', 'path', 'to', 'file', 'to', 'remove', ':', 'param', '_method_title', ':', 'string', 'with', 'name', 'of', 'method', 'calling', '_delete', ':', 'param', '_record_key', ':', 'string', 'with', 'name', 'of', 'record', 'key', 'to', 'delete', ':', 'return', ':', 'None']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/appdata.py#L131-L164
4,856
ladybug-tools/ladybug
ladybug/skymodel.py
get_relative_airmass
def get_relative_airmass(altitude, model='kastenyoung1989'): """ Gives the relative (not pressure-corrected) airmass. Gives the airmass at sea-level when given a sun altitude angle (in degrees). The ``model`` variable allows selection of different airmass models (described below). If ``model`` is not included or is not valid, the default model is 'kastenyoung1989'. Note: [1] Fritz Kasten. "A New Table and Approximation Formula for the Relative Optical Air Mass". Technical Report 136, Hanover, N.H.: U.S. Army Material Command, CRREL. [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric Photometry of the Brighter Planets," The Astronomical Journal, vol. 72, pp. 945-950, 1967. [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables and approximation formula". Applied Optics 28:4735-4738 [4] C. Gueymard, "Critical analysis and performance assessment of clear sky solar irradiance models using theoretical and measured data," Solar Energy, vol. 51, pp. 121-138, 1993. [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33, pp. 1108-1110, Feb 1994. [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20, [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis" Sandia Report, (2012). Args: altitude: numeric Altitude angle of the sun in degrees. Note that some models use the apparent (refraction corrected) altitude angle, and some models use the true (not refraction-corrected) altitude angle. See model descriptions to determine which type of altitude angle is required. Apparent altitude angles must be calculated at sea level. model: string, default 'kastenyoung1989' Available models include the following: * 'simple' - secant(apparent altitude angle) - Note that this gives -inf at altitude=0 * 'kasten1966' - See reference [1] - requires apparent sun altitude * 'youngirvine1967' - See reference [2] - requires true sun altitude * 'kastenyoung1989' - See reference [3] - requires apparent sun altitude * 'gueymard1993' - See reference [4] - requires apparent sun altitude * 'young1994' - See reference [5] - requries true sun altitude * 'pickering2002' - See reference [6] - requires apparent sun altitude Returns: airmass_relative: numeric Relative airmass at sea level. Will return None for any altitude angle smaller than 0 degrees. """ if altitude < 0: return None else: alt_rad = math.radians(altitude) model = model.lower() if 'kastenyoung1989' == model: am = (1.0 / (math.sin(alt_rad) + 0.50572*(((6.07995 + altitude) ** - 1.6364)))) elif 'kasten1966' == model: am = 1.0 / (math.sin(alt_rad) + 0.15*((3.885 + altitude) ** - 1.253)) elif 'simple' == model: am = 1.0 / math.sin(altitude) elif 'pickering2002' == model: am = (1.0 / (math.sin(math.radians(altitude + 244.0 / (165 + 47.0 * altitude ** 1.1))))) elif 'youngirvine1967' == model: sec_zen = 1.0 / math.sin(alt_rad) am = sec_zen * (1 - 0.0012 * (sec_zen * sec_zen - 1)) elif 'young1994' == model: am = ((1.002432*((math.sin(alt_rad)) ** 2) + 0.148386*(math.sin(alt_rad)) + 0.0096467) / (math.sin(alt_rad) ** 3 + 0.149864*(math.sin(alt_rad) ** 2) + 0.0102963*(math.sin(alt_rad)) + 0.000303978)) elif 'gueymard1993' == model: am = (1.0 / (math.sin(alt_rad) + 0.00176759*(90 - altitude)*( (94.37515 - (90 - altitude)) ** - 1.21563))) else: raise ValueError('%s is not a valid model for relativeairmass', model) return am
python
def get_relative_airmass(altitude, model='kastenyoung1989'): """ Gives the relative (not pressure-corrected) airmass. Gives the airmass at sea-level when given a sun altitude angle (in degrees). The ``model`` variable allows selection of different airmass models (described below). If ``model`` is not included or is not valid, the default model is 'kastenyoung1989'. Note: [1] Fritz Kasten. "A New Table and Approximation Formula for the Relative Optical Air Mass". Technical Report 136, Hanover, N.H.: U.S. Army Material Command, CRREL. [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric Photometry of the Brighter Planets," The Astronomical Journal, vol. 72, pp. 945-950, 1967. [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables and approximation formula". Applied Optics 28:4735-4738 [4] C. Gueymard, "Critical analysis and performance assessment of clear sky solar irradiance models using theoretical and measured data," Solar Energy, vol. 51, pp. 121-138, 1993. [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33, pp. 1108-1110, Feb 1994. [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20, [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis" Sandia Report, (2012). Args: altitude: numeric Altitude angle of the sun in degrees. Note that some models use the apparent (refraction corrected) altitude angle, and some models use the true (not refraction-corrected) altitude angle. See model descriptions to determine which type of altitude angle is required. Apparent altitude angles must be calculated at sea level. model: string, default 'kastenyoung1989' Available models include the following: * 'simple' - secant(apparent altitude angle) - Note that this gives -inf at altitude=0 * 'kasten1966' - See reference [1] - requires apparent sun altitude * 'youngirvine1967' - See reference [2] - requires true sun altitude * 'kastenyoung1989' - See reference [3] - requires apparent sun altitude * 'gueymard1993' - See reference [4] - requires apparent sun altitude * 'young1994' - See reference [5] - requries true sun altitude * 'pickering2002' - See reference [6] - requires apparent sun altitude Returns: airmass_relative: numeric Relative airmass at sea level. Will return None for any altitude angle smaller than 0 degrees. """ if altitude < 0: return None else: alt_rad = math.radians(altitude) model = model.lower() if 'kastenyoung1989' == model: am = (1.0 / (math.sin(alt_rad) + 0.50572*(((6.07995 + altitude) ** - 1.6364)))) elif 'kasten1966' == model: am = 1.0 / (math.sin(alt_rad) + 0.15*((3.885 + altitude) ** - 1.253)) elif 'simple' == model: am = 1.0 / math.sin(altitude) elif 'pickering2002' == model: am = (1.0 / (math.sin(math.radians(altitude + 244.0 / (165 + 47.0 * altitude ** 1.1))))) elif 'youngirvine1967' == model: sec_zen = 1.0 / math.sin(alt_rad) am = sec_zen * (1 - 0.0012 * (sec_zen * sec_zen - 1)) elif 'young1994' == model: am = ((1.002432*((math.sin(alt_rad)) ** 2) + 0.148386*(math.sin(alt_rad)) + 0.0096467) / (math.sin(alt_rad) ** 3 + 0.149864*(math.sin(alt_rad) ** 2) + 0.0102963*(math.sin(alt_rad)) + 0.000303978)) elif 'gueymard1993' == model: am = (1.0 / (math.sin(alt_rad) + 0.00176759*(90 - altitude)*( (94.37515 - (90 - altitude)) ** - 1.21563))) else: raise ValueError('%s is not a valid model for relativeairmass', model) return am
['def', 'get_relative_airmass', '(', 'altitude', ',', 'model', '=', "'kastenyoung1989'", ')', ':', 'if', 'altitude', '<', '0', ':', 'return', 'None', 'else', ':', 'alt_rad', '=', 'math', '.', 'radians', '(', 'altitude', ')', 'model', '=', 'model', '.', 'lower', '(', ')', 'if', "'kastenyoung1989'", '==', 'model', ':', 'am', '=', '(', '1.0', '/', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', '+', '0.50572', '*', '(', '(', '(', '6.07995', '+', 'altitude', ')', '**', '-', '1.6364', ')', ')', ')', ')', 'elif', "'kasten1966'", '==', 'model', ':', 'am', '=', '1.0', '/', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', '+', '0.15', '*', '(', '(', '3.885', '+', 'altitude', ')', '**', '-', '1.253', ')', ')', 'elif', "'simple'", '==', 'model', ':', 'am', '=', '1.0', '/', 'math', '.', 'sin', '(', 'altitude', ')', 'elif', "'pickering2002'", '==', 'model', ':', 'am', '=', '(', '1.0', '/', '(', 'math', '.', 'sin', '(', 'math', '.', 'radians', '(', 'altitude', '+', '244.0', '/', '(', '165', '+', '47.0', '*', 'altitude', '**', '1.1', ')', ')', ')', ')', ')', 'elif', "'youngirvine1967'", '==', 'model', ':', 'sec_zen', '=', '1.0', '/', 'math', '.', 'sin', '(', 'alt_rad', ')', 'am', '=', 'sec_zen', '*', '(', '1', '-', '0.0012', '*', '(', 'sec_zen', '*', 'sec_zen', '-', '1', ')', ')', 'elif', "'young1994'", '==', 'model', ':', 'am', '=', '(', '(', '1.002432', '*', '(', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', ')', '**', '2', ')', '+', '0.148386', '*', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', ')', '+', '0.0096467', ')', '/', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', '**', '3', '+', '0.149864', '*', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', '**', '2', ')', '+', '0.0102963', '*', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', ')', '+', '0.000303978', ')', ')', 'elif', "'gueymard1993'", '==', 'model', ':', 'am', '=', '(', '1.0', '/', '(', 'math', '.', 'sin', '(', 'alt_rad', ')', '+', '0.00176759', '*', '(', '90', '-', 'altitude', ')', '*', '(', '(', '94.37515', '-', '(', '90', '-', 'altitude', ')', ')', '**', '-', '1.21563', ')', ')', ')', 'else', ':', 'raise', 'ValueError', '(', "'%s is not a valid model for relativeairmass'", ',', 'model', ')', 'return', 'am']
Gives the relative (not pressure-corrected) airmass. Gives the airmass at sea-level when given a sun altitude angle (in degrees). The ``model`` variable allows selection of different airmass models (described below). If ``model`` is not included or is not valid, the default model is 'kastenyoung1989'. Note: [1] Fritz Kasten. "A New Table and Approximation Formula for the Relative Optical Air Mass". Technical Report 136, Hanover, N.H.: U.S. Army Material Command, CRREL. [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric Photometry of the Brighter Planets," The Astronomical Journal, vol. 72, pp. 945-950, 1967. [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables and approximation formula". Applied Optics 28:4735-4738 [4] C. Gueymard, "Critical analysis and performance assessment of clear sky solar irradiance models using theoretical and measured data," Solar Energy, vol. 51, pp. 121-138, 1993. [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33, pp. 1108-1110, Feb 1994. [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20, [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis" Sandia Report, (2012). Args: altitude: numeric Altitude angle of the sun in degrees. Note that some models use the apparent (refraction corrected) altitude angle, and some models use the true (not refraction-corrected) altitude angle. See model descriptions to determine which type of altitude angle is required. Apparent altitude angles must be calculated at sea level. model: string, default 'kastenyoung1989' Available models include the following: * 'simple' - secant(apparent altitude angle) - Note that this gives -inf at altitude=0 * 'kasten1966' - See reference [1] - requires apparent sun altitude * 'youngirvine1967' - See reference [2] - requires true sun altitude * 'kastenyoung1989' - See reference [3] - requires apparent sun altitude * 'gueymard1993' - See reference [4] - requires apparent sun altitude * 'young1994' - See reference [5] - requires true sun altitude * 'pickering2002' - See reference [6] - requires apparent sun altitude Returns: airmass_relative: numeric Relative airmass at sea level. Will return None for any altitude angle smaller than 0 degrees.
['Gives', 'the', 'relative', '(', 'not', 'pressure', '-', 'corrected', ')', 'airmass', '.']
train
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L732-L820
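A minimal usage sketch for the record above, assuming the ladybug package is installed and that get_relative_airmass is importable from ladybug.skymodel as the record's path suggests:

from ladybug.skymodel import get_relative_airmass

# At the zenith (90 degrees) every model should return roughly 1.0;
# negative altitudes return None, per the docstring above.
for alt in (90.0, 45.0, 10.0, -5.0):
    print(alt, get_relative_airmass(alt, model='kastenyoung1989'))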
4,857
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.multi_get
def multi_get(self, urls, query_params=None, to_json=True): """Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue. """ return self._multi_request( MultiRequest._VERB_GET, urls, query_params, data=None, to_json=to_json, )
python
def multi_get(self, urls, query_params=None, to_json=True): """Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue. """ return self._multi_request( MultiRequest._VERB_GET, urls, query_params, data=None, to_json=to_json, )
['def', 'multi_get', '(', 'self', ',', 'urls', ',', 'query_params', '=', 'None', ',', 'to_json', '=', 'True', ')', ':', 'return', 'self', '.', '_multi_request', '(', 'MultiRequest', '.', '_VERB_GET', ',', 'urls', ',', 'query_params', ',', 'data', '=', 'None', ',', 'to_json', '=', 'to_json', ',', ')']
Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set, or of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
['Issue', 'multiple', 'GET', 'requests', '.']
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L203-L218
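A minimal usage sketch, assuming the threat_intel package is installed; the no-argument MultiRequest() construction and the example URL are assumptions, not taken from the record:

from threat_intel.util.http import MultiRequest

req = MultiRequest()  # constructor options (headers, rate limits) are assumed to have defaults here
# One query_params dict is paired with the single URL; to_json=True yields parsed dicts.
responses = req.multi_get(['https://httpbin.org/get'], query_params={'q': 'test'}, to_json=True)
print(responses[0])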
4,858
slarse/pdfebc-core
pdfebc_core/config_utils.py
valid_config_exists
def valid_config_exists(config_path=CONFIG_PATH): """Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not. """ if os.path.isfile(config_path): try: config = read_config(config_path) check_config(config) except (ConfigurationError, IOError): return False else: return False return True
python
def valid_config_exists(config_path=CONFIG_PATH): """Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not. """ if os.path.isfile(config_path): try: config = read_config(config_path) check_config(config) except (ConfigurationError, IOError): return False else: return False return True
['def', 'valid_config_exists', '(', 'config_path', '=', 'CONFIG_PATH', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'config_path', ')', ':', 'try', ':', 'config', '=', 'read_config', '(', 'config_path', ')', 'check_config', '(', 'config', ')', 'except', '(', 'ConfigurationError', ',', 'IOError', ')', ':', 'return', 'False', 'else', ':', 'return', 'False', 'return', 'True']
Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not.
['Verify', 'that', 'a', 'valid', 'config', 'file', 'exists', '.']
train
https://github.com/slarse/pdfebc-core/blob/fc40857bc42365b7434714333e37d7a3487603a0/pdfebc_core/config_utils.py#L171-L188
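A minimal usage sketch, assuming pdfebc_core is installed; the explicit path in the second call is hypothetical:

from pdfebc_core.config_utils import valid_config_exists

# With no argument, the default CONFIG_PATH is checked.
print(valid_config_exists())
# An explicit path can be passed instead (hypothetical file).
print(valid_config_exists('/tmp/pdfebc_config.cnf'))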
4,859
alertot/detectem
detectem/matchers.py
extract_named_group
def extract_named_group(text, named_group, matchers, return_presence=False): ''' Return ``named_group`` match from ``text`` reached by using a matcher from ``matchers``. It also supports matching without a ``named_group`` in a matcher, which sets ``presence=True``. ``presence`` is only returned if ``return_presence=True``. ''' presence = False for matcher in matchers: if isinstance(matcher, str): v = re.search(matcher, text, flags=re.DOTALL) if v: dict_result = v.groupdict() try: return dict_result[named_group] except KeyError: if dict_result: # It's other named group matching, discard continue else: # It's a matcher without named_group # but we can't return it until every matcher pass # because a following matcher could have a named group presence = True elif callable(matcher): v = matcher(text) if v: return v if return_presence and presence: return 'presence' return None
python
def extract_named_group(text, named_group, matchers, return_presence=False): ''' Return ``named_group`` match from ``text`` reached by using a matcher from ``matchers``. It also supports matching without a ``named_group`` in a matcher, which sets ``presence=True``. ``presence`` is only returned if ``return_presence=True``. ''' presence = False for matcher in matchers: if isinstance(matcher, str): v = re.search(matcher, text, flags=re.DOTALL) if v: dict_result = v.groupdict() try: return dict_result[named_group] except KeyError: if dict_result: # It's other named group matching, discard continue else: # It's a matcher without named_group # but we can't return it until every matcher pass # because a following matcher could have a named group presence = True elif callable(matcher): v = matcher(text) if v: return v if return_presence and presence: return 'presence' return None
['def', 'extract_named_group', '(', 'text', ',', 'named_group', ',', 'matchers', ',', 'return_presence', '=', 'False', ')', ':', 'presence', '=', 'False', 'for', 'matcher', 'in', 'matchers', ':', 'if', 'isinstance', '(', 'matcher', ',', 'str', ')', ':', 'v', '=', 're', '.', 'search', '(', 'matcher', ',', 'text', ',', 'flags', '=', 're', '.', 'DOTALL', ')', 'if', 'v', ':', 'dict_result', '=', 'v', '.', 'groupdict', '(', ')', 'try', ':', 'return', 'dict_result', '[', 'named_group', ']', 'except', 'KeyError', ':', 'if', 'dict_result', ':', "# It's other named group matching, discard", 'continue', 'else', ':', "# It's a matcher without named_group", "# but we can't return it until every matcher pass", '# because a following matcher could have a named group', 'presence', '=', 'True', 'elif', 'callable', '(', 'matcher', ')', ':', 'v', '=', 'matcher', '(', 'text', ')', 'if', 'v', ':', 'return', 'v', 'if', 'return_presence', 'and', 'presence', ':', 'return', "'presence'", 'return', 'None']
Return ``named_group`` match from ``text`` reached by using a matcher from ``matchers``. It also supports matching without a ``named_group`` in a matcher, which sets ``presence=True``. ``presence`` is only returned if ``return_presence=True``.
['Return', 'named_group', 'match', 'from', 'text', 'reached', 'by', 'using', 'a', 'matcher', 'from', 'matchers', '.']
train
https://github.com/alertot/detectem/blob/b1ecc3543b7c44ee76c4cac0d3896a7747bf86c1/detectem/matchers.py#L12-L48
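A minimal usage sketch, assuming detectem is installed; the text and regexes are made up for illustration:

from detectem.matchers import extract_named_group

text = 'jquery-3.4.1.min.js'
# A matcher with a named group returns the captured value.
print(extract_named_group(text, 'version', [r'jquery-(?P<version>[\d.]+)\.min\.js']))  # 3.4.1
# A matcher without a named group can only signal presence.
print(extract_named_group(text, 'version', [r'jquery'], return_presence=True))  # presence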
4,860
NuGrid/NuGridPy
nugridpy/mesa.py
history_data.find_TPs_and_DUPs
def find_TPs_and_DUPs(self, percent=5., makefig=False): """ Function which finds TPs and uses the calc_DUP_parameter function. To calculate DUP parameter evolution dependent of the star or core mass. Parameters ---------- fig : integer Figure number to plot. t0_model : integer First he-shell lum peak. percent : float dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user in this variable. The default is 5. makefig : do you want a figure to be made? Returns ------- TPmods : array model numbers at the peak of each thermal pulse DUPmods : array model numbers at the dredge-up, where dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user TPend : array model numbers at the end of the PDCZ for each TP lambda : array DUP efficiency for each pulse """ t0_model=self.find_first_TP() t0_idx=(t0_model-self.get("model_number")[0]) first_TP_he_lum=10**(self.get("log_LHe")[t0_idx]) he_lum=10**(self.get("log_LHe")[t0_idx:]) h_lum=10**(self.get("log_LH")[t0_idx:]) model=self.get("model_number")[t0_idx:] try: h1_bndry=self.get("h1_boundary_mass")[t0_idx:] except: try: h1_bndry=self.get('he_core_mass')[t0_idx:] except: pass # SJ find TPs by finding local maxima in He-burning luminosity and # checking that the he_lum is greater than the h_lum: maxima=[0] for i in range(2,len(model)-1): if he_lum[i] > he_lum[i-1] and he_lum[i] > he_lum[i+1]: if he_lum[i-1] > he_lum[i-2] and he_lum[i+1] > he_lum[i+2]: if he_lum[i] > h_lum[i]: maxima.append(i) # find DUPs when h-boundary first decreases by more than XX% of the total DUP # depth: DUPs=[] TPend=[] maxDUPs=[] for i in range(len(maxima)): idx1=maxima[i] try: idx2=maxima[i+1] except IndexError: idx2=-1 bound=h1_bndry[idx1:idx2] bound0=bound[0] if bound0==min(bound) or bound0 < min(bound): # then no DUP DUP=idx1 DUPs.append(DUP) maxDUPs.append(DUP) else: maxDUPs.append(idx1+bound.argmin()) # model number of deepest extend of 3DUP maxDUP=bound0-min(bound) # total mass dredged up in DUP db=bound - bound[0] db_maxDUP = old_div(db, maxDUP) DUP=np.where(db_maxDUP <= old_div(-float(percent),100.))[0][0] DUPs.append(DUP+idx1) # # Alternative definition, where envelope reaches mass coordinate # # where top of PDCZ had resided during the TP: # top=self.get('mx2_top')[idx1] # DUP=np.abs(bound-top).argmin() # DUPs.append(DUP+idx1) # find end of PDCZ by seeking from TP peak and checking mx2_bot: mx2b=self.get('mx2_bot')[t0_idx:][idx1:idx2] for i in range(len(mx2b)): if mx2b[i]==0.: endTP=i+idx1 TPend.append(endTP) break # 3DUP efficiency: lambd=[0.] for i in range(1,len(maxima)): dmenv = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i-1]] dmdredge = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i]] lambd.append(old_div(dmdredge,dmenv)) TPmods = maxima + t0_idx DUPmods = DUPs + t0_idx TPend = TPend + t0_idx return TPmods, DUPmods, TPend, lambd
python
def find_TPs_and_DUPs(self, percent=5., makefig=False): """ Function which finds TPs and uses the calc_DUP_parameter function. To calculate DUP parameter evolution dependent of the star or core mass. Parameters ---------- fig : integer Figure number to plot. t0_model : integer First he-shell lum peak. percent : float dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user in this variable. The default is 5. makefig : do you want a figure to be made? Returns ------- TPmods : array model numbers at the peak of each thermal pulse DUPmods : array model numbers at the dredge-up, where dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user TPend : array model numbers at the end of the PDCZ for each TP lambda : array DUP efficiency for each pulse """ t0_model=self.find_first_TP() t0_idx=(t0_model-self.get("model_number")[0]) first_TP_he_lum=10**(self.get("log_LHe")[t0_idx]) he_lum=10**(self.get("log_LHe")[t0_idx:]) h_lum=10**(self.get("log_LH")[t0_idx:]) model=self.get("model_number")[t0_idx:] try: h1_bndry=self.get("h1_boundary_mass")[t0_idx:] except: try: h1_bndry=self.get('he_core_mass')[t0_idx:] except: pass # SJ find TPs by finding local maxima in He-burning luminosity and # checking that the he_lum is greater than the h_lum: maxima=[0] for i in range(2,len(model)-1): if he_lum[i] > he_lum[i-1] and he_lum[i] > he_lum[i+1]: if he_lum[i-1] > he_lum[i-2] and he_lum[i+1] > he_lum[i+2]: if he_lum[i] > h_lum[i]: maxima.append(i) # find DUPs when h-boundary first decreases by more than XX% of the total DUP # depth: DUPs=[] TPend=[] maxDUPs=[] for i in range(len(maxima)): idx1=maxima[i] try: idx2=maxima[i+1] except IndexError: idx2=-1 bound=h1_bndry[idx1:idx2] bound0=bound[0] if bound0==min(bound) or bound0 < min(bound): # then no DUP DUP=idx1 DUPs.append(DUP) maxDUPs.append(DUP) else: maxDUPs.append(idx1+bound.argmin()) # model number of deepest extend of 3DUP maxDUP=bound0-min(bound) # total mass dredged up in DUP db=bound - bound[0] db_maxDUP = old_div(db, maxDUP) DUP=np.where(db_maxDUP <= old_div(-float(percent),100.))[0][0] DUPs.append(DUP+idx1) # # Alternative definition, where envelope reaches mass coordinate # # where top of PDCZ had resided during the TP: # top=self.get('mx2_top')[idx1] # DUP=np.abs(bound-top).argmin() # DUPs.append(DUP+idx1) # find end of PDCZ by seeking from TP peak and checking mx2_bot: mx2b=self.get('mx2_bot')[t0_idx:][idx1:idx2] for i in range(len(mx2b)): if mx2b[i]==0.: endTP=i+idx1 TPend.append(endTP) break # 3DUP efficiency: lambd=[0.] for i in range(1,len(maxima)): dmenv = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i-1]] dmdredge = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i]] lambd.append(old_div(dmdredge,dmenv)) TPmods = maxima + t0_idx DUPmods = DUPs + t0_idx TPend = TPend + t0_idx return TPmods, DUPmods, TPend, lambd
['def', 'find_TPs_and_DUPs', '(', 'self', ',', 'percent', '=', '5.', ',', 'makefig', '=', 'False', ')', ':', 't0_model', '=', 'self', '.', 'find_first_TP', '(', ')', 't0_idx', '=', '(', 't0_model', '-', 'self', '.', 'get', '(', '"model_number"', ')', '[', '0', ']', ')', 'first_TP_he_lum', '=', '10', '**', '(', 'self', '.', 'get', '(', '"log_LHe"', ')', '[', 't0_idx', ']', ')', 'he_lum', '=', '10', '**', '(', 'self', '.', 'get', '(', '"log_LHe"', ')', '[', 't0_idx', ':', ']', ')', 'h_lum', '=', '10', '**', '(', 'self', '.', 'get', '(', '"log_LH"', ')', '[', 't0_idx', ':', ']', ')', 'model', '=', 'self', '.', 'get', '(', '"model_number"', ')', '[', 't0_idx', ':', ']', 'try', ':', 'h1_bndry', '=', 'self', '.', 'get', '(', '"h1_boundary_mass"', ')', '[', 't0_idx', ':', ']', 'except', ':', 'try', ':', 'h1_bndry', '=', 'self', '.', 'get', '(', "'he_core_mass'", ')', '[', 't0_idx', ':', ']', 'except', ':', 'pass', '# SJ find TPs by finding local maxima in He-burning luminosity and', '# checking that the he_lum is greater than the h_lum:', 'maxima', '=', '[', '0', ']', 'for', 'i', 'in', 'range', '(', '2', ',', 'len', '(', 'model', ')', '-', '1', ')', ':', 'if', 'he_lum', '[', 'i', ']', '>', 'he_lum', '[', 'i', '-', '1', ']', 'and', 'he_lum', '[', 'i', ']', '>', 'he_lum', '[', 'i', '+', '1', ']', ':', 'if', 'he_lum', '[', 'i', '-', '1', ']', '>', 'he_lum', '[', 'i', '-', '2', ']', 'and', 'he_lum', '[', 'i', '+', '1', ']', '>', 'he_lum', '[', 'i', '+', '2', ']', ':', 'if', 'he_lum', '[', 'i', ']', '>', 'h_lum', '[', 'i', ']', ':', 'maxima', '.', 'append', '(', 'i', ')', '# find DUPs when h-boundary first decreases by more than XX% of the total DUP', '# depth:', 'DUPs', '=', '[', ']', 'TPend', '=', '[', ']', 'maxDUPs', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'maxima', ')', ')', ':', 'idx1', '=', 'maxima', '[', 'i', ']', 'try', ':', 'idx2', '=', 'maxima', '[', 'i', '+', '1', ']', 'except', 'IndexError', ':', 'idx2', '=', '-', '1', 'bound', '=', 'h1_bndry', '[', 'idx1', ':', 'idx2', ']', 'bound0', '=', 'bound', '[', '0', ']', 'if', 'bound0', '==', 'min', '(', 'bound', ')', 'or', 'bound0', '<', 'min', '(', 'bound', ')', ':', '# then no DUP', 'DUP', '=', 'idx1', 'DUPs', '.', 'append', '(', 'DUP', ')', 'maxDUPs', '.', 'append', '(', 'DUP', ')', 'else', ':', 'maxDUPs', '.', 'append', '(', 'idx1', '+', 'bound', '.', 'argmin', '(', ')', ')', '# model number of deepest extend of 3DUP', 'maxDUP', '=', 'bound0', '-', 'min', '(', 'bound', ')', '# total mass dredged up in DUP', 'db', '=', 'bound', '-', 'bound', '[', '0', ']', 'db_maxDUP', '=', 'old_div', '(', 'db', ',', 'maxDUP', ')', 'DUP', '=', 'np', '.', 'where', '(', 'db_maxDUP', '<=', 'old_div', '(', '-', 'float', '(', 'percent', ')', ',', '100.', ')', ')', '[', '0', ']', '[', '0', ']', 'DUPs', '.', 'append', '(', 'DUP', '+', 'idx1', ')', '# # Alternative definition, where envelope reaches mass coordinate', '# # where top of PDCZ had resided during the TP:', "# top=self.get('mx2_top')[idx1]", '# DUP=np.abs(bound-top).argmin()', '# DUPs.append(DUP+idx1)', '# find end of PDCZ by seeking from TP peak and checking mx2_bot:', 'mx2b', '=', 'self', '.', 'get', '(', "'mx2_bot'", ')', '[', 't0_idx', ':', ']', '[', 'idx1', ':', 'idx2', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'mx2b', ')', ')', ':', 'if', 'mx2b', '[', 'i', ']', '==', '0.', ':', 'endTP', '=', 'i', '+', 'idx1', 'TPend', '.', 'append', '(', 'endTP', ')', 'break', '# 3DUP efficiency:', 'lambd', '=', '[', '0.', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'maxima', ')', ')', ':', 'dmenv', '=', 'h1_bndry', '[', 'maxima', '[', 'i', ']', ']', '-', 'h1_bndry', '[', 'maxDUPs', '[', 'i', '-', '1', ']', ']', 'dmdredge', '=', 'h1_bndry', '[', 'maxima', '[', 'i', ']', ']', '-', 'h1_bndry', '[', 'maxDUPs', '[', 'i', ']', ']', 'lambd', '.', 'append', '(', 'old_div', '(', 'dmdredge', ',', 'dmenv', ')', ')', 'TPmods', '=', 'maxima', '+', 't0_idx', 'DUPmods', '=', 'DUPs', '+', 't0_idx', 'TPend', '=', 'TPend', '+', 't0_idx', 'return', 'TPmods', ',', 'DUPmods', ',', 'TPend', ',', 'lambd']
Function which finds TPs and uses the calc_DUP_parameter function to calculate DUP parameter evolution dependent on the star or core mass. Parameters ---------- fig : integer Figure number to plot. t0_model : integer First he-shell lum peak. percent : float dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user in this variable. The default is 5. makefig : do you want a figure to be made? Returns ------- TPmods : array model numbers at the peak of each thermal pulse DUPmods : array model numbers at the dredge-up, where dredge-up is defined as when the mass dredged up is a certain percent of the total mass dredged up during that event, which is set by the user TPend : array model numbers at the end of the PDCZ for each TP lambda : array DUP efficiency for each pulse
['Function', 'which', 'finds', 'TPs', 'and', 'uses', 'the', 'calc_DUP_parameter', 'function', '.', 'To', 'calculate', 'DUP', 'parameter', 'evolution', 'dependent', 'of', 'the', 'star', 'or', 'core', 'mass', '.']
train
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3204-L3310
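A minimal usage sketch, assuming NuGridPy is installed and a MESA run directory is available; the 'LOGS' path and the history_data constructor call are assumptions, not taken from the record:

from nugridpy import mesa

h = mesa.history_data('LOGS')  # hypothetical path to a MESA LOGS directory
TPmods, DUPmods, TPend, lambd = h.find_TPs_and_DUPs(percent=5.)
print('TP peaks at models:', TPmods)
print('3DUP efficiency per pulse:', lambd)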
4,861
senaite/senaite.core
bika/lims/browser/widgets/artemplateanalyseswidget.py
ARTemplateAnalysesView.get_configuration
def get_configuration(self): """Returns a mapping of UID -> configuration """ mapping = {} settings = self.get_settings() for record in self.context.getAnalyses(): uid = record.get("service_uid") setting = settings.get(uid, {}) config = { "partition": record.get("partition"), "hidden": setting.get("hidden", False), } mapping[uid] = config return mapping
python
def get_configuration(self): """Returns a mapping of UID -> configuration """ mapping = {} settings = self.get_settings() for record in self.context.getAnalyses(): uid = record.get("service_uid") setting = settings.get(uid, {}) config = { "partition": record.get("partition"), "hidden": setting.get("hidden", False), } mapping[uid] = config return mapping
['def', 'get_configuration', '(', 'self', ')', ':', 'mapping', '=', '{', '}', 'settings', '=', 'self', '.', 'get_settings', '(', ')', 'for', 'record', 'in', 'self', '.', 'context', '.', 'getAnalyses', '(', ')', ':', 'uid', '=', 'record', '.', 'get', '(', '"service_uid"', ')', 'setting', '=', 'settings', '.', 'get', '(', 'uid', ',', '{', '}', ')', 'config', '=', '{', '"partition"', ':', 'record', '.', 'get', '(', '"partition"', ')', ',', '"hidden"', ':', 'setting', '.', 'get', '(', '"hidden"', ',', 'False', ')', ',', '}', 'mapping', '[', 'uid', ']', '=', 'config', 'return', 'mapping']
Returns a mapping of UID -> configuration
['Returns', 'a', 'mapping', 'of', 'UID', '-', '>', 'configuration']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/artemplateanalyseswidget.py#L131-L144
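A standalone sketch of the UID -> configuration mapping this view builds, with plain dicts standing in for the template's analysis records and settings (hypothetical data, no Plone context needed):

def build_configuration(records, settings):
    # Mirrors ARTemplateAnalysesView.get_configuration: one config dict per service UID.
    mapping = {}
    for record in records:
        uid = record.get('service_uid')
        setting = settings.get(uid, {})
        mapping[uid] = {
            'partition': record.get('partition'),
            'hidden': setting.get('hidden', False),
        }
    return mapping

print(build_configuration([{'service_uid': 'uid-1', 'partition': 'part-1'}],
                          {'uid-1': {'hidden': True}}))
# {'uid-1': {'partition': 'part-1', 'hidden': True}}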
4,862
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
Podcast.set_link
def set_link(self): """Parses link to homepage and set value""" try: self.link = self.soup.find('link').string except AttributeError: self.link = None
python
def set_link(self): """Parses link to homepage and set value""" try: self.link = self.soup.find('link').string except AttributeError: self.link = None
['def', 'set_link', '(', 'self', ')', ':', 'try', ':', 'self', '.', 'link', '=', 'self', '.', 'soup', '.', 'find', '(', "'link'", ')', '.', 'string', 'except', 'AttributeError', ':', 'self', '.', 'link', '=', 'None']
Parses link to homepage and set value
['Parses', 'link', 'to', 'homepage', 'and', 'set', 'value']
train
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L373-L378
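A minimal usage sketch, assuming pyPodcastParser is installed; set_link runs inside Podcast's constructor, so the attribute is read rather than the method called (the feed URL is hypothetical):

import requests
from pyPodcastParser.Podcast import Podcast

response = requests.get('https://example.com/feed.rss')  # hypothetical feed
podcast = Podcast(response.content)
print(podcast.link)  # None when the feed has no <link> element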
4,863
agoragames/leaderboard-python
leaderboard/leaderboard.py
Leaderboard.page_for_in
def page_for_in(self, leaderboard_name, member, page_size=DEFAULT_PAGE_SIZE): ''' Determine the page where a member falls in the named leaderboard. @param leaderboard [String] Name of the leaderboard. @param member [String] Member name. @param page_size [int] Page size to be used in determining page location. @return the page where a member falls in the leaderboard. ''' rank_for_member = None if self.order == self.ASC: rank_for_member = self.redis_connection.zrank( leaderboard_name, member) else: rank_for_member = self.redis_connection.zrevrank( leaderboard_name, member) if rank_for_member is None: rank_for_member = 0 else: rank_for_member += 1 return int(math.ceil(float(rank_for_member) / float(page_size)))
python
def page_for_in(self, leaderboard_name, member, page_size=DEFAULT_PAGE_SIZE): ''' Determine the page where a member falls in the named leaderboard. @param leaderboard [String] Name of the leaderboard. @param member [String] Member name. @param page_size [int] Page size to be used in determining page location. @return the page where a member falls in the leaderboard. ''' rank_for_member = None if self.order == self.ASC: rank_for_member = self.redis_connection.zrank( leaderboard_name, member) else: rank_for_member = self.redis_connection.zrevrank( leaderboard_name, member) if rank_for_member is None: rank_for_member = 0 else: rank_for_member += 1 return int(math.ceil(float(rank_for_member) / float(page_size)))
['def', 'page_for_in', '(', 'self', ',', 'leaderboard_name', ',', 'member', ',', 'page_size', '=', 'DEFAULT_PAGE_SIZE', ')', ':', 'rank_for_member', '=', 'None', 'if', 'self', '.', 'order', '==', 'self', '.', 'ASC', ':', 'rank_for_member', '=', 'self', '.', 'redis_connection', '.', 'zrank', '(', 'leaderboard_name', ',', 'member', ')', 'else', ':', 'rank_for_member', '=', 'self', '.', 'redis_connection', '.', 'zrevrank', '(', 'leaderboard_name', ',', 'member', ')', 'if', 'rank_for_member', 'is', 'None', ':', 'rank_for_member', '=', '0', 'else', ':', 'rank_for_member', '+=', '1', 'return', 'int', '(', 'math', '.', 'ceil', '(', 'float', '(', 'rank_for_member', ')', '/', 'float', '(', 'page_size', ')', ')', ')']
Determine the page where a member falls in the named leaderboard. @param leaderboard [String] Name of the leaderboard. @param member [String] Member name. @param page_size [int] Page size to be used in determining page location. @return the page where a member falls in the leaderboard.
['Determine', 'the', 'page', 'where', 'a', 'member', 'falls', 'in', 'the', 'named', 'leaderboard', '.']
train
https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L618-L643
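A minimal usage sketch, assuming leaderboard-python is installed and a Redis server is reachable on localhost:

from leaderboard import Leaderboard

lb = Leaderboard('highscores', page_size=25)
lb.rank_member('alice', 100)
# The member's rank divided by the page size, rounded up, gives the page: 1 here.
print(lb.page_for_in('highscores', 'alice', page_size=25))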
4,864
hydpy-dev/hydpy
hydpy/models/lstream/lstream_model.py
calc_rk_v1
def calc_rk_v1(self): """Determine the actual traveling time of the water (not of the wave!). Required derived parameter: |Sek| Required flux sequences: |AG| |QRef| Calculated flux sequence: |RK| Basic equation: :math:`RK = \\frac{Laen \\cdot A}{QRef}` Examples: First, note that the traveling time is determined in the unit of the actual simulation step size: >>> from hydpy.models.lstream import * >>> parameterstep() >>> laen(25.0) >>> derived.sek(24*60*60) >>> fluxes.ag = 10.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(2.893519) Second, for negative values or zero values of |AG| or |QRef|, the value of |RK| is set to zero: >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0) >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess if (flu.ag > 0.) and (flu.qref > 0.): flu.rk = (1000.*con.laen*flu.ag)/(der.sek*flu.qref) else: flu.rk = 0.
python
def calc_rk_v1(self): """Determine the actual traveling time of the water (not of the wave!). Required derived parameter: |Sek| Required flux sequences: |AG| |QRef| Calculated flux sequence: |RK| Basic equation: :math:`RK = \\frac{Laen \\cdot A}{QRef}` Examples: First, note that the traveling time is determined in the unit of the actual simulation step size: >>> from hydpy.models.lstream import * >>> parameterstep() >>> laen(25.0) >>> derived.sek(24*60*60) >>> fluxes.ag = 10.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(2.893519) Second, for negative values or zero values of |AG| or |QRef|, the value of |RK| is set to zero: >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0) >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess if (flu.ag > 0.) and (flu.qref > 0.): flu.rk = (1000.*con.laen*flu.ag)/(der.sek*flu.qref) else: flu.rk = 0.
['def', 'calc_rk_v1', '(', 'self', ')', ':', 'con', '=', 'self', '.', 'parameters', '.', 'control', '.', 'fastaccess', 'der', '=', 'self', '.', 'parameters', '.', 'derived', '.', 'fastaccess', 'flu', '=', 'self', '.', 'sequences', '.', 'fluxes', '.', 'fastaccess', 'if', '(', 'flu', '.', 'ag', '>', '0.', ')', 'and', '(', 'flu', '.', 'qref', '>', '0.', ')', ':', 'flu', '.', 'rk', '=', '(', '1000.', '*', 'con', '.', 'laen', '*', 'flu', '.', 'ag', ')', '/', '(', 'der', '.', 'sek', '*', 'flu', '.', 'qref', ')', 'else', ':', 'flu', '.', 'rk', '=', '0.']
Determine the actual traveling time of the water (not of the wave!). Required derived parameter: |Sek| Required flux sequences: |AG| |QRef| Calculated flux sequence: |RK| Basic equation: :math:`RK = \\frac{Laen \\cdot A}{QRef}` Examples: First, note that the traveling time is determined in the unit of the actual simulation step size: >>> from hydpy.models.lstream import * >>> parameterstep() >>> laen(25.0) >>> derived.sek(24*60*60) >>> fluxes.ag = 10.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(2.893519) Second, for negative values or zero values of |AG| or |QRef|, the value of |RK| is set to zero: >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0) >>> fluxes.ag = 0.0 >>> fluxes.qref = 1.0 >>> model.calc_rk_v1() >>> fluxes.rk rk(0.0)
['Determine', 'the', 'actual', 'traveling', 'time', 'of', 'the', 'water', '(', 'not', 'of', 'the', 'wave!', ')', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lstream/lstream_model.py#L45-L97
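A standalone replica of the basic equation, checked against the numbers in the record's doctest (plain floats, no hydpy import; the helper name is made up):

def traveling_time(laen, ag, qref, sek=24 * 60 * 60):
    # RK = 1000 * Laen * AG / (Sek * QRef); zero when AG or QRef is not positive.
    if ag > 0.0 and qref > 0.0:
        return 1000.0 * laen * ag / (sek * qref)
    return 0.0

print(round(traveling_time(25.0, 10.0, 1.0), 6))  # 2.893519, as in the doctest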
4,865
googleapis/google-cloud-python
storage/google/cloud/storage/blob.py
Blob._get_writable_metadata
def _get_writable_metadata(self): """Get the object / blob metadata which is writable. This is intended to be used when creating a new object / blob. See the `API reference docs`_ for more information, the fields marked as writable are: * ``acl`` * ``cacheControl`` * ``contentDisposition`` * ``contentEncoding`` * ``contentLanguage`` * ``contentType`` * ``crc32c`` * ``md5Hash`` * ``metadata`` * ``name`` * ``storageClass`` For now, we don't support ``acl``, access control lists should be managed directly through :class:`ObjectACL` methods. """ # NOTE: This assumes `self.name` is unicode. object_metadata = {"name": self.name} for key in self._changes: if key in _WRITABLE_FIELDS: object_metadata[key] = self._properties[key] return object_metadata
python
def _get_writable_metadata(self): """Get the object / blob metadata which is writable. This is intended to be used when creating a new object / blob. See the `API reference docs`_ for more information, the fields marked as writable are: * ``acl`` * ``cacheControl`` * ``contentDisposition`` * ``contentEncoding`` * ``contentLanguage`` * ``contentType`` * ``crc32c`` * ``md5Hash`` * ``metadata`` * ``name`` * ``storageClass`` For now, we don't support ``acl``, access control lists should be managed directly through :class:`ObjectACL` methods. """ # NOTE: This assumes `self.name` is unicode. object_metadata = {"name": self.name} for key in self._changes: if key in _WRITABLE_FIELDS: object_metadata[key] = self._properties[key] return object_metadata
['def', '_get_writable_metadata', '(', 'self', ')', ':', '# NOTE: This assumes `self.name` is unicode.', 'object_metadata', '=', '{', '"name"', ':', 'self', '.', 'name', '}', 'for', 'key', 'in', 'self', '.', '_changes', ':', 'if', 'key', 'in', '_WRITABLE_FIELDS', ':', 'object_metadata', '[', 'key', ']', '=', 'self', '.', '_properties', '[', 'key', ']', 'return', 'object_metadata']
Get the object / blob metadata which is writable. This is intended to be used when creating a new object / blob. See the `API reference docs`_ for more information, the fields marked as writable are: * ``acl`` * ``cacheControl`` * ``contentDisposition`` * ``contentEncoding`` * ``contentLanguage`` * ``contentType`` * ``crc32c`` * ``md5Hash`` * ``metadata`` * ``name`` * ``storageClass`` For now, we don't support ``acl``, access control lists should be managed directly through :class:`ObjectACL` methods.
['Get', 'the', 'object', '/', 'blob', 'metadata', 'which', 'is', 'writable', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/blob.py#L730-L759
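A standalone sketch of the same filtering pattern; the WRITABLE set below is a stand-in, not the library's real _WRITABLE_FIELDS:

WRITABLE = {'cacheControl', 'contentType', 'metadata'}  # hypothetical subset

def writable_metadata(name, properties, changes):
    # Emit only fields that are both writable and actually changed.
    object_metadata = {'name': name}
    for key in changes:
        if key in WRITABLE:
            object_metadata[key] = properties[key]
    return object_metadata

print(writable_metadata('blob.txt',
                        {'contentType': 'text/plain', 'etag': 'abc'},
                        {'contentType', 'etag'}))
# {'name': 'blob.txt', 'contentType': 'text/plain'} -- 'etag' is dropped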
4,866
scott-griffiths/bitstring
bitstring.py
Bits._copy
def _copy(self): """Create and return a new copy of the Bits (always in memory).""" s_copy = self.__class__() s_copy._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength), self.len, self._offset) return s_copy
python
def _copy(self): """Create and return a new copy of the Bits (always in memory).""" s_copy = self.__class__() s_copy._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength), self.len, self._offset) return s_copy
['def', '_copy', '(', 'self', ')', ':', 's_copy', '=', 'self', '.', '__class__', '(', ')', 's_copy', '.', '_setbytes_unsafe', '(', 'self', '.', '_datastore', '.', 'getbyteslice', '(', '0', ',', 'self', '.', '_datastore', '.', 'bytelength', ')', ',', 'self', '.', 'len', ',', 'self', '.', '_offset', ')', 'return', 's_copy']
Create and return a new copy of the Bits (always in memory).
['Create', 'and', 'return', 'a', 'new', 'copy', 'of', 'the', 'Bits', '(', 'always', 'in', 'memory', ')', '.']
train
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1988-L1993
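A minimal sketch of the copy semantics, assuming bitstring is installed; _copy is private, so the public copy-constructor route is shown instead:

from bitstring import Bits

a = Bits('0b1010')
b = Bits(a)  # construct a new Bits carrying the same bits
print(b == a, b.bin)  # True 1010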
4,867
jim-easterbrook/pyctools
src/pyctools/core/frame.py
Frame.initialise
def initialise(self, other): """Initialise a :py:class:`Frame` from another :py:class:`Frame`. Copies the metadata and (a reference to) the data from :py:obj:`other`. Note that the data is not actually copied -- you must make a copy of the data before changing it. :param Frame other: The frame to copy. """ self.frame_no = other.frame_no self.data = other.data self.type = other.type self.metadata.copy(other.metadata)
python
def initialise(self, other): """Initialise a :py:class:`Frame` from another :py:class:`Frame`. Copies the metadata and (a reference to) the data from :py:obj:`other`. Note that the data is not actually copied -- you must make a copy of the data before changing it. :param Frame other: The frame to copy. """ self.frame_no = other.frame_no self.data = other.data self.type = other.type self.metadata.copy(other.metadata)
['def', 'initialise', '(', 'self', ',', 'other', ')', ':', 'self', '.', 'frame_no', '=', 'other', '.', 'frame_no', 'self', '.', 'data', '=', 'other', '.', 'data', 'self', '.', 'type', '=', 'other', '.', 'type', 'self', '.', 'metadata', '.', 'copy', '(', 'other', '.', 'metadata', ')']
Initialise a :py:class:`Frame` from another :py:class:`Frame`. Copies the metadata and (a reference to) the data from :py:obj:`other`. Note that the data is not actually copied -- you must make a copy of the data before changing it. :param Frame other: The frame to copy.
['Initialise', 'a', ':', 'py', ':', 'class', ':', 'Frame', 'from', 'another', ':', 'py', ':', 'class', ':', 'Frame', '.']
train
https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/frame.py#L67-L80
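A minimal usage sketch, assuming pyctools is installed and that Frame() can be constructed without arguments (an assumption); note the data reference is shared, not copied:

from pyctools.core.frame import Frame

src = Frame()
src.frame_no = 7
src.data = [1, 2, 3]
src.type = 'RGB'
dest = Frame()
dest.initialise(src)
print(dest.frame_no, dest.data is src.data)  # 7 True -- copy the data before mutating it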
4,868
linkhub-sdk/popbill.py
popbill/statementService.py
StatementService.issue
def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None): """ 발행 args CorpNum : 팝빌회원 사업자번호 ItemCode : 명세서 종류 코드 [121 - 거래명세서], [122 - 청구서], [123 - 견적서], [124 - 발주서], [125 - 입금표], [126 - 영수증] MgtKey : 파트너 문서관리번호 Memo : 처리메모 EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송) UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") if ItemCode == None or ItemCode == "": raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.") req = {} postData = "" if Memo != None and Memo != '': req["memo"] = Memo if EmailSubject != None and EmailSubject != '': req["emailSubject"] = EmailSubject postData = self._stringtify(req) return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
python
def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None): """ 발행 args CorpNum : 팝빌회원 사업자번호 ItemCode : 명세서 종류 코드 [121 - 거래명세서], [122 - 청구서], [123 - 견적서], [124 - 발주서], [125 - 입금표], [126 - 영수증] MgtKey : 파트너 문서관리번호 Memo : 처리메모 EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송) UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") if ItemCode == None or ItemCode == "": raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.") req = {} postData = "" if Memo != None and Memo != '': req["memo"] = Memo if EmailSubject != None and EmailSubject != '': req["emailSubject"] = EmailSubject postData = self._stringtify(req) return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
['def', 'issue', '(', 'self', ',', 'CorpNum', ',', 'ItemCode', ',', 'MgtKey', ',', 'Memo', '=', 'None', ',', 'EmailSubject', '=', 'None', ',', 'UserID', '=', 'None', ')', ':', 'if', 'MgtKey', '==', 'None', 'or', 'MgtKey', '==', '""', ':', 'raise', 'PopbillException', '(', '-', '99999999', ',', '"관리번호가 입력되지 않았습니다.")\r', '', 'if', 'ItemCode', '==', 'None', 'or', 'ItemCode', '==', '""', ':', 'raise', 'PopbillException', '(', '-', '99999999', ',', '"명세서 종류 코드가 입력되지 않았습니다.")\r', '', 'req', '=', '{', '}', 'postData', '=', '""', 'if', 'Memo', '!=', 'None', 'and', 'Memo', '!=', "''", ':', 'req', '[', '"memo"', ']', '=', 'Memo', 'if', 'EmailSubject', '!=', 'None', 'and', 'EmailSubject', '!=', "''", ':', 'req', '[', '"emailSubject"', ']', '=', 'EmailSubject', 'postData', '=', 'self', '.', '_stringtify', '(', 'req', ')', 'return', 'self', '.', '_httppost', '(', "'/Statement/'", '+', 'str', '(', 'ItemCode', ')', '+', "'/'", '+', 'MgtKey', ',', 'postData', ',', 'CorpNum', ',', 'UserID', ',', '"ISSUE"', ')']
Issue a statement. args CorpNum : Popbill member business registration number ItemCode : statement type code [121 - transaction statement], [122 - bill], [123 - estimate], [124 - purchase order], [125 - deposit slip], [126 - receipt] MgtKey : partner document management key Memo : processing memo EmailSubject : subject of the issuance email (the default template is used if omitted) UserID : Popbill member user ID return processing result. consist of code and message raise PopbillException
['발행', 'args', 'CorpNum', ':', '팝빌회원', '사업자번호', 'ItemCode', ':', '명세서', '종류', '코드', '[', '121', '-', '거래명세서', ']', '[', '122', '-', '청구서', ']', '[', '123', '-', '견적서', ']', '[', '124', '-', '발주서', ']', '[', '125', '-', '입금표', ']', '[', '126', '-', '영수증', ']', 'MgtKey', ':', '파트너', '문서관리번호', 'Memo', ':', '처리메모', 'EmailSubject', ':', '발행메일', '제목', '(', '미기재시', '기본양식으로', '전송', ')', 'UserID', ':', '팝빌회원', '아이디', 'return', '처리결과', '.', 'consist', 'of', 'code', 'and', 'message', 'raise', 'PopbillException']
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L204-L235
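A minimal usage sketch, assuming the popbill SDK is installed; the LinkID/SecretKey credentials, business number, and management key below are all placeholders:

from popbill import StatementService

service = StatementService('LinkID', 'SecretKey')  # placeholder credentials
# ItemCode 121 is a transaction statement; MgtKey is the partner's document key.
result = service.issue('1234567890', 121, '2019-0001', Memo='issued via API')
print(result.code, result.message)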
4,869
pandas-dev/pandas
pandas/core/resample.py
Resampler.std
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof)
python
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof)
['def', 'std', '(', 'self', ',', 'ddof', '=', '1', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'nv', '.', 'validate_resampler_func', '(', "'std'", ',', 'args', ',', 'kwargs', ')', 'return', 'self', '.', '_downsample', '(', "'std'", ',', 'ddof', '=', 'ddof', ')']
Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom.
['Compute', 'standard', 'deviation', 'of', 'groups', 'excluding', 'missing', 'values', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L790-L800
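A minimal usage sketch with pandas; the method is reached through .resample():

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0],
              index=pd.date_range('2019-01-01', periods=4, freq='D'))
# Sample standard deviation (ddof=1) within each two-day bin.
print(s.resample('2D').std(ddof=1))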
4,870
pyamg/pyamg
pyamg/krylov/_steepest_descent.py
steepest_descent
def steepest_descent(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None, residuals=None): """Steepest descent algorithm. Solves the linear system Ax = b. Left preconditioning is supported. Parameters ---------- A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the preconditioner norm of r_0, or ||r_0||_M. maxiter : int maximum number of allowed iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the residual norm history, including the initial residual. The preconditioner norm is used, instead of the Euclidean norm. Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of cg == ======================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. <0 numerical breakdown, or illegal input == ======================================= Notes ----- The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. The residual in the preconditioner norm is both used for halting and returned in the residuals list. Examples -------- >>> from pyamg.krylov import steepest_descent >>> from pyamg.util.linalg import norm >>> import numpy as np >>> from pyamg.gallery import poisson >>> A = poisson((10,10)) >>> b = np.ones((A.shape[0],)) >>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8) >>> print norm(b - A*x) 7.89436429704 References ---------- .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 137--142, 2003 http://www-users.cs.umn.edu/~saad/books.html """ A, M, x, b, postprocess = make_system(A, M, x0, b) # Ensure that warnings are always reissued from this function import warnings warnings.filterwarnings('always', module='pyamg\.krylov\._steepest_descent') # determine maxiter if maxiter is None: maxiter = int(len(b)) elif maxiter < 1: raise ValueError('Number of iterations must be positive') # setup method r = b - A*x z = M*r rz = np.inner(r.conjugate(), z) # use preconditioner norm normr = np.sqrt(rz) if residuals is not None: residuals[:] = [normr] # initial residual # Check initial guess ( scaling by b, if b != 0, # must account for case when norm(b) is very small) normb = norm(b) if normb == 0.0: normb = 1.0 if normr < tol*normb: return (postprocess(x), 0) # Scale tol by ||r_0||_M if normr != 0.0: tol = tol*normr # How often should r be recomputed recompute_r = 50 iter = 0 while True: iter = iter+1 q = A*z zAz = np.inner(z.conjugate(), q) # check curvature of A if zAz < 0.0: warn("\nIndefinite matrix detected in steepest descent,\ aborting\n") return (postprocess(x), -1) alpha = rz / zAz # step size x = x + alpha*z if np.mod(iter, recompute_r) and iter > 0: r = b - A*x else: r = r - alpha*q z = M*r rz = np.inner(r.conjugate(), z) if rz < 0.0: # check curvature of M warn("\nIndefinite preconditioner detected in steepest descent,\ aborting\n") return (postprocess(x), -1) normr = np.sqrt(rz) # use preconditioner norm if residuals is not None: residuals.append(normr) if callback is not None: callback(x) if normr < tol: return (postprocess(x), 0) elif rz == 0.0: # important to test after testing normr < tol. rz == 0.0 is an # indicator of convergence when r = 0.0 warn("\nSingular preconditioner detected in steepest descent,\ ceasing iterations\n") return (postprocess(x), -1) if iter == maxiter: return (postprocess(x), iter)
python
def steepest_descent(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None, residuals=None): """Steepest descent algorithm. Solves the linear system Ax = b. Left preconditioning is supported. Parameters ---------- A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the preconditioner norm of r_0, or ||r_0||_M. maxiter : int maximum number of allowed iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the residual norm history, including the initial residual. The preconditioner norm is used, instead of the Euclidean norm. Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of cg == ======================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. <0 numerical breakdown, or illegal input == ======================================= Notes ----- The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. The residual in the preconditioner norm is both used for halting and returned in the residuals list. Examples -------- >>> from pyamg.krylov import steepest_descent >>> from pyamg.util.linalg import norm >>> import numpy as np >>> from pyamg.gallery import poisson >>> A = poisson((10,10)) >>> b = np.ones((A.shape[0],)) >>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8) >>> print norm(b - A*x) 7.89436429704 References ---------- .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 137--142, 2003 http://www-users.cs.umn.edu/~saad/books.html """ A, M, x, b, postprocess = make_system(A, M, x0, b) # Ensure that warnings are always reissued from this function import warnings warnings.filterwarnings('always', module='pyamg\.krylov\._steepest_descent') # determine maxiter if maxiter is None: maxiter = int(len(b)) elif maxiter < 1: raise ValueError('Number of iterations must be positive') # setup method r = b - A*x z = M*r rz = np.inner(r.conjugate(), z) # use preconditioner norm normr = np.sqrt(rz) if residuals is not None: residuals[:] = [normr] # initial residual # Check initial guess ( scaling by b, if b != 0, # must account for case when norm(b) is very small) normb = norm(b) if normb == 0.0: normb = 1.0 if normr < tol*normb: return (postprocess(x), 0) # Scale tol by ||r_0||_M if normr != 0.0: tol = tol*normr # How often should r be recomputed recompute_r = 50 iter = 0 while True: iter = iter+1 q = A*z zAz = np.inner(z.conjugate(), q) # check curvature of A if zAz < 0.0: warn("\nIndefinite matrix detected in steepest descent,\ aborting\n") return (postprocess(x), -1) alpha = rz / zAz # step size x = x + alpha*z if np.mod(iter, recompute_r) and iter > 0: r = b - A*x else: r = r - alpha*q z = M*r rz = np.inner(r.conjugate(), z) if rz < 0.0: # check curvature of M warn("\nIndefinite preconditioner detected in steepest descent,\ aborting\n") return (postprocess(x), -1) normr = np.sqrt(rz) # use preconditioner norm if residuals is not None: residuals.append(normr) if callback is not None: callback(x) if normr < tol: return (postprocess(x), 0) elif rz == 0.0: # important to test after testing normr < tol. rz == 0.0 is an # indicator of convergence when r = 0.0 warn("\nSingular preconditioner detected in steepest descent,\ ceasing iterations\n") return (postprocess(x), -1) if iter == maxiter: return (postprocess(x), iter)
['def', 'steepest_descent', '(', 'A', ',', 'b', ',', 'x0', '=', 'None', ',', 'tol', '=', '1e-5', ',', 'maxiter', '=', 'None', ',', 'xtype', '=', 'None', ',', 'M', '=', 'None', ',', 'callback', '=', 'None', ',', 'residuals', '=', 'None', ')', ':', 'A', ',', 'M', ',', 'x', ',', 'b', ',', 'postprocess', '=', 'make_system', '(', 'A', ',', 'M', ',', 'x0', ',', 'b', ')', '# Ensure that warnings are always reissued from this function', 'import', 'warnings', 'warnings', '.', 'filterwarnings', '(', "'always'", ',', 'module', '=', "'pyamg\\.krylov\\._steepest_descent'", ')', '# determine maxiter', 'if', 'maxiter', 'is', 'None', ':', 'maxiter', '=', 'int', '(', 'len', '(', 'b', ')', ')', 'elif', 'maxiter', '<', '1', ':', 'raise', 'ValueError', '(', "'Number of iterations must be positive'", ')', '# setup method', 'r', '=', 'b', '-', 'A', '*', 'x', 'z', '=', 'M', '*', 'r', 'rz', '=', 'np', '.', 'inner', '(', 'r', '.', 'conjugate', '(', ')', ',', 'z', ')', '# use preconditioner norm', 'normr', '=', 'np', '.', 'sqrt', '(', 'rz', ')', 'if', 'residuals', 'is', 'not', 'None', ':', 'residuals', '[', ':', ']', '=', '[', 'normr', ']', '# initial residual', '# Check initial guess ( scaling by b, if b != 0,', '# must account for case when norm(b) is very small)', 'normb', '=', 'norm', '(', 'b', ')', 'if', 'normb', '==', '0.0', ':', 'normb', '=', '1.0', 'if', 'normr', '<', 'tol', '*', 'normb', ':', 'return', '(', 'postprocess', '(', 'x', ')', ',', '0', ')', '# Scale tol by ||r_0||_M', 'if', 'normr', '!=', '0.0', ':', 'tol', '=', 'tol', '*', 'normr', '# How often should r be recomputed', 'recompute_r', '=', '50', 'iter', '=', '0', 'while', 'True', ':', 'iter', '=', 'iter', '+', '1', 'q', '=', 'A', '*', 'z', 'zAz', '=', 'np', '.', 'inner', '(', 'z', '.', 'conjugate', '(', ')', ',', 'q', ')', '# check curvature of A', 'if', 'zAz', '<', '0.0', ':', 'warn', '(', '"\\nIndefinite matrix detected in steepest descent,\\\n aborting\\n"', ')', 'return', '(', 'postprocess', '(', 'x', ')', ',', '-', '1', ')', 'alpha', '=', 'rz', '/', 'zAz', '# step size', 'x', '=', 'x', '+', 'alpha', '*', 'z', 'if', 'np', '.', 'mod', '(', 'iter', ',', 'recompute_r', ')', 'and', 'iter', '>', '0', ':', 'r', '=', 'b', '-', 'A', '*', 'x', 'else', ':', 'r', '=', 'r', '-', 'alpha', '*', 'q', 'z', '=', 'M', '*', 'r', 'rz', '=', 'np', '.', 'inner', '(', 'r', '.', 'conjugate', '(', ')', ',', 'z', ')', 'if', 'rz', '<', '0.0', ':', '# check curvature of M', 'warn', '(', '"\\nIndefinite preconditioner detected in steepest descent,\\\n aborting\\n"', ')', 'return', '(', 'postprocess', '(', 'x', ')', ',', '-', '1', ')', 'normr', '=', 'np', '.', 'sqrt', '(', 'rz', ')', '# use preconditioner norm', 'if', 'residuals', 'is', 'not', 'None', ':', 'residuals', '.', 'append', '(', 'normr', ')', 'if', 'callback', 'is', 'not', 'None', ':', 'callback', '(', 'x', ')', 'if', 'normr', '<', 'tol', ':', 'return', '(', 'postprocess', '(', 'x', ')', ',', '0', ')', 'elif', 'rz', '==', '0.0', ':', '# important to test after testing normr < tol. rz == 0.0 is an', '# indicator of convergence when r = 0.0', 'warn', '(', '"\\nSingular preconditioner detected in steepest descent,\\\n ceasing iterations\\n"', ')', 'return', '(', 'postprocess', '(', 'x', ')', ',', '-', '1', ')', 'if', 'iter', '==', 'maxiter', ':', 'return', '(', 'postprocess', '(', 'x', ')', ',', 'iter', ')']
Steepest descent algorithm. Solves the linear system Ax = b. Left preconditioning is supported. Parameters ---------- A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the preconditioner norm of r_0, or ||r_0||_M. maxiter : int maximum number of allowed iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the residual norm history, including the initial residual. The preconditioner norm is used, instead of the Euclidean norm. Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of cg == ======================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. <0 numerical breakdown, or illegal input == ======================================= Notes ----- The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. The residual in the preconditioner norm is both used for halting and returned in the residuals list. Examples -------- >>> from pyamg.krylov import steepest_descent >>> from pyamg.util.linalg import norm >>> import numpy as np >>> from pyamg.gallery import poisson >>> A = poisson((10,10)) >>> b = np.ones((A.shape[0],)) >>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8) >>> print norm(b - A*x) 7.89436429704 References ---------- .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 137--142, 2003 http://www-users.cs.umn.edu/~saad/books.html
['Steepest', 'descent', 'algorithm', '.']
train
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_steepest_descent.py#L10-L168
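A usage sketch building on the record's own doctest, adding the residuals list to watch convergence in the preconditioner norm:

import numpy as np
from pyamg.gallery import poisson
from pyamg.krylov import steepest_descent

A = poisson((10, 10))
b = np.ones(A.shape[0])
res = []
x, flag = steepest_descent(A, b, tol=1e-8, maxiter=200, residuals=res)
print(flag, len(res), res[-1] / res[0])  # flag 0 means converged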
4,871
google/grr
grr/core/grr_response_core/stats/default_stats_collector.py
_GaugeMetric.Set
def Set(self, value, fields=None): """Sets the metric's current value.""" self._metric_values[_FieldsToKey(fields)] = self._value_type(value)
python
def Set(self, value, fields=None): """Sets the metric's current value.""" self._metric_values[_FieldsToKey(fields)] = self._value_type(value)
['def', 'Set', '(', 'self', ',', 'value', ',', 'fields', '=', 'None', ')', ':', 'self', '.', '_metric_values', '[', '_FieldsToKey', '(', 'fields', ')', ']', '=', 'self', '.', '_value_type', '(', 'value', ')']
Sets the metric's current value.
['Sets', 'the', 'metric', 's', 'current', 'value', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/stats/default_stats_collector.py#L136-L138
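A standalone sketch of the gauge pattern; the class below is a stand-in for GRR's internals, and _FieldsToKey is mimicked with a plain tuple:

class Gauge(object):
    def __init__(self, value_type=float):
        self._value_type = value_type
        self._metric_values = {}

    def Set(self, value, fields=None):
        # Coerce to the metric's declared type and key by the fields tuple.
        self._metric_values[tuple(fields or ())] = self._value_type(value)

g = Gauge(int)
g.Set(3.9, fields=['worker'])
print(g._metric_values)  # {('worker',): 3}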
4,872
senaite/senaite.core
bika/lims/upgrade/v01_03_000.py
change_analysis_requests_id_formatting
def change_analysis_requests_id_formatting(portal, p_type="AnalysisRequest"): """Applies the system's Sample ID Formatting to Analysis Request """ ar_id_format = dict( form='{sampleType}-{seq:04d}', portal_type='AnalysisRequest', prefix='analysisrequest', sequence_type='generated', counter_type='', split_length=1) bs = portal.bika_setup id_formatting = bs.getIDFormatting() ar_format = filter(lambda id: id["portal_type"] == p_type, id_formatting) if p_type=="AnalysisRequest": logger.info("Set ID Format for Analysis Request portal_type ...") if not ar_format or "sample" in ar_format[0]["form"]: # Copy the ID formatting set for Sample change_analysis_requests_id_formatting(portal, p_type="Sample") return else: logger.info("ID Format for Analysis Request already set: {} [SKIP]" .format(ar_format[0]["form"])) return else: ar_format = ar_format and ar_format[0].copy() or ar_id_format # Set the Analysis Request ID Format ar_id_format.update(ar_format) ar_id_format["portal_type"] ="AnalysisRequest" ar_id_format["prefix"] = "analysisrequest" set_id_format(portal, ar_id_format) # Find out the last ID for Sample and reseed AR to prevent ID already taken # errors on AR creation if p_type == "Sample": number_generator = getUtility(INumberGenerator) ar_keys = dict() ar_keys_prev = dict() for key, value in number_generator.storage.items(): if "sample-" in key: ar_key = key.replace("sample-", "analysisrequest-") ar_keys[ar_key] = api.to_int(value, 0) elif "analysisrequest-" in key: ar_keys_prev[key] = api.to_int(value, 0) for key, value in ar_keys.items(): if key in ar_keys_prev: # Maybe this upgrade step has already been run, so we don't # want the ar IDs to be reseeded again! if value <= ar_keys_prev[key]: logger.info("ID for '{}' already seeded to '{}' [SKIP]" .format(key, ar_keys_prev[key])) continue logger.info("Seeding {} to {}".format(key, value)) number_generator.set_number(key, value)
python
def change_analysis_requests_id_formatting(portal, p_type="AnalysisRequest"): """Applies the system's Sample ID Formatting to Analysis Request """ ar_id_format = dict( form='{sampleType}-{seq:04d}', portal_type='AnalysisRequest', prefix='analysisrequest', sequence_type='generated', counter_type='', split_length=1) bs = portal.bika_setup id_formatting = bs.getIDFormatting() ar_format = filter(lambda id: id["portal_type"] == p_type, id_formatting) if p_type=="AnalysisRequest": logger.info("Set ID Format for Analysis Request portal_type ...") if not ar_format or "sample" in ar_format[0]["form"]: # Copy the ID formatting set for Sample change_analysis_requests_id_formatting(portal, p_type="Sample") return else: logger.info("ID Format for Analysis Request already set: {} [SKIP]" .format(ar_format[0]["form"])) return else: ar_format = ar_format and ar_format[0].copy() or ar_id_format # Set the Analysis Request ID Format ar_id_format.update(ar_format) ar_id_format["portal_type"] ="AnalysisRequest" ar_id_format["prefix"] = "analysisrequest" set_id_format(portal, ar_id_format) # Find out the last ID for Sample and reseed AR to prevent ID already taken # errors on AR creation if p_type == "Sample": number_generator = getUtility(INumberGenerator) ar_keys = dict() ar_keys_prev = dict() for key, value in number_generator.storage.items(): if "sample-" in key: ar_key = key.replace("sample-", "analysisrequest-") ar_keys[ar_key] = api.to_int(value, 0) elif "analysisrequest-" in key: ar_keys_prev[key] = api.to_int(value, 0) for key, value in ar_keys.items(): if key in ar_keys_prev: # Maybe this upgrade step has already been run, so we don't # want the ar IDs to be reseeded again! if value <= ar_keys_prev[key]: logger.info("ID for '{}' already seeded to '{}' [SKIP]" .format(key, ar_keys_prev[key])) continue logger.info("Seeding {} to {}".format(key, value)) number_generator.set_number(key, value)
['def', 'change_analysis_requests_id_formatting', '(', 'portal', ',', 'p_type', '=', '"AnalysisRequest"', ')', ':', 'ar_id_format', '=', 'dict', '(', 'form', '=', "'{sampleType}-{seq:04d}'", ',', 'portal_type', '=', "'AnalysisRequest'", ',', 'prefix', '=', "'analysisrequest'", ',', 'sequence_type', '=', "'generated'", ',', 'counter_type', '=', "''", ',', 'split_length', '=', '1', ')', 'bs', '=', 'portal', '.', 'bika_setup', 'id_formatting', '=', 'bs', '.', 'getIDFormatting', '(', ')', 'ar_format', '=', 'filter', '(', 'lambda', 'id', ':', 'id', '[', '"portal_type"', ']', '==', 'p_type', ',', 'id_formatting', ')', 'if', 'p_type', '==', '"AnalysisRequest"', ':', 'logger', '.', 'info', '(', '"Set ID Format for Analysis Request portal_type ..."', ')', 'if', 'not', 'ar_format', 'or', '"sample"', 'in', 'ar_format', '[', '0', ']', '[', '"form"', ']', ':', '# Copy the ID formatting set for Sample', 'change_analysis_requests_id_formatting', '(', 'portal', ',', 'p_type', '=', '"Sample"', ')', 'return', 'else', ':', 'logger', '.', 'info', '(', '"ID Format for Analysis Request already set: {} [SKIP]"', '.', 'format', '(', 'ar_format', '[', '0', ']', '[', '"form"', ']', ')', ')', 'return', 'else', ':', 'ar_format', '=', 'ar_format', 'and', 'ar_format', '[', '0', ']', '.', 'copy', '(', ')', 'or', 'ar_id_format', '# Set the Analysis Request ID Format', 'ar_id_format', '.', 'update', '(', 'ar_format', ')', 'ar_id_format', '[', '"portal_type"', ']', '=', '"AnalysisRequest"', 'ar_id_format', '[', '"prefix"', ']', '=', '"analysisrequest"', 'set_id_format', '(', 'portal', ',', 'ar_id_format', ')', '# Find out the last ID for Sample and reseed AR to prevent ID already taken', '# errors on AR creation', 'if', 'p_type', '==', '"Sample"', ':', 'number_generator', '=', 'getUtility', '(', 'INumberGenerator', ')', 'ar_keys', '=', 'dict', '(', ')', 'ar_keys_prev', '=', 'dict', '(', ')', 'for', 'key', ',', 'value', 'in', 'number_generator', '.', 'storage', '.', 'items', '(', ')', ':', 'if', '"sample-"', 'in', 'key', ':', 'ar_key', '=', 'key', '.', 'replace', '(', '"sample-"', ',', '"analysisrequest-"', ')', 'ar_keys', '[', 'ar_key', ']', '=', 'api', '.', 'to_int', '(', 'value', ',', '0', ')', 'elif', '"analysisrequest-"', 'in', 'key', ':', 'ar_keys_prev', '[', 'key', ']', '=', 'api', '.', 'to_int', '(', 'value', ',', '0', ')', 'for', 'key', ',', 'value', 'in', 'ar_keys', '.', 'items', '(', ')', ':', 'if', 'key', 'in', 'ar_keys_prev', ':', "# Maybe this upgrade step has already been run, so we don't", '# want the ar IDs to be reseeded again!', 'if', 'value', '<=', 'ar_keys_prev', '[', 'key', ']', ':', 'logger', '.', 'info', '(', '"ID for \'{}\' already seeded to \'{}\' [SKIP]"', '.', 'format', '(', 'key', ',', 'ar_keys_prev', '[', 'key', ']', ')', ')', 'continue', 'logger', '.', 'info', '(', '"Seeding {} to {}"', '.', 'format', '(', 'key', ',', 'value', ')', ')', 'number_generator', '.', 'set_number', '(', 'key', ',', 'value', ')']
Applies the system's Sample ID Formatting to Analysis Request
['Applies', 'the', 'system', 's', 'Sample', 'ID', 'Formatting', 'to', 'Analysis', 'Request']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_03_000.py#L1660-L1715
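A minimal invocation sketch for the upgrade step above; the api.get_portal() lookup is an assumption about the senaite helper API, and the call is meant to run inside an upgrade context:

from bika.lims import api

portal = api.get_portal()  # assumed helper; any portal object works here
# copies the Sample ID format over to AnalysisRequest and reseeds the counters
change_analysis_requests_id_formatting(portal)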
4,873
python-cmd2/cmd2
cmd2/utils.py
unquote_redirection_tokens
def unquote_redirection_tokens(args: List[str]) -> None: """ Unquote redirection tokens in a list of command-line arguments This is used when redirection tokens have to be passed to another command :param args: the command line args """ for i, arg in enumerate(args): unquoted_arg = strip_quotes(arg) if unquoted_arg in constants.REDIRECTION_TOKENS: args[i] = unquoted_arg
python
def unquote_redirection_tokens(args: List[str]) -> None: """ Unquote redirection tokens in a list of command-line arguments This is used when redirection tokens have to be passed to another command :param args: the command line args """ for i, arg in enumerate(args): unquoted_arg = strip_quotes(arg) if unquoted_arg in constants.REDIRECTION_TOKENS: args[i] = unquoted_arg
['def', 'unquote_redirection_tokens', '(', 'args', ':', 'List', '[', 'str', ']', ')', '->', 'None', ':', 'for', 'i', ',', 'arg', 'in', 'enumerate', '(', 'args', ')', ':', 'unquoted_arg', '=', 'strip_quotes', '(', 'arg', ')', 'if', 'unquoted_arg', 'in', 'constants', '.', 'REDIRECTION_TOKENS', ':', 'args', '[', 'i', ']', '=', 'unquoted_arg']
Unquote redirection tokens in a list of command-line arguments This is used when redirection tokens have to be passed to another command :param args: the command line args
['Unquote', 'redirection', 'tokens', 'in', 'a', 'list', 'of', 'command', '-', 'line', 'arguments', 'This', 'is', 'used', 'when', 'redirection', 'tokens', 'have', 'to', 'be', 'passed', 'to', 'another', 'command', ':', 'param', 'args', ':', 'the', 'command', 'line', 'args']
train
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/utils.py#L265-L274
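A short sketch of the in-place behaviour described in the docstring; the quoted '>' argument is illustrative, and the actual token set comes from cmd2.constants.REDIRECTION_TOKENS:

from cmd2.utils import unquote_redirection_tokens

args = ['tail', '-n', '5', "'>'", 'out.txt']
unquote_redirection_tokens(args)  # mutates the list in place, returns None
print(args)  # ['tail', '-n', '5', '>', 'out.txt']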
4,874
AdvancedClimateSystems/uModbus
umodbus/functions.py
WriteSingleCoil.execute
def execute(self, slave_id, route_map): """ Execute the Modbus function registered for a route. :param slave_id: Slave id. :param route_map: Instance of modbus.route.Map. """ endpoint = route_map.match(slave_id, self.function_code, self.address) try: endpoint(slave_id=slave_id, address=self.address, value=self.value, function_code=self.function_code) # route_map.match() returns None if no match is found. Calling None # results in TypeError. except TypeError: raise IllegalDataAddressError()
python
def execute(self, slave_id, route_map): """ Execute the Modbus function registered for a route. :param slave_id: Slave id. :param route_map: Instance of modbus.route.Map. """ endpoint = route_map.match(slave_id, self.function_code, self.address) try: endpoint(slave_id=slave_id, address=self.address, value=self.value, function_code=self.function_code) # route_map.match() returns None if no match is found. Calling None # results in TypeError. except TypeError: raise IllegalDataAddressError()
['def', 'execute', '(', 'self', ',', 'slave_id', ',', 'route_map', ')', ':', 'endpoint', '=', 'route_map', '.', 'match', '(', 'slave_id', ',', 'self', '.', 'function_code', ',', 'self', '.', 'address', ')', 'try', ':', 'endpoint', '(', 'slave_id', '=', 'slave_id', ',', 'address', '=', 'self', '.', 'address', ',', 'value', '=', 'self', '.', 'value', ',', 'function_code', '=', 'self', '.', 'function_code', ')', '# route_map.match() returns None if no match is found. Calling None', '# results in TypeError.', 'except', 'TypeError', ':', 'raise', 'IllegalDataAddressError', '(', ')']
Execute the Modbus function registered for a route. :param slave_id: Slave id. :param route_map: Instance of modbus.route.Map.
['Execute', 'the', 'Modbus', 'function', 'registered', 'for', 'a', 'route', '.']
train
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/functions.py#L1090-L1103
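A hedged sketch of the endpoint contract this method relies on: route_map.match() resolves (slave_id, function_code, address) to a callable that is invoked with keyword arguments, and an unmatched route yields None, which triggers the TypeError branch. The store below is illustrative, not part of umodbus:

coils = {}

def write_coil(slave_id, address, value, function_code):
    # registered on the route map for the write-single-coil function code
    coils[address] = value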
4,875
thornomad/django-hitcount
hitcount/managers.py
HitManager.filter_active
def filter_active(self, *args, **kwargs): """ Return only the 'active' hits. How you count a hit/view will depend on personal choice: Should the same user/visitor *ever* be counted twice? After a week, or a month, or a year, should their view be counted again? The default is to consider a visitor's hit still 'active' if they return within the last seven days. After that the hit will be counted again. So if one person visits once a week for a year, they will add 52 hits to a given object. Change how long the expiration is by adding to settings.py: HITCOUNT_KEEP_HIT_ACTIVE = {'days' : 30, 'minutes' : 30} Accepts days, seconds, microseconds, milliseconds, minutes, hours, and weeks. It's creating a datetime.timedelta object. """ grace = getattr(settings, 'HITCOUNT_KEEP_HIT_ACTIVE', {'days': 7}) period = timezone.now() - timedelta(**grace) return self.filter(created__gte=period).filter(*args, **kwargs)
python
def filter_active(self, *args, **kwargs): """ Return only the 'active' hits. How you count a hit/view will depend on personal choice: Should the same user/visitor *ever* be counted twice? After a week, or a month, or a year, should their view be counted again? The default is to consider a visitor's hit still 'active' if they return within the last seven days. After that the hit will be counted again. So if one person visits once a week for a year, they will add 52 hits to a given object. Change how long the expiration is by adding to settings.py: HITCOUNT_KEEP_HIT_ACTIVE = {'days' : 30, 'minutes' : 30} Accepts days, seconds, microseconds, milliseconds, minutes, hours, and weeks. It's creating a datetime.timedelta object. """ grace = getattr(settings, 'HITCOUNT_KEEP_HIT_ACTIVE', {'days': 7}) period = timezone.now() - timedelta(**grace) return self.filter(created__gte=period).filter(*args, **kwargs)
['def', 'filter_active', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'grace', '=', 'getattr', '(', 'settings', ',', "'HITCOUNT_KEEP_HIT_ACTIVE'", ',', '{', "'days'", ':', '7', '}', ')', 'period', '=', 'timezone', '.', 'now', '(', ')', '-', 'timedelta', '(', '*', '*', 'grace', ')', 'return', 'self', '.', 'filter', '(', 'created__gte', '=', 'period', ')', '.', 'filter', '(', '*', 'args', ',', '*', '*', 'kwargs', ')']
Return only the 'active' hits. How you count a hit/view will depend on personal choice: Should the same user/visitor *ever* be counted twice? After a week, or a month, or a year, should their view be counted again? The default is to consider a visitor's hit still 'active' if they return within the last seven days. After that the hit will be counted again. So if one person visits once a week for a year, they will add 52 hits to a given object. Change how long the expiration is by adding to settings.py: HITCOUNT_KEEP_HIT_ACTIVE = {'days' : 30, 'minutes' : 30} Accepts days, seconds, microseconds, milliseconds, minutes, hours, and weeks. It's creating a datetime.timedelta object.
['Return', 'only', 'the', 'active', 'hits', '.']
train
https://github.com/thornomad/django-hitcount/blob/b35d2f9c213f6a2ff0e5d0a746339a5b84b4d416/hitcount/managers.py#L22-L45
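The settings override the docstring describes, plus an illustrative query (the Hit model ships with django-hitcount and uses this manager):

# settings.py
HITCOUNT_KEEP_HIT_ACTIVE = {'days': 30, 'minutes': 30}

# application code
from hitcount.models import Hit
active_hits = Hit.objects.filter_active()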
4,876
fermiPy/fermipy
fermipy/gtanalysis.py
GTBinnedAnalysis.model_counts_spectrum
def model_counts_spectrum(self, name, logemin, logemax, weighted=False): """Return the model counts spectrum of a source. Parameters ---------- name : str Source name. """ # EAC, we need this b/c older version of the ST don't have the right signature try: cs = np.array(self.like.logLike.modelCountsSpectrum( str(name), weighted)) except (TypeError, NotImplementedError): cs = np.array(self.like.logLike.modelCountsSpectrum(str(name))) imin = utils.val_to_edge(self.log_energies, logemin)[0] imax = utils.val_to_edge(self.log_energies, logemax)[0] if imax <= imin: raise Exception('Invalid energy range.') return cs[imin:imax]
python
def model_counts_spectrum(self, name, logemin, logemax, weighted=False): """Return the model counts spectrum of a source. Parameters ---------- name : str Source name. """ # EAC, we need this b/c older version of the ST don't have the right signature try: cs = np.array(self.like.logLike.modelCountsSpectrum( str(name), weighted)) except (TypeError, NotImplementedError): cs = np.array(self.like.logLike.modelCountsSpectrum(str(name))) imin = utils.val_to_edge(self.log_energies, logemin)[0] imax = utils.val_to_edge(self.log_energies, logemax)[0] if imax <= imin: raise Exception('Invalid energy range.') return cs[imin:imax]
['def', 'model_counts_spectrum', '(', 'self', ',', 'name', ',', 'logemin', ',', 'logemax', ',', 'weighted', '=', 'False', ')', ':', "# EAC, we need this b/c older version of the ST don't have the right signature", 'try', ':', 'cs', '=', 'np', '.', 'array', '(', 'self', '.', 'like', '.', 'logLike', '.', 'modelCountsSpectrum', '(', 'str', '(', 'name', ')', ',', 'weighted', ')', ')', 'except', '(', 'TypeError', ',', 'NotImplementedError', ')', ':', 'cs', '=', 'np', '.', 'array', '(', 'self', '.', 'like', '.', 'logLike', '.', 'modelCountsSpectrum', '(', 'str', '(', 'name', ')', ')', ')', 'imin', '=', 'utils', '.', 'val_to_edge', '(', 'self', '.', 'log_energies', ',', 'logemin', ')', '[', '0', ']', 'imax', '=', 'utils', '.', 'val_to_edge', '(', 'self', '.', 'log_energies', ',', 'logemax', ')', '[', '0', ']', 'if', 'imax', '<=', 'imin', ':', 'raise', 'Exception', '(', "'Invalid energy range.'", ')', 'return', 'cs', '[', 'imin', ':', 'imax', ']']
Return the model counts spectrum of a source. Parameters ---------- name : str Source name.
['Return', 'the', 'model', 'counts', 'spectrum', 'of', 'a', 'source', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L4982-L5001
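A hedged call sketch; it assumes a configured analysis instance named gta with the source already in the model, and the source name and energy bounds are illustrative:

# predicted counts between 10**2 and 10**3.5 MeV
cs = gta.model_counts_spectrum('mkn421', 2.0, 3.5, weighted=False)
print(cs.sum())  # total predicted counts in that range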
4,877
pypa/pipenv
pipenv/vendor/cerberus/errors.py
ErrorTree.add
def add(self, error): """ Add an error to the tree. :param error: :class:`~cerberus.errors.ValidationError` """ if not self._path_of_(error): self.errors.append(error) self.errors.sort() else: super(ErrorTree, self).add(error)
python
def add(self, error): """ Add an error to the tree. :param error: :class:`~cerberus.errors.ValidationError` """ if not self._path_of_(error): self.errors.append(error) self.errors.sort() else: super(ErrorTree, self).add(error)
['def', 'add', '(', 'self', ',', 'error', ')', ':', 'if', 'not', 'self', '.', '_path_of_', '(', 'error', ')', ':', 'self', '.', 'errors', '.', 'append', '(', 'error', ')', 'self', '.', 'errors', '.', 'sort', '(', ')', 'else', ':', 'super', '(', 'ErrorTree', ',', 'self', ')', '.', 'add', '(', 'error', ')']
Add an error to the tree. :param error: :class:`~cerberus.errors.ValidationError`
['Add', 'an', 'error', 'to', 'the', 'tree', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/errors.py#L286-L295
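cerberus builds these trees itself during validation; a sketch of reading one back afterwards, assuming cerberus 1.x semantics:

from cerberus import Validator

v = Validator({'age': {'type': 'integer'}})
v.validate({'age': 'ten'})
node = v.document_error_tree['age']
print([e.rule for e in node.errors])  # e.g. ['type']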
4,878
DLR-RM/RAFCON
source/rafcon/core/states/state.py
State.get_next_upper_library_root_state
def get_next_upper_library_root_state(self): """ Get next upper library root state The method recursively checks the parent states until it finds a StateMachine as parent or a library root state. If self is a LibraryState, the search is performed for self itself, not for self.state_copy. :return library root state (Execution or ContainerState) or None if self is neither a library root state nor inside of one :rtype rafcon.core.states.library_state.State: """ from rafcon.core.state_machine import StateMachine if self.is_root_state_of_library: return self state = self while state.parent is not None and not isinstance(state.parent, StateMachine): if state.parent.is_root_state_of_library: return state.parent state = state.parent return None
python
def get_next_upper_library_root_state(self): """ Get next upper library root state The method recursively checks the parent states until it finds a StateMachine as parent or a library root state. If self is a LibraryState, the search is performed for self itself, not for self.state_copy. :return library root state (Execution or ContainerState) or None if self is neither a library root state nor inside of one :rtype rafcon.core.states.library_state.State: """ from rafcon.core.state_machine import StateMachine if self.is_root_state_of_library: return self state = self while state.parent is not None and not isinstance(state.parent, StateMachine): if state.parent.is_root_state_of_library: return state.parent state = state.parent return None
['def', 'get_next_upper_library_root_state', '(', 'self', ')', ':', 'from', 'rafcon', '.', 'core', '.', 'state_machine', 'import', 'StateMachine', 'if', 'self', '.', 'is_root_state_of_library', ':', 'return', 'self', 'state', '=', 'self', 'while', 'state', '.', 'parent', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'state', '.', 'parent', ',', 'StateMachine', ')', ':', 'if', 'state', '.', 'parent', '.', 'is_root_state_of_library', ':', 'return', 'state', '.', 'parent', 'state', '=', 'state', '.', 'parent', 'return', 'None']
Get next upper library root state The method recursively checks the parent states until it finds a StateMachine as parent or a library root state. If self is a LibraryState, the search is performed for self itself, not for self.state_copy. :return library root state (Execution or ContainerState) or None if self is neither a library root state nor inside of one :rtype rafcon.core.states.library_state.State:
['Get', 'next', 'upper', 'library', 'root', 'state']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L1419-L1439
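A small sketch of walking from an arbitrary state to its enclosing library root; some_state stands in for any state of a loaded state machine:

root = some_state.get_next_upper_library_root_state()
if root is None:
    print('state is not inside a library')
else:
    print('enclosing library root:', root.name)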
4,879
gtaylor/paypal-python
paypal/interface.py
PayPalInterface._encode_utf8
def _encode_utf8(self, **kwargs): """ UTF8 encodes all of the NVP values. """ if is_py3: # This is only valid for Python 2. In Python 3, unicode is # everywhere (yay). return kwargs unencoded_pairs = kwargs for i in unencoded_pairs.keys(): #noinspection PyUnresolvedReferences if isinstance(unencoded_pairs[i], types.UnicodeType): unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8') return unencoded_pairs
python
def _encode_utf8(self, **kwargs): """ UTF8 encodes all of the NVP values. """ if is_py3: # This is only valid for Python 2. In Python 3, unicode is # everywhere (yay). return kwargs unencoded_pairs = kwargs for i in unencoded_pairs.keys(): #noinspection PyUnresolvedReferences if isinstance(unencoded_pairs[i], types.UnicodeType): unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8') return unencoded_pairs
['def', '_encode_utf8', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'is_py3', ':', '# This is only valid for Python 2. In Python 3, unicode is', '# everywhere (yay).', 'return', 'kwargs', 'unencoded_pairs', '=', 'kwargs', 'for', 'i', 'in', 'unencoded_pairs', '.', 'keys', '(', ')', ':', '#noinspection PyUnresolvedReferences', 'if', 'isinstance', '(', 'unencoded_pairs', '[', 'i', ']', ',', 'types', '.', 'UnicodeType', ')', ':', 'unencoded_pairs', '[', 'i', ']', '=', 'unencoded_pairs', '[', 'i', ']', '.', 'encode', '(', "'utf-8'", ')', 'return', 'unencoded_pairs']
UTF8 encodes all of the NVP values.
['UTF8', 'encodes', 'all', 'of', 'the', 'NVP', 'values', '.']
train
https://github.com/gtaylor/paypal-python/blob/aa7a987ea9e9b7f37bcd8a8b54a440aad6c871b1/paypal/interface.py#L58-L72
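An illustrative round-trip, assuming iface is a configured PayPalInterface; only Python 2 actually re-encodes the values:

pairs = iface._encode_utf8(DESC=u'caf\xe9', AMT=u'10.00')
# Python 2 -> {'DESC': 'caf\xc3\xa9', 'AMT': '10.00'} (UTF-8 byte strings)
# Python 3 -> kwargs returned unchanged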
4,880
cloud-custodian/cloud-custodian
tools/c7n_azure/c7n_azure/session.py
Session._initialize_session
def _initialize_session(self): """ Creates a session using available authentication type. Auth priority: 1. Token Auth 2. Tenant Auth 3. Azure CLI Auth """ # Only run once if self.credentials is not None: return tenant_auth_variables = [ constants.ENV_TENANT_ID, constants.ENV_SUB_ID, constants.ENV_CLIENT_ID, constants.ENV_CLIENT_SECRET ] token_auth_variables = [ constants.ENV_ACCESS_TOKEN, constants.ENV_SUB_ID ] msi_auth_variables = [ constants.ENV_USE_MSI, constants.ENV_SUB_ID ] if self.authorization_file: self.credentials, self.subscription_id = self.load_auth_file(self.authorization_file) self.log.info("Creating session with authorization file") elif all(k in os.environ for k in token_auth_variables): # Token authentication self.credentials = BasicTokenAuthentication( token={ 'access_token': os.environ[constants.ENV_ACCESS_TOKEN] }) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.log.info("Creating session with Token Authentication") self._is_token_auth = True elif all(k in os.environ for k in tenant_auth_variables): # Tenant (service principal) authentication self.credentials = ServicePrincipalCredentials( client_id=os.environ[constants.ENV_CLIENT_ID], secret=os.environ[constants.ENV_CLIENT_SECRET], tenant=os.environ[constants.ENV_TENANT_ID], resource=self.resource_namespace) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.tenant_id = os.environ[constants.ENV_TENANT_ID] self.log.info("Creating session with Service Principal Authentication") elif all(k in os.environ for k in msi_auth_variables): # MSI authentication if constants.ENV_CLIENT_ID in os.environ: self.credentials = MSIAuthentication( client_id=os.environ[constants.ENV_CLIENT_ID], resource=self.resource_namespace) else: self.credentials = MSIAuthentication( resource=self.resource_namespace) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.log.info("Creating session with MSI Authentication") else: # Azure CLI authentication self._is_cli_auth = True (self.credentials, self.subscription_id, self.tenant_id) = Profile().get_login_credentials( resource=self.resource_namespace) self.log.info("Creating session with Azure CLI Authentication") # Let provided id parameter override everything else if self.subscription_id_override is not None: self.subscription_id = self.subscription_id_override self.log.info("Session using Subscription ID: %s" % self.subscription_id) if self.credentials is None: self.log.error('Unable to locate credentials for Azure session.')
python
def _initialize_session(self): """ Creates a session using available authentication type. Auth priority: 1. Token Auth 2. Tenant Auth 3. Azure CLI Auth """ # Only run once if self.credentials is not None: return tenant_auth_variables = [ constants.ENV_TENANT_ID, constants.ENV_SUB_ID, constants.ENV_CLIENT_ID, constants.ENV_CLIENT_SECRET ] token_auth_variables = [ constants.ENV_ACCESS_TOKEN, constants.ENV_SUB_ID ] msi_auth_variables = [ constants.ENV_USE_MSI, constants.ENV_SUB_ID ] if self.authorization_file: self.credentials, self.subscription_id = self.load_auth_file(self.authorization_file) self.log.info("Creating session with authorization file") elif all(k in os.environ for k in token_auth_variables): # Token authentication self.credentials = BasicTokenAuthentication( token={ 'access_token': os.environ[constants.ENV_ACCESS_TOKEN] }) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.log.info("Creating session with Token Authentication") self._is_token_auth = True elif all(k in os.environ for k in tenant_auth_variables): # Tenant (service principal) authentication self.credentials = ServicePrincipalCredentials( client_id=os.environ[constants.ENV_CLIENT_ID], secret=os.environ[constants.ENV_CLIENT_SECRET], tenant=os.environ[constants.ENV_TENANT_ID], resource=self.resource_namespace) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.tenant_id = os.environ[constants.ENV_TENANT_ID] self.log.info("Creating session with Service Principal Authentication") elif all(k in os.environ for k in msi_auth_variables): # MSI authentication if constants.ENV_CLIENT_ID in os.environ: self.credentials = MSIAuthentication( client_id=os.environ[constants.ENV_CLIENT_ID], resource=self.resource_namespace) else: self.credentials = MSIAuthentication( resource=self.resource_namespace) self.subscription_id = os.environ[constants.ENV_SUB_ID] self.log.info("Creating session with MSI Authentication") else: # Azure CLI authentication self._is_cli_auth = True (self.credentials, self.subscription_id, self.tenant_id) = Profile().get_login_credentials( resource=self.resource_namespace) self.log.info("Creating session with Azure CLI Authentication") # Let provided id parameter override everything else if self.subscription_id_override is not None: self.subscription_id = self.subscription_id_override self.log.info("Session using Subscription ID: %s" % self.subscription_id) if self.credentials is None: self.log.error('Unable to locate credentials for Azure session.')
['def', '_initialize_session', '(', 'self', ')', ':', '# Only run once', 'if', 'self', '.', 'credentials', 'is', 'not', 'None', ':', 'return', 'tenant_auth_variables', '=', '[', 'constants', '.', 'ENV_TENANT_ID', ',', 'constants', '.', 'ENV_SUB_ID', ',', 'constants', '.', 'ENV_CLIENT_ID', ',', 'constants', '.', 'ENV_CLIENT_SECRET', ']', 'token_auth_variables', '=', '[', 'constants', '.', 'ENV_ACCESS_TOKEN', ',', 'constants', '.', 'ENV_SUB_ID', ']', 'msi_auth_variables', '=', '[', 'constants', '.', 'ENV_USE_MSI', ',', 'constants', '.', 'ENV_SUB_ID', ']', 'if', 'self', '.', 'authorization_file', ':', 'self', '.', 'credentials', ',', 'self', '.', 'subscription_id', '=', 'self', '.', 'load_auth_file', '(', 'self', '.', 'authorization_file', ')', 'self', '.', 'log', '.', 'info', '(', '"Creating session with authorization file"', ')', 'elif', 'all', '(', 'k', 'in', 'os', '.', 'environ', 'for', 'k', 'in', 'token_auth_variables', ')', ':', '# Token authentication', 'self', '.', 'credentials', '=', 'BasicTokenAuthentication', '(', 'token', '=', '{', "'access_token'", ':', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_ACCESS_TOKEN', ']', '}', ')', 'self', '.', 'subscription_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_SUB_ID', ']', 'self', '.', 'log', '.', 'info', '(', '"Creating session with Token Authentication"', ')', 'self', '.', '_is_token_auth', '=', 'True', 'elif', 'all', '(', 'k', 'in', 'os', '.', 'environ', 'for', 'k', 'in', 'tenant_auth_variables', ')', ':', '# Tenant (service principal) authentication', 'self', '.', 'credentials', '=', 'ServicePrincipalCredentials', '(', 'client_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_CLIENT_ID', ']', ',', 'secret', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_CLIENT_SECRET', ']', ',', 'tenant', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_TENANT_ID', ']', ',', 'resource', '=', 'self', '.', 'resource_namespace', ')', 'self', '.', 'subscription_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_SUB_ID', ']', 'self', '.', 'tenant_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_TENANT_ID', ']', 'self', '.', 'log', '.', 'info', '(', '"Creating session with Service Principal Authentication"', ')', 'elif', 'all', '(', 'k', 'in', 'os', '.', 'environ', 'for', 'k', 'in', 'msi_auth_variables', ')', ':', '# MSI authentication', 'if', 'constants', '.', 'ENV_CLIENT_ID', 'in', 'os', '.', 'environ', ':', 'self', '.', 'credentials', '=', 'MSIAuthentication', '(', 'client_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_CLIENT_ID', ']', ',', 'resource', '=', 'self', '.', 'resource_namespace', ')', 'else', ':', 'self', '.', 'credentials', '=', 'MSIAuthentication', '(', 'resource', '=', 'self', '.', 'resource_namespace', ')', 'self', '.', 'subscription_id', '=', 'os', '.', 'environ', '[', 'constants', '.', 'ENV_SUB_ID', ']', 'self', '.', 'log', '.', 'info', '(', '"Creating session with MSI Authentication"', ')', 'else', ':', '# Azure CLI authentication', 'self', '.', '_is_cli_auth', '=', 'True', '(', 'self', '.', 'credentials', ',', 'self', '.', 'subscription_id', ',', 'self', '.', 'tenant_id', ')', '=', 'Profile', '(', ')', '.', 'get_login_credentials', '(', 'resource', '=', 'self', '.', 'resource_namespace', ')', 'self', '.', 'log', '.', 'info', '(', '"Creating session with Azure CLI Authentication"', ')', '# Let provided id parameter override everything else', 'if', 'self', '.', 'subscription_id_override', 'is', 'not', 'None', ':', 'self', '.', 'subscription_id', '=', 'self', '.', 'subscription_id_override', 'self', '.', 'log', '.', 'info', '(', '"Session using Subscription ID: %s"', '%', 'self', '.', 'subscription_id', ')', 'if', 'self', '.', 'credentials', 'is', 'None', ':', 'self', '.', 'log', '.', 'error', '(', "'Unable to locate credentials for Azure session.'", ')']
Creates a session using available authentication type. Auth priority: 1. Token Auth 2. Tenant Auth 3. Azure CLI Auth
['Creates', 'a', 'session', 'using', 'available', 'authentication', 'type', '.']
train
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_azure/c7n_azure/session.py#L58-L139
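A hedged sketch of forcing the service-principal branch; the AZURE_* literals are what the c7n_azure constants usually resolve to, but treat them as assumptions:

import os

os.environ.update({
    'AZURE_TENANT_ID': '<tenant>',
    'AZURE_SUBSCRIPTION_ID': '<subscription>',
    'AZURE_CLIENT_ID': '<client>',
    'AZURE_CLIENT_SECRET': '<secret>',
})
s = Session()
s._initialize_session()  # logs the Service Principal Authentication branch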
4,881
saltstack/salt
salt/utils/openstack/nova.py
SaltNova.keypair_add
def keypair_add(self, name, pubfile=None, pubkey=None): ''' Add a keypair ''' nt_ks = self.compute_conn if pubfile: with salt.utils.files.fopen(pubfile, 'r') as fp_: pubkey = salt.utils.stringutils.to_unicode(fp_.read()) if not pubkey: return False nt_ks.keypairs.create(name, public_key=pubkey) ret = {'name': name, 'pubkey': pubkey} return ret
python
def keypair_add(self, name, pubfile=None, pubkey=None): ''' Add a keypair ''' nt_ks = self.compute_conn if pubfile: with salt.utils.files.fopen(pubfile, 'r') as fp_: pubkey = salt.utils.stringutils.to_unicode(fp_.read()) if not pubkey: return False nt_ks.keypairs.create(name, public_key=pubkey) ret = {'name': name, 'pubkey': pubkey} return ret
['def', 'keypair_add', '(', 'self', ',', 'name', ',', 'pubfile', '=', 'None', ',', 'pubkey', '=', 'None', ')', ':', 'nt_ks', '=', 'self', '.', 'compute_conn', 'if', 'pubfile', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'pubfile', ',', "'r'", ')', 'as', 'fp_', ':', 'pubkey', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'fp_', '.', 'read', '(', ')', ')', 'if', 'not', 'pubkey', ':', 'return', 'False', 'nt_ks', '.', 'keypairs', '.', 'create', '(', 'name', ',', 'public_key', '=', 'pubkey', ')', 'ret', '=', '{', "'name'", ':', 'name', ',', "'pubkey'", ':', 'pubkey', '}', 'return', 'ret']
Add a keypair
['Add', 'a', 'keypair']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L855-L867
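A hedged usage sketch, assuming an authenticated SaltNova instance named conn; the key name and path are illustrative:

ret = conn.keypair_add('bootstrap', pubfile='/root/.ssh/id_rsa.pub')
# -> {'name': 'bootstrap', 'pubkey': 'ssh-rsa AAAA...'}, or False if no key was read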
4,882
yyuu/botornado
boto/gs/key.py
Key.add_group_email_grant
def add_group_email_grant(self, permission, email_address, headers=None): """ Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. """ acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers)
python
def add_group_email_grant(self, permission, email_address, headers=None): """ Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. """ acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers)
['def', 'add_group_email_grant', '(', 'self', ',', 'permission', ',', 'email_address', ',', 'headers', '=', 'None', ')', ':', 'acl', '=', 'self', '.', 'get_acl', '(', 'headers', '=', 'headers', ')', 'acl', '.', 'add_group_email_grant', '(', 'permission', ',', 'email_address', ')', 'self', '.', 'set_acl', '(', 'acl', ',', 'headers', '=', 'headers', ')']
Convenience method that provides a quick way to add an email group grant to a key. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission.
['Convenience', 'method', 'that', 'provides', 'a', 'quick', 'way', 'to', 'add', 'an', 'email', 'group', 'grant', 'to', 'a', 'key', '.', 'This', 'method', 'retrieves', 'the', 'current', 'ACL', 'creates', 'a', 'new', 'grant', 'based', 'on', 'the', 'parameters', 'passed', 'in', 'adds', 'that', 'grant', 'to', 'the', 'ACL', 'and', 'then', 'PUT', 's', 'the', 'new', 'ACL', 'back', 'to', 'GS', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/gs/key.py#L70-L89
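A short sketch of the read-modify-write cycle the docstring describes; the bucket, object name and group address are illustrative:

key = bucket.get_key('reports/summary.csv')
key.add_group_email_grant('READ', 'data-readers@googlegroups.com')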
4,883
Damgaard/PyImgur
pyimgur/__init__.py
Imgur.get_at_url
def get_at_url(self, url): """ Return a object representing the content at url. Returns None if no object could be matched with the id. Works for Album, Comment, Gallery_album, Gallery_image, Image and User. NOTE: Imgur's documentation does not cover what urls are available. Some urls, such as imgur.com/<ID> can be for several different types of object. Using a wrong, but similair call, such as get_subreddit_image on a meme image will not cause an error. But instead return a subset of information, with either the remaining pieces missing or the value set to None. This makes it hard to create a method such as this that attempts to deduce the object from the url. Due to these factors, this method should be considered experimental and used carefully. :param url: The url where the content is located at """ class NullDevice(): def write(self, string): pass def get_gallery_item(id): """ Special helper method to get gallery items. The problem is that it's impossible to distinguish albums and images from each other based on the url. And there isn't a common url endpoints that return either a Gallery_album or a Gallery_image depending on what the id represents. So the only option is to assume it's a Gallery_image and if we get an exception then try Gallery_album. Gallery_image is attempted first because there is the most of them. """ try: # HACK: Problem is that send_request prints the error message # from Imgur when it encounters an error. This is nice because # this error message is more descriptive than just the status # code that Requests give. But since we first assume the id # belong to an image, it means we will get an error whenever # the id belongs to an album. The following code temporarily # disables stdout to avoid give a cryptic and incorrect error. # Code for disabling stdout is from # http://coreygoldberg.blogspot.dk/2009/05/ # python-redirect-or-turn-off-stdout-and.html original_stdout = sys.stdout # keep a reference to STDOUT sys.stdout = NullDevice() # redirect the real STDOUT return self.get_gallery_image(id) # TODO: Add better error codes so I don't have to do a catch-all except Exception: return self.get_gallery_album(id) finally: sys.stdout = original_stdout # turn STDOUT back on if not self.is_imgur_url(url): return None objects = {'album': {'regex': "a/(?P<id>[\w.]*?)$", 'method': self.get_album}, 'comment': {'regex': "gallery/\w*/comment/(?P<id>[\w.]*?)$", 'method': self.get_comment}, 'gallery': {'regex': "(gallery|r/\w*?)/(?P<id>[\w.]*?)$", 'method': get_gallery_item}, # Valid image extensions: http://imgur.com/faq#types # All are between 3 and 4 chars long. 'image': {'regex': "(?P<id>[\w.]*?)(\\.\w{3,4})?$", 'method': self.get_image}, 'user': {'regex': "user/(?P<id>[\w.]*?)$", 'method': self.get_user} } parsed_url = urlparse(url) for obj_type, values in objects.items(): regex_result = re.match('/' + values['regex'], parsed_url.path) if regex_result is not None: obj_id = regex_result.group('id') initial_object = values['method'](obj_id) if obj_type == 'image': try: # A better version might be to ping the url where the # gallery_image should be with a requests.head call. If # we get a 200 returned, then that means it exists and # this becomes less hacky. original_stdout = sys.stdout sys.stdout = NullDevice() if getattr(initial_object, 'section', None): sub = initial_object.section return self.get_subreddit_image(sub, obj_id) return self.get_gallery_image(obj_id) except Exception: pass finally: sys.stdout = original_stdout return initial_object
python
def get_at_url(self, url): """ Return a object representing the content at url. Returns None if no object could be matched with the id. Works for Album, Comment, Gallery_album, Gallery_image, Image and User. NOTE: Imgur's documentation does not cover what urls are available. Some urls, such as imgur.com/<ID> can be for several different types of object. Using a wrong, but similair call, such as get_subreddit_image on a meme image will not cause an error. But instead return a subset of information, with either the remaining pieces missing or the value set to None. This makes it hard to create a method such as this that attempts to deduce the object from the url. Due to these factors, this method should be considered experimental and used carefully. :param url: The url where the content is located at """ class NullDevice(): def write(self, string): pass def get_gallery_item(id): """ Special helper method to get gallery items. The problem is that it's impossible to distinguish albums and images from each other based on the url. And there isn't a common url endpoints that return either a Gallery_album or a Gallery_image depending on what the id represents. So the only option is to assume it's a Gallery_image and if we get an exception then try Gallery_album. Gallery_image is attempted first because there is the most of them. """ try: # HACK: Problem is that send_request prints the error message # from Imgur when it encounters an error. This is nice because # this error message is more descriptive than just the status # code that Requests give. But since we first assume the id # belong to an image, it means we will get an error whenever # the id belongs to an album. The following code temporarily # disables stdout to avoid give a cryptic and incorrect error. # Code for disabling stdout is from # http://coreygoldberg.blogspot.dk/2009/05/ # python-redirect-or-turn-off-stdout-and.html original_stdout = sys.stdout # keep a reference to STDOUT sys.stdout = NullDevice() # redirect the real STDOUT return self.get_gallery_image(id) # TODO: Add better error codes so I don't have to do a catch-all except Exception: return self.get_gallery_album(id) finally: sys.stdout = original_stdout # turn STDOUT back on if not self.is_imgur_url(url): return None objects = {'album': {'regex': "a/(?P<id>[\w.]*?)$", 'method': self.get_album}, 'comment': {'regex': "gallery/\w*/comment/(?P<id>[\w.]*?)$", 'method': self.get_comment}, 'gallery': {'regex': "(gallery|r/\w*?)/(?P<id>[\w.]*?)$", 'method': get_gallery_item}, # Valid image extensions: http://imgur.com/faq#types # All are between 3 and 4 chars long. 'image': {'regex': "(?P<id>[\w.]*?)(\\.\w{3,4})?$", 'method': self.get_image}, 'user': {'regex': "user/(?P<id>[\w.]*?)$", 'method': self.get_user} } parsed_url = urlparse(url) for obj_type, values in objects.items(): regex_result = re.match('/' + values['regex'], parsed_url.path) if regex_result is not None: obj_id = regex_result.group('id') initial_object = values['method'](obj_id) if obj_type == 'image': try: # A better version might be to ping the url where the # gallery_image should be with a requests.head call. If # we get a 200 returned, then that means it exists and # this becomes less hacky. original_stdout = sys.stdout sys.stdout = NullDevice() if getattr(initial_object, 'section', None): sub = initial_object.section return self.get_subreddit_image(sub, obj_id) return self.get_gallery_image(obj_id) except Exception: pass finally: sys.stdout = original_stdout return initial_object
['def', 'get_at_url', '(', 'self', ',', 'url', ')', ':', 'class', 'NullDevice', '(', ')', ':', 'def', 'write', '(', 'self', ',', 'string', ')', ':', 'pass', 'def', 'get_gallery_item', '(', 'id', ')', ':', '"""\n Special helper method to get gallery items.\n\n The problem is that it\'s impossible to distinguish albums and\n images from each other based on the url. And there isn\'t a common\n url endpoints that return either a Gallery_album or a Gallery_image\n depending on what the id represents. So the only option is to\n assume it\'s a Gallery_image and if we get an exception then try\n Gallery_album. Gallery_image is attempted first because there is\n the most of them.\n """', 'try', ':', '# HACK: Problem is that send_request prints the error message', '# from Imgur when it encounters an error. This is nice because', '# this error message is more descriptive than just the status', '# code that Requests give. But since we first assume the id', '# belong to an image, it means we will get an error whenever', '# the id belongs to an album. The following code temporarily', '# disables stdout to avoid give a cryptic and incorrect error.', '# Code for disabling stdout is from', '# http://coreygoldberg.blogspot.dk/2009/05/', '# python-redirect-or-turn-off-stdout-and.html', 'original_stdout', '=', 'sys', '.', 'stdout', '# keep a reference to STDOUT', 'sys', '.', 'stdout', '=', 'NullDevice', '(', ')', '# redirect the real STDOUT', 'return', 'self', '.', 'get_gallery_image', '(', 'id', ')', "# TODO: Add better error codes so I don't have to do a catch-all", 'except', 'Exception', ':', 'return', 'self', '.', 'get_gallery_album', '(', 'id', ')', 'finally', ':', 'sys', '.', 'stdout', '=', 'original_stdout', '# turn STDOUT back on', 'if', 'not', 'self', '.', 'is_imgur_url', '(', 'url', ')', ':', 'return', 'None', 'objects', '=', '{', "'album'", ':', '{', "'regex'", ':', '"a/(?P<id>[\\w.]*?)$"', ',', "'method'", ':', 'self', '.', 'get_album', '}', ',', "'comment'", ':', '{', "'regex'", ':', '"gallery/\\w*/comment/(?P<id>[\\w.]*?)$"', ',', "'method'", ':', 'self', '.', 'get_comment', '}', ',', "'gallery'", ':', '{', "'regex'", ':', '"(gallery|r/\\w*?)/(?P<id>[\\w.]*?)$"', ',', "'method'", ':', 'get_gallery_item', '}', ',', '# Valid image extensions: http://imgur.com/faq#types', '# All are between 3 and 4 chars long.', "'image'", ':', '{', "'regex'", ':', '"(?P<id>[\\w.]*?)(\\\\.\\w{3,4})?$"', ',', "'method'", ':', 'self', '.', 'get_image', '}', ',', "'user'", ':', '{', "'regex'", ':', '"user/(?P<id>[\\w.]*?)$"', ',', "'method'", ':', 'self', '.', 'get_user', '}', '}', 'parsed_url', '=', 'urlparse', '(', 'url', ')', 'for', 'obj_type', ',', 'values', 'in', 'objects', '.', 'items', '(', ')', ':', 'regex_result', '=', 're', '.', 'match', '(', "'/'", '+', 'values', '[', "'regex'", ']', ',', 'parsed_url', '.', 'path', ')', 'if', 'regex_result', 'is', 'not', 'None', ':', 'obj_id', '=', 'regex_result', '.', 'group', '(', "'id'", ')', 'initial_object', '=', 'values', '[', "'method'", ']', '(', 'obj_id', ')', 'if', 'obj_type', '==', "'image'", ':', 'try', ':', '# A better version might be to ping the url where the', '# gallery_image should be with a requests.head call. If', '# we get a 200 returned, then that means it exists and', '# this becomes less hacky.', 'original_stdout', '=', 'sys', '.', 'stdout', 'sys', '.', 'stdout', '=', 'NullDevice', '(', ')', 'if', 'getattr', '(', 'initial_object', ',', "'section'", ',', 'None', ')', ':', 'sub', '=', 'initial_object', '.', 'section', 'return', 'self', '.', 'get_subreddit_image', '(', 'sub', ',', 'obj_id', ')', 'return', 'self', '.', 'get_gallery_image', '(', 'obj_id', ')', 'except', 'Exception', ':', 'pass', 'finally', ':', 'sys', '.', 'stdout', '=', 'original_stdout', 'return', 'initial_object']
Return an object representing the content at url. Returns None if no object could be matched with the id. Works for Album, Comment, Gallery_album, Gallery_image, Image and User. NOTE: Imgur's documentation does not cover what urls are available. Some urls, such as imgur.com/<ID> can be for several different types of object. Using a wrong, but similar call, such as get_subreddit_image on a meme image will not cause an error, but instead returns a subset of information, with either the remaining pieces missing or the value set to None. This makes it hard to create a method such as this that attempts to deduce the object from the url. Due to these factors, this method should be considered experimental and used carefully. :param url: The url where the content is located
['Return', 'an', 'object', 'representing', 'the', 'content', 'at', 'url', '.']
train
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L845-L939
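A hedged sketch; the concrete return type depends on what the url points at, and the gallery id below is made up:

import pyimgur

im = pyimgur.Imgur('<your client id>')
obj = im.get_at_url('https://imgur.com/gallery/abcde')
print(type(obj).__name__)  # Gallery_image, Gallery_album, Image, Album or User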
4,884
OSSOS/MOP
src/ossos/core/ossos/util.py
get_pixel_bounds_from_datasec_keyword
def get_pixel_bounds_from_datasec_keyword(datasec): """ Return the x/y pixel boundaries of the data section. :param datasec: str e.g. '[33:2080,1:4612]' :return: ((xmin,xmax),(ymin,ymax)) """ datasec = re.findall(r'(\d+)', datasec) x1 = min(int(datasec[0]), int(datasec[1])) x2 = max(int(datasec[0]), int(datasec[1])) y1 = min(int(datasec[2]), int(datasec[3])) y2 = max(int(datasec[2]), int(datasec[3])) return (x1, x2), (y1, y2)
python
def get_pixel_bounds_from_datasec_keyword(datasec): """ Return the x/y pixel boundaries of the data section. :param datasec: str e.g. '[33:2080,1:4612]' :return: ((xmin,xmax),(ymin,ymax)) """ datasec = re.findall(r'(\d+)', datasec) x1 = min(int(datasec[0]), int(datasec[1])) x2 = max(int(datasec[0]), int(datasec[1])) y1 = min(int(datasec[2]), int(datasec[3])) y2 = max(int(datasec[2]), int(datasec[3])) return (x1, x2), (y1, y2)
['def', 'get_pixel_bounds_from_datasec_keyword', '(', 'datasec', ')', ':', 'datasec', '=', 're', '.', 'findall', '(', "r'(\\d+)'", ',', 'datasec', ')', 'x1', '=', 'min', '(', 'int', '(', 'datasec', '[', '0', ']', ')', ',', 'int', '(', 'datasec', '[', '1', ']', ')', ')', 'x2', '=', 'max', '(', 'int', '(', 'datasec', '[', '0', ']', ')', ',', 'int', '(', 'datasec', '[', '1', ']', ')', ')', 'y1', '=', 'min', '(', 'int', '(', 'datasec', '[', '2', ']', ')', ',', 'int', '(', 'datasec', '[', '3', ']', ')', ')', 'y2', '=', 'max', '(', 'int', '(', 'datasec', '[', '2', ']', ')', ',', 'int', '(', 'datasec', '[', '3', ']', ')', ')', 'return', '(', 'x1', ',', 'x2', ')', ',', '(', 'y1', ',', 'y2', ')']
Return the x/y pixel boundaries of the data section. :param datasec: str e.g. '[33:2080,1:4612]' :return: ((xmin,xmax),(ymin,ymax))
['Return', 'the', 'x', '/', 'y', 'pixel', 'boundaries', 'of', 'the', 'data', 'section', '.', ':', 'param', 'datasec', ':', 'str', 'e', '.', 'g', '.', '[', '33', ':', '2080', '1', ':', '4612', ']', ':', 'return', ':', '((', 'xmin', 'xmax', ')', '(', 'ymin', 'ymax', '))']
train
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/util.py#L133-L145
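The docstring's own example value, doctest style:

>>> get_pixel_bounds_from_datasec_keyword('[33:2080,1:4612]')
((33, 2080), (1, 4612))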
4,885
AtteqCom/zsl
src/zsl/utils/deploy/integrator.py
integrate_to_file
def integrate_to_file(what, filename, start_line, end_line): """Integrate content into a file within "line marks" """ try: with open(filename) as f: lines = f.readlines() except IOError: lines = [] tmp_file = tempfile.NamedTemporaryFile(delete=False) lines.reverse() # first copy before start line while lines: line = lines.pop() if line == start_line: break tmp_file.write(line) # insert content tmp_file.write(start_line) tmp_file.write(what) tmp_file.write(end_line) # skip until end line while lines: line = lines.pop() if line == end_line: break # copy rest, restoring original order (lines is still reversed) tmp_file.writelines(reversed(lines)) tmp_file.close() os.rename(tmp_file.name, filename)
python
def integrate_to_file(what, filename, start_line, end_line): """Integrate content into a file within "line marks" """ try: with open(filename) as f: lines = f.readlines() except IOError: lines = [] tmp_file = tempfile.NamedTemporaryFile(delete=False) lines.reverse() # first copy before start line while lines: line = lines.pop() if line == start_line: break tmp_file.write(line) # insert content tmp_file.write(start_line) tmp_file.write(what) tmp_file.write(end_line) # skip until end line while lines: line = lines.pop() if line == end_line: break # copy rest, restoring original order (lines is still reversed) tmp_file.writelines(reversed(lines)) tmp_file.close() os.rename(tmp_file.name, filename)
['def', 'integrate_to_file', '(', 'what', ',', 'filename', ',', 'start_line', ',', 'end_line', ')', ':', 'try', ':', 'with', 'open', '(', 'filename', ')', 'as', 'f', ':', 'lines', '=', 'f', '.', 'readlines', '(', ')', 'except', 'IOError', ':', 'lines', '=', '[', ']', 'tmp_file', '=', 'tempfile', '.', 'NamedTemporaryFile', '(', 'delete', '=', 'False', ')', 'lines', '.', 'reverse', '(', ')', '# first copy before start line', 'while', 'lines', ':', 'line', '=', 'lines', '.', 'pop', '(', ')', 'if', 'line', '==', 'start_line', ':', 'break', 'tmp_file', '.', 'write', '(', 'line', ')', '# insert content', 'tmp_file', '.', 'write', '(', 'start_line', ')', 'tmp_file', '.', 'write', '(', 'what', ')', 'tmp_file', '.', 'write', '(', 'end_line', ')', '# skip until end line', 'while', 'lines', ':', 'line', '=', 'lines', '.', 'pop', '(', ')', 'if', 'line', '==', 'end_line', ':', 'break', '# copy rest, restoring original order (lines is still reversed)', 'tmp_file', '.', 'writelines', '(', 'reversed', '(', 'lines', ')', ')', 'tmp_file', '.', 'close', '(', ')', 'os', '.', 'rename', '(', 'tmp_file', '.', 'name', ',', 'filename', ')']
Integrate content into a file within "line marks"
['Integrate', 'content', 'into', 'a', 'file', 'within', 'line', 'marks']
train
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/deploy/integrator.py#L12-L52
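A sketch of maintaining a managed block with this helper; note the marks must match whole lines, including the trailing newline, because readlines() keeps it. File name and payload are illustrative:

integrate_to_file(
    '0 * * * * root /usr/local/bin/job\n',
    'crontab.txt',
    '# BEGIN managed block\n',
    '# END managed block\n',
)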
4,886
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
zeros
def zeros(shape, ctx=None, dtype=None, **kwargs): """Returns a new array filled with all zeros, with the given shape and type. Parameters ---------- shape : int or tuple of int The shape of the empty array. ctx : Context, optional An optional device context (default is the current default context). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). out : NDArray, optional The output NDArray (default is `None`). Returns ------- NDArray A created array Examples -------- >>> mx.nd.zeros(1).asnumpy() array([ 0.], dtype=float32) >>> mx.nd.zeros((1,2), mx.gpu(0)) <NDArray 1x2 @gpu(0)> >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy() array([[ 0., 0.]], dtype=float16) """ # pylint: disable= unused-argument if ctx is None: ctx = current_context() dtype = mx_real_t if dtype is None else dtype # pylint: disable= no-member, protected-access return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
python
def zeros(shape, ctx=None, dtype=None, **kwargs): """Returns a new array filled with all zeros, with the given shape and type. Parameters ---------- shape : int or tuple of int The shape of the empty array. ctx : Context, optional An optional device context (default is the current default context). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). out : NDArray, optional The output NDArray (default is `None`). Returns ------- NDArray A created array Examples -------- >>> mx.nd.zeros(1).asnumpy() array([ 0.], dtype=float32) >>> mx.nd.zeros((1,2), mx.gpu(0)) <NDArray 1x2 @gpu(0)> >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy() array([[ 0., 0.]], dtype=float16) """ # pylint: disable= unused-argument if ctx is None: ctx = current_context() dtype = mx_real_t if dtype is None else dtype # pylint: disable= no-member, protected-access return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
['def', 'zeros', '(', 'shape', ',', 'ctx', '=', 'None', ',', 'dtype', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# pylint: disable= unused-argument', 'if', 'ctx', 'is', 'None', ':', 'ctx', '=', 'current_context', '(', ')', 'dtype', '=', 'mx_real_t', 'if', 'dtype', 'is', 'None', 'else', 'dtype', '# pylint: disable= no-member, protected-access', 'return', '_internal', '.', '_zeros', '(', 'shape', '=', 'shape', ',', 'ctx', '=', 'ctx', ',', 'dtype', '=', 'dtype', ',', '*', '*', 'kwargs', ')']
Returns a new array filled with all zeros, with the given shape and type. Parameters ---------- shape : int or tuple of int The shape of the empty array. ctx : Context, optional An optional device context (default is the current default context). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). out : NDArray, optional The output NDArray (default is `None`). Returns ------- NDArray A created array Examples -------- >>> mx.nd.zeros(1).asnumpy() array([ 0.], dtype=float32) >>> mx.nd.zeros((1,2), mx.gpu(0)) <NDArray 1x2 @gpu(0)> >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy() array([[ 0., 0.]], dtype=float16)
['Returns', 'a', 'new', 'array', 'filled', 'with', 'all', 'zeros', 'with', 'the', 'given', 'shape', 'and', 'type', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3805-L3838
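The docstring lists an out parameter without demonstrating it; a small sketch of that path:

import mxnet as mx

buf = mx.nd.empty((2, 2))
mx.nd.zeros((2, 2), out=buf)  # fills buf in place and returns it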
4,887
StackStorm/pybind
pybind/slxos/v17r_1_01a/__init__.py
brocade_ssm_operational._set_vxlan_acl_state
def _set_vxlan_acl_state(self, v, load=False): """ Setter method for vxlan_acl_state, mapped from YANG variable /vxlan_acl_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_vxlan_acl_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vxlan_acl_state() directly. YANG Description: Vxlan ACL information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vxlan_acl_state.vxlan_acl_state, is_container='container', presence=False, yang_name="vxlan-acl-state", rest_name="vxlan-acl-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-vxlan-acl', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vxlan_acl_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vxlan_acl_state.vxlan_acl_state, is_container='container', presence=False, yang_name="vxlan-acl-state", rest_name="vxlan-acl-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-vxlan-acl', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)""", }) self.__vxlan_acl_state = t if hasattr(self, '_set'): self._set()
python
def _set_vxlan_acl_state(self, v, load=False): """ Setter method for vxlan_acl_state, mapped from YANG variable /vxlan_acl_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_vxlan_acl_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vxlan_acl_state() directly. YANG Description: Vxlan ACL information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vxlan_acl_state.vxlan_acl_state, is_container='container', presence=False, yang_name="vxlan-acl-state", rest_name="vxlan-acl-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-vxlan-acl', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vxlan_acl_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vxlan_acl_state.vxlan_acl_state, is_container='container', presence=False, yang_name="vxlan-acl-state", rest_name="vxlan-acl-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-vxlan-acl', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)""", }) self.__vxlan_acl_state = t if hasattr(self, '_set'): self._set()
['def', '_set_vxlan_acl_state', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'vxlan_acl_state', '.', 'vxlan_acl_state', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"vxlan-acl-state"', ',', 'rest_name', '=', '"vxlan-acl-state"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'callpoint'", ':', "u'ssm-vxlan-acl'", ',', "u'cli-suppress-show-path'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-ssm-operational'", ',', 'defining_module', '=', "'brocade-ssm-operational'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""vxlan_acl_state must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=vxlan_acl_state.vxlan_acl_state, is_container=\'container\', presence=False, yang_name="vxlan-acl-state", rest_name="vxlan-acl-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'ssm-vxlan-acl\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ssm-operational\', defining_module=\'brocade-ssm-operational\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__vxlan_acl_state', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for vxlan_acl_state, mapped from YANG variable /vxlan_acl_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_vxlan_acl_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vxlan_acl_state() directly. YANG Description: Vxlan ACL information
['Setter', 'method', 'for', 'vxlan_acl_state', 'mapped', 'from', 'YANG', 'variable', '/', 'vxlan_acl_state', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_vxlan_acl_state', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_vxlan_acl_state', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/__init__.py#L4405-L4428
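These pyangbind-style setters are normally driven through plain attribute assignment, which dispatches to the setter and its YANGDynClass type check; a sketch, not verified against this generated module:

cfg = brocade_ssm_operational()
cfg.vxlan_acl_state = vxlan_acl_state.vxlan_acl_state()  # accepted container type
# assigning an incompatible value would raise the ValueError shown above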
4,888
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
insrtc
def insrtc(item, inset): """ Insert an item into a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtc_c.html :param item: Item to be inserted. :type item: str or list of str :param inset: Insertion set. :type inset: spiceypy.utils.support_types.SpiceCell """ assert isinstance(inset, stypes.SpiceCell) if isinstance(item, list): for c in item: libspice.insrtc_c(stypes.stringToCharP(c), ctypes.byref(inset)) else: item = stypes.stringToCharP(item) libspice.insrtc_c(item, ctypes.byref(inset))
python
def insrtc(item, inset): """ Insert an item into a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtc_c.html :param item: Item to be inserted. :type item: str or list of str :param inset: Insertion set. :type inset: spiceypy.utils.support_types.SpiceCell """ assert isinstance(inset, stypes.SpiceCell) if isinstance(item, list): for c in item: libspice.insrtc_c(stypes.stringToCharP(c), ctypes.byref(inset)) else: item = stypes.stringToCharP(item) libspice.insrtc_c(item, ctypes.byref(inset))
['def', 'insrtc', '(', 'item', ',', 'inset', ')', ':', 'assert', 'isinstance', '(', 'inset', ',', 'stypes', '.', 'SpiceCell', ')', 'if', 'isinstance', '(', 'item', ',', 'list', ')', ':', 'for', 'c', 'in', 'item', ':', 'libspice', '.', 'insrtc_c', '(', 'stypes', '.', 'stringToCharP', '(', 'c', ')', ',', 'ctypes', '.', 'byref', '(', 'inset', ')', ')', 'else', ':', 'item', '=', 'stypes', '.', 'stringToCharP', '(', 'item', ')', 'libspice', '.', 'insrtc_c', '(', 'item', ',', 'ctypes', '.', 'byref', '(', 'inset', ')', ')']
Insert an item into a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtc_c.html :param item: Item to be inserted. :type item: str or list of str :param inset: Insertion set. :type inset: spiceypy.utils.support_types.SpiceCell
['Insert', 'an', 'item', 'into', 'a', 'character', 'set', '.']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L7146-L7163
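A short usage sketch for insrtc. It assumes the SPICECHAR_CELL helper in spiceypy.utils.support_types for building an empty character cell; verify the helper name against your spiceypy version.

import spiceypy as spice
import spiceypy.utils.support_types as stypes

cell = stypes.SPICECHAR_CELL(10, 10)   # room for 10 strings of length 10
spice.insrtc("ONE", cell)              # insert a single item
spice.insrtc(["TWO", "THREE"], cell)   # the list form loops internally, as above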
4,889
Robpol86/libnl
libnl/nl.py
nl_recvmsgs
def nl_recvmsgs(sk, cb): """Receive a set of messages from a Netlink socket. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L1023 Repeatedly calls nl_recv() or the respective replacement if provided by the application (see nl_cb_overwrite_recv()) and parses the received data as Netlink messages. Stops reading if one of the callbacks returns NL_STOP or nl_recv returns either 0 or a negative error code. A non-blocking socket causes the function to return immediately if no data is available. See nl_recvmsgs_report(). Positional arguments: sk -- Netlink socket (nl_sock class instance). cb -- set of callbacks to control behaviour (nl_cb class instance). Returns: 0 on success or a negative error code from nl_recv(). """ err = nl_recvmsgs_report(sk, cb) if err > 0: return 0 return int(err)
python
def nl_recvmsgs(sk, cb): """Receive a set of messages from a Netlink socket. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L1023 Repeatedly calls nl_recv() or the respective replacement if provided by the application (see nl_cb_overwrite_recv()) and parses the received data as Netlink messages. Stops reading if one of the callbacks returns NL_STOP or nl_recv returns either 0 or a negative error code. A non-blocking socket causes the function to return immediately if no data is available. See nl_recvmsgs_report(). Positional arguments: sk -- Netlink socket (nl_sock class instance). cb -- set of callbacks to control behaviour (nl_cb class instance). Returns: 0 on success or a negative error code from nl_recv(). """ err = nl_recvmsgs_report(sk, cb) if err > 0: return 0 return int(err)
['def', 'nl_recvmsgs', '(', 'sk', ',', 'cb', ')', ':', 'err', '=', 'nl_recvmsgs_report', '(', 'sk', ',', 'cb', ')', 'if', 'err', '>', '0', ':', 'return', '0', 'return', 'int', '(', 'err', ')']
Receive a set of messages from a Netlink socket. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L1023 Repeatedly calls nl_recv() or the respective replacement if provided by the application (see nl_cb_overwrite_recv()) and parses the received data as Netlink messages. Stops reading if one of the callbacks returns NL_STOP or nl_recv returns either 0 or a negative error code. A non-blocking socket causes the function to return immediately if no data is available. See nl_recvmsgs_report(). Positional arguments: sk -- Netlink socket (nl_sock class instance). cb -- set of callbacks to control behaviour (nl_cb class instance). Returns: 0 on success or a negative error code from nl_recv().
['Receive', 'a', 'set', 'of', 'messages', 'from', 'a', 'Netlink', 'socket', '.']
train
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl.py#L646-L669
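A hedged usage sketch for nl_recvmsgs. The module paths follow this port's layout (libnl.socket_, libnl.handlers); in real code the socket would be connected first, e.g. with genl_connect from libnl.genl.genl.

from libnl.handlers import NL_CB_DEFAULT, nl_cb_alloc
from libnl.nl import nl_recvmsgs
from libnl.socket_ import nl_socket_alloc

sk = nl_socket_alloc()            # Netlink socket (connect it before use)
cb = nl_cb_alloc(NL_CB_DEFAULT)   # default callback set
err = nl_recvmsgs(sk, cb)         # 0 on success, negative error code otherwise
if err < 0:
    print('nl_recvmsgs failed: {0}'.format(err))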
4,890
has2k1/plotnine
plotnine/guides/guides.py
guides.create_geoms
def create_geoms(self, gdefs, plot): """ Add geoms to the guide definitions """ new_gdefs = [] for gdef in gdefs: gdef = gdef.create_geoms(plot) if gdef: new_gdefs.append(gdef) return new_gdefs
python
def create_geoms(self, gdefs, plot): """ Add geoms to the guide definitions """ new_gdefs = [] for gdef in gdefs: gdef = gdef.create_geoms(plot) if gdef: new_gdefs.append(gdef) return new_gdefs
['def', 'create_geoms', '(', 'self', ',', 'gdefs', ',', 'plot', ')', ':', 'new_gdefs', '=', '[', ']', 'for', 'gdef', 'in', 'gdefs', ':', 'gdef', '=', 'gdef', '.', 'create_geoms', '(', 'plot', ')', 'if', 'gdef', ':', 'new_gdefs', '.', 'append', '(', 'gdef', ')', 'return', 'new_gdefs']
Add geoms to the guide definitions
['Add', 'geoms', 'to', 'the', 'guide', 'definitions']
train
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guides.py#L255-L265
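The method above is a filter-map: each guide definition builds its geoms, and falsy results (guides with nothing to draw) are dropped. A generic equivalent as a comprehension:

def create_geoms(gdefs, plot):
    # keep only the guide definitions that produced something drawable
    return [g for g in (gdef.create_geoms(plot) for gdef in gdefs) if g]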
4,891
RudolfCardinal/pythonlib
cardinal_pythonlib/debugging.py
get_class_name_from_frame
def get_class_name_from_frame(fr: FrameType) -> Optional[str]: """ A frame contains information about a specific call in the Python call stack; see https://docs.python.org/3/library/inspect.html. If the call was to a member function of a class, this function attempts to read the class's name. It returns ``None`` otherwise. """ # http://stackoverflow.com/questions/2203424/python-how-to-retrieve-class-information-from-a-frame-object # noqa args, _, _, value_dict = inspect.getargvalues(fr) # we check whether the first parameter of the frame's function is named 'self' if len(args) and args[0] == 'self': # in that case, 'self' will be referenced in value_dict instance = value_dict.get('self', None) if instance: # return its class cls = getattr(instance, '__class__', None) if cls: return cls.__name__ return None # return None otherwise return None
python
def get_class_name_from_frame(fr: FrameType) -> Optional[str]: """ A frame contains information about a specific call in the Python call stack; see https://docs.python.org/3/library/inspect.html. If the call was to a member function of a class, this function attempts to read the class's name. It returns ``None`` otherwise. """ # http://stackoverflow.com/questions/2203424/python-how-to-retrieve-class-information-from-a-frame-object # noqa args, _, _, value_dict = inspect.getargvalues(fr) # we check whether the first parameter of the frame's function is named 'self' if len(args) and args[0] == 'self': # in that case, 'self' will be referenced in value_dict instance = value_dict.get('self', None) if instance: # return its class cls = getattr(instance, '__class__', None) if cls: return cls.__name__ return None # return None otherwise return None
['def', 'get_class_name_from_frame', '(', 'fr', ':', 'FrameType', ')', '->', 'Optional', '[', 'str', ']', ':', '# http://stackoverflow.com/questions/2203424/python-how-to-retrieve-class-information-from-a-frame-object # noqa', 'args', ',', '_', ',', '_', ',', 'value_dict', '=', 'inspect', '.', 'getargvalues', '(', 'fr', ')', "# we check whether the first parameter of the frame's function is named 'self'", 'if', 'len', '(', 'args', ')', 'and', 'args', '[', '0', ']', '==', "'self'", ':', "# in that case, 'self' will be referenced in value_dict", 'instance', '=', 'value_dict', '.', 'get', '(', "'self'", ',', 'None', ')', 'if', 'instance', ':', '# return its class', 'cls', '=', 'getattr', '(', 'instance', ',', "'__class__'", ',', 'None', ')', 'if', 'cls', ':', 'return', 'cls', '.', '__name__', 'return', 'None', '# return None otherwise', 'return', 'None']
A frame contains information about a specific call in the Python call stack; see https://docs.python.org/3/library/inspect.html. If the call was to a member function of a class, this function attempts to read the class's name. It returns ``None`` otherwise.
['A', 'frame', 'contains', 'information', 'about', 'a', 'specific', 'call', 'in', 'the', 'Python', 'call', 'stack', ';', 'see', 'https', ':', '//', 'docs', '.', 'python', '.', 'org', '/', '3', '/', 'library', '/', 'inspect', '.', 'html', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/debugging.py#L74-L95
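A usage sketch relying only on the standard library plus the function above: a method's frame reports its class, while a module-level frame yields None.

import inspect
from cardinal_pythonlib.debugging import get_class_name_from_frame

class Greeter:
    def hello(self):
        # args[0] of this frame is 'self', so the class name is recovered
        return get_class_name_from_frame(inspect.currentframe())

print(Greeter().hello())                                   # -> 'Greeter'
print(get_class_name_from_frame(inspect.currentframe()))   # -> None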
4,892
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
DFARESTClient._create_org
def _create_org(self, orch_id, name, desc): """Create organization on the DCNM. :param orch_id: orchestrator ID :param name: Name of organization :param desc: Description of organization """ url = self._org_url payload = { "organizationName": name, "description": name if len(desc) == 0 else desc, "orchestrationSource": orch_id} return self._send_request('POST', url, payload, 'organization')
python
def _create_org(self, orch_id, name, desc): """Create organization on the DCNM. :param orch_id: orchestrator ID :param name: Name of organization :param desc: Description of organization """ url = self._org_url payload = { "organizationName": name, "description": name if len(desc) == 0 else desc, "orchestrationSource": orch_id} return self._send_request('POST', url, payload, 'organization')
['def', '_create_org', '(', 'self', ',', 'orch_id', ',', 'name', ',', 'desc', ')', ':', 'url', '=', 'self', '.', '_org_url', 'payload', '=', '{', '"organizationName"', ':', 'name', ',', '"description"', ':', 'name', 'if', 'len', '(', 'desc', ')', '==', '0', 'else', 'desc', ',', '"orchestrationSource"', ':', 'orch_id', '}', 'return', 'self', '.', '_send_request', '(', "'POST'", ',', 'url', ',', 'payload', ',', "'organization'", ')']
Create organization on the DCNM. :param orch_id: orchestrator ID :param name: Name of organization :param desc: Description of organization
['Create', 'organization', 'on', 'the', 'DCNM', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L208-L221
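An illustrative call showing the payload the method builds; client stands in for a configured DFARESTClient instance, and the argument values are made up.

client._create_org("openstack", "engineering", "")
# POSTs to self._org_url with:
# {"organizationName": "engineering",
#  "description": "engineering",       # the name is reused when desc is empty
#  "orchestrationSource": "openstack"}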
4,893
numenta/htmresearch
htmresearch/algorithms/TM.py
TM.adaptSegment
def adaptSegment(self, segUpdate, positiveReinforcement): """This function applies segment update information to a segment in a cell. If positiveReinforcement is true then synapses on the active list get their permanence counts incremented by permanenceInc. All other synapses get their permanence counts decremented by permanenceDec. If positiveReinforcement is false, then synapses on the active list get their permanence counts decremented by permanenceDec. After this step, any synapses in segmentUpdate that do not yet exist get added with a permanence count of initialPerm. Parameters: ----------------------------------------------------------- segUpdate: SegmentUpdate instance positiveReinforcement: True for positive reinforcement, False for negative reinforcement retval: True if some synapses were decremented to 0 and the segment is a candidate for trimming """ # This will be set to True if we detect that any synapses were decremented to # 0 trimSegment = False # segUpdate.segment is None when creating a new segment c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment # update.activeSynapses can be empty. # If not, it can contain either or both integers and tuples. # The integers are indices of synapses to update. # The tuples represent new synapses to create (src col, src cell in col). # We pre-process to separate these various element types. # synToCreate is not empty only if positiveReinforcement is True. # NOTE: the synapse indices start at *1* to skip the segment flags. activeSynapses = segUpdate.activeSynapses synToUpdate = set([syn for syn in activeSynapses if type(syn) == int]) if segment is not None: # modify an existing segment if positiveReinforcement: if self.verbosity >= 4: print "Reinforcing segment for cell[%d,%d]" %(c,i), segment.printSegment() # Update frequency and positiveActivations segment.positiveActivations += 1 # positiveActivations += 1 segment.dutyCycle(active=True) # First, decrement synapses that are not active # s is a synapse *index*, with index 0 in the segment being the tuple # (segId, sequence segment flag). See below, creation of segments.
lastSynIndex = len(segment.syns) - 1 inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \ if s not in synToUpdate] trimSegment = segment.updateSynapses(inactiveSynIndices, -self.permanenceDec) # Now, increment active synapses activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex] segment.updateSynapses(activeSynIndices, self.permanenceInc) # Finally, create new synapses if needed # syn is now a tuple (src col, src cell) synsToAdd = [syn for syn in activeSynapses if type(syn) != int] for newSyn in synsToAdd: segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm) if self.verbosity >= 4: print " after", segment.printSegment() else: # positiveReinforcement is False desc = "" if self.verbosity >= 4: print "Negatively Reinforcing %s segment for cell[%d,%d]" \ % (desc, c,i), segment.printSegment() # Decrease frequency count segment.dutyCycle(active=True) # We decrement all the "active" that were passed in trimSegment = segment.updateSynapses(synToUpdate, -self.permanenceDec) if self.verbosity >= 4: print " after", segment.printSegment() else: # segment is None: create a new segment newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment) # numpy.float32 important so that we can match with C++ for synapse in activeSynapses: newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm) if self.verbosity >= 3: print "New segment for cell[%d,%d]" %(c,i), newSegment.printSegment() self.cells[c][i].append(newSegment) return trimSegment
python
def adaptSegment(self, segUpdate, positiveReinforcement): """This function applies segment update information to a segment in a cell. If positiveReinforcement is true then synapses on the active list get their permanence counts incremented by permanenceInc. All other synapses get their permanence counts decremented by permanenceDec. If positiveReinforcement is false, then synapses on the active list get their permanence counts decremented by permanenceDec. After this step, any synapses in segmentUpdate that do not yet exist get added with a permanence count of initialPerm. Parameters: ----------------------------------------------------------- segUpdate: SegmentUpdate instance positiveReinforcement: True for positive reinforcement, False for negative reinforcement retval: True if some synapses were decremented to 0 and the segment is a candidate for trimming """ # This will be set to True if we detect that any synapses were decremented to # 0 trimSegment = False # segUpdate.segment is None when creating a new segment c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment # update.activeSynapses can be empty. # If not, it can contain either or both integers and tuples. # The integers are indices of synapses to update. # The tuples represent new synapses to create (src col, src cell in col). # We pre-process to separate these various element types. # synToCreate is not empty only if positiveReinforcement is True. # NOTE: the synapse indices start at *1* to skip the segment flags. activeSynapses = segUpdate.activeSynapses synToUpdate = set([syn for syn in activeSynapses if type(syn) == int]) if segment is not None: # modify an existing segment if positiveReinforcement: if self.verbosity >= 4: print "Reinforcing segment for cell[%d,%d]" %(c,i), segment.printSegment() # Update frequency and positiveActivations segment.positiveActivations += 1 # positiveActivations += 1 segment.dutyCycle(active=True) # First, decrement synapses that are not active # s is a synapse *index*, with index 0 in the segment being the tuple # (segId, sequence segment flag). See below, creation of segments.
lastSynIndex = len(segment.syns) - 1 inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \ if s not in synToUpdate] trimSegment = segment.updateSynapses(inactiveSynIndices, -self.permanenceDec) # Now, increment active synapses activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex] segment.updateSynapses(activeSynIndices, self.permanenceInc) # Finally, create new synapses if needed # syn is now a tuple (src col, src cell) synsToAdd = [syn for syn in activeSynapses if type(syn) != int] for newSyn in synsToAdd: segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm) if self.verbosity >= 4: print " after", segment.printSegment() else: # positiveReinforcement is False desc = "" if self.verbosity >= 4: print "Negatively Reinforcing %s segment for cell[%d,%d]" \ % (desc, c,i), segment.printSegment() # Decrease frequency count segment.dutyCycle(active=True) # We decrement all the "active" that were passed in trimSegment = segment.updateSynapses(synToUpdate, -self.permanenceDec) if self.verbosity >= 4: print " after", segment.printSegment() else: # segment is None: create a new segment newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment) # numpy.float32 important so that we can match with C++ for synapse in activeSynapses: newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm) if self.verbosity >= 3: print "New segment for cell[%d,%d]" %(c,i), newSegment.printSegment() self.cells[c][i].append(newSegment) return trimSegment
['def', 'adaptSegment', '(', 'self', ',', 'segUpdate', ',', 'positiveReinforcement', ')', ':', '# This will be set to True if detect that any syapses were decremented to', '# 0', 'trimSegment', '=', 'False', '# segUpdate.segment is None when creating a new segment', 'c', ',', 'i', ',', 'segment', '=', 'segUpdate', '.', 'columnIdx', ',', 'segUpdate', '.', 'cellIdx', ',', 'segUpdate', '.', 'segment', '# update.activeSynapses can be empty.', '# If not, it can contain either or both integers and tuples.', '# The integers are indices of synapses to update.', '# The tuples represent new synapses to create (src col, src cell in col).', '# We pre-process to separate these various element types.', '# synToCreate is not empty only if positiveReinforcement is True.', '# NOTE: the synapse indices start at *1* to skip the segment flags.', 'activeSynapses', '=', 'segUpdate', '.', 'activeSynapses', 'synToUpdate', '=', 'set', '(', '[', 'syn', 'for', 'syn', 'in', 'activeSynapses', 'if', 'type', '(', 'syn', ')', '==', 'int', ']', ')', 'if', 'segment', 'is', 'not', 'None', ':', '# modify an existing segment', 'if', 'positiveReinforcement', ':', 'if', 'self', '.', 'verbosity', '>=', '4', ':', 'print', '"Reinforcing segment for cell[%d,%d]"', '%', '(', 'c', ',', 'i', ')', ',', 'segment', '.', 'printSegment', '(', ')', '# Update frequency and positiveActivations', 'segment', '.', 'positiveActivations', '+=', '1', '# positiveActivations += 1', 'segment', '.', 'dutyCycle', '(', 'active', '=', 'True', ')', '# First, decrement synapses that are not active', '# s is a synapse *index*, with index 0 in the segment being the tuple', '# (segId, sequence segment flag). See below, creation of segments.', 'lastSynIndex', '=', 'len', '(', 'segment', '.', 'syns', ')', '-', '1', 'inactiveSynIndices', '=', '[', 's', 'for', 's', 'in', 'xrange', '(', '0', ',', 'lastSynIndex', '+', '1', ')', 'if', 's', 'not', 'in', 'synToUpdate', ']', 'trimSegment', '=', 'segment', '.', 'updateSynapses', '(', 'inactiveSynIndices', ',', '-', 'self', '.', 'permanenceDec', ')', '# Now, increment active synapses', 'activeSynIndices', '=', '[', 'syn', 'for', 'syn', 'in', 'synToUpdate', 'if', 'syn', '<=', 'lastSynIndex', ']', 'segment', '.', 'updateSynapses', '(', 'activeSynIndices', ',', 'self', '.', 'permanenceInc', ')', '# Finally, create new synapses if needed', '# syn is now a tuple (src col, src cell)', 'synsToAdd', '=', '[', 'syn', 'for', 'syn', 'in', 'activeSynapses', 'if', 'type', '(', 'syn', ')', '!=', 'int', ']', 'for', 'newSyn', 'in', 'synsToAdd', ':', 'segment', '.', 'addSynapse', '(', 'newSyn', '[', '0', ']', ',', 'newSyn', '[', '1', ']', ',', 'self', '.', 'initialPerm', ')', 'if', 'self', '.', 'verbosity', '>=', '4', ':', 'print', '" after"', ',', 'segment', '.', 'printSegment', '(', ')', 'else', ':', '# positiveReinforcement is False', 'desc', '=', '""', 'if', 'self', '.', 'verbosity', '>=', '4', ':', 'print', '"Negatively Reinforcing %s segment for cell[%d,%d]"', '%', '(', 'desc', ',', 'c', ',', 'i', ')', ',', 'segment', '.', 'printSegment', '(', ')', '# Decrease frequency count', 'segment', '.', 'dutyCycle', '(', 'active', '=', 'True', ')', '# We decrement all the "active" that were passed in', 'trimSegment', '=', 'segment', '.', 'updateSynapses', '(', 'synToUpdate', ',', '-', 'self', '.', 'permanenceDec', ')', 'if', 'self', '.', 'verbosity', '>=', '4', ':', 'print', '" after"', ',', 'segment', '.', 'printSegment', '(', ')', 'else', ':', '# segment is None: create a new segment', 'newSegment', '=', 'Segment', '(', 'tp', '=', 'self', 
',', 'isSequenceSeg', '=', 'segUpdate', '.', 'sequenceSegment', ')', '# numpy.float32 important so that we can match with C++', 'for', 'synapse', 'in', 'activeSynapses', ':', 'newSegment', '.', 'addSynapse', '(', 'synapse', '[', '0', ']', ',', 'synapse', '[', '1', ']', ',', 'self', '.', 'initialPerm', ')', 'if', 'self', '.', 'verbosity', '>=', '3', ':', 'print', '"New segment for cell[%d,%d]"', '%', '(', 'c', ',', 'i', ')', ',', 'newSegment', '.', 'printSegment', '(', ')', 'self', '.', 'cells', '[', 'c', ']', '[', 'i', ']', '.', 'append', '(', 'newSegment', ')', 'return', 'trimSegment']
This function applies segment update information to a segment in a cell. If positiveReinforcement is true then synapses on the active list get their permanence counts incremented by permanenceInc. All other synapses get their permanence counts decremented by permanenceDec. If positiveReinforcement is false, then synapses on the active list get their permanence counts decremented by permanenceDec. After this step, any synapses in segmentUpdate that do not yet exist get added with a permanence count of initialPerm. Parameters: ----------------------------------------------------------- segUpdate: SegmentUpdate instance positiveReinforcement: True for positive reinforcement, False for negative reinforcement retval: True if some synapses were decremented to 0 and the segment is a candidate for trimming
['This', 'function', 'applies', 'segment', 'update', 'information', 'to', 'a', 'segment', 'in', 'a', 'cell', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/TM.py#L1988-L2100
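A minimal numeric sketch of the permanence bookkeeping adaptSegment performs under positive reinforcement, with the Segment reduced to a list of floats. The parameter values are illustrative, and clamping at zero is assumed, as the trimming return value implies.

permanenceInc, permanenceDec, initialPerm = 0.10, 0.05, 0.11
perms = [0.30, 0.04, 0.50]    # existing synapse permanences on one segment
active = {0, 2}               # synapse indices reinforced this step

for idx in range(len(perms)):
    delta = permanenceInc if idx in active else -permanenceDec
    perms[idx] = max(0.0, perms[idx] + delta)

trimSegment = any(p == 0.0 for p in perms)   # mirrors the function's retval
perms.append(initialPerm)                    # one newly created synapse
print perms, trimSegment   # ~[0.40, 0.0, 0.60, 0.11] True, modulo float rounding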
4,894
rsalmei/clearly
clearly/server.py
ClearlyServer.find_task
def find_task(self, request, context): """Finds one specific task.""" _log_request(request, context) task = self.listener.memory.tasks.get(request.task_uuid) if not task: return clearly_pb2.TaskMessage() return ClearlyServer._event_to_pb(task)[1]
python
def find_task(self, request, context): """Finds one specific task.""" _log_request(request, context) task = self.listener.memory.tasks.get(request.task_uuid) if not task: return clearly_pb2.TaskMessage() return ClearlyServer._event_to_pb(task)[1]
['def', 'find_task', '(', 'self', ',', 'request', ',', 'context', ')', ':', '_log_request', '(', 'request', ',', 'context', ')', 'task', '=', 'self', '.', 'listener', '.', 'memory', '.', 'tasks', '.', 'get', '(', 'request', '.', 'task_uuid', ')', 'if', 'not', 'task', ':', 'return', 'clearly_pb2', '.', 'TaskMessage', '(', ')', 'return', 'ClearlyServer', '.', '_event_to_pb', '(', 'task', ')', '[', '1', ']']
Finds one specific task.
['Finds', 'one', 'specific', 'task', '.']
train
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/server.py#L148-L154
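Client-side, the empty TaskMessage lets callers distinguish "not found" from a gRPC error. A hedged sketch; everything about the stub and request beyond TaskMessage is an assumption about the .proto, not confirmed by the snippet.

response = stub.find_task(request)   # request carries a task_uuid field
if not response.ListFields():        # an empty TaskMessage means no such task
    print('no task with uuid {0}'.format(request.task_uuid))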
4,895
greenelab/PathCORE-T
pathcore/feature_pathway_overrepresentation.py
_significant_pathways_dataframe
def _significant_pathways_dataframe(pvalue_information, side_information, alpha): """Create the significant pathways pandas.DataFrame. Given the p-values corresponding to each pathway in a feature, apply the FDR correction for multiple testing and remove those that do not have a q-value of less than `alpha`. """ significant_pathways = pd.concat( [pvalue_information, side_information], axis=1) # fdr_bh: false discovery rate, Benjamini & Hochberg (1995, 2000) below_alpha, qvalues, _, _ = multipletests( significant_pathways["p-value"], alpha=alpha, method="fdr_bh") below_alpha = pd.Series( below_alpha, index=pvalue_information.index, name="pass") qvalues = pd.Series( qvalues, index=pvalue_information.index, name="q-value") significant_pathways = pd.concat( [significant_pathways, below_alpha, qvalues], axis=1) significant_pathways = significant_pathways[significant_pathways["pass"]] significant_pathways.drop("pass", axis=1, inplace=True) significant_pathways.loc[:, "pathway"] = significant_pathways.index return significant_pathways
python
def _significant_pathways_dataframe(pvalue_information, side_information, alpha): """Create the significant pathways pandas.DataFrame. Given the p-values corresponding to each pathway in a feature, apply the FDR correction for multiple testing and remove those that do not have a q-value of less than `alpha`. """ significant_pathways = pd.concat( [pvalue_information, side_information], axis=1) # fdr_bh: false discovery rate, Benjamini & Hochberg (1995, 2000) below_alpha, qvalues, _, _ = multipletests( significant_pathways["p-value"], alpha=alpha, method="fdr_bh") below_alpha = pd.Series( below_alpha, index=pvalue_information.index, name="pass") qvalues = pd.Series( qvalues, index=pvalue_information.index, name="q-value") significant_pathways = pd.concat( [significant_pathways, below_alpha, qvalues], axis=1) significant_pathways = significant_pathways[significant_pathways["pass"]] significant_pathways.drop("pass", axis=1, inplace=True) significant_pathways.loc[:, "pathway"] = significant_pathways.index return significant_pathways
['def', '_significant_pathways_dataframe', '(', 'pvalue_information', ',', 'side_information', ',', 'alpha', ')', ':', 'significant_pathways', '=', 'pd', '.', 'concat', '(', '[', 'pvalue_information', ',', 'side_information', ']', ',', 'axis', '=', '1', ')', '# fdr_bh: false discovery rate, Benjamini & Hochberg (1995, 2000)', 'below_alpha', ',', 'qvalues', ',', '_', ',', '_', '=', 'multipletests', '(', 'significant_pathways', '[', '"p-value"', ']', ',', 'alpha', '=', 'alpha', ',', 'method', '=', '"fdr_bh"', ')', 'below_alpha', '=', 'pd', '.', 'Series', '(', 'below_alpha', ',', 'index', '=', 'pvalue_information', '.', 'index', ',', 'name', '=', '"pass"', ')', 'qvalues', '=', 'pd', '.', 'Series', '(', 'qvalues', ',', 'index', '=', 'pvalue_information', '.', 'index', ',', 'name', '=', '"q-value"', ')', 'significant_pathways', '=', 'pd', '.', 'concat', '(', '[', 'significant_pathways', ',', 'below_alpha', ',', 'qvalues', ']', ',', 'axis', '=', '1', ')', 'significant_pathways', '=', 'significant_pathways', '[', 'significant_pathways', '[', '"pass"', ']', ']', 'significant_pathways', '.', 'drop', '(', '"pass"', ',', 'axis', '=', '1', ',', 'inplace', '=', 'True', ')', 'significant_pathways', '.', 'loc', '[', ':', ',', '"pathway"', ']', '=', 'significant_pathways', '.', 'index', 'return', 'significant_pathways']
Create the significant pathways pandas.DataFrame. Given the p-values corresponding to each pathway in a feature, apply the FDR correction for multiple testing and remove those that do not have a q-value of less than `alpha`.
['Create', 'the', 'significant', 'pathways', 'pandas', '.', 'DataFrame', '.', 'Given', 'the', 'p', '-', 'values', 'corresponding', 'to', 'each', 'pathway', 'in', 'a', 'feature', 'apply', 'the', 'FDR', 'correction', 'for', 'multiple', 'testing', 'and', 'remove', 'those', 'that', 'do', 'not', 'have', 'a', 'q', '-', 'value', 'of', 'less', 'than', 'alpha', '.']
train
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/feature_pathway_overrepresentation.py#L129-L151
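A hedged usage sketch with toy inputs: both frames are indexed by pathway name, pvalue_information carries the required "p-value" column, and side_information holds whatever per-pathway metadata the caller tracks.

import pandas as pd
from pathcore.feature_pathway_overrepresentation import _significant_pathways_dataframe

pvals = pd.DataFrame({"p-value": [0.001, 0.20, 0.04]},
                     index=["glycolysis", "tca_cycle", "mapk"])
sides = pd.DataFrame({"side": ["pos", "neg", "pos"]}, index=pvals.index)

sig = _significant_pathways_dataframe(pvals, sides, alpha=0.05)
print(sig)   # only rows passing the Benjamini-Hochberg cutoff survive,
             # each with q-value and pathway columns added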
4,896
ConsenSys/mythril-classic
mythril/laser/ethereum/plugins/implementations/mutation_pruner.py
MutationPruner.initialize
def initialize(self, symbolic_vm: LaserEVM): """Initializes the mutation pruner Introduces hooks for SSTORE operations :param symbolic_vm: :return: """ @symbolic_vm.pre_hook("SSTORE") def mutator_hook(global_state: GlobalState): global_state.annotate(MutationAnnotation()) @symbolic_vm.laser_hook("add_world_state") def world_state_filter_hook(global_state: GlobalState): if And( *global_state.mstate.constraints[:] + [ global_state.environment.callvalue > symbol_factory.BitVecVal(0, 256) ] ).is_false: return if isinstance( global_state.current_transaction, ContractCreationTransaction ): return if len(list(global_state.get_annotations(MutationAnnotation))) == 0: raise PluginSkipWorldState
python
def initialize(self, symbolic_vm: LaserEVM): """Initializes the mutation pruner Introduces hooks for SSTORE operations :param symbolic_vm: :return: """ @symbolic_vm.pre_hook("SSTORE") def mutator_hook(global_state: GlobalState): global_state.annotate(MutationAnnotation()) @symbolic_vm.laser_hook("add_world_state") def world_state_filter_hook(global_state: GlobalState): if And( *global_state.mstate.constraints[:] + [ global_state.environment.callvalue > symbol_factory.BitVecVal(0, 256) ] ).is_false: return if isinstance( global_state.current_transaction, ContractCreationTransaction ): return if len(list(global_state.get_annotations(MutationAnnotation))) == 0: raise PluginSkipWorldState
['def', 'initialize', '(', 'self', ',', 'symbolic_vm', ':', 'LaserEVM', ')', ':', '@', 'symbolic_vm', '.', 'pre_hook', '(', '"SSTORE"', ')', 'def', 'mutator_hook', '(', 'global_state', ':', 'GlobalState', ')', ':', 'global_state', '.', 'annotate', '(', 'MutationAnnotation', '(', ')', ')', '@', 'symbolic_vm', '.', 'laser_hook', '(', '"add_world_state"', ')', 'def', 'world_state_filter_hook', '(', 'global_state', ':', 'GlobalState', ')', ':', 'if', 'And', '(', '*', 'global_state', '.', 'mstate', '.', 'constraints', '[', ':', ']', '+', '[', 'global_state', '.', 'environment', '.', 'callvalue', '>', 'symbol_factory', '.', 'BitVecVal', '(', '0', ',', '256', ')', ']', ')', '.', 'is_false', ':', 'return', 'if', 'isinstance', '(', 'global_state', '.', 'current_transaction', ',', 'ContractCreationTransaction', ')', ':', 'return', 'if', 'len', '(', 'list', '(', 'global_state', '.', 'get_annotations', '(', 'MutationAnnotation', ')', ')', ')', '==', '0', ':', 'raise', 'PluginSkipWorldState']
Initializes the mutation pruner Introduces hooks for SSTORE operations :param symbolic_vm: :return:
['Initializes', 'the', 'mutation', 'pruner']
train
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/plugins/implementations/mutation_pruner.py#L36-L63
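The hook functions are defined inside initialize() so they close over plugin state. The same pattern in miniature (a hedged skeleton, not Mythril's actual plugin base-class contract):

class CountingPlugin:
    def __init__(self):
        self.sstores = 0

    def initialize(self, symbolic_vm):
        @symbolic_vm.pre_hook("SSTORE")
        def _count(global_state):
            self.sstores += 1   # the closure keeps per-plugin state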
4,897
theiviaxx/Frog
frog/common.py
getPutData
def getPutData(request): """Adds the raw POST body to the PUT and DELETE querydicts on the request so they behave like POST :param request: Request object to add PUT/DELETE to :type request: Request """ dataDict = {} data = request.body for n in urlparse.parse_qsl(data): dataDict[n[0]] = n[1] setattr(request, 'PUT', dataDict) setattr(request, 'DELETE', dataDict)
python
def getPutData(request): """Adds the raw POST body to the PUT and DELETE querydicts on the request so they behave like POST :param request: Request object to add PUT/DELETE to :type request: Request """ dataDict = {} data = request.body for n in urlparse.parse_qsl(data): dataDict[n[0]] = n[1] setattr(request, 'PUT', dataDict) setattr(request, 'DELETE', dataDict)
['def', 'getPutData', '(', 'request', ')', ':', 'dataDict', '=', '{', '}', 'data', '=', 'request', '.', 'body', 'for', 'n', 'in', 'urlparse', '.', 'parse_qsl', '(', 'data', ')', ':', 'dataDict', '[', 'n', '[', '0', ']', ']', '=', 'n', '[', '1', ']', 'setattr', '(', 'request', ',', "'PUT'", ',', 'dataDict', ')', 'setattr', '(', 'request', ',', "'DELETE'", ',', 'dataDict', ')']
Adds the raw POST body to the PUT and DELETE querydicts on the request so they behave like POST :param request: Request object to add PUT/DELETE to :type request: Request
['Adds', 'the', 'raw', 'POST', 'body', 'to', 'the', 'PUT', 'and', 'DELETE', 'querydicts', 'on', 'the', 'request', 'so', 'they', 'behave', 'like', 'POST']
train
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/common.py#L118-L131
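What the helper yields for a urlencoded PUT body (a Django request on Python 2 is assumed, as the module's urlparse import implies):

# request.body == 'name=demo&count=3'
getPutData(request)
print request.PUT      # {'name': 'demo', 'count': '3'}
print request.DELETE   # the same dict; both verbs share the parsed body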
4,898
jbasko/configmanager
configmanager/managers.py
Config.configparser
def configparser(self): """ Adapter to dump/load INI format strings and files using standard library's ``ConfigParser`` (or the backported configparser module in Python 2). Returns: ConfigPersistenceAdapter """ if self._configparser_adapter is None: self._configparser_adapter = ConfigPersistenceAdapter( config=self, reader_writer=ConfigParserReaderWriter( config_parser_factory=self.settings.configparser_factory, ), ) return self._configparser_adapter
python
def configparser(self): """ Adapter to dump/load INI format strings and files using standard library's ``ConfigParser`` (or the backported configparser module in Python 2). Returns: ConfigPersistenceAdapter """ if self._configparser_adapter is None: self._configparser_adapter = ConfigPersistenceAdapter( config=self, reader_writer=ConfigParserReaderWriter( config_parser_factory=self.settings.configparser_factory, ), ) return self._configparser_adapter
['def', 'configparser', '(', 'self', ')', ':', 'if', 'self', '.', '_configparser_adapter', 'is', 'None', ':', 'self', '.', '_configparser_adapter', '=', 'ConfigPersistenceAdapter', '(', 'config', '=', 'self', ',', 'reader_writer', '=', 'ConfigParserReaderWriter', '(', 'config_parser_factory', '=', 'self', '.', 'settings', '.', 'configparser_factory', ',', ')', ',', ')', 'return', 'self', '.', '_configparser_adapter']
Adapter to dump/load INI format strings and files using standard library's ``ConfigParser`` (or the backported configparser module in Python 2). Returns: ConfigPersistenceAdapter
['Adapter', 'to', 'dump', '/', 'load', 'INI', 'format', 'strings', 'and', 'files', 'using', 'standard', 'library', 's', 'ConfigParser', '(', 'or', 'the', 'backported', 'configparser', 'module', 'in', 'Python', '2', ')', '.', 'Returns', ':', 'ConfigPersistenceAdapter']
train
https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/managers.py#L152-L167
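A hedged usage sketch of the lazily created adapter. The schema-dict constructor matches configmanager's documented usage; the load/dump method names follow the docstring's "dump/load" wording but should be verified against ConfigPersistenceAdapter.

from configmanager import Config

config = Config({'uploads': {'enabled': True}})
config.configparser.load('app.ini')   # read INI values into the config tree
config.configparser.dump('app.ini')   # write the current values back out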
4,899
xtrementl/focus
focus/plugin/modules/tasks.py
TaskEdit.execute
def execute(self, env, args): """ Edits task configuration. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """ task_name = args.task_name if not env.task.exists(task_name): raise errors.TaskNotFound(task_name) if env.task.active and task_name == env.task.name: raise errors.ActiveTask # open the task config in an editor task_config = env.task.get_config_path(task_name) if not _edit_task_config(env, task_config, confirm=True): raise errors.FocusError(u'Could not open task config: {0}' .format(task_config))
python
def execute(self, env, args): """ Edits task configuration. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """ task_name = args.task_name if not env.task.exists(task_name): raise errors.TaskNotFound(task_name) if env.task.active and task_name == env.task.name: raise errors.ActiveTask # open the task config in an editor task_config = env.task.get_config_path(task_name) if not _edit_task_config(env, task_config, confirm=True): raise errors.FocusError(u'Could not open task config: {0}' .format(task_config))
['def', 'execute', '(', 'self', ',', 'env', ',', 'args', ')', ':', 'task_name', '=', 'args', '.', 'task_name', 'if', 'not', 'env', '.', 'task', '.', 'exists', '(', 'task_name', ')', ':', 'raise', 'errors', '.', 'TaskNotFound', '(', 'task_name', ')', 'if', 'env', '.', 'task', '.', 'active', 'and', 'task_name', '==', 'env', '.', 'task', '.', 'name', ':', 'raise', 'errors', '.', 'ActiveTask', '# open the task config in an editor', 'task_config', '=', 'env', '.', 'task', '.', 'get_config_path', '(', 'task_name', ')', 'if', 'not', '_edit_task_config', '(', 'env', ',', 'task_config', ',', 'confirm', '=', 'True', ')', ':', 'raise', 'errors', '.', 'FocusError', '(', "u'Could not open task config: {0}'", '.', 'format', '(', 'task_config', ')', ')']
Edits task configuration. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser.
['Edits', 'task', 'configuration', '.']
train
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/tasks.py#L293-L315
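Driving the command directly (a sketch: env and args stand in for what the focus CLI would build from ``focus edit <task-name>``):

cmd = TaskEdit()
try:
    cmd.execute(env, args)   # args.task_name == 'writing', say
except errors.TaskNotFound:
    print('no such task')
except errors.ActiveTask:
    print('stop the active task before editing it')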