Column                      Type            Values
Unnamed: 0                  int64           0 .. 10k
repository_name             stringlengths   7 .. 54
func_path_in_repository     stringlengths   5 .. 223
func_name                   stringlengths   1 .. 134
whole_func_string           stringlengths   100 .. 30.3k
language                    stringclasses   1 value
func_code_string            stringlengths   100 .. 30.3k
func_code_tokens            stringlengths   138 .. 33.2k
func_documentation_string   stringlengths   1 .. 15k
func_documentation_tokens   stringlengths   5 .. 5.14k
split_name                  stringclasses   1 value
func_code_url               stringlengths   91 .. 315
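The schema above describes one record per extracted Python function: the function source, its token sequence, the paired docstring, and a permalink to the original lines. As a quick orientation, here is a minimal sketch of inspecting such a split with the Hugging Face datasets library; the dataset identifier is not stated anywhere in this dump, so the name passed to load_dataset is a placeholder, and only the column names are taken from the schema.

# Minimal sketch: load the split described above and inspect one record.
# "user/dataset" is a placeholder identifier, not given anywhere in this dump.
from datasets import load_dataset

ds = load_dataset("user/dataset", split="train")

row = ds[2200]
print(row["repository_name"])            # e.g. RobinNil/file_read_backwards
print(row["func_name"])                  # e.g. BufferWorkSpace.yieldable
print(row["func_documentation_string"])  # the docstring paired with the code
print(row["func_code_url"])              # permalink to the source lines on GitHub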
2,200
RobinNil/file_read_backwards
file_read_backwards/buffer_work_space.py
BufferWorkSpace.yieldable
def yieldable(self): """Return True if there is a line that the buffer can return, False otherwise.""" if self.read_buffer is None: return False t = _remove_trailing_new_line(self.read_buffer) n = _find_furthest_new_line(t) if n >= 0: return True # we have read in entire file and have some unprocessed lines if self.read_position == 0 and self.read_buffer is not None: return True return False
python
def yieldable(self): """Return True if there is a line that the buffer can return, False otherwise.""" if self.read_buffer is None: return False t = _remove_trailing_new_line(self.read_buffer) n = _find_furthest_new_line(t) if n >= 0: return True # we have read in entire file and have some unprocessed lines if self.read_position == 0 and self.read_buffer is not None: return True return False
['def', 'yieldable', '(', 'self', ')', ':', 'if', 'self', '.', 'read_buffer', 'is', 'None', ':', 'return', 'False', 't', '=', '_remove_trailing_new_line', '(', 'self', '.', 'read_buffer', ')', 'n', '=', '_find_furthest_new_line', '(', 't', ')', 'if', 'n', '>=', '0', ':', 'return', 'True', '# we have read in entire file and have some unprocessed lines', 'if', 'self', '.', 'read_position', '==', '0', 'and', 'self', '.', 'read_buffer', 'is', 'not', 'None', ':', 'return', 'True', 'return', 'False']
Return True if there is a line that the buffer can return, False otherwise.
['Return', 'True', 'if', 'there', 'is', 'a', 'line', 'that', 'the', 'buffer', 'can', 'return', 'False', 'otherwise', '.']
train
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L42-L55
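Row 2,200's BufferWorkSpace.yieldable is an internal helper of the file_read_backwards package: it reports whether the read buffer currently holds a complete line that can be handed back. For context, a short sketch of the public API that sits on top of it; the file name is a placeholder and the constructor arguments reflect the package's documented interface rather than anything stated in this row.

# Reading a file from its last line to its first with file_read_backwards.
# BufferWorkSpace.yieldable (row 2,200) decides internally when a complete
# line is available to yield from the buffered chunk.
from file_read_backwards import FileReadBackwards

with FileReadBackwards("app.log", encoding="utf-8") as frb:
    for line in frb:  # newest line first, trailing newlines stripped
        print(line)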
2,201
elastic/apm-agent-python
elasticapm/contrib/django/middleware/__init__.py
_is_ignorable_404
def _is_ignorable_404(uri): """ Returns True if the given request *shouldn't* notify the site managers. """ urls = getattr(django_settings, "IGNORABLE_404_URLS", ()) return any(pattern.search(uri) for pattern in urls)
python
def _is_ignorable_404(uri): """ Returns True if the given request *shouldn't* notify the site managers. """ urls = getattr(django_settings, "IGNORABLE_404_URLS", ()) return any(pattern.search(uri) for pattern in urls)
['def', '_is_ignorable_404', '(', 'uri', ')', ':', 'urls', '=', 'getattr', '(', 'django_settings', ',', '"IGNORABLE_404_URLS"', ',', '(', ')', ')', 'return', 'any', '(', 'pattern', '.', 'search', '(', 'uri', ')', 'for', 'pattern', 'in', 'urls', ')']
Returns True if the given request *shouldn't* notify the site managers.
['Returns', 'True', 'if', 'the', 'given', 'request', '*', 'shouldn', 't', '*', 'notify', 'the', 'site', 'managers', '.']
train
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/contrib/django/middleware/__init__.py#L57-L62
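Row 2,201's _is_ignorable_404 consults Django's IGNORABLE_404_URLS setting and applies each entry with pattern.search(uri), so the setting is expected to hold compiled regular expressions. A minimal settings sketch follows; the specific patterns are illustrative.

# Django settings excerpt that _is_ignorable_404 (row 2,201) reads via getattr.
# Entries must be compiled regexes, since the function calls pattern.search(uri).
import re

IGNORABLE_404_URLS = [
    re.compile(r"^/favicon\.ico$"),
    re.compile(r"^/robots\.txt$"),
]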
2,202
ihabunek/toot
toot/ui/app.py
TimelineApp.reply
def reply(self): """Reply to the selected status""" status = self.get_selected_status() app, user = self.app, self.user if not app or not user: self.footer.draw_message("You must be logged in to reply", Color.RED) return compose_modal = ComposeModal(self.stdscr, default_cw='\n'.join(status['spoiler_text']) or None, resize_callback=self.on_resize) content, cw = compose_modal.loop() self.full_redraw() if content is None: return elif len(content) == 0: self.footer.draw_message("Status must contain content", Color.RED) return self.footer.draw_message("Submitting reply...", Color.YELLOW) response = api.post_status(app, user, content, spoiler_text=cw, sensitive=cw is not None, in_reply_to_id=status['id']) status = parse_status(response) self.statuses.insert(0, status) self.selected += 1 self.left.draw_statuses(self.statuses, self.selected) self.footer.draw_message("✓ Reply posted", Color.GREEN)
python
def reply(self): """Reply to the selected status""" status = self.get_selected_status() app, user = self.app, self.user if not app or not user: self.footer.draw_message("You must be logged in to reply", Color.RED) return compose_modal = ComposeModal(self.stdscr, default_cw='\n'.join(status['spoiler_text']) or None, resize_callback=self.on_resize) content, cw = compose_modal.loop() self.full_redraw() if content is None: return elif len(content) == 0: self.footer.draw_message("Status must contain content", Color.RED) return self.footer.draw_message("Submitting reply...", Color.YELLOW) response = api.post_status(app, user, content, spoiler_text=cw, sensitive=cw is not None, in_reply_to_id=status['id']) status = parse_status(response) self.statuses.insert(0, status) self.selected += 1 self.left.draw_statuses(self.statuses, self.selected) self.footer.draw_message("✓ Reply posted", Color.GREEN)
['def', 'reply', '(', 'self', ')', ':', 'status', '=', 'self', '.', 'get_selected_status', '(', ')', 'app', ',', 'user', '=', 'self', '.', 'app', ',', 'self', '.', 'user', 'if', 'not', 'app', 'or', 'not', 'user', ':', 'self', '.', 'footer', '.', 'draw_message', '(', '"You must be logged in to reply"', ',', 'Color', '.', 'RED', ')', 'return', 'compose_modal', '=', 'ComposeModal', '(', 'self', '.', 'stdscr', ',', 'default_cw', '=', "'\\n'", '.', 'join', '(', 'status', '[', "'spoiler_text'", ']', ')', 'or', 'None', ',', 'resize_callback', '=', 'self', '.', 'on_resize', ')', 'content', ',', 'cw', '=', 'compose_modal', '.', 'loop', '(', ')', 'self', '.', 'full_redraw', '(', ')', 'if', 'content', 'is', 'None', ':', 'return', 'elif', 'len', '(', 'content', ')', '==', '0', ':', 'self', '.', 'footer', '.', 'draw_message', '(', '"Status must contain content"', ',', 'Color', '.', 'RED', ')', 'return', 'self', '.', 'footer', '.', 'draw_message', '(', '"Submitting reply..."', ',', 'Color', '.', 'YELLOW', ')', 'response', '=', 'api', '.', 'post_status', '(', 'app', ',', 'user', ',', 'content', ',', 'spoiler_text', '=', 'cw', ',', 'sensitive', '=', 'cw', 'is', 'not', 'None', ',', 'in_reply_to_id', '=', 'status', '[', "'id'", ']', ')', 'status', '=', 'parse_status', '(', 'response', ')', 'self', '.', 'statuses', '.', 'insert', '(', '0', ',', 'status', ')', 'self', '.', 'selected', '+=', '1', 'self', '.', 'left', '.', 'draw_statuses', '(', 'self', '.', 'statuses', ',', 'self', '.', 'selected', ')', 'self', '.', 'footer', '.', 'draw_message', '(', '"✓ Reply posted", ', 'C', 'lor.G', 'R', 'EEN)', '']
Reply to the selected status
['Reply', 'to', 'the', 'selected', 'status']
train
https://github.com/ihabunek/toot/blob/d13fa8685b300f96621fa325774913ec0f413a7f/toot/ui/app.py#L624-L647
2,203
HazyResearch/fonduer
src/fonduer/utils/utils_udf.py
get_mapping
def get_mapping(session, table, candidates, generator, key_map): """Generate map of keys and values for the candidate from the generator. :param session: The database session. :param table: The table we will be inserting into (i.e. Feature or Label). :param candidates: The candidates to get mappings for. :param generator: A generator yielding (candidate_id, key, value) tuples. :param key_map: A mutable dict which values will be added to as {key: [relations]}. :type key_map: Dict :return: Generator of dictionaries of {"candidate_id": _, "keys": _, "values": _} :rtype: generator of dict """ for cand in candidates: # Grab the old values currently in the DB try: temp = session.query(table).filter(table.candidate_id == cand.id).one() cand_map = dict(zip(temp.keys, temp.values)) except NoResultFound: cand_map = {} map_args = {"candidate_id": cand.id} for cid, key, value in generator(cand): if value == 0: continue cand_map[key] = value # Assemble label arguments map_args["keys"] = [*cand_map.keys()] map_args["values"] = [*cand_map.values()] # Update key_map by adding the candidate class for each key for key in map_args["keys"]: try: key_map[key].add(cand.__class__.__tablename__) except KeyError: key_map[key] = {cand.__class__.__tablename__} yield map_args
python
def get_mapping(session, table, candidates, generator, key_map): """Generate map of keys and values for the candidate from the generator. :param session: The database session. :param table: The table we will be inserting into (i.e. Feature or Label). :param candidates: The candidates to get mappings for. :param generator: A generator yielding (candidate_id, key, value) tuples. :param key_map: A mutable dict which values will be added to as {key: [relations]}. :type key_map: Dict :return: Generator of dictionaries of {"candidate_id": _, "keys": _, "values": _} :rtype: generator of dict """ for cand in candidates: # Grab the old values currently in the DB try: temp = session.query(table).filter(table.candidate_id == cand.id).one() cand_map = dict(zip(temp.keys, temp.values)) except NoResultFound: cand_map = {} map_args = {"candidate_id": cand.id} for cid, key, value in generator(cand): if value == 0: continue cand_map[key] = value # Assemble label arguments map_args["keys"] = [*cand_map.keys()] map_args["values"] = [*cand_map.values()] # Update key_map by adding the candidate class for each key for key in map_args["keys"]: try: key_map[key].add(cand.__class__.__tablename__) except KeyError: key_map[key] = {cand.__class__.__tablename__} yield map_args
['def', 'get_mapping', '(', 'session', ',', 'table', ',', 'candidates', ',', 'generator', ',', 'key_map', ')', ':', 'for', 'cand', 'in', 'candidates', ':', '# Grab the old values currently in the DB', 'try', ':', 'temp', '=', 'session', '.', 'query', '(', 'table', ')', '.', 'filter', '(', 'table', '.', 'candidate_id', '==', 'cand', '.', 'id', ')', '.', 'one', '(', ')', 'cand_map', '=', 'dict', '(', 'zip', '(', 'temp', '.', 'keys', ',', 'temp', '.', 'values', ')', ')', 'except', 'NoResultFound', ':', 'cand_map', '=', '{', '}', 'map_args', '=', '{', '"candidate_id"', ':', 'cand', '.', 'id', '}', 'for', 'cid', ',', 'key', ',', 'value', 'in', 'generator', '(', 'cand', ')', ':', 'if', 'value', '==', '0', ':', 'continue', 'cand_map', '[', 'key', ']', '=', 'value', '# Assemble label arguments', 'map_args', '[', '"keys"', ']', '=', '[', '*', 'cand_map', '.', 'keys', '(', ')', ']', 'map_args', '[', '"values"', ']', '=', '[', '*', 'cand_map', '.', 'values', '(', ')', ']', '# Update key_map by adding the candidate class for each key', 'for', 'key', 'in', 'map_args', '[', '"keys"', ']', ':', 'try', ':', 'key_map', '[', 'key', ']', '.', 'add', '(', 'cand', '.', '__class__', '.', '__tablename__', ')', 'except', 'KeyError', ':', 'key_map', '[', 'key', ']', '=', '{', 'cand', '.', '__class__', '.', '__tablename__', '}', 'yield', 'map_args']
Generate map of keys and values for the candidate from the generator. :param session: The database session. :param table: The table we will be inserting into (i.e. Feature or Label). :param candidates: The candidates to get mappings for. :param generator: A generator yielding (candidate_id, key, value) tuples. :param key_map: A mutable dict which values will be added to as {key: [relations]}. :type key_map: Dict :return: Generator of dictionaries of {"candidate_id": _, "keys": _, "values": _} :rtype: generator of dict
['Generate', 'map', 'of', 'keys', 'and', 'values', 'for', 'the', 'candidate', 'from', 'the', 'generator', '.']
train
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_udf.py#L161-L198
2,204
jmvrbanac/Specter
specter/reporting/console.py
ConsoleReporter.output
def output(self, msg, indent, status=None): """ Alias for print_indent_msg with color determined by status.""" color = None if self.use_color: color = get_color_from_status(status) print_indent_msg(msg, indent, color)
python
def output(self, msg, indent, status=None): """ Alias for print_indent_msg with color determined by status.""" color = None if self.use_color: color = get_color_from_status(status) print_indent_msg(msg, indent, color)
['def', 'output', '(', 'self', ',', 'msg', ',', 'indent', ',', 'status', '=', 'None', ')', ':', 'color', '=', 'None', 'if', 'self', '.', 'use_color', ':', 'color', '=', 'get_color_from_status', '(', 'status', ')', 'print_indent_msg', '(', 'msg', ',', 'indent', ',', 'color', ')']
Alias for print_indent_msg with color determined by status.
['Alias', 'for', 'print_indent_msg', 'with', 'color', 'determined', 'by', 'status', '.']
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/reporting/console.py#L139-L144
2,205
gwastro/pycbc
pycbc/inference/sampler/emcee.py
EmceeEnsembleSampler.from_config
def from_config(cls, cp, model, nprocesses=1, use_mpi=False): """Loads the sampler from the given config file.""" section = "sampler" # check name assert cp.get(section, "name") == cls.name, ( "name in section [sampler] must match mine") # get the number of walkers to use nwalkers = int(cp.get(section, "nwalkers")) # get the checkpoint interval, if it's specified checkpoint_interval = cls.checkpoint_from_config(cp, section) checkpoint_signal = cls.ckpt_signal_from_config(cp, section) # get the logpost function lnpost = get_optional_arg_from_config(cp, section, 'logpost-function') obj = cls(model, nwalkers, checkpoint_interval=checkpoint_interval, checkpoint_signal=checkpoint_signal, logpost_function=lnpost, nprocesses=nprocesses, use_mpi=use_mpi) # set target obj.set_target_from_config(cp, section) # add burn-in if it's specified obj.set_burn_in_from_config(cp) # set prethin options obj.set_thin_interval_from_config(cp, section) return obj
python
def from_config(cls, cp, model, nprocesses=1, use_mpi=False): """Loads the sampler from the given config file.""" section = "sampler" # check name assert cp.get(section, "name") == cls.name, ( "name in section [sampler] must match mine") # get the number of walkers to use nwalkers = int(cp.get(section, "nwalkers")) # get the checkpoint interval, if it's specified checkpoint_interval = cls.checkpoint_from_config(cp, section) checkpoint_signal = cls.ckpt_signal_from_config(cp, section) # get the logpost function lnpost = get_optional_arg_from_config(cp, section, 'logpost-function') obj = cls(model, nwalkers, checkpoint_interval=checkpoint_interval, checkpoint_signal=checkpoint_signal, logpost_function=lnpost, nprocesses=nprocesses, use_mpi=use_mpi) # set target obj.set_target_from_config(cp, section) # add burn-in if it's specified obj.set_burn_in_from_config(cp) # set prethin options obj.set_thin_interval_from_config(cp, section) return obj
['def', 'from_config', '(', 'cls', ',', 'cp', ',', 'model', ',', 'nprocesses', '=', '1', ',', 'use_mpi', '=', 'False', ')', ':', 'section', '=', '"sampler"', '# check name', 'assert', 'cp', '.', 'get', '(', 'section', ',', '"name"', ')', '==', 'cls', '.', 'name', ',', '(', '"name in section [sampler] must match mine"', ')', '# get the number of walkers to use', 'nwalkers', '=', 'int', '(', 'cp', '.', 'get', '(', 'section', ',', '"nwalkers"', ')', ')', "# get the checkpoint interval, if it's specified", 'checkpoint_interval', '=', 'cls', '.', 'checkpoint_from_config', '(', 'cp', ',', 'section', ')', 'checkpoint_signal', '=', 'cls', '.', 'ckpt_signal_from_config', '(', 'cp', ',', 'section', ')', '# get the logpost function', 'lnpost', '=', 'get_optional_arg_from_config', '(', 'cp', ',', 'section', ',', "'logpost-function'", ')', 'obj', '=', 'cls', '(', 'model', ',', 'nwalkers', ',', 'checkpoint_interval', '=', 'checkpoint_interval', ',', 'checkpoint_signal', '=', 'checkpoint_signal', ',', 'logpost_function', '=', 'lnpost', ',', 'nprocesses', '=', 'nprocesses', ',', 'use_mpi', '=', 'use_mpi', ')', '# set target', 'obj', '.', 'set_target_from_config', '(', 'cp', ',', 'section', ')', "# add burn-in if it's specified", 'obj', '.', 'set_burn_in_from_config', '(', 'cp', ')', '# set prethin options', 'obj', '.', 'set_thin_interval_from_config', '(', 'cp', ',', 'section', ')', 'return', 'obj']
Loads the sampler from the given config file.
['Loads', 'the', 'sampler', 'from', 'the', 'given', 'config', 'file', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/emcee.py#L194-L218
2,206
saltstack/salt
salt/cloud/clouds/aliyun.py
create_node
def create_node(kwargs): ''' Convenience function to make the rest api call for node creation. ''' if not isinstance(kwargs, dict): kwargs = {} # Required parameters params = { 'Action': 'CreateInstance', 'InstanceType': kwargs.get('size_id', ''), 'RegionId': kwargs.get('region_id', DEFAULT_LOCATION), 'ImageId': kwargs.get('image_id', ''), 'SecurityGroupId': kwargs.get('securitygroup_id', ''), 'InstanceName': kwargs.get('name', ''), } # Optional parameters' optional = [ 'InstanceName', 'InternetChargeType', 'InternetMaxBandwidthIn', 'InternetMaxBandwidthOut', 'HostName', 'Password', 'SystemDisk.Category', 'VSwitchId' # 'DataDisk.n.Size', 'DataDisk.n.Category', 'DataDisk.n.SnapshotId' ] for item in optional: if item in kwargs: params.update({item: kwargs[item]}) # invoke web call result = query(params) return result['InstanceId']
python
def create_node(kwargs): ''' Convenience function to make the rest api call for node creation. ''' if not isinstance(kwargs, dict): kwargs = {} # Required parameters params = { 'Action': 'CreateInstance', 'InstanceType': kwargs.get('size_id', ''), 'RegionId': kwargs.get('region_id', DEFAULT_LOCATION), 'ImageId': kwargs.get('image_id', ''), 'SecurityGroupId': kwargs.get('securitygroup_id', ''), 'InstanceName': kwargs.get('name', ''), } # Optional parameters' optional = [ 'InstanceName', 'InternetChargeType', 'InternetMaxBandwidthIn', 'InternetMaxBandwidthOut', 'HostName', 'Password', 'SystemDisk.Category', 'VSwitchId' # 'DataDisk.n.Size', 'DataDisk.n.Category', 'DataDisk.n.SnapshotId' ] for item in optional: if item in kwargs: params.update({item: kwargs[item]}) # invoke web call result = query(params) return result['InstanceId']
['def', 'create_node', '(', 'kwargs', ')', ':', 'if', 'not', 'isinstance', '(', 'kwargs', ',', 'dict', ')', ':', 'kwargs', '=', '{', '}', '# Required parameters', 'params', '=', '{', "'Action'", ':', "'CreateInstance'", ',', "'InstanceType'", ':', 'kwargs', '.', 'get', '(', "'size_id'", ',', "''", ')', ',', "'RegionId'", ':', 'kwargs', '.', 'get', '(', "'region_id'", ',', 'DEFAULT_LOCATION', ')', ',', "'ImageId'", ':', 'kwargs', '.', 'get', '(', "'image_id'", ',', "''", ')', ',', "'SecurityGroupId'", ':', 'kwargs', '.', 'get', '(', "'securitygroup_id'", ',', "''", ')', ',', "'InstanceName'", ':', 'kwargs', '.', 'get', '(', "'name'", ',', "''", ')', ',', '}', "# Optional parameters'", 'optional', '=', '[', "'InstanceName'", ',', "'InternetChargeType'", ',', "'InternetMaxBandwidthIn'", ',', "'InternetMaxBandwidthOut'", ',', "'HostName'", ',', "'Password'", ',', "'SystemDisk.Category'", ',', "'VSwitchId'", "# 'DataDisk.n.Size', 'DataDisk.n.Category', 'DataDisk.n.SnapshotId'", ']', 'for', 'item', 'in', 'optional', ':', 'if', 'item', 'in', 'kwargs', ':', 'params', '.', 'update', '(', '{', 'item', ':', 'kwargs', '[', 'item', ']', '}', ')', '# invoke web call', 'result', '=', 'query', '(', 'params', ')', 'return', 'result', '[', "'InstanceId'", ']']
Convenience function to make the rest api call for node creation.
['Convenience', 'function', 'to', 'make', 'the', 'rest', 'api', 'call', 'for', 'node', 'creation', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/aliyun.py#L577-L608
2,207
supercoderz/pyflightdata
pyflightdata/flightdata.py
FlightData.get_history_by_tail_number
def get_history_by_tail_number(self, tail_number, page=1, limit=100): """Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_flight_number('VT-ANL') f.get_history_by_flight_number('VT-ANL',page=1,limit=10) """ url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit) return self._fr24.get_data(url, True)
python
def get_history_by_tail_number(self, tail_number, page=1, limit=100): """Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_flight_number('VT-ANL') f.get_history_by_flight_number('VT-ANL',page=1,limit=10) """ url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit) return self._fr24.get_data(url, True)
['def', 'get_history_by_tail_number', '(', 'self', ',', 'tail_number', ',', 'page', '=', '1', ',', 'limit', '=', '100', ')', ':', 'url', '=', 'REG_BASE', '.', 'format', '(', 'tail_number', ',', 'str', '(', 'self', '.', 'AUTH_TOKEN', ')', ',', 'page', ',', 'limit', ')', 'return', 'self', '.', '_fr24', '.', 'get_data', '(', 'url', ',', 'True', ')']
Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_flight_number('VT-ANL') f.get_history_by_flight_number('VT-ANL',page=1,limit=10)
['Fetch', 'the', 'history', 'of', 'a', 'particular', 'aircraft', 'by', 'its', 'tail', 'number', '.']
train
https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L85-L110
2,208
dtcooper/python-fitparse
fitparse/records.py
Crc.calculate
def calculate(cls, byte_arr, crc=0): """Compute CRC for input bytes.""" for byte in byte_iter(byte_arr): # Taken verbatim from FIT SDK docs tmp = cls.CRC_TABLE[crc & 0xF] crc = (crc >> 4) & 0x0FFF crc = crc ^ tmp ^ cls.CRC_TABLE[byte & 0xF] tmp = cls.CRC_TABLE[crc & 0xF] crc = (crc >> 4) & 0x0FFF crc = crc ^ tmp ^ cls.CRC_TABLE[(byte >> 4) & 0xF] return crc
python
def calculate(cls, byte_arr, crc=0): """Compute CRC for input bytes.""" for byte in byte_iter(byte_arr): # Taken verbatim from FIT SDK docs tmp = cls.CRC_TABLE[crc & 0xF] crc = (crc >> 4) & 0x0FFF crc = crc ^ tmp ^ cls.CRC_TABLE[byte & 0xF] tmp = cls.CRC_TABLE[crc & 0xF] crc = (crc >> 4) & 0x0FFF crc = crc ^ tmp ^ cls.CRC_TABLE[(byte >> 4) & 0xF] return crc
['def', 'calculate', '(', 'cls', ',', 'byte_arr', ',', 'crc', '=', '0', ')', ':', 'for', 'byte', 'in', 'byte_iter', '(', 'byte_arr', ')', ':', '# Taken verbatim from FIT SDK docs', 'tmp', '=', 'cls', '.', 'CRC_TABLE', '[', 'crc', '&', '0xF', ']', 'crc', '=', '(', 'crc', '>>', '4', ')', '&', '0x0FFF', 'crc', '=', 'crc', '^', 'tmp', '^', 'cls', '.', 'CRC_TABLE', '[', 'byte', '&', '0xF', ']', 'tmp', '=', 'cls', '.', 'CRC_TABLE', '[', 'crc', '&', '0xF', ']', 'crc', '=', '(', 'crc', '>>', '4', ')', '&', '0x0FFF', 'crc', '=', 'crc', '^', 'tmp', '^', 'cls', '.', 'CRC_TABLE', '[', '(', 'byte', '>>', '4', ')', '&', '0xF', ']', 'return', 'crc']
Compute CRC for input bytes.
['Compute', 'CRC', 'for', 'input', 'bytes', '.']
train
https://github.com/dtcooper/python-fitparse/blob/40fa2918c3e91bd8f89908ad3bad81c1c1189dd2/fitparse/records.py#L376-L387
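Row 2,208's Crc.calculate is the nibble-table CRC-16 used to checksum FIT files: each byte is folded into the running CRC four bits at a time through a 16-entry lookup table. Below is a self-contained sketch of the same technique; the table values are the widely published CRC-16/ARC nibble table as used by the FIT SDK (assumed here, not taken from this row), and crc16 is a hypothetical standalone name rather than part of python-fitparse.

# Standalone sketch of a nibble-table CRC-16 (table assumed from the public
# CRC-16/ARC definition; crc16 is not a python-fitparse function).
CRC_TABLE = (
    0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401,
    0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400,
)

def crc16(data: bytes, crc: int = 0) -> int:
    """Fold each byte into the running CRC four bits at a time, low nibble first."""
    for byte in data:
        for nibble in (byte & 0xF, byte >> 4):
            tmp = CRC_TABLE[crc & 0xF]
            crc = ((crc >> 4) & 0x0FFF) ^ tmp ^ CRC_TABLE[nibble]
    return crc

# The running crc argument allows chunked input:
assert crc16(b"cd", crc=crc16(b"ab")) == crc16(b"abcd")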
2,209
saltstack/salt
salt/states/mac_xattr.py
exists
def exists(name, attributes): ''' Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: attr_id, attr_val = attr.split("=") attr_hex = attr_val.startswith("0x") if attr_hex: # Remove spaces and new lines so we can match these current_attrs[attr_id] = __salt__['xattr.read'](name, attr_id, hex=True).replace(" ", "").replace("\n", "") attr_val = attr_val[2:].replace(" ", "") if attr_id not in current_attrs: value_matches = False else: value_matches = ((current_attrs[attr_id] == attr_val) or (attr_hex and current_attrs[attr_id] == attr_val)) if attr_id in current_ids and value_matches: continue else: ret['changes'][attr_id] = attr_val __salt__['xattr.write'](name, attr_id, attr_val, attr_hex) if not ret['changes']: ret['comment'] = 'All values existed correctly.' return ret
python
def exists(name, attributes): ''' Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: attr_id, attr_val = attr.split("=") attr_hex = attr_val.startswith("0x") if attr_hex: # Remove spaces and new lines so we can match these current_attrs[attr_id] = __salt__['xattr.read'](name, attr_id, hex=True).replace(" ", "").replace("\n", "") attr_val = attr_val[2:].replace(" ", "") if attr_id not in current_attrs: value_matches = False else: value_matches = ((current_attrs[attr_id] == attr_val) or (attr_hex and current_attrs[attr_id] == attr_val)) if attr_id in current_ids and value_matches: continue else: ret['changes'][attr_id] = attr_val __salt__['xattr.write'](name, attr_id, attr_val, attr_hex) if not ret['changes']: ret['comment'] = 'All values existed correctly.' return ret
['def', 'exists', '(', 'name', ',', 'attributes', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'True', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'name', ')', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', '"File or directory doesn\'t exist"', 'return', 'ret', 'current_attrs', '=', '__salt__', '[', "'xattr.list'", ']', '(', 'name', ')', 'current_ids', '=', 'current_attrs', '.', 'keys', '(', ')', 'for', 'attr', 'in', 'attributes', ':', 'attr_id', ',', 'attr_val', '=', 'attr', '.', 'split', '(', '"="', ')', 'attr_hex', '=', 'attr_val', '.', 'startswith', '(', '"0x"', ')', 'if', 'attr_hex', ':', '# Remove spaces and new lines so we can match these', 'current_attrs', '[', 'attr_id', ']', '=', '__salt__', '[', "'xattr.read'", ']', '(', 'name', ',', 'attr_id', ',', 'hex', '=', 'True', ')', '.', 'replace', '(', '" "', ',', '""', ')', '.', 'replace', '(', '"\\n"', ',', '""', ')', 'attr_val', '=', 'attr_val', '[', '2', ':', ']', '.', 'replace', '(', '" "', ',', '""', ')', 'if', 'attr_id', 'not', 'in', 'current_attrs', ':', 'value_matches', '=', 'False', 'else', ':', 'value_matches', '=', '(', '(', 'current_attrs', '[', 'attr_id', ']', '==', 'attr_val', ')', 'or', '(', 'attr_hex', 'and', 'current_attrs', '[', 'attr_id', ']', '==', 'attr_val', ')', ')', 'if', 'attr_id', 'in', 'current_ids', 'and', 'value_matches', ':', 'continue', 'else', ':', 'ret', '[', "'changes'", ']', '[', 'attr_id', ']', '=', 'attr_val', '__salt__', '[', "'xattr.write'", ']', '(', 'name', ',', 'attr_id', ',', 'attr_val', ',', 'attr_hex', ')', 'if', 'not', 'ret', '[', "'changes'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'All values existed correctly.'", 'return', 'ret']
Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value.
['Make', 'sure', 'the', 'given', 'attributes', 'exist', 'on', 'the', 'file', '/', 'directory']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mac_xattr.py#L35-L85
2,210
riga/law
law/decorator.py
factory
def factory(**default_opts): """ Factory function to create decorators for tasks' run methods. Default options for the decorator function can be given in *default_opts*. The returned decorator can be used with or without function invocation. Example: .. code-block:: python @factory(digits=2) def runtime(fn, opts, task, *args, **kwargs): t0 = time.time() try: return fn(task, *args, **kwargs) finally: t1 = time.time() diff = round(t1 - t0, opts["digits"]) print("runtime:") print(diff) ... class MyTask(law.Task): @runtime def run(self): ... # or @runtime(digits=3): def run(self): ... .. note:: Decorators might not have the expected behavior when used to decorate generator functions such as ``Task.run()`` methods that yield dynamic dependencies. """ def wrapper(decorator): @functools.wraps(decorator) def wrapper(fn=None, **opts): _opts = default_opts.copy() _opts.update(opts) def wrapper(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return decorator(fn, _opts, *args, **kwargs) return wrapper return wrapper if fn is None else wrapper(fn) return wrapper return wrapper
python
def factory(**default_opts): """ Factory function to create decorators for tasks' run methods. Default options for the decorator function can be given in *default_opts*. The returned decorator can be used with or without function invocation. Example: .. code-block:: python @factory(digits=2) def runtime(fn, opts, task, *args, **kwargs): t0 = time.time() try: return fn(task, *args, **kwargs) finally: t1 = time.time() diff = round(t1 - t0, opts["digits"]) print("runtime:") print(diff) ... class MyTask(law.Task): @runtime def run(self): ... # or @runtime(digits=3): def run(self): ... .. note:: Decorators might not have the expected behavior when used to decorate generator functions such as ``Task.run()`` methods that yield dynamic dependencies. """ def wrapper(decorator): @functools.wraps(decorator) def wrapper(fn=None, **opts): _opts = default_opts.copy() _opts.update(opts) def wrapper(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return decorator(fn, _opts, *args, **kwargs) return wrapper return wrapper if fn is None else wrapper(fn) return wrapper return wrapper
['def', 'factory', '(', '*', '*', 'default_opts', ')', ':', 'def', 'wrapper', '(', 'decorator', ')', ':', '@', 'functools', '.', 'wraps', '(', 'decorator', ')', 'def', 'wrapper', '(', 'fn', '=', 'None', ',', '*', '*', 'opts', ')', ':', '_opts', '=', 'default_opts', '.', 'copy', '(', ')', '_opts', '.', 'update', '(', 'opts', ')', 'def', 'wrapper', '(', 'fn', ')', ':', '@', 'functools', '.', 'wraps', '(', 'fn', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'decorator', '(', 'fn', ',', '_opts', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrapper', 'return', 'wrapper', 'if', 'fn', 'is', 'None', 'else', 'wrapper', '(', 'fn', ')', 'return', 'wrapper', 'return', 'wrapper']
Factory function to create decorators for tasks' run methods. Default options for the decorator function can be given in *default_opts*. The returned decorator can be used with or without function invocation. Example: .. code-block:: python @factory(digits=2) def runtime(fn, opts, task, *args, **kwargs): t0 = time.time() try: return fn(task, *args, **kwargs) finally: t1 = time.time() diff = round(t1 - t0, opts["digits"]) print("runtime:") print(diff) ... class MyTask(law.Task): @runtime def run(self): ... # or @runtime(digits=3): def run(self): ... .. note:: Decorators might not have the expected behavior when used to decorate generator functions such as ``Task.run()`` methods that yield dynamic dependencies.
['Factory', 'function', 'to', 'create', 'decorators', 'for', 'tasks', 'run', 'methods', '.', 'Default', 'options', 'for', 'the', 'decorator', 'function', 'can', 'be', 'given', 'in', '*', 'default_opts', '*', '.', 'The', 'returned', 'decorator', 'can', 'be', 'used', 'with', 'or', 'without', 'function', 'invocation', '.', 'Example', ':']
train
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L46-L97
2,211
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI.get_stats
async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY), platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None): """Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.""" if platform is None: platform = self.default_platform try: blob_dict = await self._base_request(battletag, "stats", _session, platform=platform, handle_ratelimit=handle_ratelimit, max_tries=max_tries, request_timeout=request_timeout) except ProfileNotFoundError as e: # The battletag doesn't exist blob_dict = {} existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))} return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "stats"][0] for key, val in existing_regions.items() if key in regions}
python
async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY), platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None): """Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.""" if platform is None: platform = self.default_platform try: blob_dict = await self._base_request(battletag, "stats", _session, platform=platform, handle_ratelimit=handle_ratelimit, max_tries=max_tries, request_timeout=request_timeout) except ProfileNotFoundError as e: # The battletag doesn't exist blob_dict = {} existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))} return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "stats"][0] for key, val in existing_regions.items() if key in regions}
['async', 'def', 'get_stats', '(', 'self', ',', 'battletag', ':', 'str', ',', 'regions', '=', '(', 'EUROPE', ',', 'KOREA', ',', 'AMERICAS', ',', 'CHINA', ',', 'JAPAN', ',', 'ANY', ')', ',', 'platform', '=', 'None', ',', '_session', '=', 'None', ',', 'handle_ratelimit', '=', 'None', ',', 'max_tries', '=', 'None', ',', 'request_timeout', '=', 'None', ')', ':', 'if', 'platform', 'is', 'None', ':', 'platform', '=', 'self', '.', 'default_platform', 'try', ':', 'blob_dict', '=', 'await', 'self', '.', '_base_request', '(', 'battletag', ',', '"stats"', ',', '_session', ',', 'platform', '=', 'platform', ',', 'handle_ratelimit', '=', 'handle_ratelimit', ',', 'max_tries', '=', 'max_tries', ',', 'request_timeout', '=', 'request_timeout', ')', 'except', 'ProfileNotFoundError', 'as', 'e', ':', "# The battletag doesn't exist", 'blob_dict', '=', '{', '}', 'existing_regions', '=', '{', 'key', ':', 'val', 'for', 'key', ',', 'val', 'in', 'blob_dict', '.', 'items', '(', ')', 'if', '(', '(', 'val', 'is', 'not', 'None', ')', 'and', '(', 'key', '!=', '"_request"', ')', ')', '}', 'return', '{', 'key', ':', '[', 'inner_val', 'for', 'inner_key', ',', 'inner_val', 'in', 'val', '.', 'items', '(', ')', 'if', 'inner_key', '==', '"stats"', ']', '[', '0', ']', 'for', 'key', ',', 'val', 'in', 'existing_regions', '.', 'items', '(', ')', 'if', 'key', 'in', 'regions', '}']
Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.
['Returns', 'the', 'stats', 'for', 'the', 'profiles', 'on', 'the', 'specified', 'regions', 'and', 'platform', '.', 'The', 'format', 'for', 'regions', 'without', 'a', 'matching', 'user', 'the', 'format', 'is', 'the', 'same', 'as', 'get_profile', '.', 'The', 'stats', 'are', 'returned', 'in', 'a', 'dictionary', 'with', 'a', 'similar', 'format', 'to', 'what', 'https', ':', '//', 'github', '.', 'com', '/', 'SunDwarf', '/', 'OWAPI', '/', 'blob', '/', 'master', '/', 'api', '.', 'md#get', '-', 'apiv3ubattletagstats', 'specifies', '.']
train
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L98-L114
2,212
quantopian/zipline
zipline/finance/asset_restrictions.py
HistoricalRestrictions.is_restricted
def is_restricted(self, assets, dt): """ Returns whether or not an asset or iterable of assets is restricted on a dt. """ if isinstance(assets, Asset): return self._is_restricted_for_asset(assets, dt) is_restricted = partial(self._is_restricted_for_asset, dt=dt) return pd.Series( index=pd.Index(assets), data=vectorize(is_restricted, otypes=[bool])(assets) )
python
def is_restricted(self, assets, dt): """ Returns whether or not an asset or iterable of assets is restricted on a dt. """ if isinstance(assets, Asset): return self._is_restricted_for_asset(assets, dt) is_restricted = partial(self._is_restricted_for_asset, dt=dt) return pd.Series( index=pd.Index(assets), data=vectorize(is_restricted, otypes=[bool])(assets) )
['def', 'is_restricted', '(', 'self', ',', 'assets', ',', 'dt', ')', ':', 'if', 'isinstance', '(', 'assets', ',', 'Asset', ')', ':', 'return', 'self', '.', '_is_restricted_for_asset', '(', 'assets', ',', 'dt', ')', 'is_restricted', '=', 'partial', '(', 'self', '.', '_is_restricted_for_asset', ',', 'dt', '=', 'dt', ')', 'return', 'pd', '.', 'Series', '(', 'index', '=', 'pd', '.', 'Index', '(', 'assets', ')', ',', 'data', '=', 'vectorize', '(', 'is_restricted', ',', 'otypes', '=', '[', 'bool', ']', ')', '(', 'assets', ')', ')']
Returns whether or not an asset or iterable of assets is restricted on a dt.
['Returns', 'whether', 'or', 'not', 'an', 'asset', 'or', 'iterable', 'of', 'assets', 'is', 'restricted', 'on', 'a', 'dt', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/asset_restrictions.py#L177-L189
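Row 2,212's is_restricted illustrates a compact pattern: fix one argument of a per-element predicate with functools.partial, then apply it element-wise with numpy.vectorize to build a boolean pandas Series indexed by the inputs. A generic sketch of the same pattern follows; the predicate and values are illustrative, not zipline objects.

# Same partial + vectorize pattern as HistoricalRestrictions.is_restricted (row 2,212),
# shown with a toy predicate instead of zipline Assets.
from functools import partial

import numpy as np
import pandas as pd

def exceeds_limit(value, limit):
    return value > limit

values = [1, 5, 10, 20]
pred = partial(exceeds_limit, limit=7)
flags = pd.Series(
    index=pd.Index(values),
    data=np.vectorize(pred, otypes=[bool])(values),
)
print(flags)  # False for 1 and 5, True for 10 and 20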
2,213
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
RocketChat.groups_moderators
def groups_moderators(self, room_id=None, group=None, **kwargs): """Lists all moderators of a group.""" if room_id: return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs) elif group: return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs) else: raise RocketMissingParamException('roomId or group required')
python
def groups_moderators(self, room_id=None, group=None, **kwargs): """Lists all moderators of a group.""" if room_id: return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs) elif group: return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs) else: raise RocketMissingParamException('roomId or group required')
['def', 'groups_moderators', '(', 'self', ',', 'room_id', '=', 'None', ',', 'group', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'room_id', ':', 'return', 'self', '.', '__call_api_get', '(', "'groups.moderators'", ',', 'roomId', '=', 'room_id', ',', 'kwargs', '=', 'kwargs', ')', 'elif', 'group', ':', 'return', 'self', '.', '__call_api_get', '(', "'groups.moderators'", ',', 'roomName', '=', 'group', ',', 'kwargs', '=', 'kwargs', ')', 'else', ':', 'raise', 'RocketMissingParamException', '(', "'roomId or group required'", ')']
Lists all moderators of a group.
['Lists', 'all', 'moderators', 'of', 'a', 'group', '.']
train
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L449-L456
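Row 2,213's groups_moderators is one of the thin wrappers rocketchat_API exposes over the REST endpoints; it needs either a room_id or a group name and returns the underlying HTTP response. A brief usage sketch; the server URL, credentials, and group name are placeholders.

# Listing moderators of a private group with rocketchat_API.
# Server URL, credentials, and group name below are placeholders.
from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat("my-user", "my-password", server_url="https://chat.example.com")
response = rocket.groups_moderators(group="my-private-group")  # or room_id="..."
print(response.json())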
2,214
c0fec0de/anytree
anytree/exporter/dictexporter.py
DictExporter.export
def export(self, node): """Export tree starting at `node`.""" attriter = self.attriter or (lambda attr_values: attr_values) return self.__export(node, self.dictcls, attriter, self.childiter)
python
def export(self, node): """Export tree starting at `node`.""" attriter = self.attriter or (lambda attr_values: attr_values) return self.__export(node, self.dictcls, attriter, self.childiter)
['def', 'export', '(', 'self', ',', 'node', ')', ':', 'attriter', '=', 'self', '.', 'attriter', 'or', '(', 'lambda', 'attr_values', ':', 'attr_values', ')', 'return', 'self', '.', '__export', '(', 'node', ',', 'self', '.', 'dictcls', ',', 'attriter', ',', 'self', '.', 'childiter', ')']
Export tree starting at `node`.
['Export', 'tree', 'starting', 'at', 'node', '.']
train
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/dictexporter.py#L70-L73
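Row 2,214's export is the public entry point of anytree's DictExporter, which walks a tree and returns nested dictionaries. A brief usage sketch; the node names and the extra attribute are illustrative.

# Exporting a small anytree tree to nested dictionaries.
from anytree import Node
from anytree.exporter import DictExporter

root = Node("root", answer=42)   # extra keyword attributes become dict entries
Node("child", parent=root)

print(DictExporter().export(root))
# Expected shape, per anytree's documented behaviour:
# {'name': 'root', 'answer': 42, 'children': [{'name': 'child'}]}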
2,215
volafiled/python-volapi
volapi/volapi.py
Room.__get_config
def __get_config(self): """ Really connect """ if not self.name: room_resp = self.conn.get(BASE_URL + "/new") room_resp.raise_for_status() url = room_resp.url try: self.name = re.search(r"r/(.+?)$", url).group(1) except Exception: raise IOError("Failed to create room") params = {"room": self.name} if self.key: params["roomKey"] = self.key if self.password: params["password"] = self.password config = self.conn.make_api_call("getRoomConfig", params) if "error" in config: raise RuntimeError( f"Failed to get room config for {self.name}\n" f"{config['error'].get('message') or config['error']}" ) self.config.update(config) self.__add_prop("private") self.__add_prop("title") self.__add_prop("motd") self.__add_prop("adult") self.__add_prop("disabled", True) self.__add_prop("file_ttl", True) return (self.config.room_id, self.config.owner, config["checksum2"])
python
def __get_config(self): """ Really connect """ if not self.name: room_resp = self.conn.get(BASE_URL + "/new") room_resp.raise_for_status() url = room_resp.url try: self.name = re.search(r"r/(.+?)$", url).group(1) except Exception: raise IOError("Failed to create room") params = {"room": self.name} if self.key: params["roomKey"] = self.key if self.password: params["password"] = self.password config = self.conn.make_api_call("getRoomConfig", params) if "error" in config: raise RuntimeError( f"Failed to get room config for {self.name}\n" f"{config['error'].get('message') or config['error']}" ) self.config.update(config) self.__add_prop("private") self.__add_prop("title") self.__add_prop("motd") self.__add_prop("adult") self.__add_prop("disabled", True) self.__add_prop("file_ttl", True) return (self.config.room_id, self.config.owner, config["checksum2"])
['def', '__get_config', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'name', ':', 'room_resp', '=', 'self', '.', 'conn', '.', 'get', '(', 'BASE_URL', '+', '"/new"', ')', 'room_resp', '.', 'raise_for_status', '(', ')', 'url', '=', 'room_resp', '.', 'url', 'try', ':', 'self', '.', 'name', '=', 're', '.', 'search', '(', 'r"r/(.+?)$"', ',', 'url', ')', '.', 'group', '(', '1', ')', 'except', 'Exception', ':', 'raise', 'IOError', '(', '"Failed to create room"', ')', 'params', '=', '{', '"room"', ':', 'self', '.', 'name', '}', 'if', 'self', '.', 'key', ':', 'params', '[', '"roomKey"', ']', '=', 'self', '.', 'key', 'if', 'self', '.', 'password', ':', 'params', '[', '"password"', ']', '=', 'self', '.', 'password', 'config', '=', 'self', '.', 'conn', '.', 'make_api_call', '(', '"getRoomConfig"', ',', 'params', ')', 'if', '"error"', 'in', 'config', ':', 'raise', 'RuntimeError', '(', 'f"Failed to get room config for {self.name}\\n"', 'f"{config[\'error\'].get(\'message\') or config[\'error\']}"', ')', 'self', '.', 'config', '.', 'update', '(', 'config', ')', 'self', '.', '__add_prop', '(', '"private"', ')', 'self', '.', '__add_prop', '(', '"title"', ')', 'self', '.', '__add_prop', '(', '"motd"', ')', 'self', '.', '__add_prop', '(', '"adult"', ')', 'self', '.', '__add_prop', '(', '"disabled"', ',', 'True', ')', 'self', '.', '__add_prop', '(', '"file_ttl"', ',', 'True', ')', 'return', '(', 'self', '.', 'config', '.', 'room_id', ',', 'self', '.', 'config', '.', 'owner', ',', 'config', '[', '"checksum2"', ']', ')']
Really connect
['Really', 'connect']
train
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L442-L471
2,216
keon/algorithms
algorithms/tree/avl/avl.py
AvlTree.update_balances
def update_balances(self, recursive=True): """ Calculate tree balance factor """ if self.node: if recursive: if self.node.left: self.node.left.update_balances() if self.node.right: self.node.right.update_balances() self.balance = self.node.left.height - self.node.right.height else: self.balance = 0
python
def update_balances(self, recursive=True): """ Calculate tree balance factor """ if self.node: if recursive: if self.node.left: self.node.left.update_balances() if self.node.right: self.node.right.update_balances() self.balance = self.node.left.height - self.node.right.height else: self.balance = 0
['def', 'update_balances', '(', 'self', ',', 'recursive', '=', 'True', ')', ':', 'if', 'self', '.', 'node', ':', 'if', 'recursive', ':', 'if', 'self', '.', 'node', '.', 'left', ':', 'self', '.', 'node', '.', 'left', '.', 'update_balances', '(', ')', 'if', 'self', '.', 'node', '.', 'right', ':', 'self', '.', 'node', '.', 'right', '.', 'update_balances', '(', ')', 'self', '.', 'balance', '=', 'self', '.', 'node', '.', 'left', '.', 'height', '-', 'self', '.', 'node', '.', 'right', '.', 'height', 'else', ':', 'self', '.', 'balance', '=', '0']
Calculate tree balance factor
['Calculate', 'tree', 'balance', 'factor']
train
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L72-L86
2,217
pandas-dev/pandas
pandas/io/pytables.py
Table._get_metadata_path
def _get_metadata_path(self, key): """ return the metadata pathname for this key """ return "{group}/meta/{key}/meta".format(group=self.group._v_pathname, key=key)
python
def _get_metadata_path(self, key): """ return the metadata pathname for this key """ return "{group}/meta/{key}/meta".format(group=self.group._v_pathname, key=key)
['def', '_get_metadata_path', '(', 'self', ',', 'key', ')', ':', 'return', '"{group}/meta/{key}/meta"', '.', 'format', '(', 'group', '=', 'self', '.', 'group', '.', '_v_pathname', ',', 'key', '=', 'key', ')']
return the metadata pathname for this key
['return', 'the', 'metadata', 'pathname', 'for', 'this', 'key']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3213-L3216
2,218
Robin8Put/pmes
wallet_manager/handlers/withdraw.py
Withdraw.withdraw_bulk
async def withdraw_bulk(self, *args, **kwargs): """ Withdraw funds requests to user wallet Accepts: - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) - address [string] withdrawal address (in hex for tokens) - amount [int] withdrawal amount multiplied by decimals_k (10**8) Returns dictionary with following fields: - success [bool] """ await self.db.withdraw_requests.insert_one({ 'coinid': kwargs.get("coinid"), 'address': kwargs.get("address"), 'amount': int(kwargs.get("amount")), 'timestamp': datetime.datetime.utcnow() }) return {'success': True}
python
async def withdraw_bulk(self, *args, **kwargs): """ Withdraw funds requests to user wallet Accepts: - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) - address [string] withdrawal address (in hex for tokens) - amount [int] withdrawal amount multiplied by decimals_k (10**8) Returns dictionary with following fields: - success [bool] """ await self.db.withdraw_requests.insert_one({ 'coinid': kwargs.get("coinid"), 'address': kwargs.get("address"), 'amount': int(kwargs.get("amount")), 'timestamp': datetime.datetime.utcnow() }) return {'success': True}
['async', 'def', 'withdraw_bulk', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'await', 'self', '.', 'db', '.', 'withdraw_requests', '.', 'insert_one', '(', '{', "'coinid'", ':', 'kwargs', '.', 'get', '(', '"coinid"', ')', ',', "'address'", ':', 'kwargs', '.', 'get', '(', '"address"', ')', ',', "'amount'", ':', 'int', '(', 'kwargs', '.', 'get', '(', '"amount"', ')', ')', ',', "'timestamp'", ':', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', '}', ')', 'return', '{', "'success'", ':', 'True', '}']
Withdraw funds requests to user wallet Accepts: - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) - address [string] withdrawal address (in hex for tokens) - amount [int] withdrawal amount multiplied by decimals_k (10**8) Returns dictionary with following fields: - success [bool]
['Withdraw', 'funds', 'requests', 'to', 'user', 'wallet']
train
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/wallet_manager/handlers/withdraw.py#L156-L174
2,219
twilio/twilio-python
twilio/rest/preview/bulk_exports/export_configuration.py
ExportConfigurationContext.update
def update(self, enabled=values.unset, webhook_url=values.unset, webhook_method=values.unset): """ Update the ExportConfigurationInstance :param bool enabled: The enabled :param unicode webhook_url: The webhook_url :param unicode webhook_method: The webhook_method :returns: Updated ExportConfigurationInstance :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance """ data = values.of({'Enabled': enabled, 'WebhookUrl': webhook_url, 'WebhookMethod': webhook_method, }) payload = self._version.update( 'POST', self._uri, data=data, ) return ExportConfigurationInstance( self._version, payload, resource_type=self._solution['resource_type'], )
python
def update(self, enabled=values.unset, webhook_url=values.unset, webhook_method=values.unset): """ Update the ExportConfigurationInstance :param bool enabled: The enabled :param unicode webhook_url: The webhook_url :param unicode webhook_method: The webhook_method :returns: Updated ExportConfigurationInstance :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance """ data = values.of({'Enabled': enabled, 'WebhookUrl': webhook_url, 'WebhookMethod': webhook_method, }) payload = self._version.update( 'POST', self._uri, data=data, ) return ExportConfigurationInstance( self._version, payload, resource_type=self._solution['resource_type'], )
['def', 'update', '(', 'self', ',', 'enabled', '=', 'values', '.', 'unset', ',', 'webhook_url', '=', 'values', '.', 'unset', ',', 'webhook_method', '=', 'values', '.', 'unset', ')', ':', 'data', '=', 'values', '.', 'of', '(', '{', "'Enabled'", ':', 'enabled', ',', "'WebhookUrl'", ':', 'webhook_url', ',', "'WebhookMethod'", ':', 'webhook_method', ',', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'update', '(', "'POST'", ',', 'self', '.', '_uri', ',', 'data', '=', 'data', ',', ')', 'return', 'ExportConfigurationInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'resource_type', '=', 'self', '.', '_solution', '[', "'resource_type'", ']', ',', ')']
Update the ExportConfigurationInstance :param bool enabled: The enabled :param unicode webhook_url: The webhook_url :param unicode webhook_method: The webhook_method :returns: Updated ExportConfigurationInstance :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
['Update', 'the', 'ExportConfigurationInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/bulk_exports/export_configuration.py#L150-L174
2,220
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/poller.py
AsyncPoller.poll
def poll(self): """ Poll the job status. Returns the changes in this iteration.""" self.runner.module_name = 'async_status' self.runner.module_args = "jid=%s" % self.jid self.runner.pattern = "*" self.runner.background = 0 self.runner.inventory.restrict_to(self.hosts_to_poll) results = self.runner.run() self.runner.inventory.lift_restriction() hosts = [] poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}} for (host, res) in results['contacted'].iteritems(): if res.get('started',False): hosts.append(host) poll_results['polled'][host] = res else: self.results['contacted'][host] = res poll_results['contacted'][host] = res if 'failed' in res: self.runner.callbacks.on_async_failed(host, res, self.jid) else: self.runner.callbacks.on_async_ok(host, res, self.jid) for (host, res) in results['dark'].iteritems(): self.results['dark'][host] = res poll_results['dark'][host] = res self.runner.callbacks.on_async_failed(host, res, self.jid) self.hosts_to_poll = hosts if len(hosts)==0: self.completed = True return poll_results
python
def poll(self): """ Poll the job status. Returns the changes in this iteration.""" self.runner.module_name = 'async_status' self.runner.module_args = "jid=%s" % self.jid self.runner.pattern = "*" self.runner.background = 0 self.runner.inventory.restrict_to(self.hosts_to_poll) results = self.runner.run() self.runner.inventory.lift_restriction() hosts = [] poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}} for (host, res) in results['contacted'].iteritems(): if res.get('started',False): hosts.append(host) poll_results['polled'][host] = res else: self.results['contacted'][host] = res poll_results['contacted'][host] = res if 'failed' in res: self.runner.callbacks.on_async_failed(host, res, self.jid) else: self.runner.callbacks.on_async_ok(host, res, self.jid) for (host, res) in results['dark'].iteritems(): self.results['dark'][host] = res poll_results['dark'][host] = res self.runner.callbacks.on_async_failed(host, res, self.jid) self.hosts_to_poll = hosts if len(hosts)==0: self.completed = True return poll_results
['def', 'poll', '(', 'self', ')', ':', 'self', '.', 'runner', '.', 'module_name', '=', "'async_status'", 'self', '.', 'runner', '.', 'module_args', '=', '"jid=%s"', '%', 'self', '.', 'jid', 'self', '.', 'runner', '.', 'pattern', '=', '"*"', 'self', '.', 'runner', '.', 'background', '=', '0', 'self', '.', 'runner', '.', 'inventory', '.', 'restrict_to', '(', 'self', '.', 'hosts_to_poll', ')', 'results', '=', 'self', '.', 'runner', '.', 'run', '(', ')', 'self', '.', 'runner', '.', 'inventory', '.', 'lift_restriction', '(', ')', 'hosts', '=', '[', ']', 'poll_results', '=', '{', "'contacted'", ':', '{', '}', ',', "'dark'", ':', '{', '}', ',', "'polled'", ':', '{', '}', '}', 'for', '(', 'host', ',', 'res', ')', 'in', 'results', '[', "'contacted'", ']', '.', 'iteritems', '(', ')', ':', 'if', 'res', '.', 'get', '(', "'started'", ',', 'False', ')', ':', 'hosts', '.', 'append', '(', 'host', ')', 'poll_results', '[', "'polled'", ']', '[', 'host', ']', '=', 'res', 'else', ':', 'self', '.', 'results', '[', "'contacted'", ']', '[', 'host', ']', '=', 'res', 'poll_results', '[', "'contacted'", ']', '[', 'host', ']', '=', 'res', 'if', "'failed'", 'in', 'res', ':', 'self', '.', 'runner', '.', 'callbacks', '.', 'on_async_failed', '(', 'host', ',', 'res', ',', 'self', '.', 'jid', ')', 'else', ':', 'self', '.', 'runner', '.', 'callbacks', '.', 'on_async_ok', '(', 'host', ',', 'res', ',', 'self', '.', 'jid', ')', 'for', '(', 'host', ',', 'res', ')', 'in', 'results', '[', "'dark'", ']', '.', 'iteritems', '(', ')', ':', 'self', '.', 'results', '[', "'dark'", ']', '[', 'host', ']', '=', 'res', 'poll_results', '[', "'dark'", ']', '[', 'host', ']', '=', 'res', 'self', '.', 'runner', '.', 'callbacks', '.', 'on_async_failed', '(', 'host', ',', 'res', ',', 'self', '.', 'jid', ')', 'self', '.', 'hosts_to_poll', '=', 'hosts', 'if', 'len', '(', 'hosts', ')', '==', '0', ':', 'self', '.', 'completed', '=', 'True', 'return', 'poll_results']
Poll the job status. Returns the changes in this iteration.
['Poll', 'the', 'job', 'status', '.']
train
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/poller.py#L54-L89
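Usage sketch for the poll() row above: the method is meant to be driven in a loop until every host reports in. This is a hypothetical driver; the poller construction and the 5-second interval are assumptions, not part of the row.

import time

# assumed context: `poller` wraps an earlier fire-and-forget run (results + runner + jid)
while not poller.completed:
    chunk = poller.poll()        # one pass over the hosts still marked as 'started'
    time.sleep(5)                # interval is illustrative
# poller.results now holds the merged 'contacted' and 'dark' entries for all hosts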
2,221
saltstack/salt
salt/modules/schedule.py
modify
def modify(name, **kwargs): ''' Modify an existing job in the schedule CLI Example: .. code-block:: bash salt '*' schedule.modify job1 function='test.ping' seconds=3600 ''' ret = {'comment': '', 'changes': {}, 'result': True} time_conflict = False for item in ['seconds', 'minutes', 'hours', 'days']: if item in kwargs and 'when' in kwargs: time_conflict = True if item in kwargs and 'cron' in kwargs: time_conflict = True if time_conflict: ret['result'] = False ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.' return ret if 'when' in kwargs and 'cron' in kwargs: ret['result'] = False ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.' return ret current_schedule = list_(show_all=True, return_yaml=False) if name not in current_schedule: ret['comment'] = 'Job {0} does not exist in schedule.'.format(name) ret['result'] = False return ret _current = current_schedule[name] if '_seconds' in _current: _current['seconds'] = _current['_seconds'] del _current['_seconds'] _new = build_schedule_item(name, **kwargs) if 'result' in _new and not _new['result']: return _new if _new == _current: ret['comment'] = 'Job {0} in correct state'.format(name) return ret _current_lines = ['{0}:{1}\n'.format(key, value) for (key, value) in sorted(_current.items())] _new_lines = ['{0}:{1}\n'.format(key, value) for (key, value) in sorted(_new.items())] _diff = difflib.unified_diff(_current_lines, _new_lines) ret['changes']['diff'] = ''.join(_diff) if 'test' in kwargs and kwargs['test']: ret['comment'] = 'Job: {0} would be modified in schedule.'.format(name) else: persist = True if 'persist' in kwargs: persist = kwargs['persist'] if name in list_(show_all=True, where='opts', return_yaml=False): event_data = {'name': name, 'schedule': _new, 'func': 'modify', 'persist': persist} elif name in list_(show_all=True, where='pillar', return_yaml=False): event_data = {'name': name, 'schedule': _new, 'where': 'pillar', 'func': 'modify', 'persist': False} out = __salt__['event.fire'](event_data, 'manage_schedule') if out: ret['comment'] = 'Modified job: {0} in schedule.'.format(name) else: ret['comment'] = 'Failed to modify job {0} in schedule.'.format(name) ret['result'] = False return ret
python
def modify(name, **kwargs): ''' Modify an existing job in the schedule CLI Example: .. code-block:: bash salt '*' schedule.modify job1 function='test.ping' seconds=3600 ''' ret = {'comment': '', 'changes': {}, 'result': True} time_conflict = False for item in ['seconds', 'minutes', 'hours', 'days']: if item in kwargs and 'when' in kwargs: time_conflict = True if item in kwargs and 'cron' in kwargs: time_conflict = True if time_conflict: ret['result'] = False ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.' return ret if 'when' in kwargs and 'cron' in kwargs: ret['result'] = False ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.' return ret current_schedule = list_(show_all=True, return_yaml=False) if name not in current_schedule: ret['comment'] = 'Job {0} does not exist in schedule.'.format(name) ret['result'] = False return ret _current = current_schedule[name] if '_seconds' in _current: _current['seconds'] = _current['_seconds'] del _current['_seconds'] _new = build_schedule_item(name, **kwargs) if 'result' in _new and not _new['result']: return _new if _new == _current: ret['comment'] = 'Job {0} in correct state'.format(name) return ret _current_lines = ['{0}:{1}\n'.format(key, value) for (key, value) in sorted(_current.items())] _new_lines = ['{0}:{1}\n'.format(key, value) for (key, value) in sorted(_new.items())] _diff = difflib.unified_diff(_current_lines, _new_lines) ret['changes']['diff'] = ''.join(_diff) if 'test' in kwargs and kwargs['test']: ret['comment'] = 'Job: {0} would be modified in schedule.'.format(name) else: persist = True if 'persist' in kwargs: persist = kwargs['persist'] if name in list_(show_all=True, where='opts', return_yaml=False): event_data = {'name': name, 'schedule': _new, 'func': 'modify', 'persist': persist} elif name in list_(show_all=True, where='pillar', return_yaml=False): event_data = {'name': name, 'schedule': _new, 'where': 'pillar', 'func': 'modify', 'persist': False} out = __salt__['event.fire'](event_data, 'manage_schedule') if out: ret['comment'] = 'Modified job: {0} in schedule.'.format(name) else: ret['comment'] = 'Failed to modify job {0} in schedule.'.format(name) ret['result'] = False return ret
['def', 'modify', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', '{', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'True', '}', 'time_conflict', '=', 'False', 'for', 'item', 'in', '[', "'seconds'", ',', "'minutes'", ',', "'hours'", ',', "'days'", ']', ':', 'if', 'item', 'in', 'kwargs', 'and', "'when'", 'in', 'kwargs', ':', 'time_conflict', '=', 'True', 'if', 'item', 'in', 'kwargs', 'and', "'cron'", 'in', 'kwargs', ':', 'time_conflict', '=', 'True', 'if', 'time_conflict', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', '\'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.\'', 'return', 'ret', 'if', "'when'", 'in', 'kwargs', 'and', "'cron'", 'in', 'kwargs', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', '\'Unable to use "when" and "cron" options together. Ignoring.\'', 'return', 'ret', 'current_schedule', '=', 'list_', '(', 'show_all', '=', 'True', ',', 'return_yaml', '=', 'False', ')', 'if', 'name', 'not', 'in', 'current_schedule', ':', 'ret', '[', "'comment'", ']', '=', "'Job {0} does not exist in schedule.'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'False', 'return', 'ret', '_current', '=', 'current_schedule', '[', 'name', ']', 'if', "'_seconds'", 'in', '_current', ':', '_current', '[', "'seconds'", ']', '=', '_current', '[', "'_seconds'", ']', 'del', '_current', '[', "'_seconds'", ']', '_new', '=', 'build_schedule_item', '(', 'name', ',', '*', '*', 'kwargs', ')', 'if', "'result'", 'in', '_new', 'and', 'not', '_new', '[', "'result'", ']', ':', 'return', '_new', 'if', '_new', '==', '_current', ':', 'ret', '[', "'comment'", ']', '=', "'Job {0} in correct state'", '.', 'format', '(', 'name', ')', 'return', 'ret', '_current_lines', '=', '[', "'{0}:{1}\\n'", '.', 'format', '(', 'key', ',', 'value', ')', 'for', '(', 'key', ',', 'value', ')', 'in', 'sorted', '(', '_current', '.', 'items', '(', ')', ')', ']', '_new_lines', '=', '[', "'{0}:{1}\\n'", '.', 'format', '(', 'key', ',', 'value', ')', 'for', '(', 'key', ',', 'value', ')', 'in', 'sorted', '(', '_new', '.', 'items', '(', ')', ')', ']', '_diff', '=', 'difflib', '.', 'unified_diff', '(', '_current_lines', ',', '_new_lines', ')', 'ret', '[', "'changes'", ']', '[', "'diff'", ']', '=', "''", '.', 'join', '(', '_diff', ')', 'if', "'test'", 'in', 'kwargs', 'and', 'kwargs', '[', "'test'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'Job: {0} would be modified in schedule.'", '.', 'format', '(', 'name', ')', 'else', ':', 'persist', '=', 'True', 'if', "'persist'", 'in', 'kwargs', ':', 'persist', '=', 'kwargs', '[', "'persist'", ']', 'if', 'name', 'in', 'list_', '(', 'show_all', '=', 'True', ',', 'where', '=', "'opts'", ',', 'return_yaml', '=', 'False', ')', ':', 'event_data', '=', '{', "'name'", ':', 'name', ',', "'schedule'", ':', '_new', ',', "'func'", ':', "'modify'", ',', "'persist'", ':', 'persist', '}', 'elif', 'name', 'in', 'list_', '(', 'show_all', '=', 'True', ',', 'where', '=', "'pillar'", ',', 'return_yaml', '=', 'False', ')', ':', 'event_data', '=', '{', "'name'", ':', 'name', ',', "'schedule'", ':', '_new', ',', "'where'", ':', "'pillar'", ',', "'func'", ':', "'modify'", ',', "'persist'", ':', 'False', '}', 'out', '=', '__salt__', '[', "'event.fire'", ']', '(', 'event_data', ',', "'manage_schedule'", ')', 'if', 'out', ':', 'ret', '[', "'comment'", ']', '=', "'Modified job: {0} in schedule.'", '.', 'format', '(', 'name', ')', 'else', ':', 'ret', '[', "'comment'", 
']', '=', "'Failed to modify job {0} in schedule.'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'False', 'return', 'ret']
Modify an existing job in the schedule CLI Example: .. code-block:: bash salt '*' schedule.modify job1 function='test.ping' seconds=3600
['Modify', 'an', 'existing', 'job', 'in', 'the', 'schedule']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/schedule.py#L471-L556
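A hedged sketch of calling the modify function above from another Salt execution module; the __salt__ mapping is injected by Salt at runtime, and the job name and values are placeholders.

# dry run first: with test=True only the unified diff is reported, nothing is fired
ret = __salt__['schedule.modify']('job1', function='test.ping', seconds=3600, test=True)
print(ret['changes'].get('diff', ''))
if ret['result']:
    __salt__['schedule.modify']('job1', function='test.ping', seconds=3600)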
2,222
neo4j-drivers/neobolt
neobolt/impl/python/direct.py
AbstractConnectionPool.close
def close(self): """ Close all connections and empty the pool. This method is thread safe. """ if self._closed: return try: with self.lock: if not self._closed: self._closed = True for address in list(self.connections): self.remove(address) except TypeError as e: pass
python
def close(self): """ Close all connections and empty the pool. This method is thread safe. """ if self._closed: return try: with self.lock: if not self._closed: self._closed = True for address in list(self.connections): self.remove(address) except TypeError as e: pass
['def', 'close', '(', 'self', ')', ':', 'if', 'self', '.', '_closed', ':', 'return', 'try', ':', 'with', 'self', '.', 'lock', ':', 'if', 'not', 'self', '.', '_closed', ':', 'self', '.', '_closed', '=', 'True', 'for', 'address', 'in', 'list', '(', 'self', '.', 'connections', ')', ':', 'self', '.', 'remove', '(', 'address', ')', 'except', 'TypeError', 'as', 'e', ':', 'pass']
Close all connections and empty the pool. This method is thread safe.
['Close', 'all', 'connections', 'and', 'empty', 'the', 'pool', '.', 'This', 'method', 'is', 'thread', 'safe', '.']
train
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/direct.py#L600-L613
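A minimal shutdown sketch for the close() row above; `pool` is assumed to be an instance of a concrete connection-pool subclass created elsewhere.

try:
    pass  # acquire connections from the pool and run queries here
finally:
    pool.close()   # thread safe, and a second call returns immediately via the _closed flag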
2,223
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
ThumbnailScrollBar.setup_scrollarea
def setup_scrollarea(self): """Setup the scrollarea that will contain the FigureThumbnails.""" self.view = QWidget() self.scene = QGridLayout(self.view) self.scene.setColumnStretch(0, 100) self.scene.setColumnStretch(2, 100) self.scrollarea = QScrollArea() self.scrollarea.setWidget(self.view) self.scrollarea.setWidgetResizable(True) self.scrollarea.setFrameStyle(0) self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)) # Set the vertical scrollbar explicitely : # This is required to avoid a "RuntimeError: no access to protected # functions or signals for objects not created from Python" in Linux. self.scrollarea.setVerticalScrollBar(QScrollBar()) return self.scrollarea
python
def setup_scrollarea(self): """Setup the scrollarea that will contain the FigureThumbnails.""" self.view = QWidget() self.scene = QGridLayout(self.view) self.scene.setColumnStretch(0, 100) self.scene.setColumnStretch(2, 100) self.scrollarea = QScrollArea() self.scrollarea.setWidget(self.view) self.scrollarea.setWidgetResizable(True) self.scrollarea.setFrameStyle(0) self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)) # Set the vertical scrollbar explicitely : # This is required to avoid a "RuntimeError: no access to protected # functions or signals for objects not created from Python" in Linux. self.scrollarea.setVerticalScrollBar(QScrollBar()) return self.scrollarea
['def', 'setup_scrollarea', '(', 'self', ')', ':', 'self', '.', 'view', '=', 'QWidget', '(', ')', 'self', '.', 'scene', '=', 'QGridLayout', '(', 'self', '.', 'view', ')', 'self', '.', 'scene', '.', 'setColumnStretch', '(', '0', ',', '100', ')', 'self', '.', 'scene', '.', 'setColumnStretch', '(', '2', ',', '100', ')', 'self', '.', 'scrollarea', '=', 'QScrollArea', '(', ')', 'self', '.', 'scrollarea', '.', 'setWidget', '(', 'self', '.', 'view', ')', 'self', '.', 'scrollarea', '.', 'setWidgetResizable', '(', 'True', ')', 'self', '.', 'scrollarea', '.', 'setFrameStyle', '(', '0', ')', 'self', '.', 'scrollarea', '.', 'setVerticalScrollBarPolicy', '(', 'Qt', '.', 'ScrollBarAlwaysOff', ')', 'self', '.', 'scrollarea', '.', 'setHorizontalScrollBarPolicy', '(', 'Qt', '.', 'ScrollBarAlwaysOff', ')', 'self', '.', 'scrollarea', '.', 'setSizePolicy', '(', 'QSizePolicy', '(', 'QSizePolicy', '.', 'Ignored', ',', 'QSizePolicy', '.', 'Preferred', ')', ')', '# Set the vertical scrollbar explicitely :', '# This is required to avoid a "RuntimeError: no access to protected', '# functions or signals for objects not created from Python" in Linux.', 'self', '.', 'scrollarea', '.', 'setVerticalScrollBar', '(', 'QScrollBar', '(', ')', ')', 'return', 'self', '.', 'scrollarea']
Setup the scrollarea that will contain the FigureThumbnails.
['Setup', 'the', 'scrollarea', 'that', 'will', 'contain', 'the', 'FigureThumbnails', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L517-L539
2,224
jaraco/path.py
path/__init__.py
Path.glob
def glob(self, pattern): """ Return a list of Path objects that match the pattern. `pattern` - a path relative to this directory, with wildcards. For example, ``Path('/users').glob('*/bin/*')`` returns a list of all the files users have in their :file:`bin` directories. .. seealso:: :func:`glob.glob` .. note:: Glob is **not** recursive, even when using ``**``. To do recursive globbing see :func:`walk`, :func:`walkdirs` or :func:`walkfiles`. """ cls = self._next_class return [cls(s) for s in glob.glob(self / pattern)]
python
def glob(self, pattern): """ Return a list of Path objects that match the pattern. `pattern` - a path relative to this directory, with wildcards. For example, ``Path('/users').glob('*/bin/*')`` returns a list of all the files users have in their :file:`bin` directories. .. seealso:: :func:`glob.glob` .. note:: Glob is **not** recursive, even when using ``**``. To do recursive globbing see :func:`walk`, :func:`walkdirs` or :func:`walkfiles`. """ cls = self._next_class return [cls(s) for s in glob.glob(self / pattern)]
['def', 'glob', '(', 'self', ',', 'pattern', ')', ':', 'cls', '=', 'self', '.', '_next_class', 'return', '[', 'cls', '(', 's', ')', 'for', 's', 'in', 'glob', '.', 'glob', '(', 'self', '/', 'pattern', ')', ']']
Return a list of Path objects that match the pattern. `pattern` - a path relative to this directory, with wildcards. For example, ``Path('/users').glob('*/bin/*')`` returns a list of all the files users have in their :file:`bin` directories. .. seealso:: :func:`glob.glob` .. note:: Glob is **not** recursive, even when using ``**``. To do recursive globbing see :func:`walk`, :func:`walkdirs` or :func:`walkfiles`.
['Return', 'a', 'list', 'of', 'Path', 'objects', 'that', 'match', 'the', 'pattern', '.']
train
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L616-L631
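A small runnable example of the non-recursive glob above, assuming a release of the path.py package that exposes Path from the path module.

import tempfile
from path import Path

d = Path(tempfile.mkdtemp())
(d / 'a.txt').touch()
(d / 'b.log').touch()
print(d.glob('*.txt'))   # only the .txt file is returned; '**' would not recurse either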
2,225
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
brocade_fcoe_ext.fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name
def fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_login = ET.Element("fcoe_get_login") config = fcoe_get_login output = ET.SubElement(fcoe_get_login, "output") fcoe_login_list = ET.SubElement(output, "fcoe-login-list") fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac") fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac') fcoe_login_fcoe_interface_name = ET.SubElement(fcoe_login_list, "fcoe-login-fcoe-interface-name") fcoe_login_fcoe_interface_name.text = kwargs.pop('fcoe_login_fcoe_interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_login = ET.Element("fcoe_get_login") config = fcoe_get_login output = ET.SubElement(fcoe_get_login, "output") fcoe_login_list = ET.SubElement(output, "fcoe-login-list") fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac") fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac') fcoe_login_fcoe_interface_name = ET.SubElement(fcoe_login_list, "fcoe-login-fcoe-interface-name") fcoe_login_fcoe_interface_name.text = kwargs.pop('fcoe_login_fcoe_interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'fcoe_get_login', '=', 'ET', '.', 'Element', '(', '"fcoe_get_login"', ')', 'config', '=', 'fcoe_get_login', 'output', '=', 'ET', '.', 'SubElement', '(', 'fcoe_get_login', ',', '"output"', ')', 'fcoe_login_list', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"fcoe-login-list"', ')', 'fcoe_login_session_mac_key', '=', 'ET', '.', 'SubElement', '(', 'fcoe_login_list', ',', '"fcoe-login-session-mac"', ')', 'fcoe_login_session_mac_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'fcoe_login_session_mac'", ')', 'fcoe_login_fcoe_interface_name', '=', 'ET', '.', 'SubElement', '(', 'fcoe_login_list', ',', '"fcoe-login-fcoe-interface-name"', ')', 'fcoe_login_fcoe_interface_name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'fcoe_login_fcoe_interface_name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L640-L654
2,226
openvax/datacache
datacache/download.py
fetch_and_transform
def fetch_and_transform( transformed_filename, transformer, loader, source_filename, source_url, subdir=None): """ Fetch a remote file from `source_url`, save it locally as `source_filename` and then use the `loader` and `transformer` function arguments to turn this saved data into an in-memory object. """ transformed_path = build_path(transformed_filename, subdir) if not os.path.exists(transformed_path): source_path = fetch_file(source_url, source_filename, subdir) logger.info("Generating data file %s from %s", transformed_path, source_path) result = transformer(source_path, transformed_path) else: logger.info("Cached data file: %s", transformed_path) result = loader(transformed_path) assert os.path.exists(transformed_path) return result
python
def fetch_and_transform( transformed_filename, transformer, loader, source_filename, source_url, subdir=None): """ Fetch a remote file from `source_url`, save it locally as `source_filename` and then use the `loader` and `transformer` function arguments to turn this saved data into an in-memory object. """ transformed_path = build_path(transformed_filename, subdir) if not os.path.exists(transformed_path): source_path = fetch_file(source_url, source_filename, subdir) logger.info("Generating data file %s from %s", transformed_path, source_path) result = transformer(source_path, transformed_path) else: logger.info("Cached data file: %s", transformed_path) result = loader(transformed_path) assert os.path.exists(transformed_path) return result
['def', 'fetch_and_transform', '(', 'transformed_filename', ',', 'transformer', ',', 'loader', ',', 'source_filename', ',', 'source_url', ',', 'subdir', '=', 'None', ')', ':', 'transformed_path', '=', 'build_path', '(', 'transformed_filename', ',', 'subdir', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'transformed_path', ')', ':', 'source_path', '=', 'fetch_file', '(', 'source_url', ',', 'source_filename', ',', 'subdir', ')', 'logger', '.', 'info', '(', '"Generating data file %s from %s"', ',', 'transformed_path', ',', 'source_path', ')', 'result', '=', 'transformer', '(', 'source_path', ',', 'transformed_path', ')', 'else', ':', 'logger', '.', 'info', '(', '"Cached data file: %s"', ',', 'transformed_path', ')', 'result', '=', 'loader', '(', 'transformed_path', ')', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'transformed_path', ')', 'return', 'result']
Fetch a remote file from `source_url`, save it locally as `source_filename` and then use the `loader` and `transformer` function arguments to turn this saved data into an in-memory object.
['Fetch', 'a', 'remote', 'file', 'from', 'source_url', 'save', 'it', 'locally', 'as', 'source_filename', 'and', 'then', 'use', 'the', 'loader', 'and', 'transformer', 'function', 'arguments', 'to', 'turn', 'this', 'saved', 'data', 'into', 'an', 'in', '-', 'memory', 'object', '.']
train
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/download.py#L217-L238
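A hedged sketch of wiring a transformer/loader pair into fetch_and_transform; the URL, filenames and subdir are placeholders, not values from the row.

import gzip
import shutil

def load(path):
    # loader: used when the transformed file is already cached on disk
    with gzip.open(path, 'rb') as f:
        return f.read()

def compress(source_path, dest_path):
    # transformer: must write dest_path and return the in-memory result
    with open(source_path, 'rb') as src, gzip.open(dest_path, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    return load(dest_path)

data = fetch_and_transform(
    transformed_filename='genes.txt.gz',
    transformer=compress,
    loader=load,
    source_filename='genes.txt',
    source_url='https://example.org/genes.txt',   # placeholder URL
    subdir='demo')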
2,227
Yelp/detect-secrets
detect_secrets/core/log.py
get_logger
def get_logger(name=None, format_string=None): """ :type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting """ logging.captureWarnings(True) log = logging.getLogger(name) # Bind custom method to instance. # Source: https://stackoverflow.com/a/2982 log.set_debug_level = _set_debug_level.__get__(log) log.set_debug_level(0) if not format_string: format_string = '[%(module)s]\t%(levelname)s\t%(message)s' # Setting up log formats log.handlers = [] handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter(format_string), ) log.addHandler(handler) return log
python
def get_logger(name=None, format_string=None): """ :type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting """ logging.captureWarnings(True) log = logging.getLogger(name) # Bind custom method to instance. # Source: https://stackoverflow.com/a/2982 log.set_debug_level = _set_debug_level.__get__(log) log.set_debug_level(0) if not format_string: format_string = '[%(module)s]\t%(levelname)s\t%(message)s' # Setting up log formats log.handlers = [] handler = logging.StreamHandler(sys.stderr) handler.setFormatter( logging.Formatter(format_string), ) log.addHandler(handler) return log
['def', 'get_logger', '(', 'name', '=', 'None', ',', 'format_string', '=', 'None', ')', ':', 'logging', '.', 'captureWarnings', '(', 'True', ')', 'log', '=', 'logging', '.', 'getLogger', '(', 'name', ')', '# Bind custom method to instance.', '# Source: https://stackoverflow.com/a/2982', 'log', '.', 'set_debug_level', '=', '_set_debug_level', '.', '__get__', '(', 'log', ')', 'log', '.', 'set_debug_level', '(', '0', ')', 'if', 'not', 'format_string', ':', 'format_string', '=', "'[%(module)s]\\t%(levelname)s\\t%(message)s'", '# Setting up log formats', 'log', '.', 'handlers', '=', '[', ']', 'handler', '=', 'logging', '.', 'StreamHandler', '(', 'sys', '.', 'stderr', ')', 'handler', '.', 'setFormatter', '(', 'logging', '.', 'Formatter', '(', 'format_string', ')', ',', ')', 'log', '.', 'addHandler', '(', 'handler', ')', 'return', 'log']
:type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting
[':', 'type', 'name', ':', 'str', ':', 'param', 'name', ':', 'used', 'for', 'declaring', 'log', 'channels', '.']
train
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/log.py#L5-L32
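Typical per-module usage sketch for the logger above; the verbosity value is illustrative and the exact level mapping lives in _set_debug_level.

log = get_logger(__name__)    # one channel per module, formatted '[module]\tLEVEL\tmessage'
log.set_debug_level(1)        # method bound inside get_logger(); higher values mean more verbose output
log.warning('potential secret found in %s', 'config.yml')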
2,228
wbond/asn1crypto
asn1crypto/util.py
extended_datetime.utcoffset
def utcoffset(self): """ :return: None or a datetime.timedelta() of the offset from UTC """ if self.tzinfo is None: return None return self.tzinfo.utcoffset(self.replace(year=2000))
python
def utcoffset(self): """ :return: None or a datetime.timedelta() of the offset from UTC """ if self.tzinfo is None: return None return self.tzinfo.utcoffset(self.replace(year=2000))
['def', 'utcoffset', '(', 'self', ')', ':', 'if', 'self', '.', 'tzinfo', 'is', 'None', ':', 'return', 'None', 'return', 'self', '.', 'tzinfo', '.', 'utcoffset', '(', 'self', '.', 'replace', '(', 'year', '=', '2000', ')', ')']
:return: None or a datetime.timedelta() of the offset from UTC
[':', 'return', ':', 'None', 'or', 'a', 'datetime', '.', 'timedelta', '()', 'of', 'the', 'offset', 'from', 'UTC']
train
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/util.py#L473-L481
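Why the year is swapped before delegating: extended_datetime exists to represent year 0, which the standard datetime cannot hold, so a representable leap year (2000) is substituted; for the usual fixed-offset tzinfo objects the date does not change the answer. A self-contained check:

from datetime import datetime, timezone, timedelta

tz = timezone(timedelta(hours=5, minutes=30))
print(tz.utcoffset(datetime(2000, 1, 1)))   # 5:30:00, independent of the year passed in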
2,229
yeasy/hyperledger-py
hyperledger/ssladapter/ssl_match_hostname.py
_dnsname_match
def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False split_dn = dn.split(r'.') leftmost, remainder = split_dn[0], split_dn[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname)
python
def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False split_dn = dn.split(r'.') leftmost, remainder = split_dn[0], split_dn[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname)
['def', '_dnsname_match', '(', 'dn', ',', 'hostname', ',', 'max_wildcards', '=', '1', ')', ':', 'pats', '=', '[', ']', 'if', 'not', 'dn', ':', 'return', 'False', 'split_dn', '=', 'dn', '.', 'split', '(', "r'.'", ')', 'leftmost', ',', 'remainder', '=', 'split_dn', '[', '0', ']', ',', 'split_dn', '[', '1', ':', ']', 'wildcards', '=', 'leftmost', '.', 'count', '(', "'*'", ')', 'if', 'wildcards', '>', 'max_wildcards', ':', '# Issue #17980: avoid denials of service by refusing more', '# than one wildcard per fragment. A survey of established', '# policy among SSL implementations showed it to be a', '# reasonable choice.', 'raise', 'CertificateError', '(', '"too many wildcards in certificate DNS name: "', '+', 'repr', '(', 'dn', ')', ')', '# speed up common case w/o wildcards', 'if', 'not', 'wildcards', ':', 'return', 'dn', '.', 'lower', '(', ')', '==', 'hostname', '.', 'lower', '(', ')', '# RFC 6125, section 6.4.3, subitem 1.', '# The client SHOULD NOT attempt to match a presented identifier in which', '# the wildcard character comprises a label other than the left-most label.', 'if', 'leftmost', '==', "'*'", ':', "# When '*' is a fragment by itself, it matches a non-empty dotless", '# fragment.', 'pats', '.', 'append', '(', "'[^.]+'", ')', 'elif', 'leftmost', '.', 'startswith', '(', "'xn--'", ')', 'or', 'hostname', '.', 'startswith', '(', "'xn--'", ')', ':', '# RFC 6125, section 6.4.3, subitem 3.', '# The client SHOULD NOT attempt to match a presented identifier', '# where the wildcard character is embedded within an A-label or', '# U-label of an internationalized domain name.', 'pats', '.', 'append', '(', 're', '.', 'escape', '(', 'leftmost', ')', ')', 'else', ':', "# Otherwise, '*' matches any dotless string, e.g. www*", 'pats', '.', 'append', '(', 're', '.', 'escape', '(', 'leftmost', ')', '.', 'replace', '(', "r'\\*'", ',', "'[^.]*'", ')', ')', '# add the remaining fragments, ignore any wildcards', 'for', 'frag', 'in', 'remainder', ':', 'pats', '.', 'append', '(', 're', '.', 'escape', '(', 'frag', ')', ')', 'pat', '=', 're', '.', 'compile', '(', "r'\\A'", '+', "r'\\.'", '.', 'join', '(', 'pats', ')', '+', "r'\\Z'", ',', 're', '.', 'IGNORECASE', ')', 'return', 'pat', '.', 'match', '(', 'hostname', ')']
Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3
['Matching', 'according', 'to', 'RFC', '6125', 'section', '6', '.', '4', '.', '3']
train
https://github.com/yeasy/hyperledger-py/blob/f24e9cc409b50628b911950466786be6fe74f09f/hyperledger/ssladapter/ssl_match_hostname.py#L28-L75
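Illustrative checks of the RFC 6125 rules implemented above (the function returns a match object or a bool, so the results are wrapped in bool()).

print(bool(_dnsname_match('*.example.com', 'www.example.com')))    # True: left-most label wildcard
print(bool(_dnsname_match('*.example.com', 'a.b.example.com')))    # False: '*' never spans a dot
print(bool(_dnsname_match('www.example.com', 'WWW.EXAMPLE.COM')))  # True: comparison is case-insensitive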
2,230
getsentry/raven-python
raven/base.py
Client.captureQuery
def captureQuery(self, query, params=(), engine=None, **kwargs): """ Creates an event for a SQL query. >>> client.captureQuery('SELECT * FROM foo') """ return self.capture( 'raven.events.Query', query=query, params=params, engine=engine, **kwargs)
python
def captureQuery(self, query, params=(), engine=None, **kwargs): """ Creates an event for a SQL query. >>> client.captureQuery('SELECT * FROM foo') """ return self.capture( 'raven.events.Query', query=query, params=params, engine=engine, **kwargs)
['def', 'captureQuery', '(', 'self', ',', 'query', ',', 'params', '=', '(', ')', ',', 'engine', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'capture', '(', "'raven.events.Query'", ',', 'query', '=', 'query', ',', 'params', '=', 'params', ',', 'engine', '=', 'engine', ',', '*', '*', 'kwargs', ')']
Creates an event for a SQL query. >>> client.captureQuery('SELECT * FROM foo')
['Creates', 'an', 'event', 'for', 'a', 'SQL', 'query', '.']
train
https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/base.py#L892-L900
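A hedged sketch around the docstring's one-liner; the DSN below is a placeholder, not a real project key.

from raven import Client

client = Client('https://examplepublickey:examplesecret@sentry.example.com/1')  # placeholder DSN
client.captureQuery('SELECT * FROM foo WHERE id = %s', params=(42,), engine='postgresql')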
2,231
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
AristaDriver.create_segments
def create_segments(self, segments): """Enqueue segment creates""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE) self.provision_queue.put(s_res)
python
def create_segments(self, segments): """Enqueue segment creates""" for segment in segments: s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE) self.provision_queue.put(s_res)
['def', 'create_segments', '(', 'self', ',', 'segments', ')', ':', 'for', 'segment', 'in', 'segments', ':', 's_res', '=', 'MechResource', '(', 'segment', '[', "'id'", ']', ',', 'a_const', '.', 'SEGMENT_RESOURCE', ',', 'a_const', '.', 'CREATE', ')', 'self', '.', 'provision_queue', '.', 'put', '(', 's_res', ')']
Enqueue segment creates
['Enqueue', 'segment', 'creates']
train
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L103-L108
2,232
bitprophet/botox
botox/aws.py
AWS.get
def get(self, arg): """ Return instance object with given EC2 ID or nametag. """ try: reservations = self.get_all_instances(filters={'tag:Name': [arg]}) instance = reservations[0].instances[0] except IndexError: try: instance = self.get_all_instances([arg])[0].instances[0] except (_ResponseError, IndexError): # TODO: encapsulate actual exception for debugging err = "Can't find any instance with name or ID '%s'" % arg raise ValueError(err) return instance
python
def get(self, arg): """ Return instance object with given EC2 ID or nametag. """ try: reservations = self.get_all_instances(filters={'tag:Name': [arg]}) instance = reservations[0].instances[0] except IndexError: try: instance = self.get_all_instances([arg])[0].instances[0] except (_ResponseError, IndexError): # TODO: encapsulate actual exception for debugging err = "Can't find any instance with name or ID '%s'" % arg raise ValueError(err) return instance
['def', 'get', '(', 'self', ',', 'arg', ')', ':', 'try', ':', 'reservations', '=', 'self', '.', 'get_all_instances', '(', 'filters', '=', '{', "'tag:Name'", ':', '[', 'arg', ']', '}', ')', 'instance', '=', 'reservations', '[', '0', ']', '.', 'instances', '[', '0', ']', 'except', 'IndexError', ':', 'try', ':', 'instance', '=', 'self', '.', 'get_all_instances', '(', '[', 'arg', ']', ')', '[', '0', ']', '.', 'instances', '[', '0', ']', 'except', '(', '_ResponseError', ',', 'IndexError', ')', ':', '# TODO: encapsulate actual exception for debugging', 'err', '=', '"Can\'t find any instance with name or ID \'%s\'"', '%', 'arg', 'raise', 'ValueError', '(', 'err', ')', 'return', 'instance']
Return instance object with given EC2 ID or nametag.
['Return', 'instance', 'object', 'with', 'given', 'EC2', 'ID', 'or', 'nametag', '.']
train
https://github.com/bitprophet/botox/blob/02c887a28bd2638273548cc7d1e6d6f1d4d38bf9/botox/aws.py#L324-L338
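Usage sketch for the two lookup branches above; `aws` is assumed to be an AWS helper instance constructed elsewhere with valid credentials, and 'web1' is a placeholder Name tag.

instance = aws.get('web1')     # resolved through the tag:Name filter
same = aws.get(instance.id)    # the same instance resolved through the instance-ID fallback
print(instance.id, instance.state)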
2,233
persephone-tools/persephone
persephone/corpus.py
Corpus.indices_to_labels
def indices_to_labels(self, indices: Sequence[int]) -> List[str]: """ Converts a sequence of indices into their corresponding labels.""" return [(self.INDEX_TO_LABEL[index]) for index in indices]
python
def indices_to_labels(self, indices: Sequence[int]) -> List[str]: """ Converts a sequence of indices into their corresponding labels.""" return [(self.INDEX_TO_LABEL[index]) for index in indices]
['def', 'indices_to_labels', '(', 'self', ',', 'indices', ':', 'Sequence', '[', 'int', ']', ')', '->', 'List', '[', 'str', ']', ':', 'return', '[', '(', 'self', '.', 'INDEX_TO_LABEL', '[', 'index', ']', ')', 'for', 'index', 'in', 'indices', ']']
Converts a sequence of indices into their corresponding labels.
['Converts', 'a', 'sequence', 'of', 'indices', 'into', 'their', 'corresponding', 'labels', '.']
train
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L497-L500
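The method is a plain lookup through INDEX_TO_LABEL; a self-contained equivalent with an illustrative mapping:

INDEX_TO_LABEL = {1: 'a', 2: 'b', 3: 'c'}
print([INDEX_TO_LABEL[i] for i in [3, 1, 2]])   # ['c', 'a', 'b']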
2,234
tensorflow/datasets
tensorflow_datasets/image/cifar10_corrupted.py
Cifar10Corrupted._generate_examples
def _generate_examples(self, data_dir): """Generate corrupted Cifar10 test data. Apply corruptions to the raw images according to self.corruption_type. Args: data_dir: root directory of downloaded dataset Yields: dictionary with image file and label. """ corruption = self.builder_config.corruption severity = self.builder_config.severity images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption]) labels_file = os.path.join(data_dir, _LABELS_FILENAME) with tf.io.gfile.GFile(labels_file, mode='rb') as f: labels = np.load(f) num_images = labels.shape[0] // 5 # Labels are stacked 5 times so we can just read the first iteration labels = labels[:num_images] with tf.io.gfile.GFile(images_file, mode='rb') as f: images = np.load(f) # Slice images corresponding to correct severity level images = images[(severity - 1) * num_images:severity * num_images] for image, label in zip(images, labels): yield { 'image': image, 'label': label, }
python
def _generate_examples(self, data_dir): """Generate corrupted Cifar10 test data. Apply corruptions to the raw images according to self.corruption_type. Args: data_dir: root directory of downloaded dataset Yields: dictionary with image file and label. """ corruption = self.builder_config.corruption severity = self.builder_config.severity images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption]) labels_file = os.path.join(data_dir, _LABELS_FILENAME) with tf.io.gfile.GFile(labels_file, mode='rb') as f: labels = np.load(f) num_images = labels.shape[0] // 5 # Labels are stacked 5 times so we can just read the first iteration labels = labels[:num_images] with tf.io.gfile.GFile(images_file, mode='rb') as f: images = np.load(f) # Slice images corresponding to correct severity level images = images[(severity - 1) * num_images:severity * num_images] for image, label in zip(images, labels): yield { 'image': image, 'label': label, }
['def', '_generate_examples', '(', 'self', ',', 'data_dir', ')', ':', 'corruption', '=', 'self', '.', 'builder_config', '.', 'corruption', 'severity', '=', 'self', '.', 'builder_config', '.', 'severity', 'images_file', '=', 'os', '.', 'path', '.', 'join', '(', 'data_dir', ',', '_CORRUPTIONS_TO_FILENAMES', '[', 'corruption', ']', ')', 'labels_file', '=', 'os', '.', 'path', '.', 'join', '(', 'data_dir', ',', '_LABELS_FILENAME', ')', 'with', 'tf', '.', 'io', '.', 'gfile', '.', 'GFile', '(', 'labels_file', ',', 'mode', '=', "'rb'", ')', 'as', 'f', ':', 'labels', '=', 'np', '.', 'load', '(', 'f', ')', 'num_images', '=', 'labels', '.', 'shape', '[', '0', ']', '//', '5', '# Labels are stacked 5 times so we can just read the first iteration', 'labels', '=', 'labels', '[', ':', 'num_images', ']', 'with', 'tf', '.', 'io', '.', 'gfile', '.', 'GFile', '(', 'images_file', ',', 'mode', '=', "'rb'", ')', 'as', 'f', ':', 'images', '=', 'np', '.', 'load', '(', 'f', ')', '# Slice images corresponding to correct severity level', 'images', '=', 'images', '[', '(', 'severity', '-', '1', ')', '*', 'num_images', ':', 'severity', '*', 'num_images', ']', 'for', 'image', ',', 'label', 'in', 'zip', '(', 'images', ',', 'labels', ')', ':', 'yield', '{', "'image'", ':', 'image', ',', "'label'", ':', 'label', ',', '}']
Generate corrupted Cifar10 test data. Apply corruptions to the raw images according to self.corruption_type. Args: data_dir: root directory of downloaded dataset Yields: dictionary with image file and label.
['Generate', 'corrupted', 'Cifar10', 'test', 'data', '.']
train
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L155-L189
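A self-contained check of the severity slicing above: each corruption file stacks the 10000-image test set five times, one contiguous block per severity level 1 to 5.

import numpy as np

labels = np.tile(np.arange(10), 5000)     # stand-in for the stacked label array (50000 entries)
num_images = labels.shape[0] // 5         # 10000
severity = 3
start, stop = (severity - 1) * num_images, severity * num_images
print(start, stop)                        # 20000 30000, the block holding the severity-3 images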
2,235
bwohlberg/sporco
sporco/admm/parcbpdn.py
par_relax_AX
def par_relax_AX(i): """Parallel implementation of relaxation if option ``RelaxParam`` != 1.0. """ global mp_X global mp_Xnr global mp_DX global mp_DXnr mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]] mp_DXnr[i] = mp_DX[i] if mp_rlx != 1.0: grpind = slice(mp_grp[i], mp_grp[i+1]) mp_X[grpind] = mp_rlx * mp_X[grpind] + (1-mp_rlx)*mp_Y1[grpind] mp_DX[i] = mp_rlx*mp_DX[i] + (1-mp_rlx)*mp_Y0[i]
python
def par_relax_AX(i): """Parallel implementation of relaxation if option ``RelaxParam`` != 1.0. """ global mp_X global mp_Xnr global mp_DX global mp_DXnr mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]] mp_DXnr[i] = mp_DX[i] if mp_rlx != 1.0: grpind = slice(mp_grp[i], mp_grp[i+1]) mp_X[grpind] = mp_rlx * mp_X[grpind] + (1-mp_rlx)*mp_Y1[grpind] mp_DX[i] = mp_rlx*mp_DX[i] + (1-mp_rlx)*mp_Y0[i]
['def', 'par_relax_AX', '(', 'i', ')', ':', 'global', 'mp_X', 'global', 'mp_Xnr', 'global', 'mp_DX', 'global', 'mp_DXnr', 'mp_Xnr', '[', 'mp_grp', '[', 'i', ']', ':', 'mp_grp', '[', 'i', '+', '1', ']', ']', '=', 'mp_X', '[', 'mp_grp', '[', 'i', ']', ':', 'mp_grp', '[', 'i', '+', '1', ']', ']', 'mp_DXnr', '[', 'i', ']', '=', 'mp_DX', '[', 'i', ']', 'if', 'mp_rlx', '!=', '1.0', ':', 'grpind', '=', 'slice', '(', 'mp_grp', '[', 'i', ']', ',', 'mp_grp', '[', 'i', '+', '1', ']', ')', 'mp_X', '[', 'grpind', ']', '=', 'mp_rlx', '*', 'mp_X', '[', 'grpind', ']', '+', '(', '1', '-', 'mp_rlx', ')', '*', 'mp_Y1', '[', 'grpind', ']', 'mp_DX', '[', 'i', ']', '=', 'mp_rlx', '*', 'mp_DX', '[', 'i', ']', '+', '(', '1', '-', 'mp_rlx', ')', '*', 'mp_Y0', '[', 'i', ']']
Parallel implementation of relaxation if option ``RelaxParam`` != 1.0.
['Parallel', 'implementation', 'of', 'relaxation', 'if', 'option', 'RelaxParam', '!', '=', '1', '.', '0', '.']
train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L163-L177
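The update above is the usual ADMM over/under-relaxation, x <- rho*x + (1 - rho)*y, applied to both the coefficient block and its dictionary product; a scalar check of the arithmetic:

rho, x, y = 1.8, 4.0, 2.0
print(rho * x + (1 - rho) * y)   # 5.6: with rho > 1 the result overshoots x, moving away from y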
2,236
log2timeline/plaso
plaso/engine/worker.py
EventExtractionWorker._ExtractMetadataFromFileEntry
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream): """Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream. """ # Do not extract metadata from the root file entry when it is virtual. if file_entry.IsRoot() and file_entry.type_indicator not in ( self._TYPES_WITH_ROOT_METADATA): return # We always want to extract the file entry metadata but we only want # to parse it once per file entry, so we only use it if we are # processing the default data stream of regular files. if data_stream and not data_stream.IsDefault(): return display_name = mediator.GetDisplayName() logger.debug( '[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format( display_name)) self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING if self._processing_profiler: self._processing_profiler.StartTiming('extracting') self._event_extractor.ParseFileEntryMetadata(mediator, file_entry) if self._processing_profiler: self._processing_profiler.StopTiming('extracting') self.processing_status = definitions.STATUS_INDICATOR_RUNNING
python
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream): """Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream. """ # Do not extract metadata from the root file entry when it is virtual. if file_entry.IsRoot() and file_entry.type_indicator not in ( self._TYPES_WITH_ROOT_METADATA): return # We always want to extract the file entry metadata but we only want # to parse it once per file entry, so we only use it if we are # processing the default data stream of regular files. if data_stream and not data_stream.IsDefault(): return display_name = mediator.GetDisplayName() logger.debug( '[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format( display_name)) self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING if self._processing_profiler: self._processing_profiler.StartTiming('extracting') self._event_extractor.ParseFileEntryMetadata(mediator, file_entry) if self._processing_profiler: self._processing_profiler.StopTiming('extracting') self.processing_status = definitions.STATUS_INDICATOR_RUNNING
['def', '_ExtractMetadataFromFileEntry', '(', 'self', ',', 'mediator', ',', 'file_entry', ',', 'data_stream', ')', ':', '# Do not extract metadata from the root file entry when it is virtual.', 'if', 'file_entry', '.', 'IsRoot', '(', ')', 'and', 'file_entry', '.', 'type_indicator', 'not', 'in', '(', 'self', '.', '_TYPES_WITH_ROOT_METADATA', ')', ':', 'return', '# We always want to extract the file entry metadata but we only want', '# to parse it once per file entry, so we only use it if we are', '# processing the default data stream of regular files.', 'if', 'data_stream', 'and', 'not', 'data_stream', '.', 'IsDefault', '(', ')', ':', 'return', 'display_name', '=', 'mediator', '.', 'GetDisplayName', '(', ')', 'logger', '.', 'debug', '(', "'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'", '.', 'format', '(', 'display_name', ')', ')', 'self', '.', 'processing_status', '=', 'definitions', '.', 'STATUS_INDICATOR_EXTRACTING', 'if', 'self', '.', '_processing_profiler', ':', 'self', '.', '_processing_profiler', '.', 'StartTiming', '(', "'extracting'", ')', 'self', '.', '_event_extractor', '.', 'ParseFileEntryMetadata', '(', 'mediator', ',', 'file_entry', ')', 'if', 'self', '.', '_processing_profiler', ':', 'self', '.', '_processing_profiler', '.', 'StopTiming', '(', "'extracting'", ')', 'self', '.', 'processing_status', '=', 'definitions', '.', 'STATUS_INDICATOR_RUNNING']
Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
['Extracts', 'metadata', 'from', 'a', 'file', 'entry', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/worker.py#L338-L374
2,237
ktbyers/netmiko
netmiko/citrix/netscaler_ssh.py
NetscalerSSH.strip_prompt
def strip_prompt(self, a_string): """ Strip 'Done' from command output """ output = super(NetscalerSSH, self).strip_prompt(a_string) lines = output.split(self.RESPONSE_RETURN) if "Done" in lines[-1]: return self.RESPONSE_RETURN.join(lines[:-1]) else: return output
python
def strip_prompt(self, a_string): """ Strip 'Done' from command output """ output = super(NetscalerSSH, self).strip_prompt(a_string) lines = output.split(self.RESPONSE_RETURN) if "Done" in lines[-1]: return self.RESPONSE_RETURN.join(lines[:-1]) else: return output
['def', 'strip_prompt', '(', 'self', ',', 'a_string', ')', ':', 'output', '=', 'super', '(', 'NetscalerSSH', ',', 'self', ')', '.', 'strip_prompt', '(', 'a_string', ')', 'lines', '=', 'output', '.', 'split', '(', 'self', '.', 'RESPONSE_RETURN', ')', 'if', '"Done"', 'in', 'lines', '[', '-', '1', ']', ':', 'return', 'self', '.', 'RESPONSE_RETURN', '.', 'join', '(', 'lines', '[', ':', '-', '1', ']', ')', 'else', ':', 'return', 'output']
Strip 'Done' from command output
['Strip', 'Done', 'from', 'command', 'output']
train
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/citrix/netscaler_ssh.py#L53-L60
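A self-contained illustration of the trailing-status removal; in Netmiko RESPONSE_RETURN is the newline character.

raw = "192.168.1.10\n10.0.0.1\n Done"
lines = raw.split("\n")
cleaned = "\n".join(lines[:-1]) if "Done" in lines[-1] else raw
print(cleaned)   # the NetScaler ' Done' status line is dropped, the data lines are kept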
2,238
tompollard/tableone
tableone.py
TableOne._create_significance_table
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
python
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
['def', '_create_significance_table', '(', 'self', ',', 'data', ')', ':', '# list features of the variable e.g. matched, paired, n_expected', 'df', '=', 'pd', '.', 'DataFrame', '(', 'index', '=', 'self', '.', '_continuous', '+', 'self', '.', '_categorical', ',', 'columns', '=', '[', "'continuous'", ',', "'nonnormal'", ',', "'min_observed'", ',', "'pval'", ',', "'ptest'", ']', ')', 'df', '.', 'index', '.', 'rename', '(', "'variable'", ',', 'inplace', '=', 'True', ')', 'df', '[', "'continuous'", ']', '=', 'np', '.', 'where', '(', 'df', '.', 'index', '.', 'isin', '(', 'self', '.', '_continuous', ')', ',', 'True', ',', 'False', ')', 'df', '[', "'nonnormal'", ']', '=', 'np', '.', 'where', '(', 'df', '.', 'index', '.', 'isin', '(', 'self', '.', '_nonnormal', ')', ',', 'True', ',', 'False', ')', '# list values for each variable, grouped by groupby levels', 'for', 'v', 'in', 'df', '.', 'index', ':', 'is_continuous', '=', 'df', '.', 'loc', '[', 'v', ']', '[', "'continuous'", ']', 'is_categorical', '=', '~', 'df', '.', 'loc', '[', 'v', ']', '[', "'continuous'", ']', 'is_normal', '=', '~', 'df', '.', 'loc', '[', 'v', ']', '[', "'nonnormal'", ']', '# if continuous, group data into list of lists', 'if', 'is_continuous', ':', 'catlevels', '=', 'None', 'grouped_data', '=', '[', ']', 'for', 's', 'in', 'self', '.', '_groupbylvls', ':', 'lvl_data', '=', 'data', '.', 'loc', '[', 'data', '[', 'self', '.', '_groupby', ']', '==', 's', ',', 'v', ']', '# coerce to numeric and drop non-numeric data', 'lvl_data', '=', 'lvl_data', '.', 'apply', '(', 'pd', '.', 'to_numeric', ',', 'errors', '=', "'coerce'", ')', '.', 'dropna', '(', ')', '# append to overall group data', 'grouped_data', '.', 'append', '(', 'lvl_data', '.', 'values', ')', 'min_observed', '=', 'len', '(', 'min', '(', 'grouped_data', ',', 'key', '=', 'len', ')', ')', '# if categorical, create contingency table', 'elif', 'is_categorical', ':', 'catlevels', '=', 'sorted', '(', 'data', '[', 'v', ']', '.', 'astype', '(', "'category'", ')', '.', 'cat', '.', 'categories', ')', 'grouped_data', '=', 'pd', '.', 'crosstab', '(', 'data', '[', 'self', '.', '_groupby', ']', '.', 'rename', '(', "'_groupby_var_'", ')', ',', 'data', '[', 'v', ']', ')', 'min_observed', '=', 'grouped_data', '.', 'sum', '(', 'axis', '=', '1', ')', '.', 'min', '(', ')', '# minimum number of observations across all levels', 'df', '.', 'loc', '[', 'v', ',', "'min_observed'", ']', '=', 'min_observed', '# compute pvalues', 'df', '.', 'loc', '[', 'v', ',', "'pval'", ']', ',', 'df', '.', 'loc', '[', 'v', ',', "'ptest'", ']', '=', 'self', '.', '_p_test', '(', 'v', ',', 'grouped_data', ',', 'is_continuous', ',', 'is_categorical', ',', 'is_normal', ',', 'min_observed', ',', 'catlevels', ')', 'return', 'df']
Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc.
['Create', 'a', 'table', 'containing', 'p', '-', 'values', 'for', 'significance', 'tests', '.', 'Add', 'features', 'of', 'the', 'distributions', 'and', 'the', 'p', '-', 'values', 'to', 'the', 'dataframe', '.']
train
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L518-L572
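A self-contained peek at the categorical branch above: the contingency table handed to the significance test and the minimum row count derived from it; the column names are illustrative.

import pandas as pd

data = pd.DataFrame({'group': ['a', 'a', 'b', 'b', 'b'], 'smoker': [1, 0, 1, 1, 0]})
grouped = pd.crosstab(data['group'].rename('_groupby_var_'), data['smoker'])
print(grouped)
print(grouped.sum(axis=1).min())   # min_observed, later passed to _p_test for the categorical case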
2,239
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based_params.py
rlmb_tiny_stochastic
def rlmb_tiny_stochastic(): """Tiny setting with a stochastic next-frame model.""" hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "next_frame_basic_stochastic" return hparams
python
def rlmb_tiny_stochastic(): """Tiny setting with a stochastic next-frame model.""" hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "next_frame_basic_stochastic" return hparams
['def', 'rlmb_tiny_stochastic', '(', ')', ':', 'hparams', '=', 'rlmb_ppo_tiny', '(', ')', 'hparams', '.', 'epochs', '=', '1', '# Too slow with 2 for regular runs.', 'hparams', '.', 'generative_model', '=', '"next_frame_basic_stochastic"', 'hparams', '.', 'generative_model_params', '=', '"next_frame_basic_stochastic"', 'return', 'hparams']
Tiny setting with a stochastic next-frame model.
['Tiny', 'setting', 'with', 'a', 'stochastic', 'next', '-', 'frame', 'model', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L596-L602
2,240
dcos/shakedown
shakedown/cli/main.py
cli
def cli(**args): """ Shakedown is a DC/OS test-harness wrapper for the pytest tool. """ import shakedown # Read configuration options from ~/.shakedown (if exists) args = read_config(args) # Set configuration defaults args = set_config_defaults(args) if args['quiet']: shakedown.cli.quiet = True if not args['dcos_url']: try: args['dcos_url'] = dcos_url() except: click.secho('error: cluster URL not set, use --dcos-url or see --help for more information.', fg='red', bold=True) sys.exit(1) if not args['dcos_url']: click.secho('error: --dcos-url is a required option; see --help for more information.', fg='red', bold=True) sys.exit(1) if args['ssh_key_file']: shakedown.cli.ssh_key_file = args['ssh_key_file'] if args['ssh_user']: shakedown.cli.ssh_user = args['ssh_user'] if not args['no_banner']: echo(banner(), n=False) echo('Running pre-flight checks...', d='step-maj') # required modules and their 'version' method imported = {} requirements = { 'pytest': '__version__', 'dcos': 'version' } for req in requirements: ver = requirements[req] echo("Checking for {} library...".format(req), d='step-min', n=False) try: imported[req] = importlib.import_module(req, package=None) except ImportError: click.secho("error: {p} is not installed; run 'pip install {p}'.".format(p=req), fg='red', bold=True) sys.exit(1) echo(getattr(imported[req], requirements[req])) if shakedown.attach_cluster(args['dcos_url']): echo('Checking DC/OS cluster version...', d='step-min', n=False) echo(shakedown.dcos_version()) else: with imported['dcos'].cluster.setup_directory() as temp_path: imported['dcos'].cluster.set_attached(temp_path) imported['dcos'].config.set_val('core.dcos_url', args['dcos_url']) if args['ssl_no_verify']: imported['dcos'].config.set_val('core.ssl_verify', 'False') try: imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False) except: echo('Authenticating with DC/OS cluster...', d='step-min') authenticated = False token = imported['dcos'].config.get_config_val("core.dcos_acs_token") if token is not None: echo('trying existing ACS token...', d='step-min', n=False) try: shakedown.dcos_leader() authenticated = True echo(fchr('PP'), d='pass') except imported['dcos'].errors.DCOSException: echo(fchr('FF'), d='fail') if not authenticated and args['oauth_token']: try: echo('trying OAuth token...', d='item-maj', n=False) token = shakedown.authenticate_oauth(args['oauth_token']) with stdchannel_redirected(sys.stderr, os.devnull): imported['dcos'].config.set_val('core.dcos_acs_token', token) authenticated = True echo(fchr('PP'), d='pass') except: echo(fchr('FF'), d='fail') if not authenticated and args['username'] and args['password']: try: echo('trying username and password...', d='item-maj', n=False) token = shakedown.authenticate(args['username'], args['password']) with stdchannel_redirected(sys.stderr, os.devnull): imported['dcos'].config.set_val('core.dcos_acs_token', token) authenticated = True echo(fchr('PP'), d='pass') except: echo(fchr('FF'), d='fail') if authenticated: imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False) echo('Checking DC/OS cluster version...', d='step-min', n=False) echo(shakedown.dcos_version()) else: click.secho("error: no authentication credentials or token found.", fg='red', bold=True) sys.exit(1) class shakedown: """ This encapsulates a PyTest wrapper plugin """ state = {} stdout = [] tests = { 'file': {}, 'test': {} } report_stats = { 'passed':[], 'skipped':[], 'failed':[], 'total_passed':0, 'total_skipped':0, 'total_failed':0, } def 
output(title, state, text, status=True): """ Capture and display stdout/stderr output :param title: the title of the output box (eg. test name) :type title: str :param state: state of the result (pass, fail) :type state: str :param text: the stdout/stderr output :type text: str :param status: whether to output a status marker :type status: bool """ if state == 'fail': schr = fchr('FF') elif state == 'pass': schr = fchr('PP') elif state == 'skip': schr = fchr('SK') else: schr = '' if status: if not args['stdout_inline']: if state == 'fail': echo(schr, d='fail') elif state == 'pass': echo(schr, d='pass') else: if not text: if state == 'fail': echo(schr, d='fail') elif state == 'pass': if '::' in title: echo(title.split('::')[-1], d='item-min', n=False) echo(schr, d='pass') if text and args['stdout'] in [state, 'all']: o = decorate(schr + ': ', 'quote-head-' + state) o += click.style(decorate(title, style=state), bold=True) + "\n" o += decorate(str(text).strip(), style='quote-' + state) if args['stdout_inline']: echo(o) else: shakedown.stdout.append(o) def pytest_collectreport(self, report): """ Collect and validate individual test files """ if not 'collect' in shakedown.state: shakedown.state['collect'] = 1 echo('Collecting and validating test files...', d='step-min') if report.nodeid: echo(report.nodeid, d='item-maj', n=False) state = None if report.failed: state = 'fail' if report.passed: state = 'pass' if report.skipped: state = 'skip' if state: if report.longrepr: shakedown.output(report.nodeid, state, report.longrepr) else: shakedown.output(report.nodeid, state, None) def pytest_sessionstart(self): """ Tests have been collected, begin running them... """ echo('Initiating testing phase...', d='step-maj') def pytest_report_teststatus(self, report): """ Print report results to the console as they are run """ try: report_file, report_test = report.nodeid.split('::', 1) except ValueError: return if not 'test' in shakedown.state: shakedown.state['test'] = 1 echo('Running individual tests...', d='step-min') if not report_file in shakedown.tests['file']: shakedown.tests['file'][report_file] = 1 echo(report_file, d='item-maj') if not report.nodeid in shakedown.tests['test']: shakedown.tests['test'][report.nodeid] = {} if args['stdout_inline']: echo('') echo(report_test + ':', d='item-min') else: echo(report_test, d='item-min', n=False) if report.failed: shakedown.tests['test'][report.nodeid]['fail'] = True if report.when == 'teardown' and not 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, 'pass', None) # Suppress excess terminal output return report.outcome, None, None def pytest_runtest_logreport(self, report): """ Log the [stdout, stderr] results of tests if desired """ state = None for secname, content in report.sections: if report.failed: state = 'fail' if report.passed: state = 'pass' if report.skipped: state = 'skip' if state and secname != 'Captured stdout call': module = report.nodeid.split('::', 1)[0] cap_type = secname.split(' ')[-1] if not 'setup' in shakedown.tests['test'][report.nodeid]: shakedown.tests['test'][report.nodeid]['setup'] = True shakedown.output(module + ' ' + cap_type, state, content, False) elif cap_type == 'teardown': shakedown.output(module + ' ' + cap_type, state, content, False) elif state and report.when == 'call': if 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, state, content, False) else: shakedown.tests['test'][report.nodeid]['tested'] = True shakedown.output(report.nodeid, state, 
content) # Capture execution crashes if hasattr(report.longrepr, 'reprcrash'): longreport = report.longrepr if 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash), False) else: shakedown.tests['test'][report.nodeid]['tested'] = True shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash)) def pytest_sessionfinish(self, session, exitstatus): """ Testing phase is complete; print extra reports (stdout/stderr, JSON) as requested """ echo('Test phase completed.', d='step-maj') if ('stdout' in args and args['stdout']) and shakedown.stdout: for output in shakedown.stdout: echo(output) opts = ['-q', '--tb=no', "--timeout={}".format(args['timeout'])] if args['fail'] == 'fast': opts.append('-x') if args['pytest_option']: for opt in args['pytest_option']: opts.append(opt) if args['stdout_inline']: opts.append('-s') if args['tests']: tests_to_run = [] for test in args['tests']: tests_to_run.extend(test.split()) for test in tests_to_run: opts.append(test) exitstatus = imported['pytest'].main(opts, plugins=[shakedown()]) sys.exit(exitstatus)
python
def cli(**args): """ Shakedown is a DC/OS test-harness wrapper for the pytest tool. """ import shakedown # Read configuration options from ~/.shakedown (if exists) args = read_config(args) # Set configuration defaults args = set_config_defaults(args) if args['quiet']: shakedown.cli.quiet = True if not args['dcos_url']: try: args['dcos_url'] = dcos_url() except: click.secho('error: cluster URL not set, use --dcos-url or see --help for more information.', fg='red', bold=True) sys.exit(1) if not args['dcos_url']: click.secho('error: --dcos-url is a required option; see --help for more information.', fg='red', bold=True) sys.exit(1) if args['ssh_key_file']: shakedown.cli.ssh_key_file = args['ssh_key_file'] if args['ssh_user']: shakedown.cli.ssh_user = args['ssh_user'] if not args['no_banner']: echo(banner(), n=False) echo('Running pre-flight checks...', d='step-maj') # required modules and their 'version' method imported = {} requirements = { 'pytest': '__version__', 'dcos': 'version' } for req in requirements: ver = requirements[req] echo("Checking for {} library...".format(req), d='step-min', n=False) try: imported[req] = importlib.import_module(req, package=None) except ImportError: click.secho("error: {p} is not installed; run 'pip install {p}'.".format(p=req), fg='red', bold=True) sys.exit(1) echo(getattr(imported[req], requirements[req])) if shakedown.attach_cluster(args['dcos_url']): echo('Checking DC/OS cluster version...', d='step-min', n=False) echo(shakedown.dcos_version()) else: with imported['dcos'].cluster.setup_directory() as temp_path: imported['dcos'].cluster.set_attached(temp_path) imported['dcos'].config.set_val('core.dcos_url', args['dcos_url']) if args['ssl_no_verify']: imported['dcos'].config.set_val('core.ssl_verify', 'False') try: imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False) except: echo('Authenticating with DC/OS cluster...', d='step-min') authenticated = False token = imported['dcos'].config.get_config_val("core.dcos_acs_token") if token is not None: echo('trying existing ACS token...', d='step-min', n=False) try: shakedown.dcos_leader() authenticated = True echo(fchr('PP'), d='pass') except imported['dcos'].errors.DCOSException: echo(fchr('FF'), d='fail') if not authenticated and args['oauth_token']: try: echo('trying OAuth token...', d='item-maj', n=False) token = shakedown.authenticate_oauth(args['oauth_token']) with stdchannel_redirected(sys.stderr, os.devnull): imported['dcos'].config.set_val('core.dcos_acs_token', token) authenticated = True echo(fchr('PP'), d='pass') except: echo(fchr('FF'), d='fail') if not authenticated and args['username'] and args['password']: try: echo('trying username and password...', d='item-maj', n=False) token = shakedown.authenticate(args['username'], args['password']) with stdchannel_redirected(sys.stderr, os.devnull): imported['dcos'].config.set_val('core.dcos_acs_token', token) authenticated = True echo(fchr('PP'), d='pass') except: echo(fchr('FF'), d='fail') if authenticated: imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False) echo('Checking DC/OS cluster version...', d='step-min', n=False) echo(shakedown.dcos_version()) else: click.secho("error: no authentication credentials or token found.", fg='red', bold=True) sys.exit(1) class shakedown: """ This encapsulates a PyTest wrapper plugin """ state = {} stdout = [] tests = { 'file': {}, 'test': {} } report_stats = { 'passed':[], 'skipped':[], 'failed':[], 'total_passed':0, 'total_skipped':0, 'total_failed':0, } def 
output(title, state, text, status=True): """ Capture and display stdout/stderr output :param title: the title of the output box (eg. test name) :type title: str :param state: state of the result (pass, fail) :type state: str :param text: the stdout/stderr output :type text: str :param status: whether to output a status marker :type status: bool """ if state == 'fail': schr = fchr('FF') elif state == 'pass': schr = fchr('PP') elif state == 'skip': schr = fchr('SK') else: schr = '' if status: if not args['stdout_inline']: if state == 'fail': echo(schr, d='fail') elif state == 'pass': echo(schr, d='pass') else: if not text: if state == 'fail': echo(schr, d='fail') elif state == 'pass': if '::' in title: echo(title.split('::')[-1], d='item-min', n=False) echo(schr, d='pass') if text and args['stdout'] in [state, 'all']: o = decorate(schr + ': ', 'quote-head-' + state) o += click.style(decorate(title, style=state), bold=True) + "\n" o += decorate(str(text).strip(), style='quote-' + state) if args['stdout_inline']: echo(o) else: shakedown.stdout.append(o) def pytest_collectreport(self, report): """ Collect and validate individual test files """ if not 'collect' in shakedown.state: shakedown.state['collect'] = 1 echo('Collecting and validating test files...', d='step-min') if report.nodeid: echo(report.nodeid, d='item-maj', n=False) state = None if report.failed: state = 'fail' if report.passed: state = 'pass' if report.skipped: state = 'skip' if state: if report.longrepr: shakedown.output(report.nodeid, state, report.longrepr) else: shakedown.output(report.nodeid, state, None) def pytest_sessionstart(self): """ Tests have been collected, begin running them... """ echo('Initiating testing phase...', d='step-maj') def pytest_report_teststatus(self, report): """ Print report results to the console as they are run """ try: report_file, report_test = report.nodeid.split('::', 1) except ValueError: return if not 'test' in shakedown.state: shakedown.state['test'] = 1 echo('Running individual tests...', d='step-min') if not report_file in shakedown.tests['file']: shakedown.tests['file'][report_file] = 1 echo(report_file, d='item-maj') if not report.nodeid in shakedown.tests['test']: shakedown.tests['test'][report.nodeid] = {} if args['stdout_inline']: echo('') echo(report_test + ':', d='item-min') else: echo(report_test, d='item-min', n=False) if report.failed: shakedown.tests['test'][report.nodeid]['fail'] = True if report.when == 'teardown' and not 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, 'pass', None) # Suppress excess terminal output return report.outcome, None, None def pytest_runtest_logreport(self, report): """ Log the [stdout, stderr] results of tests if desired """ state = None for secname, content in report.sections: if report.failed: state = 'fail' if report.passed: state = 'pass' if report.skipped: state = 'skip' if state and secname != 'Captured stdout call': module = report.nodeid.split('::', 1)[0] cap_type = secname.split(' ')[-1] if not 'setup' in shakedown.tests['test'][report.nodeid]: shakedown.tests['test'][report.nodeid]['setup'] = True shakedown.output(module + ' ' + cap_type, state, content, False) elif cap_type == 'teardown': shakedown.output(module + ' ' + cap_type, state, content, False) elif state and report.when == 'call': if 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, state, content, False) else: shakedown.tests['test'][report.nodeid]['tested'] = True shakedown.output(report.nodeid, state, 
content) # Capture execution crashes if hasattr(report.longrepr, 'reprcrash'): longreport = report.longrepr if 'tested' in shakedown.tests['test'][report.nodeid]: shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash), False) else: shakedown.tests['test'][report.nodeid]['tested'] = True shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash)) def pytest_sessionfinish(self, session, exitstatus): """ Testing phase is complete; print extra reports (stdout/stderr, JSON) as requested """ echo('Test phase completed.', d='step-maj') if ('stdout' in args and args['stdout']) and shakedown.stdout: for output in shakedown.stdout: echo(output) opts = ['-q', '--tb=no', "--timeout={}".format(args['timeout'])] if args['fail'] == 'fast': opts.append('-x') if args['pytest_option']: for opt in args['pytest_option']: opts.append(opt) if args['stdout_inline']: opts.append('-s') if args['tests']: tests_to_run = [] for test in args['tests']: tests_to_run.extend(test.split()) for test in tests_to_run: opts.append(test) exitstatus = imported['pytest'].main(opts, plugins=[shakedown()]) sys.exit(exitstatus)
['def', 'cli', '(', '*', '*', 'args', ')', ':', 'import', 'shakedown', '# Read configuration options from ~/.shakedown (if exists)', 'args', '=', 'read_config', '(', 'args', ')', '# Set configuration defaults', 'args', '=', 'set_config_defaults', '(', 'args', ')', 'if', 'args', '[', "'quiet'", ']', ':', 'shakedown', '.', 'cli', '.', 'quiet', '=', 'True', 'if', 'not', 'args', '[', "'dcos_url'", ']', ':', 'try', ':', 'args', '[', "'dcos_url'", ']', '=', 'dcos_url', '(', ')', 'except', ':', 'click', '.', 'secho', '(', "'error: cluster URL not set, use --dcos-url or see --help for more information.'", ',', 'fg', '=', "'red'", ',', 'bold', '=', 'True', ')', 'sys', '.', 'exit', '(', '1', ')', 'if', 'not', 'args', '[', "'dcos_url'", ']', ':', 'click', '.', 'secho', '(', "'error: --dcos-url is a required option; see --help for more information.'", ',', 'fg', '=', "'red'", ',', 'bold', '=', 'True', ')', 'sys', '.', 'exit', '(', '1', ')', 'if', 'args', '[', "'ssh_key_file'", ']', ':', 'shakedown', '.', 'cli', '.', 'ssh_key_file', '=', 'args', '[', "'ssh_key_file'", ']', 'if', 'args', '[', "'ssh_user'", ']', ':', 'shakedown', '.', 'cli', '.', 'ssh_user', '=', 'args', '[', "'ssh_user'", ']', 'if', 'not', 'args', '[', "'no_banner'", ']', ':', 'echo', '(', 'banner', '(', ')', ',', 'n', '=', 'False', ')', 'echo', '(', "'Running pre-flight checks...'", ',', 'd', '=', "'step-maj'", ')', "# required modules and their 'version' method", 'imported', '=', '{', '}', 'requirements', '=', '{', "'pytest'", ':', "'__version__'", ',', "'dcos'", ':', "'version'", '}', 'for', 'req', 'in', 'requirements', ':', 'ver', '=', 'requirements', '[', 'req', ']', 'echo', '(', '"Checking for {} library..."', '.', 'format', '(', 'req', ')', ',', 'd', '=', "'step-min'", ',', 'n', '=', 'False', ')', 'try', ':', 'imported', '[', 'req', ']', '=', 'importlib', '.', 'import_module', '(', 'req', ',', 'package', '=', 'None', ')', 'except', 'ImportError', ':', 'click', '.', 'secho', '(', '"error: {p} is not installed; run \'pip install {p}\'."', '.', 'format', '(', 'p', '=', 'req', ')', ',', 'fg', '=', "'red'", ',', 'bold', '=', 'True', ')', 'sys', '.', 'exit', '(', '1', ')', 'echo', '(', 'getattr', '(', 'imported', '[', 'req', ']', ',', 'requirements', '[', 'req', ']', ')', ')', 'if', 'shakedown', '.', 'attach_cluster', '(', 'args', '[', "'dcos_url'", ']', ')', ':', 'echo', '(', "'Checking DC/OS cluster version...'", ',', 'd', '=', "'step-min'", ',', 'n', '=', 'False', ')', 'echo', '(', 'shakedown', '.', 'dcos_version', '(', ')', ')', 'else', ':', 'with', 'imported', '[', "'dcos'", ']', '.', 'cluster', '.', 'setup_directory', '(', ')', 'as', 'temp_path', ':', 'imported', '[', "'dcos'", ']', '.', 'cluster', '.', 'set_attached', '(', 'temp_path', ')', 'imported', '[', "'dcos'", ']', '.', 'config', '.', 'set_val', '(', "'core.dcos_url'", ',', 'args', '[', "'dcos_url'", ']', ')', 'if', 'args', '[', "'ssl_no_verify'", ']', ':', 'imported', '[', "'dcos'", ']', '.', 'config', '.', 'set_val', '(', "'core.ssl_verify'", ',', "'False'", ')', 'try', ':', 'imported', '[', "'dcos'", ']', '.', 'cluster', '.', 'setup_cluster_config', '(', 'args', '[', "'dcos_url'", ']', ',', 'temp_path', ',', 'False', ')', 'except', ':', 'echo', '(', "'Authenticating with DC/OS cluster...'", ',', 'd', '=', "'step-min'", ')', 'authenticated', '=', 'False', 'token', '=', 'imported', '[', "'dcos'", ']', '.', 'config', '.', 'get_config_val', '(', '"core.dcos_acs_token"', ')', 'if', 'token', 'is', 'not', 'None', ':', 'echo', '(', "'trying existing ACS token...'", ',', 'd', 
'=', "'step-min'", ',', 'n', '=', 'False', ')', 'try', ':', 'shakedown', '.', 'dcos_leader', '(', ')', 'authenticated', '=', 'True', 'echo', '(', 'fchr', '(', "'PP'", ')', ',', 'd', '=', "'pass'", ')', 'except', 'imported', '[', "'dcos'", ']', '.', 'errors', '.', 'DCOSException', ':', 'echo', '(', 'fchr', '(', "'FF'", ')', ',', 'd', '=', "'fail'", ')', 'if', 'not', 'authenticated', 'and', 'args', '[', "'oauth_token'", ']', ':', 'try', ':', 'echo', '(', "'trying OAuth token...'", ',', 'd', '=', "'item-maj'", ',', 'n', '=', 'False', ')', 'token', '=', 'shakedown', '.', 'authenticate_oauth', '(', 'args', '[', "'oauth_token'", ']', ')', 'with', 'stdchannel_redirected', '(', 'sys', '.', 'stderr', ',', 'os', '.', 'devnull', ')', ':', 'imported', '[', "'dcos'", ']', '.', 'config', '.', 'set_val', '(', "'core.dcos_acs_token'", ',', 'token', ')', 'authenticated', '=', 'True', 'echo', '(', 'fchr', '(', "'PP'", ')', ',', 'd', '=', "'pass'", ')', 'except', ':', 'echo', '(', 'fchr', '(', "'FF'", ')', ',', 'd', '=', "'fail'", ')', 'if', 'not', 'authenticated', 'and', 'args', '[', "'username'", ']', 'and', 'args', '[', "'password'", ']', ':', 'try', ':', 'echo', '(', "'trying username and password...'", ',', 'd', '=', "'item-maj'", ',', 'n', '=', 'False', ')', 'token', '=', 'shakedown', '.', 'authenticate', '(', 'args', '[', "'username'", ']', ',', 'args', '[', "'password'", ']', ')', 'with', 'stdchannel_redirected', '(', 'sys', '.', 'stderr', ',', 'os', '.', 'devnull', ')', ':', 'imported', '[', "'dcos'", ']', '.', 'config', '.', 'set_val', '(', "'core.dcos_acs_token'", ',', 'token', ')', 'authenticated', '=', 'True', 'echo', '(', 'fchr', '(', "'PP'", ')', ',', 'd', '=', "'pass'", ')', 'except', ':', 'echo', '(', 'fchr', '(', "'FF'", ')', ',', 'd', '=', "'fail'", ')', 'if', 'authenticated', ':', 'imported', '[', "'dcos'", ']', '.', 'cluster', '.', 'setup_cluster_config', '(', 'args', '[', "'dcos_url'", ']', ',', 'temp_path', ',', 'False', ')', 'echo', '(', "'Checking DC/OS cluster version...'", ',', 'd', '=', "'step-min'", ',', 'n', '=', 'False', ')', 'echo', '(', 'shakedown', '.', 'dcos_version', '(', ')', ')', 'else', ':', 'click', '.', 'secho', '(', '"error: no authentication credentials or token found."', ',', 'fg', '=', "'red'", ',', 'bold', '=', 'True', ')', 'sys', '.', 'exit', '(', '1', ')', 'class', 'shakedown', ':', '""" This encapsulates a PyTest wrapper plugin\n """', 'state', '=', '{', '}', 'stdout', '=', '[', ']', 'tests', '=', '{', "'file'", ':', '{', '}', ',', "'test'", ':', '{', '}', '}', 'report_stats', '=', '{', "'passed'", ':', '[', ']', ',', "'skipped'", ':', '[', ']', ',', "'failed'", ':', '[', ']', ',', "'total_passed'", ':', '0', ',', "'total_skipped'", ':', '0', ',', "'total_failed'", ':', '0', ',', '}', 'def', 'output', '(', 'title', ',', 'state', ',', 'text', ',', 'status', '=', 'True', ')', ':', '""" Capture and display stdout/stderr output\n\n :param title: the title of the output box (eg. 
test name)\n :type title: str\n :param state: state of the result (pass, fail)\n :type state: str\n :param text: the stdout/stderr output\n :type text: str\n :param status: whether to output a status marker\n :type status: bool\n """', 'if', 'state', '==', "'fail'", ':', 'schr', '=', 'fchr', '(', "'FF'", ')', 'elif', 'state', '==', "'pass'", ':', 'schr', '=', 'fchr', '(', "'PP'", ')', 'elif', 'state', '==', "'skip'", ':', 'schr', '=', 'fchr', '(', "'SK'", ')', 'else', ':', 'schr', '=', "''", 'if', 'status', ':', 'if', 'not', 'args', '[', "'stdout_inline'", ']', ':', 'if', 'state', '==', "'fail'", ':', 'echo', '(', 'schr', ',', 'd', '=', "'fail'", ')', 'elif', 'state', '==', "'pass'", ':', 'echo', '(', 'schr', ',', 'd', '=', "'pass'", ')', 'else', ':', 'if', 'not', 'text', ':', 'if', 'state', '==', "'fail'", ':', 'echo', '(', 'schr', ',', 'd', '=', "'fail'", ')', 'elif', 'state', '==', "'pass'", ':', 'if', "'::'", 'in', 'title', ':', 'echo', '(', 'title', '.', 'split', '(', "'::'", ')', '[', '-', '1', ']', ',', 'd', '=', "'item-min'", ',', 'n', '=', 'False', ')', 'echo', '(', 'schr', ',', 'd', '=', "'pass'", ')', 'if', 'text', 'and', 'args', '[', "'stdout'", ']', 'in', '[', 'state', ',', "'all'", ']', ':', 'o', '=', 'decorate', '(', 'schr', '+', "': '", ',', "'quote-head-'", '+', 'state', ')', 'o', '+=', 'click', '.', 'style', '(', 'decorate', '(', 'title', ',', 'style', '=', 'state', ')', ',', 'bold', '=', 'True', ')', '+', '"\\n"', 'o', '+=', 'decorate', '(', 'str', '(', 'text', ')', '.', 'strip', '(', ')', ',', 'style', '=', "'quote-'", '+', 'state', ')', 'if', 'args', '[', "'stdout_inline'", ']', ':', 'echo', '(', 'o', ')', 'else', ':', 'shakedown', '.', 'stdout', '.', 'append', '(', 'o', ')', 'def', 'pytest_collectreport', '(', 'self', ',', 'report', ')', ':', '""" Collect and validate individual test files\n """', 'if', 'not', "'collect'", 'in', 'shakedown', '.', 'state', ':', 'shakedown', '.', 'state', '[', "'collect'", ']', '=', '1', 'echo', '(', "'Collecting and validating test files...'", ',', 'd', '=', "'step-min'", ')', 'if', 'report', '.', 'nodeid', ':', 'echo', '(', 'report', '.', 'nodeid', ',', 'd', '=', "'item-maj'", ',', 'n', '=', 'False', ')', 'state', '=', 'None', 'if', 'report', '.', 'failed', ':', 'state', '=', "'fail'", 'if', 'report', '.', 'passed', ':', 'state', '=', "'pass'", 'if', 'report', '.', 'skipped', ':', 'state', '=', "'skip'", 'if', 'state', ':', 'if', 'report', '.', 'longrepr', ':', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', 'state', ',', 'report', '.', 'longrepr', ')', 'else', ':', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', 'state', ',', 'None', ')', 'def', 'pytest_sessionstart', '(', 'self', ')', ':', '""" Tests have been collected, begin running them...\n """', 'echo', '(', "'Initiating testing phase...'", ',', 'd', '=', "'step-maj'", ')', 'def', 'pytest_report_teststatus', '(', 'self', ',', 'report', ')', ':', '""" Print report results to the console as they are run\n """', 'try', ':', 'report_file', ',', 'report_test', '=', 'report', '.', 'nodeid', '.', 'split', '(', "'::'", ',', '1', ')', 'except', 'ValueError', ':', 'return', 'if', 'not', "'test'", 'in', 'shakedown', '.', 'state', ':', 'shakedown', '.', 'state', '[', "'test'", ']', '=', '1', 'echo', '(', "'Running individual tests...'", ',', 'd', '=', "'step-min'", ')', 'if', 'not', 'report_file', 'in', 'shakedown', '.', 'tests', '[', "'file'", ']', ':', 'shakedown', '.', 'tests', '[', "'file'", ']', '[', 'report_file', ']', '=', '1', 'echo', '(', 
'report_file', ',', 'd', '=', "'item-maj'", ')', 'if', 'not', 'report', '.', 'nodeid', 'in', 'shakedown', '.', 'tests', '[', "'test'", ']', ':', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', '=', '{', '}', 'if', 'args', '[', "'stdout_inline'", ']', ':', 'echo', '(', "''", ')', 'echo', '(', 'report_test', '+', "':'", ',', 'd', '=', "'item-min'", ')', 'else', ':', 'echo', '(', 'report_test', ',', 'd', '=', "'item-min'", ',', 'n', '=', 'False', ')', 'if', 'report', '.', 'failed', ':', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', '[', "'fail'", ']', '=', 'True', 'if', 'report', '.', 'when', '==', "'teardown'", 'and', 'not', "'tested'", 'in', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', ':', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', "'pass'", ',', 'None', ')', '# Suppress excess terminal output', 'return', 'report', '.', 'outcome', ',', 'None', ',', 'None', 'def', 'pytest_runtest_logreport', '(', 'self', ',', 'report', ')', ':', '""" Log the [stdout, stderr] results of tests if desired\n """', 'state', '=', 'None', 'for', 'secname', ',', 'content', 'in', 'report', '.', 'sections', ':', 'if', 'report', '.', 'failed', ':', 'state', '=', "'fail'", 'if', 'report', '.', 'passed', ':', 'state', '=', "'pass'", 'if', 'report', '.', 'skipped', ':', 'state', '=', "'skip'", 'if', 'state', 'and', 'secname', '!=', "'Captured stdout call'", ':', 'module', '=', 'report', '.', 'nodeid', '.', 'split', '(', "'::'", ',', '1', ')', '[', '0', ']', 'cap_type', '=', 'secname', '.', 'split', '(', "' '", ')', '[', '-', '1', ']', 'if', 'not', "'setup'", 'in', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', ':', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', '[', "'setup'", ']', '=', 'True', 'shakedown', '.', 'output', '(', 'module', '+', "' '", '+', 'cap_type', ',', 'state', ',', 'content', ',', 'False', ')', 'elif', 'cap_type', '==', "'teardown'", ':', 'shakedown', '.', 'output', '(', 'module', '+', "' '", '+', 'cap_type', ',', 'state', ',', 'content', ',', 'False', ')', 'elif', 'state', 'and', 'report', '.', 'when', '==', "'call'", ':', 'if', "'tested'", 'in', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', ':', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', 'state', ',', 'content', ',', 'False', ')', 'else', ':', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', '[', "'tested'", ']', '=', 'True', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', 'state', ',', 'content', ')', '# Capture execution crashes', 'if', 'hasattr', '(', 'report', '.', 'longrepr', ',', "'reprcrash'", ')', ':', 'longreport', '=', 'report', '.', 'longrepr', 'if', "'tested'", 'in', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', ':', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', "'fail'", ',', "'error: '", '+', 'str', '(', 'longreport', '.', 'reprcrash', ')', ',', 'False', ')', 'else', ':', 'shakedown', '.', 'tests', '[', "'test'", ']', '[', 'report', '.', 'nodeid', ']', '[', "'tested'", ']', '=', 'True', 'shakedown', '.', 'output', '(', 'report', '.', 'nodeid', ',', "'fail'", ',', "'error: '", '+', 'str', '(', 'longreport', '.', 'reprcrash', ')', ')', 'def', 'pytest_sessionfinish', '(', 'self', ',', 'session', ',', 'exitstatus', ')', ':', '""" Testing phase is complete; print extra reports (stdout/stderr, 
JSON) as requested\n """', 'echo', '(', "'Test phase completed.'", ',', 'd', '=', "'step-maj'", ')', 'if', '(', "'stdout'", 'in', 'args', 'and', 'args', '[', "'stdout'", ']', ')', 'and', 'shakedown', '.', 'stdout', ':', 'for', 'output', 'in', 'shakedown', '.', 'stdout', ':', 'echo', '(', 'output', ')', 'opts', '=', '[', "'-q'", ',', "'--tb=no'", ',', '"--timeout={}"', '.', 'format', '(', 'args', '[', "'timeout'", ']', ')', ']', 'if', 'args', '[', "'fail'", ']', '==', "'fast'", ':', 'opts', '.', 'append', '(', "'-x'", ')', 'if', 'args', '[', "'pytest_option'", ']', ':', 'for', 'opt', 'in', 'args', '[', "'pytest_option'", ']', ':', 'opts', '.', 'append', '(', 'opt', ')', 'if', 'args', '[', "'stdout_inline'", ']', ':', 'opts', '.', 'append', '(', "'-s'", ')', 'if', 'args', '[', "'tests'", ']', ':', 'tests_to_run', '=', '[', ']', 'for', 'test', 'in', 'args', '[', "'tests'", ']', ':', 'tests_to_run', '.', 'extend', '(', 'test', '.', 'split', '(', ')', ')', 'for', 'test', 'in', 'tests_to_run', ':', 'opts', '.', 'append', '(', 'test', ')', 'exitstatus', '=', 'imported', '[', "'pytest'", ']', '.', 'main', '(', 'opts', ',', 'plugins', '=', '[', 'shakedown', '(', ')', ']', ')', 'sys', '.', 'exit', '(', 'exitstatus', ')']
Shakedown is a DC/OS test-harness wrapper for the pytest tool.
['Shakedown', 'is', 'a', 'DC', '/', 'OS', 'test', '-', 'harness', 'wrapper', 'for', 'the', 'pytest', 'tool', '.']
train
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/cli/main.py#L30-L355
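The record above shows a CLI that ultimately just builds an option list and drives pytest programmatically with a plugin object (`pytest.main(opts, plugins=[shakedown()])`). Below is a minimal, hedged sketch of that driving mechanism only; it assumes pytest is installed, the hook bodies are illustrative placeholders rather than Shakedown's, and `tests/` is a made-up test path.

```python
# Minimal sketch of driving pytest with an inline plugin object, as the
# cli() function above does. Hook bodies are illustrative placeholders.
import sys
import pytest

class ReportPlugin:
    def pytest_sessionstart(self, session):
        print("Initiating testing phase...")

    def pytest_sessionfinish(self, session, exitstatus):
        print("Test phase completed with exit status", exitstatus)

if __name__ == "__main__":
    # '-q' and '--tb=no' mirror the option style used above; 'tests/' is a
    # placeholder path to a test directory.
    sys.exit(pytest.main(["-q", "--tb=no", "tests/"], plugins=[ReportPlugin()]))
```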
2,241
saltstack/salt
salt/modules/rh_ip.py
_parse_network_settings
def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result
python
def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result
['def', '_parse_network_settings', '(', 'opts', ',', 'current', ')', ':', '# Normalize keys', 'opts', '=', 'dict', '(', '(', 'k', '.', 'lower', '(', ')', ',', 'v', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'six', '.', 'iteritems', '(', 'opts', ')', ')', 'current', '=', 'dict', '(', '(', 'k', '.', 'lower', '(', ')', ',', 'v', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'six', '.', 'iteritems', '(', 'current', ')', ')', '# Check for supported parameters', 'retain_settings', '=', 'opts', '.', 'get', '(', "'retain_settings'", ',', 'False', ')', 'result', '=', 'current', 'if', 'retain_settings', 'else', '{', '}', '# Default quote type is an empty string, which will not quote values', 'quote_type', '=', "''", 'valid', '=', '_CONFIG_TRUE', '+', '_CONFIG_FALSE', 'if', "'enabled'", 'not', 'in', 'opts', ':', 'try', ':', 'opts', '[', "'networking'", ']', '=', 'current', '[', "'networking'", ']', '# If networking option is quoted, use its quote type', 'quote_type', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'is_quoted', '(', 'opts', '[', "'networking'", ']', ')', '_log_default_network', '(', "'networking'", ',', 'current', '[', "'networking'", ']', ')', 'except', 'ValueError', ':', '_raise_error_network', '(', "'networking'", ',', 'valid', ')', 'else', ':', 'opts', '[', "'networking'", ']', '=', 'opts', '[', "'enabled'", ']', 'true_val', '=', "'{0}yes{0}'", '.', 'format', '(', 'quote_type', ')', 'false_val', '=', "'{0}no{0}'", '.', 'format', '(', 'quote_type', ')', 'networking', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'dequote', '(', 'opts', '[', "'networking'", ']', ')', 'if', 'networking', 'in', 'valid', ':', 'if', 'networking', 'in', '_CONFIG_TRUE', ':', 'result', '[', "'networking'", ']', '=', 'true_val', 'elif', 'networking', 'in', '_CONFIG_FALSE', ':', 'result', '[', "'networking'", ']', '=', 'false_val', 'else', ':', '_raise_error_network', '(', "'networking'", ',', 'valid', ')', 'if', "'hostname'", 'not', 'in', 'opts', ':', 'try', ':', 'opts', '[', "'hostname'", ']', '=', 'current', '[', "'hostname'", ']', '_log_default_network', '(', "'hostname'", ',', 'current', '[', "'hostname'", ']', ')', 'except', 'Exception', ':', '_raise_error_network', '(', "'hostname'", ',', '[', "'server1.example.com'", ']', ')', 'if', 'opts', '[', "'hostname'", ']', ':', 'result', '[', "'hostname'", ']', '=', "'{1}{0}{1}'", '.', 'format', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'dequote', '(', 'opts', '[', "'hostname'", ']', ')', ',', 'quote_type', ')', 'else', ':', '_raise_error_network', '(', "'hostname'", ',', '[', "'server1.example.com'", ']', ')', 'if', "'nozeroconf'", 'in', 'opts', ':', 'nozeroconf', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'dequote', '(', 'opts', '[', "'nozeroconf'", ']', ')', 'if', 'nozeroconf', 'in', 'valid', ':', 'if', 'nozeroconf', 'in', '_CONFIG_TRUE', ':', 'result', '[', "'nozeroconf'", ']', '=', 'true_val', 'elif', 'nozeroconf', 'in', '_CONFIG_FALSE', ':', 'result', '[', "'nozeroconf'", ']', '=', 'false_val', 'else', ':', '_raise_error_network', '(', "'nozeroconf'", ',', 'valid', ')', 'for', 'opt', 'in', 'opts', ':', 'if', 'opt', 'not', 'in', '[', "'networking'", ',', "'hostname'", ',', "'nozeroconf'", ']', ':', 'result', '[', 'opt', ']', '=', "'{1}{0}{1}'", '.', 'format', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'dequote', '(', 'opts', '[', 'opt', ']', ')', ',', 'quote_type', ')', 'return', 'result']
Filters given options and outputs valid settings for the global network settings file.
['Filters', 'given', 'options', 'and', 'outputs', 'valid', 'settings', 'for', 'the', 'global', 'network', 'settings', 'file', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L836-L903
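A detail worth noting in the function above is that it detects whether the existing `NETWORKING` value was quoted and re-applies the same quote character when writing `yes`/`no` back. The sketch below illustrates that idea in isolation; `detect_quote` and the `_CONFIG_TRUE`/`_CONFIG_FALSE` tuples are simplified stand-ins for Salt's own helpers, not the real implementations.

```python
# Self-contained sketch of the quote-preserving yes/no normalisation above.
# detect_quote() and the value tuples are simplified stand-ins for Salt's helpers.
_CONFIG_TRUE = ('yes', 'on', 'true', '1')
_CONFIG_FALSE = ('no', 'off', 'false', '0')

def detect_quote(value):
    """Return the surrounding quote character of value, or '' if unquoted."""
    if len(value) >= 2 and value[0] == value[-1] and value[0] in '\'"':
        return value[0]
    return ''

def normalise_networking(value):
    quote = detect_quote(value)
    bare = value.strip(quote) if quote else value
    if bare.lower() in _CONFIG_TRUE:
        return '{0}yes{0}'.format(quote)
    if bare.lower() in _CONFIG_FALSE:
        return '{0}no{0}'.format(quote)
    raise ValueError('invalid networking value: {!r}'.format(value))

print(normalise_networking('"True"'))   # "yes"  (quoting style preserved)
print(normalise_networking('off'))      # no
```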
2,242
saltstack/salt
salt/modules/glassfish.py
delete_connector_c_pool
def delete_connector_c_pool(name, target='server', cascade=True, server=None): ''' Delete a connection pool ''' data = {'target': target, 'cascade': cascade} return _delete_element(name, 'resources/connector-connection-pool', data, server)
python
def delete_connector_c_pool(name, target='server', cascade=True, server=None): ''' Delete a connection pool ''' data = {'target': target, 'cascade': cascade} return _delete_element(name, 'resources/connector-connection-pool', data, server)
['def', 'delete_connector_c_pool', '(', 'name', ',', 'target', '=', "'server'", ',', 'cascade', '=', 'True', ',', 'server', '=', 'None', ')', ':', 'data', '=', '{', "'target'", ':', 'target', ',', "'cascade'", ':', 'cascade', '}', 'return', '_delete_element', '(', 'name', ',', "'resources/connector-connection-pool'", ',', 'data', ',', 'server', ')']
Delete a connection pool
['Delete', 'a', 'connection', 'pool']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glassfish.py#L338-L343
2,243
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._check_lookup_prop
def _check_lookup_prop(self, result_data): """Checks that selected lookup property can be used for this testcase.""" if not self._lookup_prop: return False if not result_data.get("id") and self._lookup_prop != "name": return False if not result_data.get("title") and self._lookup_prop == "name": return False return True
python
def _check_lookup_prop(self, result_data): """Checks that selected lookup property can be used for this testcase.""" if not self._lookup_prop: return False if not result_data.get("id") and self._lookup_prop != "name": return False if not result_data.get("title") and self._lookup_prop == "name": return False return True
['def', '_check_lookup_prop', '(', 'self', ',', 'result_data', ')', ':', 'if', 'not', 'self', '.', '_lookup_prop', ':', 'return', 'False', 'if', 'not', 'result_data', '.', 'get', '(', '"id"', ')', 'and', 'self', '.', '_lookup_prop', '!=', '"name"', ':', 'return', 'False', 'if', 'not', 'result_data', '.', 'get', '(', '"title"', ')', 'and', 'self', '.', '_lookup_prop', '==', '"name"', ':', 'return', 'False', 'return', 'True']
Checks that selected lookup property can be used for this testcase.
['Checks', 'that', 'selected', 'lookup', 'property', 'can', 'be', 'used', 'for', 'this', 'testcase', '.']
train
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L175-L184
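The predicate above boils down to one rule: a result matched by Polarion work-item `id` needs an `id`, and a result matched by `name` needs a `title`. A self-contained restatement follows, for illustration only; the sample records are invented.

```python
# Restatement of the lookup-property check above; sample records are invented.
def can_lookup(result_data, lookup_prop):
    if not lookup_prop:
        return False
    if lookup_prop == "name":
        return bool(result_data.get("title"))
    return bool(result_data.get("id"))

print(can_lookup({"id": "WORKITEM-123"}, "id"))      # True
print(can_lookup({"title": "test_login"}, "name"))   # True
print(can_lookup({"title": "test_login"}, "id"))     # False
```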
2,244
saltstack/salt
salt/utils/verify.py
list_path_traversal
def list_path_traversal(path): ''' Returns a full list of directories leading up to, and including, a path. So list_path_traversal('/path/to/salt') would return: ['/', '/path', '/path/to', '/path/to/salt'] in that order. This routine has been tested on Windows systems as well. list_path_traversal('c:\\path\\to\\salt') on Windows would return: ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt'] ''' out = [path] (head, tail) = os.path.split(path) if tail == '': # paths with trailing separators will return an empty string out = [head] (head, tail) = os.path.split(head) while head != out[0]: # loop until head is the same two consecutive times out.insert(0, head) (head, tail) = os.path.split(head) return out
python
def list_path_traversal(path): ''' Returns a full list of directories leading up to, and including, a path. So list_path_traversal('/path/to/salt') would return: ['/', '/path', '/path/to', '/path/to/salt'] in that order. This routine has been tested on Windows systems as well. list_path_traversal('c:\\path\\to\\salt') on Windows would return: ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt'] ''' out = [path] (head, tail) = os.path.split(path) if tail == '': # paths with trailing separators will return an empty string out = [head] (head, tail) = os.path.split(head) while head != out[0]: # loop until head is the same two consecutive times out.insert(0, head) (head, tail) = os.path.split(head) return out
['def', 'list_path_traversal', '(', 'path', ')', ':', 'out', '=', '[', 'path', ']', '(', 'head', ',', 'tail', ')', '=', 'os', '.', 'path', '.', 'split', '(', 'path', ')', 'if', 'tail', '==', "''", ':', '# paths with trailing separators will return an empty string', 'out', '=', '[', 'head', ']', '(', 'head', ',', 'tail', ')', '=', 'os', '.', 'path', '.', 'split', '(', 'head', ')', 'while', 'head', '!=', 'out', '[', '0', ']', ':', '# loop until head is the same two consecutive times', 'out', '.', 'insert', '(', '0', ',', 'head', ')', '(', 'head', ',', 'tail', ')', '=', 'os', '.', 'path', '.', 'split', '(', 'head', ')', 'return', 'out']
Returns a full list of directories leading up to, and including, a path. So list_path_traversal('/path/to/salt') would return: ['/', '/path', '/path/to', '/path/to/salt'] in that order. This routine has been tested on Windows systems as well. list_path_traversal('c:\\path\\to\\salt') on Windows would return: ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
['Returns', 'a', 'full', 'list', 'of', 'directories', 'leading', 'up', 'to', 'and', 'including', 'a', 'path', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/verify.py#L359-L381
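Because the routine above relies only on `os.path.split`, its behaviour is easy to check interactively. A hedged usage sketch follows; it assumes the Salt source tree is importable, and the expected output in the comments follows directly from the loop shown above.

```python
# Usage sketch; assumes Salt is installed so salt.utils.verify is importable.
from salt.utils.verify import list_path_traversal

print(list_path_traversal('/path/to/salt'))
# ['/', '/path', '/path/to', '/path/to/salt']

print(list_path_traversal('/path/to/salt/'))
# trailing separators are handled: ['/', '/path', '/path/to', '/path/to/salt']
```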
2,245
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/__subprocess.py
check_call
def check_call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] if retcode: raise CalledProcessError(retcode, cmd) return retcode
python
def check_call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] if retcode: raise CalledProcessError(retcode, cmd) return retcode
['def', 'check_call', '(', '*', 'popenargs', ',', '*', '*', 'kwargs', ')', ':', 'retcode', '=', 'call', '(', '*', 'popenargs', ',', '*', '*', 'kwargs', ')', 'cmd', '=', 'kwargs', '.', 'get', '(', '"args"', ')', 'if', 'cmd', 'is', 'None', ':', 'cmd', '=', 'popenargs', '[', '0', ']', 'if', 'retcode', ':', 'raise', 'CalledProcessError', '(', 'retcode', ',', 'cmd', ')', 'return', 'retcode']
Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"])
['Run', 'command', 'with', 'arguments', '.', 'Wait', 'for', 'command', 'to', 'complete', '.', 'If', 'the', 'exit', 'code', 'was', 'zero', 'then', 'return', 'otherwise', 'raise', 'CalledProcessError', '.', 'The', 'CalledProcessError', 'object', 'will', 'have', 'the', 'return', 'code', 'in', 'the', 'returncode', 'attribute', '.']
train
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/__subprocess.py#L493-L509
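The vendored function above has the same contract as the standard library's `subprocess.check_call`, so the usual stdlib usage pattern applies. In the sketch below, `ls` is just an example command assumed to be on PATH.

```python
# Standard-library usage of the same contract as the vendored check_call above.
import subprocess

try:
    subprocess.check_call(["ls", "-l"])   # returns 0 on success
except subprocess.CalledProcessError as exc:
    print("command failed with return code", exc.returncode)
```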
2,246
wright-group/WrightTools
WrightTools/kit/_timestamp.py
TimeStamp.path
def path(self): """Timestamp for placing into filepaths.""" out = self.datetime.strftime("%Y-%m-%d") out += " " ssm = ( self.datetime - self.datetime.replace(hour=0, minute=0, second=0, microsecond=0) ).total_seconds() out += str(int(ssm)).zfill(5) return out
python
def path(self): """Timestamp for placing into filepaths.""" out = self.datetime.strftime("%Y-%m-%d") out += " " ssm = ( self.datetime - self.datetime.replace(hour=0, minute=0, second=0, microsecond=0) ).total_seconds() out += str(int(ssm)).zfill(5) return out
['def', 'path', '(', 'self', ')', ':', 'out', '=', 'self', '.', 'datetime', '.', 'strftime', '(', '"%Y-%m-%d"', ')', 'out', '+=', '" "', 'ssm', '=', '(', 'self', '.', 'datetime', '-', 'self', '.', 'datetime', '.', 'replace', '(', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'microsecond', '=', '0', ')', ')', '.', 'total_seconds', '(', ')', 'out', '+=', 'str', '(', 'int', '(', 'ssm', ')', ')', '.', 'zfill', '(', '5', ')', 'return', 'out']
Timestamp for placing into filepaths.
['Timestamp', 'for', 'placing', 'into', 'filepaths', '.']
train
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/kit/_timestamp.py#L187-L195
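The property above formats a date plus zero-padded seconds-since-midnight. A standalone sketch of the same formatting using only the standard library (the example instant is arbitrary):

```python
# Standalone sketch of the date + seconds-since-midnight formatting above.
import datetime

now = datetime.datetime(2024, 3, 1, 13, 30, 5)   # arbitrary example instant
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
ssm = int((now - midnight).total_seconds())

print(now.strftime("%Y-%m-%d") + " " + str(ssm).zfill(5))   # 2024-03-01 48605
```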
2,247
goshuirc/irc
girc/client.py
ServerConnection.register_event
def register_event(self, direction, verb, child_fn, priority=10): """Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will not match `raw` events. If you wish to receive both `raw` and all other events, you need to register these separately. """ event_managers = [] if direction in ('in', 'both'): event_managers.append(self._events_in) if direction in ('out', 'both'): event_managers.append(self._events_out) if direction == 'girc': event_managers.append(self._girc_events) for event_manager in event_managers: event_manager.register(verb, child_fn, priority=priority)
python
def register_event(self, direction, verb, child_fn, priority=10): """Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will not match `raw` events. If you wish to receive both `raw` and all other events, you need to register these separately. """ event_managers = [] if direction in ('in', 'both'): event_managers.append(self._events_in) if direction in ('out', 'both'): event_managers.append(self._events_out) if direction == 'girc': event_managers.append(self._girc_events) for event_manager in event_managers: event_manager.register(verb, child_fn, priority=priority)
['def', 'register_event', '(', 'self', ',', 'direction', ',', 'verb', ',', 'child_fn', ',', 'priority', '=', '10', ')', ':', 'event_managers', '=', '[', ']', 'if', 'direction', 'in', '(', "'in'", ',', "'both'", ')', ':', 'event_managers', '.', 'append', '(', 'self', '.', '_events_in', ')', 'if', 'direction', 'in', '(', "'out'", ',', "'both'", ')', ':', 'event_managers', '.', 'append', '(', 'self', '.', '_events_out', ')', 'if', 'direction', '==', "'girc'", ':', 'event_managers', '.', 'append', '(', 'self', '.', '_girc_events', ')', 'for', 'event_manager', 'in', 'event_managers', ':', 'event_manager', '.', 'register', '(', 'verb', ',', 'child_fn', ',', 'priority', '=', 'priority', ')']
Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will not match `raw` events. If you wish to receive both `raw` and all other events, you need to register these separately.
['Register', 'an', 'event', 'with', 'all', 'servers', '.']
train
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/client.py#L98-L119
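The method above fans a single handler out to one or more per-direction event managers, each keyed by verb with a numeric priority. The sketch below illustrates the priority-ordered registration pattern in isolation; it is not girc's actual EventManager, and the verbs and handlers are invented.

```python
# Illustrative priority-ordered handler registry; not girc's real EventManager.
class SimpleEventManager:
    def __init__(self):
        self._handlers = {}   # verb -> list of (priority, handler)

    def register(self, verb, fn, priority=10):
        self._handlers.setdefault(verb, []).append((priority, fn))
        self._handlers[verb].sort(key=lambda pair: pair[0])   # lower runs first

    def dispatch(self, verb, event):
        for _priority, fn in self._handlers.get(verb, []):
            fn(event)

manager = SimpleEventManager()
manager.register('privmsg', lambda e: print('logging:', e), priority=5)
manager.register('privmsg', lambda e: print('handling:', e), priority=10)
manager.dispatch('privmsg', 'hello there')   # logging handler fires first
```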
2,248
briney/abutils
abutils/core/pair.py
Pair._refine_v
def _refine_v(seq, species): ''' Completes the 5' end of a a truncated sequence with germline nucleotides. Input is a MongoDB dict (seq) and the species. ''' vgerm = germlines.get_germline(seq['v_gene']['full'], species) aln = global_alignment(seq['vdj_nt'], vgerm) prepend = '' for s, g in zip(aln.aligned_query, aln.aligned_target): if s != '-': break else: prepend += g seq['vdj_nt'] = prepend + seq['vdj_nt']
python
def _refine_v(seq, species): ''' Completes the 5' end of a a truncated sequence with germline nucleotides. Input is a MongoDB dict (seq) and the species. ''' vgerm = germlines.get_germline(seq['v_gene']['full'], species) aln = global_alignment(seq['vdj_nt'], vgerm) prepend = '' for s, g in zip(aln.aligned_query, aln.aligned_target): if s != '-': break else: prepend += g seq['vdj_nt'] = prepend + seq['vdj_nt']
['def', '_refine_v', '(', 'seq', ',', 'species', ')', ':', 'vgerm', '=', 'germlines', '.', 'get_germline', '(', 'seq', '[', "'v_gene'", ']', '[', "'full'", ']', ',', 'species', ')', 'aln', '=', 'global_alignment', '(', 'seq', '[', "'vdj_nt'", ']', ',', 'vgerm', ')', 'prepend', '=', "''", 'for', 's', ',', 'g', 'in', 'zip', '(', 'aln', '.', 'aligned_query', ',', 'aln', '.', 'aligned_target', ')', ':', 'if', 's', '!=', "'-'", ':', 'break', 'else', ':', 'prepend', '+=', 'g', 'seq', '[', "'vdj_nt'", ']', '=', 'prepend', '+', 'seq', '[', "'vdj_nt'", ']']
Completes the 5' end of a truncated sequence with germline nucleotides. Input is a MongoDB dict (seq) and the species.
['Completes', 'the', '5', 'end', 'of', 'a', 'truncated', 'sequence', 'with', 'germline', 'nucleotides', '.', 'Input', 'is', 'a', 'MongoDB', 'dict', '(', 'seq', ')', 'and', 'the', 'species', '.']
train
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/core/pair.py#L262-L275
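The loop above copies one germline character for every leading gap in the aligned query and prepends the result. Below is a toy, self-contained illustration of just that step; the aligned strings are hand-made here, whereas the real function obtains them from abutils' global_alignment against a germline V gene.

```python
# Toy illustration of the 5'-completion step above; the alignment is hand-made.
aligned_query  = "---ATGGTGCAG"    # truncated sequence with leading gaps
aligned_target = "GAGATGGTGCAG"    # germline reference

prepend = ""
for s, g in zip(aligned_query, aligned_target):
    if s != "-":
        break
    prepend += g

print(prepend + aligned_query.lstrip("-"))   # GAGATGGTGCAG
```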
2,249
RudolfCardinal/pythonlib
cardinal_pythonlib/datetimefunc.py
format_datetime
def format_datetime(d: PotentialDatetimeType, fmt: str, default: str = None) -> Optional[str]: """ Format a datetime with a ``strftime`` format specification string, or return ``default`` if the input is ``None``. """ d = coerce_to_pendulum(d) if d is None: return default return d.strftime(fmt)
python
def format_datetime(d: PotentialDatetimeType, fmt: str, default: str = None) -> Optional[str]: """ Format a datetime with a ``strftime`` format specification string, or return ``default`` if the input is ``None``. """ d = coerce_to_pendulum(d) if d is None: return default return d.strftime(fmt)
['def', 'format_datetime', '(', 'd', ':', 'PotentialDatetimeType', ',', 'fmt', ':', 'str', ',', 'default', ':', 'str', '=', 'None', ')', '->', 'Optional', '[', 'str', ']', ':', 'd', '=', 'coerce_to_pendulum', '(', 'd', ')', 'if', 'd', 'is', 'None', ':', 'return', 'default', 'return', 'd', '.', 'strftime', '(', 'fmt', ')']
Format a datetime with a ``strftime`` format specification string, or return ``default`` if the input is ``None``.
['Format', 'a', 'datetime', 'with', 'a', 'strftime', 'format', 'specification', 'string', 'or', 'return', 'default', 'if', 'the', 'input', 'is', 'None', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L191-L201
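A hedged usage sketch of the helper above; it assumes cardinal_pythonlib (and its pendulum dependency) is installed, and the format strings are arbitrary examples.

```python
# Usage sketch; assumes cardinal_pythonlib and pendulum are installed.
import datetime
from cardinal_pythonlib.datetimefunc import format_datetime

print(format_datetime(datetime.datetime(2023, 5, 17, 9, 30), "%Y-%m-%d %H:%M"))
# 2023-05-17 09:30
print(format_datetime(None, "%Y-%m-%d", default="<no date>"))
# <no date>
```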
2,250
ansible-community/ara
ara/views/file.py
index
def index(): """ This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes. """ if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = (models.File.query .filter(models.File.playbook_id.in_(override))) else: files = models.File.query.all() return render_template('file_index.html', files=files)
python
def index(): """ This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes. """ if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None: override = current_app.config['ARA_PLAYBOOK_OVERRIDE'] files = (models.File.query .filter(models.File.playbook_id.in_(override))) else: files = models.File.query.all() return render_template('file_index.html', files=files)
['def', 'index', '(', ')', ':', 'if', 'current_app', '.', 'config', '[', "'ARA_PLAYBOOK_OVERRIDE'", ']', 'is', 'not', 'None', ':', 'override', '=', 'current_app', '.', 'config', '[', "'ARA_PLAYBOOK_OVERRIDE'", ']', 'files', '=', '(', 'models', '.', 'File', '.', 'query', '.', 'filter', '(', 'models', '.', 'File', '.', 'playbook_id', '.', 'in_', '(', 'override', ')', ')', ')', 'else', ':', 'files', '=', 'models', '.', 'File', '.', 'query', '.', 'all', '(', ')', 'return', 'render_template', '(', "'file_index.html'", ',', 'files', '=', 'files', ')']
This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes.
['This', 'is', 'not', 'served', 'anywhere', 'in', 'the', 'web', 'application', '.', 'It', 'is', 'used', 'explicitly', 'in', 'the', 'context', 'of', 'generating', 'static', 'files', 'since', 'flask', '-', 'frozen', 'requires', 'url_for', 's', 'to', 'crawl', 'content', '.', 'url_for', 's', 'are', 'not', 'used', 'with', 'file', '.', 'show_file', 'directly', 'and', 'are', 'instead', 'dynamically', 'generated', 'through', 'javascript', 'for', 'performance', 'purposes', '.']
train
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/views/file.py#L28-L43
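The only query logic in the view above is the playbook-override filter, which restricts files to a whitelist of playbook ids with SQLAlchemy's `in_()`. The sketch below illustrates that filter pattern outside Flask; the `File` model and the data are invented for the example.

```python
# Self-contained illustration of the .in_() override filter used above.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class File(Base):
    __tablename__ = "files"
    id = Column(Integer, primary_key=True)
    playbook_id = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([File(playbook_id=p) for p in ("pb-1", "pb-2", "pb-3")])
    session.commit()
    override = ["pb-1", "pb-3"]   # hypothetical ARA_PLAYBOOK_OVERRIDE value
    files = session.query(File).filter(File.playbook_id.in_(override)).all()
    print([f.playbook_id for f in files])   # ['pb-1', 'pb-3']
```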
2,251
rosenbrockc/fortpy
fortpy/isense/builtin.py
load
def load(parser, serializer): """Returns a dictionary of builtin functions for Fortran. Checks the cache first to see if we have a serialized version. If we don't, it loads it from the XML file. :arg parser: the DocParser instance for parsing the XML tags. :arg serializer: a Serializer instance from the CodeParser to cache the loaded XML file. """ fortdir = os.path.dirname(fortpy.__file__) xmlpath = os.path.join(fortdir, "isense", "builtin.xml") if not os.path.isfile(xmlpath): return {} changed_time = os.path.getmtime(xmlpath) cached = serializer.load_module("builtin.xml", changed_time) if cached is None: result = _load_builtin_xml(xmlpath, parser) serializer.save_module("builtin.xml", result, changed_time) else: result = cached return result
python
def load(parser, serializer): """Returns a dictionary of builtin functions for Fortran. Checks the cache first to see if we have a serialized version. If we don't, it loads it from the XML file. :arg parser: the DocParser instance for parsing the XML tags. :arg serializer: a Serializer instance from the CodeParser to cache the loaded XML file. """ fortdir = os.path.dirname(fortpy.__file__) xmlpath = os.path.join(fortdir, "isense", "builtin.xml") if not os.path.isfile(xmlpath): return {} changed_time = os.path.getmtime(xmlpath) cached = serializer.load_module("builtin.xml", changed_time) if cached is None: result = _load_builtin_xml(xmlpath, parser) serializer.save_module("builtin.xml", result, changed_time) else: result = cached return result
['def', 'load', '(', 'parser', ',', 'serializer', ')', ':', 'fortdir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'fortpy', '.', '__file__', ')', 'xmlpath', '=', 'os', '.', 'path', '.', 'join', '(', 'fortdir', ',', '"isense"', ',', '"builtin.xml"', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'xmlpath', ')', ':', 'return', '{', '}', 'changed_time', '=', 'os', '.', 'path', '.', 'getmtime', '(', 'xmlpath', ')', 'cached', '=', 'serializer', '.', 'load_module', '(', '"builtin.xml"', ',', 'changed_time', ')', 'if', 'cached', 'is', 'None', ':', 'result', '=', '_load_builtin_xml', '(', 'xmlpath', ',', 'parser', ')', 'serializer', '.', 'save_module', '(', '"builtin.xml"', ',', 'result', ',', 'changed_time', ')', 'else', ':', 'result', '=', 'cached', 'return', 'result']
Returns a dictionary of builtin functions for Fortran. Checks the cache first to see if we have a serialized version. If we don't, it loads it from the XML file. :arg parser: the DocParser instance for parsing the XML tags. :arg serializer: a Serializer instance from the CodeParser to cache the loaded XML file.
['Returns', 'a', 'dictionary', 'of', 'builtin', 'functions', 'for', 'Fortran', '.', 'Checks', 'the', 'cache', 'first', 'to', 'see', 'if', 'we', 'have', 'a', 'serialized', 'version', '.', 'If', 'we', 'don', 't', 'it', 'loads', 'it', 'from', 'the', 'XML', 'file', '.']
train
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/builtin.py#L11-L33
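As a clarifying aside: the fortpy `load` helper above caches a parsed XML file keyed on the file's modification time, rebuilding only when the file has changed. A minimal self-contained sketch of that mtime-keyed caching pattern (using pickle instead of fortpy's DocParser/Serializer, which are not shown here) might look like this:

import os
import pickle

def load_with_cache(xml_path, cache_path, parse_fn):
    """Return parse_fn(xml_path), reusing cache_path when it matches the file's mtime."""
    if not os.path.isfile(xml_path):
        return {}
    changed = os.path.getmtime(xml_path)
    if os.path.isfile(cache_path):
        with open(cache_path, "rb") as fh:
            stamp, data = pickle.load(fh)
        if stamp == changed:
            return data  # cache is still valid for this modification time
    data = parse_fn(xml_path)
    with open(cache_path, "wb") as fh:
        pickle.dump((changed, data), fh)  # store the mtime alongside the parsed result
    return data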
2,252
twilio/twilio-python
twilio/rest/messaging/v1/session/message.py
MessagePage.get_instance
def get_instance(self, payload): """ Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.session.message.MessageInstance :rtype: twilio.rest.messaging.v1.session.message.MessageInstance """ return MessageInstance(self._version, payload, session_sid=self._solution['session_sid'], )
python
def get_instance(self, payload): """ Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.session.message.MessageInstance :rtype: twilio.rest.messaging.v1.session.message.MessageInstance """ return MessageInstance(self._version, payload, session_sid=self._solution['session_sid'], )
['def', 'get_instance', '(', 'self', ',', 'payload', ')', ':', 'return', 'MessageInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'session_sid', '=', 'self', '.', '_solution', '[', "'session_sid'", ']', ',', ')']
Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.session.message.MessageInstance :rtype: twilio.rest.messaging.v1.session.message.MessageInstance
['Build', 'an', 'instance', 'of', 'MessageInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/session/message.py#L204-L213
2,253
opennode/waldur-core
waldur_core/core/models.py
ReversionMixin._is_version_duplicate
def _is_version_duplicate(self): """ Define should new version be created for object or no. Reasons to provide custom check instead of default `ignore_revision_duplicates`: - no need to compare all revisions - it is OK if right object version exists in any revision; - need to compare object attributes (not serialized data) to avoid version creation on wrong <float> vs <int> comparison; """ if self.id is None: return False try: latest_version = Version.objects.get_for_object(self).latest('revision__date_created') except Version.DoesNotExist: return False latest_version_object = latest_version._object_version.object fields = self.get_version_fields() return all([getattr(self, f) == getattr(latest_version_object, f) for f in fields])
python
def _is_version_duplicate(self): """ Define should new version be created for object or no. Reasons to provide custom check instead of default `ignore_revision_duplicates`: - no need to compare all revisions - it is OK if right object version exists in any revision; - need to compare object attributes (not serialized data) to avoid version creation on wrong <float> vs <int> comparison; """ if self.id is None: return False try: latest_version = Version.objects.get_for_object(self).latest('revision__date_created') except Version.DoesNotExist: return False latest_version_object = latest_version._object_version.object fields = self.get_version_fields() return all([getattr(self, f) == getattr(latest_version_object, f) for f in fields])
['def', '_is_version_duplicate', '(', 'self', ')', ':', 'if', 'self', '.', 'id', 'is', 'None', ':', 'return', 'False', 'try', ':', 'latest_version', '=', 'Version', '.', 'objects', '.', 'get_for_object', '(', 'self', ')', '.', 'latest', '(', "'revision__date_created'", ')', 'except', 'Version', '.', 'DoesNotExist', ':', 'return', 'False', 'latest_version_object', '=', 'latest_version', '.', '_object_version', '.', 'object', 'fields', '=', 'self', '.', 'get_version_fields', '(', ')', 'return', 'all', '(', '[', 'getattr', '(', 'self', ',', 'f', ')', '==', 'getattr', '(', 'latest_version_object', ',', 'f', ')', 'for', 'f', 'in', 'fields', ']', ')']
Define should new version be created for object or no. Reasons to provide custom check instead of default `ignore_revision_duplicates`: - no need to compare all revisions - it is OK if right object version exists in any revision; - need to compare object attributes (not serialized data) to avoid version creation on wrong <float> vs <int> comparison;
['Define', 'should', 'new', 'version', 'be', 'created', 'for', 'object', 'or', 'no', '.']
train
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/models.py#L385-L401
2,254
brmscheiner/ideogram
ideogram/importAnalysis.py
getModulePath
def getModulePath(project_path,module_name,verbose): '''Searches for module_name in searchpath and returns the filepath. If no filepath was found, returns None.''' if not module_name: return None sys.path.append(project_path) try: package = pkgutil.get_loader(module_name) except ImportError: if verbose: print("Parent module for "+module_name+" not found.") return None except: if verbose: print(module_name+" not loaded for bizarre reasons") try: if package: if package.get_code(module_name): filename = package.get_code(module_name).co_filename return filename elif package.find_spec(module_name).has_location==False: return None #built-in module such as itertools else: pass #perhaps filename is in package.find_spec(module_name).origin? pass #a good reference is https://www.python.org/dev/peps/pep-0302/ except ImportError: if verbose: print("Code object unavailable for "+module_name) return None except AttributeError: if verbose: print(module_name+" is an ExtensionFileLoader object") return None except: if verbose: print(module_name+" not loaded for bizarre reasons") return None else: if verbose: print ("Module "+module_name+" not found.") return None
python
def getModulePath(project_path,module_name,verbose): '''Searches for module_name in searchpath and returns the filepath. If no filepath was found, returns None.''' if not module_name: return None sys.path.append(project_path) try: package = pkgutil.get_loader(module_name) except ImportError: if verbose: print("Parent module for "+module_name+" not found.") return None except: if verbose: print(module_name+" not loaded for bizarre reasons") try: if package: if package.get_code(module_name): filename = package.get_code(module_name).co_filename return filename elif package.find_spec(module_name).has_location==False: return None #built-in module such as itertools else: pass #perhaps filename is in package.find_spec(module_name).origin? pass #a good reference is https://www.python.org/dev/peps/pep-0302/ except ImportError: if verbose: print("Code object unavailable for "+module_name) return None except AttributeError: if verbose: print(module_name+" is an ExtensionFileLoader object") return None except: if verbose: print(module_name+" not loaded for bizarre reasons") return None else: if verbose: print ("Module "+module_name+" not found.") return None
['def', 'getModulePath', '(', 'project_path', ',', 'module_name', ',', 'verbose', ')', ':', 'if', 'not', 'module_name', ':', 'return', 'None', 'sys', '.', 'path', '.', 'append', '(', 'project_path', ')', 'try', ':', 'package', '=', 'pkgutil', '.', 'get_loader', '(', 'module_name', ')', 'except', 'ImportError', ':', 'if', 'verbose', ':', 'print', '(', '"Parent module for "', '+', 'module_name', '+', '" not found."', ')', 'return', 'None', 'except', ':', 'if', 'verbose', ':', 'print', '(', 'module_name', '+', '" not loaded for bizarre reasons"', ')', 'try', ':', 'if', 'package', ':', 'if', 'package', '.', 'get_code', '(', 'module_name', ')', ':', 'filename', '=', 'package', '.', 'get_code', '(', 'module_name', ')', '.', 'co_filename', 'return', 'filename', 'elif', 'package', '.', 'find_spec', '(', 'module_name', ')', '.', 'has_location', '==', 'False', ':', 'return', 'None', '#built-in module such as itertools', 'else', ':', 'pass', '#perhaps filename is in package.find_spec(module_name).origin?', 'pass', '#a good reference is https://www.python.org/dev/peps/pep-0302/', 'except', 'ImportError', ':', 'if', 'verbose', ':', 'print', '(', '"Code object unavailable for "', '+', 'module_name', ')', 'return', 'None', 'except', 'AttributeError', ':', 'if', 'verbose', ':', 'print', '(', 'module_name', '+', '" is an ExtensionFileLoader object"', ')', 'return', 'None', 'except', ':', 'if', 'verbose', ':', 'print', '(', 'module_name', '+', '" not loaded for bizarre reasons"', ')', 'return', 'None', 'else', ':', 'if', 'verbose', ':', 'print', '(', '"Module "', '+', 'module_name', '+', '" not found."', ')', 'return', 'None']
Searches for module_name in searchpath and returns the filepath. If no filepath was found, returns None.
['Searches', 'for', 'module_name', 'in', 'searchpath', 'and', 'returns', 'the', 'filepath', '.', 'If', 'no', 'filepath', 'was', 'found', 'returns', 'None', '.']
train
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/importAnalysis.py#L4-L44
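The ideogram `getModulePath` function above resolves a module name to its source file via pkgutil loaders and several exception branches. As an aside, the same lookup can usually be done more directly with importlib; the sketch below is a generic illustration under that assumption, not the project's actual helper:

import importlib.util
import sys

def find_module_file(project_path, module_name):
    """Return the source file for module_name, or None for built-in or missing modules."""
    if not module_name:
        return None
    sys.path.append(project_path)
    try:
        spec = importlib.util.find_spec(module_name)
    except (ImportError, ValueError):
        return None  # e.g. the parent package could not be imported
    if spec is None or not spec.has_location:
        return None  # not found, or a built-in module such as itertools
    return spec.origin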
2,255
google/mobly
mobly/controllers/android_device_lib/snippet_client.py
SnippetClient._read_protocol_line
def _read_protocol_line(self): """Reads the next line of instrumentation output relevant to snippets. This method will skip over lines that don't start with 'SNIPPET' or 'INSTRUMENTATION_RESULT'. Returns: (str) Next line of snippet-related instrumentation output, stripped. Raises: jsonrpc_client_base.AppStartError: If EOF is reached without any protocol lines being read. """ while True: line = self._proc.stdout.readline().decode('utf-8') if not line: raise jsonrpc_client_base.AppStartError( self._ad, 'Unexpected EOF waiting for app to start') # readline() uses an empty string to mark EOF, and a single newline # to mark regular empty lines in the output. Don't move the strip() # call above the truthiness check, or this method will start # considering any blank output line to be EOF. line = line.strip() if (line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET ')): self.log.debug( 'Accepted line from instrumentation output: "%s"', line) return line self.log.debug('Discarded line from instrumentation output: "%s"', line)
python
def _read_protocol_line(self): """Reads the next line of instrumentation output relevant to snippets. This method will skip over lines that don't start with 'SNIPPET' or 'INSTRUMENTATION_RESULT'. Returns: (str) Next line of snippet-related instrumentation output, stripped. Raises: jsonrpc_client_base.AppStartError: If EOF is reached without any protocol lines being read. """ while True: line = self._proc.stdout.readline().decode('utf-8') if not line: raise jsonrpc_client_base.AppStartError( self._ad, 'Unexpected EOF waiting for app to start') # readline() uses an empty string to mark EOF, and a single newline # to mark regular empty lines in the output. Don't move the strip() # call above the truthiness check, or this method will start # considering any blank output line to be EOF. line = line.strip() if (line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET ')): self.log.debug( 'Accepted line from instrumentation output: "%s"', line) return line self.log.debug('Discarded line from instrumentation output: "%s"', line)
['def', '_read_protocol_line', '(', 'self', ')', ':', 'while', 'True', ':', 'line', '=', 'self', '.', '_proc', '.', 'stdout', '.', 'readline', '(', ')', '.', 'decode', '(', "'utf-8'", ')', 'if', 'not', 'line', ':', 'raise', 'jsonrpc_client_base', '.', 'AppStartError', '(', 'self', '.', '_ad', ',', "'Unexpected EOF waiting for app to start'", ')', '# readline() uses an empty string to mark EOF, and a single newline', "# to mark regular empty lines in the output. Don't move the strip()", '# call above the truthiness check, or this method will start', '# considering any blank output line to be EOF.', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', '(', 'line', '.', 'startswith', '(', "'INSTRUMENTATION_RESULT:'", ')', 'or', 'line', '.', 'startswith', '(', "'SNIPPET '", ')', ')', ':', 'self', '.', 'log', '.', 'debug', '(', '\'Accepted line from instrumentation output: "%s"\'', ',', 'line', ')', 'return', 'line', 'self', '.', 'log', '.', 'debug', '(', '\'Discarded line from instrumentation output: "%s"\'', ',', 'line', ')']
Reads the next line of instrumentation output relevant to snippets. This method will skip over lines that don't start with 'SNIPPET' or 'INSTRUMENTATION_RESULT'. Returns: (str) Next line of snippet-related instrumentation output, stripped. Raises: jsonrpc_client_base.AppStartError: If EOF is reached without any protocol lines being read.
['Reads', 'the', 'next', 'line', 'of', 'instrumentation', 'output', 'relevant', 'to', 'snippets', '.']
train
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/snippet_client.py#L294-L323
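The mobly `_read_protocol_line` method above scans instrumentation output for lines with known prefixes and treats an empty readline() result as EOF. A stripped-down, generic version of that loop (not tied to mobly's client or its error types) could be written as:

def read_protocol_line(stream, prefixes=("SNIPPET ", "INSTRUMENTATION_RESULT:")):
    """Return the next stripped line from stream that starts with one of the prefixes."""
    while True:
        raw = stream.readline()
        if not raw:
            raise EOFError("stream ended before a protocol line appeared")
        line = raw.strip()
        if line.startswith(prefixes):  # str.startswith accepts a tuple of prefixes
            return line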
2,256
lablup/backend.ai-client-py
src/ai/backend/client/auth.py
generate_signature
def generate_signature(method, version, endpoint, date, rel_url, content_type, content, access_key, secret_key, hash_type): ''' Generates the API request signature from the given parameters. ''' hash_type = hash_type hostname = endpoint._val.netloc # FIXME: migrate to public API if version >= 'v4.20181215': content = b'' else: if content_type.startswith('multipart/'): content = b'' body_hash = hashlib.new(hash_type, content).hexdigest() sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format( # noqa method.upper(), rel_url, date.isoformat(), hostname, content_type.lower(), version, body_hash ) sign_bytes = sign_str.encode() sign_key = hmac.new(secret_key.encode(), date.strftime('%Y%m%d').encode(), hash_type).digest() sign_key = hmac.new(sign_key, hostname.encode(), hash_type).digest() signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest() headers = { 'Authorization': 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format( hash_type.upper(), access_key, signature ), } return headers, signature
python
def generate_signature(method, version, endpoint, date, rel_url, content_type, content, access_key, secret_key, hash_type): ''' Generates the API request signature from the given parameters. ''' hash_type = hash_type hostname = endpoint._val.netloc # FIXME: migrate to public API if version >= 'v4.20181215': content = b'' else: if content_type.startswith('multipart/'): content = b'' body_hash = hashlib.new(hash_type, content).hexdigest() sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format( # noqa method.upper(), rel_url, date.isoformat(), hostname, content_type.lower(), version, body_hash ) sign_bytes = sign_str.encode() sign_key = hmac.new(secret_key.encode(), date.strftime('%Y%m%d').encode(), hash_type).digest() sign_key = hmac.new(sign_key, hostname.encode(), hash_type).digest() signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest() headers = { 'Authorization': 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format( hash_type.upper(), access_key, signature ), } return headers, signature
['def', 'generate_signature', '(', 'method', ',', 'version', ',', 'endpoint', ',', 'date', ',', 'rel_url', ',', 'content_type', ',', 'content', ',', 'access_key', ',', 'secret_key', ',', 'hash_type', ')', ':', 'hash_type', '=', 'hash_type', 'hostname', '=', 'endpoint', '.', '_val', '.', 'netloc', '# FIXME: migrate to public API', 'if', 'version', '>=', "'v4.20181215'", ':', 'content', '=', "b''", 'else', ':', 'if', 'content_type', '.', 'startswith', '(', "'multipart/'", ')', ':', 'content', '=', "b''", 'body_hash', '=', 'hashlib', '.', 'new', '(', 'hash_type', ',', 'content', ')', '.', 'hexdigest', '(', ')', 'sign_str', '=', "'{}\\n{}\\n{}\\nhost:{}\\ncontent-type:{}\\nx-backendai-version:{}\\n{}'", '.', 'format', '(', '# noqa', 'method', '.', 'upper', '(', ')', ',', 'rel_url', ',', 'date', '.', 'isoformat', '(', ')', ',', 'hostname', ',', 'content_type', '.', 'lower', '(', ')', ',', 'version', ',', 'body_hash', ')', 'sign_bytes', '=', 'sign_str', '.', 'encode', '(', ')', 'sign_key', '=', 'hmac', '.', 'new', '(', 'secret_key', '.', 'encode', '(', ')', ',', 'date', '.', 'strftime', '(', "'%Y%m%d'", ')', '.', 'encode', '(', ')', ',', 'hash_type', ')', '.', 'digest', '(', ')', 'sign_key', '=', 'hmac', '.', 'new', '(', 'sign_key', ',', 'hostname', '.', 'encode', '(', ')', ',', 'hash_type', ')', '.', 'digest', '(', ')', 'signature', '=', 'hmac', '.', 'new', '(', 'sign_key', ',', 'sign_bytes', ',', 'hash_type', ')', '.', 'hexdigest', '(', ')', 'headers', '=', '{', "'Authorization'", ':', "'BackendAI signMethod=HMAC-{}, credential={}:{}'", '.', 'format', '(', 'hash_type', '.', 'upper', '(', ')', ',', 'access_key', ',', 'signature', ')', ',', '}', 'return', 'headers', ',', 'signature']
Generates the API request signature from the given parameters.
['Generates', 'the', 'API', 'request', 'signature', 'from', 'the', 'given', 'parameters', '.']
train
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/auth.py#L5-L43
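The Backend.AI `generate_signature` function above derives a request signature from a nested HMAC chain (secret key -> day stamp -> hostname) over a canonical request string. The standalone sketch below reproduces that derivation with the standard library only; the endpoint, credentials and API version are made-up placeholders, and this is not the client's public API:

import hashlib
import hmac
from datetime import datetime, timezone

def sign(method, rel_url, hostname, content_type, api_version,
         secret_key, body=b"", hash_type="sha256"):
    now = datetime.now(timezone.utc)
    body_hash = hashlib.new(hash_type, body).hexdigest()
    sign_str = "{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}".format(
        method.upper(), rel_url, now.isoformat(), hostname,
        content_type.lower(), api_version, body_hash)
    # Nested key derivation: secret -> day stamp -> hostname, then sign the canonical string.
    key = hmac.new(secret_key.encode(), now.strftime("%Y%m%d").encode(), hash_type).digest()
    key = hmac.new(key, hostname.encode(), hash_type).digest()
    return hmac.new(key, sign_str.encode(), hash_type).hexdigest()

print(sign("GET", "/v4/resources", "api.example.com", "application/json",
           "v4.20181215", "secret-example"))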
2,257
Alignak-monitoring/alignak
alignak/util.py
list_to_serialized
def list_to_serialized(ref, the_list): """Serialize the list of elements Used for the retention store :param ref: Not used :type ref: :param the_list: dictionary to convert :type the_list: dict :return: dict of serialized :rtype: dict """ result = [] for elt in the_list: if not getattr(elt, 'serialize', None): continue result.append(elt.serialize()) return result
python
def list_to_serialized(ref, the_list): """Serialize the list of elements Used for the retention store :param ref: Not used :type ref: :param the_list: dictionary to convert :type the_list: dict :return: dict of serialized :rtype: dict """ result = [] for elt in the_list: if not getattr(elt, 'serialize', None): continue result.append(elt.serialize()) return result
['def', 'list_to_serialized', '(', 'ref', ',', 'the_list', ')', ':', 'result', '=', '[', ']', 'for', 'elt', 'in', 'the_list', ':', 'if', 'not', 'getattr', '(', 'elt', ',', "'serialize'", ',', 'None', ')', ':', 'continue', 'result', '.', 'append', '(', 'elt', '.', 'serialize', '(', ')', ')', 'return', 'result']
Serialize the list of elements Used for the retention store :param ref: Not used :type ref: :param the_list: dictionary to convert :type the_list: dict :return: dict of serialized :rtype: dict
['Serialize', 'the', 'list', 'of', 'elements']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/util.py#L607-L624
2,258
gem/oq-engine
openquake/hazardlib/gsim/travasarou_2003.py
TravasarouEtAl2003._compute_distance
def _compute_distance(self, dists, C): """ Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.) """ return C["c4"] * np.log(np.sqrt(dists.rrup ** 2. + C["h"] ** 2.))
python
def _compute_distance(self, dists, C): """ Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.) """ return C["c4"] * np.log(np.sqrt(dists.rrup ** 2. + C["h"] ** 2.))
['def', '_compute_distance', '(', 'self', ',', 'dists', ',', 'C', ')', ':', 'return', 'C', '[', '"c4"', ']', '*', 'np', '.', 'log', '(', 'np', '.', 'sqrt', '(', 'dists', '.', 'rrup', '**', '2.', '+', 'C', '[', '"h"', ']', '**', '2.', ')', ')']
Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.)
['Compute', 'the', 'second', 'term', 'of', 'the', 'equation', 'described', 'on', 'p', '.', '1144', ':']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/travasarou_2003.py#L156-L162
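The `_compute_distance` term above is the usual saturated geometric-spreading form, where the depth-like constant h keeps the logarithm finite at zero rupture distance. Written out as an equation, with purely illustrative coefficient values (not the published Travasarou et al. 2003 coefficients):

\text{term} = c_4 \,\ln\!\sqrt{R_{\mathrm{rup}}^{2} + h^{2}},
\qquad c_4 = -1.0,\; h = 6\ \mathrm{km},\; R_{\mathrm{rup}} = 10\ \mathrm{km}
\;\Rightarrow\; -1.0 \cdot \ln\sqrt{136} \approx -2.46 .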
2,259
bwohlberg/sporco
sporco/prox/_lp.py
proj_l2
def proj_l2(v, gamma, axis=None): r"""Compute the projection operator of the :math:`\ell_2` norm. The projection operator of the uncentered :math:`\ell_2` norm, .. math:: \mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \; \text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma} (\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) = \| \mathbf{x} \|_2`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int or tuple of ints, optional (default None) Axes of `v` over which to compute the :math:`\ell_2` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. Returns ------- x : ndarray Output array """ d = np.sqrt(np.sum(v**2, axis=axis, keepdims=True)) return np.asarray((d <= gamma) * v + (d > gamma) * (gamma * sl.zdivide(v, d)), dtype=v.dtype)
python
def proj_l2(v, gamma, axis=None): r"""Compute the projection operator of the :math:`\ell_2` norm. The projection operator of the uncentered :math:`\ell_2` norm, .. math:: \mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \; \text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma} (\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) = \| \mathbf{x} \|_2`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int or tuple of ints, optional (default None) Axes of `v` over which to compute the :math:`\ell_2` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. Returns ------- x : ndarray Output array """ d = np.sqrt(np.sum(v**2, axis=axis, keepdims=True)) return np.asarray((d <= gamma) * v + (d > gamma) * (gamma * sl.zdivide(v, d)), dtype=v.dtype)
['def', 'proj_l2', '(', 'v', ',', 'gamma', ',', 'axis', '=', 'None', ')', ':', 'd', '=', 'np', '.', 'sqrt', '(', 'np', '.', 'sum', '(', 'v', '**', '2', ',', 'axis', '=', 'axis', ',', 'keepdims', '=', 'True', ')', ')', 'return', 'np', '.', 'asarray', '(', '(', 'd', '<=', 'gamma', ')', '*', 'v', '+', '(', 'd', '>', 'gamma', ')', '*', '(', 'gamma', '*', 'sl', '.', 'zdivide', '(', 'v', ',', 'd', ')', ')', ',', 'dtype', '=', 'v', '.', 'dtype', ')']
r"""Compute the projection operator of the :math:`\ell_2` norm. The projection operator of the uncentered :math:`\ell_2` norm, .. math:: \mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \; \text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma} (\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) = \| \mathbf{x} \|_2`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int or tuple of ints, optional (default None) Axes of `v` over which to compute the :math:`\ell_2` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. Returns ------- x : ndarray Output array
['r', 'Compute', 'the', 'projection', 'operator', 'of', 'the', ':', 'math', ':', '\\', 'ell_2', 'norm', '.']
train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/prox/_lp.py#L284-L319
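The sporco `proj_l2` function above is the projection onto an l2-norm ball: points inside the ball of radius gamma are left unchanged, points outside are rescaled onto its surface. A minimal NumPy check of that rule, independent of sporco's `zdivide` helper (a simple safe-division guard is used here instead), could be:

import numpy as np

def proj_l2_ball(v, gamma, axis=None):
    """Project v onto the l2 ball of radius gamma centred at the origin."""
    d = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
    scale = np.where(d > gamma, gamma / np.maximum(d, 1e-12), 1.0)
    return v * scale

v = np.array([3.0, 4.0])           # norm 5
print(proj_l2_ball(v, 2.0))        # [1.2 1.6]  -> rescaled to norm 2
print(proj_l2_ball(v, 10.0))       # [3. 4.]    -> already inside, unchanged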
2,260
gwastro/pycbc
pycbc/tmpltbank/coord_utils.py
find_max_and_min_frequencies
def find_max_and_min_frequencies(name, mass_range_params, freqs): """ ADD DOCS """ cutoff_fns = pnutils.named_frequency_cutoffs if name not in cutoff_fns.keys(): err_msg = "%s not recognized as a valid cutoff frequency choice." %name err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys()) raise ValueError(err_msg) # Can I do this quickly? total_mass_approxs = { "SchwarzISCO": pnutils.f_SchwarzISCO, "LightRing" : pnutils.f_LightRing, "ERD" : pnutils.f_ERD } if name in total_mass_approxs.keys(): # This can be done quickly if the cutoff only depends on total mass # Assumes that lower total mass = higher cutoff frequency upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass) lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass) else: # Do this numerically # FIXME: Is 1000000 the right choice? I think so, but just highlighting mass1, mass2, spin1z, spin2z = \ get_random_mass(1000000, mass_range_params) mass_dict = {} mass_dict['mass1'] = mass1 mass_dict['mass2'] = mass2 mass_dict['spin1z'] = spin1z mass_dict['spin2z'] = spin2z tmp_freqs = cutoff_fns[name](mass_dict) upper_f_cutoff = tmp_freqs.max() lower_f_cutoff = tmp_freqs.min() cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff]) if lower_f_cutoff < freqs.min(): warn_msg = "WARNING: " warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,) warn_msg += "which is lower than the lowest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.min()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the lowest available metric frequency." logging.warn(warn_msg) if upper_f_cutoff > freqs.max(): warn_msg = "WARNING: " warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,) warn_msg += "which is larger than the highest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.max()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the largest available metric frequency." logging.warn(warn_msg) return find_closest_calculated_frequencies(cutoffs, freqs)
python
def find_max_and_min_frequencies(name, mass_range_params, freqs): """ ADD DOCS """ cutoff_fns = pnutils.named_frequency_cutoffs if name not in cutoff_fns.keys(): err_msg = "%s not recognized as a valid cutoff frequency choice." %name err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys()) raise ValueError(err_msg) # Can I do this quickly? total_mass_approxs = { "SchwarzISCO": pnutils.f_SchwarzISCO, "LightRing" : pnutils.f_LightRing, "ERD" : pnutils.f_ERD } if name in total_mass_approxs.keys(): # This can be done quickly if the cutoff only depends on total mass # Assumes that lower total mass = higher cutoff frequency upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass) lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass) else: # Do this numerically # FIXME: Is 1000000 the right choice? I think so, but just highlighting mass1, mass2, spin1z, spin2z = \ get_random_mass(1000000, mass_range_params) mass_dict = {} mass_dict['mass1'] = mass1 mass_dict['mass2'] = mass2 mass_dict['spin1z'] = spin1z mass_dict['spin2z'] = spin2z tmp_freqs = cutoff_fns[name](mass_dict) upper_f_cutoff = tmp_freqs.max() lower_f_cutoff = tmp_freqs.min() cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff]) if lower_f_cutoff < freqs.min(): warn_msg = "WARNING: " warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,) warn_msg += "which is lower than the lowest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.min()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the lowest available metric frequency." logging.warn(warn_msg) if upper_f_cutoff > freqs.max(): warn_msg = "WARNING: " warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,) warn_msg += "which is larger than the highest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.max()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the largest available metric frequency." logging.warn(warn_msg) return find_closest_calculated_frequencies(cutoffs, freqs)
['def', 'find_max_and_min_frequencies', '(', 'name', ',', 'mass_range_params', ',', 'freqs', ')', ':', 'cutoff_fns', '=', 'pnutils', '.', 'named_frequency_cutoffs', 'if', 'name', 'not', 'in', 'cutoff_fns', '.', 'keys', '(', ')', ':', 'err_msg', '=', '"%s not recognized as a valid cutoff frequency choice."', '%', 'name', 'err_msg', '+=', '"Recognized choices: "', '+', '" "', '.', 'join', '(', 'cutoff_fns', '.', 'keys', '(', ')', ')', 'raise', 'ValueError', '(', 'err_msg', ')', '# Can I do this quickly?', 'total_mass_approxs', '=', '{', '"SchwarzISCO"', ':', 'pnutils', '.', 'f_SchwarzISCO', ',', '"LightRing"', ':', 'pnutils', '.', 'f_LightRing', ',', '"ERD"', ':', 'pnutils', '.', 'f_ERD', '}', 'if', 'name', 'in', 'total_mass_approxs', '.', 'keys', '(', ')', ':', '# This can be done quickly if the cutoff only depends on total mass', '# Assumes that lower total mass = higher cutoff frequency', 'upper_f_cutoff', '=', 'total_mass_approxs', '[', 'name', ']', '(', 'mass_range_params', '.', 'minTotMass', ')', 'lower_f_cutoff', '=', 'total_mass_approxs', '[', 'name', ']', '(', 'mass_range_params', '.', 'maxTotMass', ')', 'else', ':', '# Do this numerically', '# FIXME: Is 1000000 the right choice? I think so, but just highlighting', 'mass1', ',', 'mass2', ',', 'spin1z', ',', 'spin2z', '=', 'get_random_mass', '(', '1000000', ',', 'mass_range_params', ')', 'mass_dict', '=', '{', '}', 'mass_dict', '[', "'mass1'", ']', '=', 'mass1', 'mass_dict', '[', "'mass2'", ']', '=', 'mass2', 'mass_dict', '[', "'spin1z'", ']', '=', 'spin1z', 'mass_dict', '[', "'spin2z'", ']', '=', 'spin2z', 'tmp_freqs', '=', 'cutoff_fns', '[', 'name', ']', '(', 'mass_dict', ')', 'upper_f_cutoff', '=', 'tmp_freqs', '.', 'max', '(', ')', 'lower_f_cutoff', '=', 'tmp_freqs', '.', 'min', '(', ')', 'cutoffs', '=', 'numpy', '.', 'array', '(', '[', 'lower_f_cutoff', ',', 'upper_f_cutoff', ']', ')', 'if', 'lower_f_cutoff', '<', 'freqs', '.', 'min', '(', ')', ':', 'warn_msg', '=', '"WARNING: "', 'warn_msg', '+=', '"Lowest frequency cutoff is %s Hz "', '%', '(', 'lower_f_cutoff', ',', ')', 'warn_msg', '+=', '"which is lower than the lowest frequency calculated "', 'warn_msg', '+=', '"for the metric: %s Hz. "', '%', '(', 'freqs', '.', 'min', '(', ')', ')', 'warn_msg', '+=', '"Distances for these waveforms will be calculated at "', 'warn_msg', '+=', '"the lowest available metric frequency."', 'logging', '.', 'warn', '(', 'warn_msg', ')', 'if', 'upper_f_cutoff', '>', 'freqs', '.', 'max', '(', ')', ':', 'warn_msg', '=', '"WARNING: "', 'warn_msg', '+=', '"Highest frequency cutoff is %s Hz "', '%', '(', 'upper_f_cutoff', ',', ')', 'warn_msg', '+=', '"which is larger than the highest frequency calculated "', 'warn_msg', '+=', '"for the metric: %s Hz. "', '%', '(', 'freqs', '.', 'max', '(', ')', ')', 'warn_msg', '+=', '"Distances for these waveforms will be calculated at "', 'warn_msg', '+=', '"the largest available metric frequency."', 'logging', '.', 'warn', '(', 'warn_msg', ')', 'return', 'find_closest_calculated_frequencies', '(', 'cutoffs', ',', 'freqs', ')']
ADD DOCS
['ADD', 'DOCS']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/coord_utils.py#L633-L687
2,261
delph-in/pydelphin
delphin/mrs/query.py
select_eps
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None): """ Return the list of matching elementary predications in *xmrs*. :class:`~delphin.mrs.components.ElementaryPredication` objects for *xmrs* match if their `nodeid` matches *nodeid*, `intrinsic_variable` matches *iv*, `label` matches *label*, and `pred` to *pred*. The *nodeid*, *iv*, *label*, and *pred* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query nodeid (optional): nodeid to match iv (str, optional): intrinsic variable to match label (str, optional): label to match pred (str, :class:`~delphin.mrs.components.Pred`, optional): predicate to match Returns: list: matching elementary predications """ epmatch = lambda n: ((nodeid is None or n.nodeid == nodeid) and (iv is None or n.iv == iv) and (label is None or n.label == label) and (pred is None or n.pred == pred)) return list(filter(epmatch, xmrs.eps()))
python
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None): """ Return the list of matching elementary predications in *xmrs*. :class:`~delphin.mrs.components.ElementaryPredication` objects for *xmrs* match if their `nodeid` matches *nodeid*, `intrinsic_variable` matches *iv*, `label` matches *label*, and `pred` to *pred*. The *nodeid*, *iv*, *label*, and *pred* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query nodeid (optional): nodeid to match iv (str, optional): intrinsic variable to match label (str, optional): label to match pred (str, :class:`~delphin.mrs.components.Pred`, optional): predicate to match Returns: list: matching elementary predications """ epmatch = lambda n: ((nodeid is None or n.nodeid == nodeid) and (iv is None or n.iv == iv) and (label is None or n.label == label) and (pred is None or n.pred == pred)) return list(filter(epmatch, xmrs.eps()))
['def', 'select_eps', '(', 'xmrs', ',', 'nodeid', '=', 'None', ',', 'iv', '=', 'None', ',', 'label', '=', 'None', ',', 'pred', '=', 'None', ')', ':', 'epmatch', '=', 'lambda', 'n', ':', '(', '(', 'nodeid', 'is', 'None', 'or', 'n', '.', 'nodeid', '==', 'nodeid', ')', 'and', '(', 'iv', 'is', 'None', 'or', 'n', '.', 'iv', '==', 'iv', ')', 'and', '(', 'label', 'is', 'None', 'or', 'n', '.', 'label', '==', 'label', ')', 'and', '(', 'pred', 'is', 'None', 'or', 'n', '.', 'pred', '==', 'pred', ')', ')', 'return', 'list', '(', 'filter', '(', 'epmatch', ',', 'xmrs', '.', 'eps', '(', ')', ')', ')']
Return the list of matching elementary predications in *xmrs*. :class:`~delphin.mrs.components.ElementaryPredication` objects for *xmrs* match if their `nodeid` matches *nodeid*, `intrinsic_variable` matches *iv*, `label` matches *label*, and `pred` to *pred*. The *nodeid*, *iv*, *label*, and *pred* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query nodeid (optional): nodeid to match iv (str, optional): intrinsic variable to match label (str, optional): label to match pred (str, :class:`~delphin.mrs.components.Pred`, optional): predicate to match Returns: list: matching elementary predications
['Return', 'the', 'list', 'of', 'matching', 'elementary', 'predications', 'in', '*', 'xmrs', '*', '.']
train
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/query.py#L62-L87
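The pydelphin `select_eps` query above follows a common None-means-no-filter convention: any criterion left as None is ignored, and all supplied criteria must match. Reduced to a generic helper (this is not part of pydelphin's API), the same idea looks like:

def select(items, **criteria):
    """Return items whose attributes equal every non-None criterion."""
    def matches(item):
        return all(getattr(item, key) == value
                   for key, value in criteria.items() if value is not None)
    return [item for item in items if matches(item)]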
2,262
saltstack/salt
salt/modules/rpm_lowpkg.py
file_list
def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret}
python
def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret}
['def', 'file_list', '(', '*', 'packages', ',', '*', '*', 'kwargs', ')', ':', 'cmd', '=', '[', "'rpm'", ']', 'if', 'kwargs', '.', 'get', '(', "'root'", ')', ':', 'cmd', '.', 'extend', '(', '[', "'--root'", ',', 'kwargs', '[', "'root'", ']', ']', ')', 'cmd', '.', 'append', '(', "'-ql'", 'if', 'packages', 'else', "'-qla'", ')', 'if', 'packages', ':', "# Can't concatenate a tuple, must do a list.extend()", 'cmd', '.', 'extend', '(', 'packages', ')', 'ret', '=', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', 'output_loglevel', '=', "'trace'", ',', 'python_shell', '=', 'False', ')', '.', 'splitlines', '(', ')', 'return', '{', "'errors'", ':', '[', ']', ',', "'files'", ':', 'ret', '}']
List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list
['List', 'the', 'files', 'that', 'belong', 'to', 'a', 'package', '.', 'Not', 'specifying', 'any', 'packages', 'will', 'return', 'a', 'list', 'of', '_every_', 'file', 'on', 'the', 'system', 's', 'rpm', 'database', '(', 'not', 'generally', 'recommended', ')', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L343-L373
2,263
cloudera/cm_api
python/src/cm_shell/cmps.py
ClouderaShell.do_stop_role
def do_stop_role(self, role): """ Stop a role Usage: > stop_role <role> Stops this role """ if not role: return None if not self.has_cluster(): return None if '-' not in role: print("Please enter a valid role name") return None try: service = api.get_cluster(self.cluster).get_service(role.split('-')[0]) service.stop_roles(role) print("Stopping Role") except ApiException: print("Error: Role or Service Not Found")
python
def do_stop_role(self, role): """ Stop a role Usage: > stop_role <role> Stops this role """ if not role: return None if not self.has_cluster(): return None if '-' not in role: print("Please enter a valid role name") return None try: service = api.get_cluster(self.cluster).get_service(role.split('-')[0]) service.stop_roles(role) print("Stopping Role") except ApiException: print("Error: Role or Service Not Found")
['def', 'do_stop_role', '(', 'self', ',', 'role', ')', ':', 'if', 'not', 'role', ':', 'return', 'None', 'if', 'not', 'self', '.', 'has_cluster', '(', ')', ':', 'return', 'None', 'if', "'-'", 'not', 'in', 'role', ':', 'print', '(', '"Please enter a valid role name"', ')', 'return', 'None', 'try', ':', 'service', '=', 'api', '.', 'get_cluster', '(', 'self', '.', 'cluster', ')', '.', 'get_service', '(', 'role', '.', 'split', '(', "'-'", ')', '[', '0', ']', ')', 'service', '.', 'stop_roles', '(', 'role', ')', 'print', '(', '"Stopping Role"', ')', 'except', 'ApiException', ':', 'print', '(', '"Error: Role or Service Not Found"', ')']
Stop a role Usage: > stop_role <role> Stops this role
['Stop', 'a', 'role', 'Usage', ':', '>', 'stop_role', '<role', '>', 'Stops', 'this', 'role']
train
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_shell/cmps.py#L498-L519
2,264
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
tx2genefile
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False): """ write out a file of transcript->gene mappings. """ if tsv: extension = ".tsv" sep = "\t" else: extension = ".csv" sep = "," if file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for k, v in tx2genedict(gtf, keep_version).items(): out_handle.write(sep.join([k, v]) + "\n") logger.info("tx2gene file %s created from %s." % (out_file, gtf)) return out_file
python
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False): """ write out a file of transcript->gene mappings. """ if tsv: extension = ".tsv" sep = "\t" else: extension = ".csv" sep = "," if file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for k, v in tx2genedict(gtf, keep_version).items(): out_handle.write(sep.join([k, v]) + "\n") logger.info("tx2gene file %s created from %s." % (out_file, gtf)) return out_file
['def', 'tx2genefile', '(', 'gtf', ',', 'out_file', '=', 'None', ',', 'data', '=', 'None', ',', 'tsv', '=', 'True', ',', 'keep_version', '=', 'False', ')', ':', 'if', 'tsv', ':', 'extension', '=', '".tsv"', 'sep', '=', '"\\t"', 'else', ':', 'extension', '=', '".csv"', 'sep', '=', '","', 'if', 'file_exists', '(', 'out_file', ')', ':', 'return', 'out_file', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'with', 'open', '(', 'tx_out_file', ',', '"w"', ')', 'as', 'out_handle', ':', 'for', 'k', ',', 'v', 'in', 'tx2genedict', '(', 'gtf', ',', 'keep_version', ')', '.', 'items', '(', ')', ':', 'out_handle', '.', 'write', '(', 'sep', '.', 'join', '(', '[', 'k', ',', 'v', ']', ')', '+', '"\\n"', ')', 'logger', '.', 'info', '(', '"tx2gene file %s created from %s."', '%', '(', 'out_file', ',', 'gtf', ')', ')', 'return', 'out_file']
write out a file of transcript->gene mappings.
['write', 'out', 'a', 'file', 'of', 'transcript', '-', '>', 'gene', 'mappings', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L330-L347
2,265
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
OpenFileCache.close
def close(self, filehandle): """Close openend file if no longer used.""" with self.lock: if filehandle in self.files: self.files[filehandle] -= 1 # trim the file cache index = 0 size = len(self.past) while size > self.size and index < size: filehandle = self.past[index] if self.files[filehandle] == 0: filehandle.close() del self.files[filehandle] del self.past[index] size -= 1 else: index += 1
python
def close(self, filehandle): """Close openend file if no longer used.""" with self.lock: if filehandle in self.files: self.files[filehandle] -= 1 # trim the file cache index = 0 size = len(self.past) while size > self.size and index < size: filehandle = self.past[index] if self.files[filehandle] == 0: filehandle.close() del self.files[filehandle] del self.past[index] size -= 1 else: index += 1
['def', 'close', '(', 'self', ',', 'filehandle', ')', ':', 'with', 'self', '.', 'lock', ':', 'if', 'filehandle', 'in', 'self', '.', 'files', ':', 'self', '.', 'files', '[', 'filehandle', ']', '-=', '1', '# trim the file cache', 'index', '=', '0', 'size', '=', 'len', '(', 'self', '.', 'past', ')', 'while', 'size', '>', 'self', '.', 'size', 'and', 'index', '<', 'size', ':', 'filehandle', '=', 'self', '.', 'past', '[', 'index', ']', 'if', 'self', '.', 'files', '[', 'filehandle', ']', '==', '0', ':', 'filehandle', '.', 'close', '(', ')', 'del', 'self', '.', 'files', '[', 'filehandle', ']', 'del', 'self', '.', 'past', '[', 'index', ']', 'size', '-=', '1', 'else', ':', 'index', '+=', '1']
Close opened file if no longer used.
['Close', 'opened', 'file', 'if', 'no', 'longer', 'used', '.']
Close opened file if no longer used.
['Close', 'opened', 'file', 'if', 'no', 'longer', 'used', '.']
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5759-L5775
2,266
AnalogJ/lexicon
lexicon/providers/powerdns.py
Provider.zone_data
def zone_data(self): """Get zone data""" if self._zone_data is None: self._zone_data = self._get('/zones/' + self.domain).json() return self._zone_data
python
def zone_data(self): """Get zone data""" if self._zone_data is None: self._zone_data = self._get('/zones/' + self.domain).json() return self._zone_data
['def', 'zone_data', '(', 'self', ')', ':', 'if', 'self', '.', '_zone_data', 'is', 'None', ':', 'self', '.', '_zone_data', '=', 'self', '.', '_get', '(', "'/zones/'", '+', 'self', '.', 'domain', ')', '.', 'json', '(', ')', 'return', 'self', '.', '_zone_data']
Get zone data
['Get', 'zone', 'data']
train
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/powerdns.py#L86-L90
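The PowerDNS provider's `zone_data` property above fetches the zone once and memoises the result on the instance. In modern Python the same lazy fetch-once pattern can be expressed with functools.cached_property; the class and fetch callable below are illustrative placeholders, not the lexicon provider itself:

from functools import cached_property

class ZoneClient:
    def __init__(self, fetch, domain):
        self._fetch = fetch      # e.g. a callable wrapping an HTTP GET that returns JSON
        self.domain = domain

    @cached_property
    def zone_data(self):
        # Evaluated on first access only; the result is stored on the instance.
        return self._fetch('/zones/' + self.domain)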
2,267
maxfischer2781/include
include/mount/__init__.py
MountLoader.load_module
def load_module(self, name): """Load and return a module""" if name in sys.modules: return sys.modules[name] # load the actual import hook module module_name = self.mount2name(name) __import__(module_name) # alias the import hook module to the mount, so both can be used interchangeably module = sys.modules[name] = sys.modules[module_name] module.install() return module
python
def load_module(self, name): """Load and return a module""" if name in sys.modules: return sys.modules[name] # load the actual import hook module module_name = self.mount2name(name) __import__(module_name) # alias the import hook module to the mount, so both can be used interchangeably module = sys.modules[name] = sys.modules[module_name] module.install() return module
['def', 'load_module', '(', 'self', ',', 'name', ')', ':', 'if', 'name', 'in', 'sys', '.', 'modules', ':', 'return', 'sys', '.', 'modules', '[', 'name', ']', '# load the actual import hook module', 'module_name', '=', 'self', '.', 'mount2name', '(', 'name', ')', '__import__', '(', 'module_name', ')', '# alias the import hook module to the mount, so both can be used interchangeably', 'module', '=', 'sys', '.', 'modules', '[', 'name', ']', '=', 'sys', '.', 'modules', '[', 'module_name', ']', 'module', '.', 'install', '(', ')', 'return', 'module']
Load and return a module
['Load', 'and', 'return', 'a', 'module']
train
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L9-L19
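The `load_module` hook above imports the real module and then aliases it in sys.modules under the mount name, so both names refer to the same module object (it also calls the module's install() hook, which is omitted here). A self-contained sketch of just the aliasing step, using the stdlib json module as a stand-in:

import sys

def alias_module(alias_name, real_name):
    """Import real_name and expose the same module object under alias_name."""
    if alias_name in sys.modules:
        return sys.modules[alias_name]
    __import__(real_name)
    module = sys.modules[alias_name] = sys.modules[real_name]
    return module

aliased = alias_module("my_json_alias", "json")
assert aliased is sys.modules["json"] is sys.modules["my_json_alias"]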
2,268
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend._get_approved_attributes
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state): """ Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes """ name_format = idp_policy.get_name_form(sp_entity_id) attrconvs = idp.config.attribute_converters idp_policy.acs = attrconvs attribute_filter = [] for aconv in attrconvs: if aconv.name_format == name_format: all_attributes = {v: None for v in aconv._fro.values()} attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys()) break attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter) satosa_logging(logger, logging.DEBUG, "Filter: %s" % attribute_filter, state) return attribute_filter
python
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state): """ Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes """ name_format = idp_policy.get_name_form(sp_entity_id) attrconvs = idp.config.attribute_converters idp_policy.acs = attrconvs attribute_filter = [] for aconv in attrconvs: if aconv.name_format == name_format: all_attributes = {v: None for v in aconv._fro.values()} attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys()) break attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter) satosa_logging(logger, logging.DEBUG, "Filter: %s" % attribute_filter, state) return attribute_filter
['def', '_get_approved_attributes', '(', 'self', ',', 'idp', ',', 'idp_policy', ',', 'sp_entity_id', ',', 'state', ')', ':', 'name_format', '=', 'idp_policy', '.', 'get_name_form', '(', 'sp_entity_id', ')', 'attrconvs', '=', 'idp', '.', 'config', '.', 'attribute_converters', 'idp_policy', '.', 'acs', '=', 'attrconvs', 'attribute_filter', '=', '[', ']', 'for', 'aconv', 'in', 'attrconvs', ':', 'if', 'aconv', '.', 'name_format', '==', 'name_format', ':', 'all_attributes', '=', '{', 'v', ':', 'None', 'for', 'v', 'in', 'aconv', '.', '_fro', '.', 'values', '(', ')', '}', 'attribute_filter', '=', 'list', '(', 'idp_policy', '.', 'restrict', '(', 'all_attributes', ',', 'sp_entity_id', ',', 'idp', '.', 'metadata', ')', '.', 'keys', '(', ')', ')', 'break', 'attribute_filter', '=', 'self', '.', 'converter', '.', 'to_internal_filter', '(', 'self', '.', 'attribute_profile', ',', 'attribute_filter', ')', 'satosa_logging', '(', 'logger', ',', 'logging', '.', 'DEBUG', ',', '"Filter: %s"', '%', 'attribute_filter', ',', 'state', ')', 'return', 'attribute_filter']
Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes
['Returns', 'a', 'list', 'of', 'approved', 'attributes']
train
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L247-L274
2,269
seung-lab/cloud-volume
cloudvolume/skeletonservice.py
PrecomputedSkeleton.from_path
def from_path(kls, vertices): """ Given an Nx3 array of vertices that constitute a single path, generate a skeleton with appropriate edges. """ if vertices.shape[0] == 0: return PrecomputedSkeleton() skel = PrecomputedSkeleton(vertices) edges = np.zeros(shape=(skel.vertices.shape[0] - 1, 2), dtype=np.uint32) edges[:,0] = np.arange(skel.vertices.shape[0] - 1) edges[:,1] = np.arange(1, skel.vertices.shape[0]) skel.edges = edges return skel
python
def from_path(kls, vertices): """ Given an Nx3 array of vertices that constitute a single path, generate a skeleton with appropriate edges. """ if vertices.shape[0] == 0: return PrecomputedSkeleton() skel = PrecomputedSkeleton(vertices) edges = np.zeros(shape=(skel.vertices.shape[0] - 1, 2), dtype=np.uint32) edges[:,0] = np.arange(skel.vertices.shape[0] - 1) edges[:,1] = np.arange(1, skel.vertices.shape[0]) skel.edges = edges return skel
['def', 'from_path', '(', 'kls', ',', 'vertices', ')', ':', 'if', 'vertices', '.', 'shape', '[', '0', ']', '==', '0', ':', 'return', 'PrecomputedSkeleton', '(', ')', 'skel', '=', 'PrecomputedSkeleton', '(', 'vertices', ')', 'edges', '=', 'np', '.', 'zeros', '(', 'shape', '=', '(', 'skel', '.', 'vertices', '.', 'shape', '[', '0', ']', '-', '1', ',', '2', ')', ',', 'dtype', '=', 'np', '.', 'uint32', ')', 'edges', '[', ':', ',', '0', ']', '=', 'np', '.', 'arange', '(', 'skel', '.', 'vertices', '.', 'shape', '[', '0', ']', '-', '1', ')', 'edges', '[', ':', ',', '1', ']', '=', 'np', '.', 'arange', '(', '1', ',', 'skel', '.', 'vertices', '.', 'shape', '[', '0', ']', ')', 'skel', '.', 'edges', '=', 'edges', 'return', 'skel']
Given an Nx3 array of vertices that constitute a single path, generate a skeleton with appropriate edges.
['Given', 'an', 'Nx3', 'array', 'of', 'vertices', 'that', 'constitute', 'a', 'single', 'path', 'generate', 'a', 'skeleton', 'with', 'appropriate', 'edges', '.']
train
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/skeletonservice.py#L63-L76
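`PrecomputedSkeleton.from_path` above turns an ordered list of path vertices into edges joining each vertex to its successor. The edge construction on its own is a short NumPy idiom; a small standalone check (not using cloud-volume itself):

import numpy as np

vertices = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0], [2, 1, 0]], dtype=np.float32)
n = vertices.shape[0]
# Join consecutive vertices along the path: (0,1), (1,2), (2,3).
edges = np.column_stack([np.arange(n - 1), np.arange(1, n)]).astype(np.uint32)
print(edges)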
2,270
SmartTeleMax/iktomi
iktomi/unstable/db/sqla/replication.py
replicate
def replicate(source, model, cache=None): '''Replicates the `source` object to `model` class and returns its reflection.''' target = replicate_no_merge(source, model, cache=cache) if target is not None: db = object_session(source) target = db.merge(target) return target
python
def replicate(source, model, cache=None): '''Replicates the `source` object to `model` class and returns its reflection.''' target = replicate_no_merge(source, model, cache=cache) if target is not None: db = object_session(source) target = db.merge(target) return target
['def', 'replicate', '(', 'source', ',', 'model', ',', 'cache', '=', 'None', ')', ':', 'target', '=', 'replicate_no_merge', '(', 'source', ',', 'model', ',', 'cache', '=', 'cache', ')', 'if', 'target', 'is', 'not', 'None', ':', 'db', '=', 'object_session', '(', 'source', ')', 'target', '=', 'db', '.', 'merge', '(', 'target', ')', 'return', 'target']
Replicates the `source` object to `model` class and returns its reflection.
['Replicates', 'the', 'source', 'object', 'to', 'model', 'class', 'and', 'returns', 'its', 'reflection', '.']
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/unstable/db/sqla/replication.py#L207-L214
2,271
tanghaibao/goatools
goatools/grouper/sorter_nts.py
SorterNts._get_sorted_section
def _get_sorted_section(self, nts_section): """Sort GO IDs in each section, if requested by user.""" #pylint: disable=unnecessary-lambda if self.section_sortby is True: return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt)) if self.section_sortby is False or self.section_sortby is None: return nts_section # print('SORT GO IDS IN A SECTION') return sorted(nts_section, key=lambda nt: self.section_sortby(nt))
python
def _get_sorted_section(self, nts_section): """Sort GO IDs in each section, if requested by user.""" #pylint: disable=unnecessary-lambda if self.section_sortby is True: return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt)) if self.section_sortby is False or self.section_sortby is None: return nts_section # print('SORT GO IDS IN A SECTION') return sorted(nts_section, key=lambda nt: self.section_sortby(nt))
['def', '_get_sorted_section', '(', 'self', ',', 'nts_section', ')', ':', '#pylint: disable=unnecessary-lambda', 'if', 'self', '.', 'section_sortby', 'is', 'True', ':', 'return', 'sorted', '(', 'nts_section', ',', 'key', '=', 'lambda', 'nt', ':', 'self', '.', 'sortgos', '.', 'usrgo_sortby', '(', 'nt', ')', ')', 'if', 'self', '.', 'section_sortby', 'is', 'False', 'or', 'self', '.', 'section_sortby', 'is', 'None', ':', 'return', 'nts_section', "# print('SORT GO IDS IN A SECTION')", 'return', 'sorted', '(', 'nts_section', ',', 'key', '=', 'lambda', 'nt', ':', 'self', '.', 'section_sortby', '(', 'nt', ')', ')']
Sort GO IDs in each section, if requested by user.
['Sort', 'GO', 'IDs', 'in', 'each', 'section', 'if', 'requested', 'by', 'user', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/sorter_nts.py#L88-L96
2,272
Alignak-monitoring/alignak
alignak/misc/carboniface.py
CarbonIface.add_data
def add_data(self, metric, value, ts=None): """ Add data to queue :param metric: the metric name :type metric: str :param value: the value of data :type value: int :param ts: the timestamp :type ts: int | None :return: True if added successfully, otherwise False :rtype: bool """ if not ts: ts = time.time() if self.__data_lock.acquire(): self.__data.append((metric, (ts, value))) self.__data_lock.release() return True return False
python
def add_data(self, metric, value, ts=None): """ Add data to queue :param metric: the metric name :type metric: str :param value: the value of data :type value: int :param ts: the timestamp :type ts: int | None :return: True if added successfully, otherwise False :rtype: bool """ if not ts: ts = time.time() if self.__data_lock.acquire(): self.__data.append((metric, (ts, value))) self.__data_lock.release() return True return False
['def', 'add_data', '(', 'self', ',', 'metric', ',', 'value', ',', 'ts', '=', 'None', ')', ':', 'if', 'not', 'ts', ':', 'ts', '=', 'time', '.', 'time', '(', ')', 'if', 'self', '.', '__data_lock', '.', 'acquire', '(', ')', ':', 'self', '.', '__data', '.', 'append', '(', '(', 'metric', ',', '(', 'ts', ',', 'value', ')', ')', ')', 'self', '.', '__data_lock', '.', 'release', '(', ')', 'return', 'True', 'return', 'False']
Add data to queue :param metric: the metric name :type metric: str :param value: the value of data :type value: int :param ts: the timestamp :type ts: int | None :return: True if added successfully, otherwise False :rtype: bool
['Add', 'data', 'to', 'queue']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/misc/carboniface.py#L40-L59
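The CarbonIface `add_data` method above appends (metric, (timestamp, value)) tuples to a shared list under a lock, defaulting the timestamp to the current time. A generic, self-contained version of that buffer (not Alignak's class), using the lock as a context manager rather than explicit acquire()/release():

import threading
import time

class MetricBuffer:
    def __init__(self):
        self._lock = threading.Lock()
        self._data = []

    def add(self, metric, value, ts=None):
        """Append (metric, (timestamp, value)), defaulting the timestamp to now."""
        if ts is None:
            ts = time.time()
        with self._lock:
            self._data.append((metric, (ts, value)))
        return True

buf = MetricBuffer()
buf.add("alignak.example.metric", 42)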
2,273
RI-imaging/ODTbrain
examples/example_helper.py
load_zip_data
def load_zip_data(zipname, f_sino_real, f_sino_imag, f_angles=None, f_phantom=None, f_info=None): """Load example sinogram data from a .zip file""" ret = [] with zipfile.ZipFile(str(zipname)) as arc: sino_real = np.loadtxt(arc.open(f_sino_real)) sino_imag = np.loadtxt(arc.open(f_sino_imag)) sino = sino_real + 1j * sino_imag ret.append(sino) if f_angles: angles = np.loadtxt(arc.open(f_angles)) ret.append(angles) if f_phantom: phantom = np.loadtxt(arc.open(f_phantom)) ret.append(phantom) if f_info: with arc.open(f_info) as info: cfg = {} for li in info.readlines(): li = li.decode() if li.count("=") == 1: key, val = li.split("=") cfg[key.strip()] = float(val.strip()) ret.append(cfg) return ret
python
def load_zip_data(zipname, f_sino_real, f_sino_imag, f_angles=None, f_phantom=None, f_info=None): """Load example sinogram data from a .zip file""" ret = [] with zipfile.ZipFile(str(zipname)) as arc: sino_real = np.loadtxt(arc.open(f_sino_real)) sino_imag = np.loadtxt(arc.open(f_sino_imag)) sino = sino_real + 1j * sino_imag ret.append(sino) if f_angles: angles = np.loadtxt(arc.open(f_angles)) ret.append(angles) if f_phantom: phantom = np.loadtxt(arc.open(f_phantom)) ret.append(phantom) if f_info: with arc.open(f_info) as info: cfg = {} for li in info.readlines(): li = li.decode() if li.count("=") == 1: key, val = li.split("=") cfg[key.strip()] = float(val.strip()) ret.append(cfg) return ret
['def', 'load_zip_data', '(', 'zipname', ',', 'f_sino_real', ',', 'f_sino_imag', ',', 'f_angles', '=', 'None', ',', 'f_phantom', '=', 'None', ',', 'f_info', '=', 'None', ')', ':', 'ret', '=', '[', ']', 'with', 'zipfile', '.', 'ZipFile', '(', 'str', '(', 'zipname', ')', ')', 'as', 'arc', ':', 'sino_real', '=', 'np', '.', 'loadtxt', '(', 'arc', '.', 'open', '(', 'f_sino_real', ')', ')', 'sino_imag', '=', 'np', '.', 'loadtxt', '(', 'arc', '.', 'open', '(', 'f_sino_imag', ')', ')', 'sino', '=', 'sino_real', '+', '1j', '*', 'sino_imag', 'ret', '.', 'append', '(', 'sino', ')', 'if', 'f_angles', ':', 'angles', '=', 'np', '.', 'loadtxt', '(', 'arc', '.', 'open', '(', 'f_angles', ')', ')', 'ret', '.', 'append', '(', 'angles', ')', 'if', 'f_phantom', ':', 'phantom', '=', 'np', '.', 'loadtxt', '(', 'arc', '.', 'open', '(', 'f_phantom', ')', ')', 'ret', '.', 'append', '(', 'phantom', ')', 'if', 'f_info', ':', 'with', 'arc', '.', 'open', '(', 'f_info', ')', 'as', 'info', ':', 'cfg', '=', '{', '}', 'for', 'li', 'in', 'info', '.', 'readlines', '(', ')', ':', 'li', '=', 'li', '.', 'decode', '(', ')', 'if', 'li', '.', 'count', '(', '"="', ')', '==', '1', ':', 'key', ',', 'val', '=', 'li', '.', 'split', '(', '"="', ')', 'cfg', '[', 'key', '.', 'strip', '(', ')', ']', '=', 'float', '(', 'val', '.', 'strip', '(', ')', ')', 'ret', '.', 'append', '(', 'cfg', ')', 'return', 'ret']
Load example sinogram data from a .zip file
['Load', 'example', 'sinogram', 'data', 'from', 'a', '.', 'zip', 'file']
train
https://github.com/RI-imaging/ODTbrain/blob/abbab8b790f10c0c7aea8d858d7d60f2fdd7161e/examples/example_helper.py#L119-L143
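The helper above reads sinogram arrays and a "key = value" info file out of a zip archive. To show the same parsing steps without shipping a real data file, the hedged sketch below first builds a tiny in-memory archive; the member names and values are made up.

import io
import zipfile
import numpy as np

# Build a tiny in-memory archive so the parsing pattern can run end to end.
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as arc:
    arc.writestr("sino_real.txt", "1.0 2.0\n3.0 4.0\n")
    arc.writestr("sino_imag.txt", "0.5 0.5\n0.5 0.5\n")
    arc.writestr("info.txt", "wavelength = 500e-9\npixel size = 1e-7\n")

with zipfile.ZipFile(buf) as arc:
    # Complex sinogram from the real and imaginary text members.
    sino = np.loadtxt(arc.open("sino_real.txt")) \
        + 1j * np.loadtxt(arc.open("sino_imag.txt"))
    # "key = value" lines become a config dictionary, as in the helper.
    cfg = {}
    for li in arc.open("info.txt").readlines():
        li = li.decode()
        if li.count("=") == 1:
            key, val = li.split("=")
            cfg[key.strip()] = float(val.strip())

print(sino.shape, cfg)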
2,274
aliyun/aliyun-odps-python-sdk
odps/df/expr/groupby.py
value_counts
def value_counts(expr, sort=True, ascending=False, dropna=False):
    """
    Return object containing counts of unique values.

    The resulting object will be in descending order so that the
    first element is the most frequently-occuring element.
    Exclude NA values by default

    :param expr: sequence
    :param sort: if sort
    :type sort: bool
    :param dropna: Don't include counts of None, default False
    :return: collection with two columns
    :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
    """
    names = [expr.name, 'count']
    typos = [expr.dtype, types.int64]

    return ValueCounts(_input=expr, _schema=Schema.from_lists(names, typos),
                       _sort=sort, _ascending=ascending, _dropna=dropna)
python
def value_counts(expr, sort=True, ascending=False, dropna=False):
    """
    Return object containing counts of unique values.

    The resulting object will be in descending order so that the
    first element is the most frequently-occuring element.
    Exclude NA values by default

    :param expr: sequence
    :param sort: if sort
    :type sort: bool
    :param dropna: Don't include counts of None, default False
    :return: collection with two columns
    :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
    """
    names = [expr.name, 'count']
    typos = [expr.dtype, types.int64]

    return ValueCounts(_input=expr, _schema=Schema.from_lists(names, typos),
                       _sort=sort, _ascending=ascending, _dropna=dropna)
['def', 'value_counts', '(', 'expr', ',', 'sort', '=', 'True', ',', 'ascending', '=', 'False', ',', 'dropna', '=', 'False', ')', ':', 'names', '=', '[', 'expr', '.', 'name', ',', "'count'", ']', 'typos', '=', '[', 'expr', '.', 'dtype', ',', 'types', '.', 'int64', ']', 'return', 'ValueCounts', '(', '_input', '=', 'expr', ',', '_schema', '=', 'Schema', '.', 'from_lists', '(', 'names', ',', 'typos', ')', ',', '_sort', '=', 'sort', ',', '_ascending', '=', 'ascending', ',', '_dropna', '=', 'dropna', ')']
Return object containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occuring element. Exclude NA values by default :param expr: sequence :param sort: if sort :type sort: bool :param dropna: Don't include counts of None, default False :return: collection with two columns :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
['Return', 'object', 'containing', 'counts', 'of', 'unique', 'values', '.']
train
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/groupby.py#L535-L553
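The value_counts above builds a deferred PyODPS expression, so it cannot be run on its own here. As a loose illustration of the documented semantics (count occurrences, optionally drop None, optionally sort by count), the following plain-Python sketch is offered; it is not part of the PyODPS API.

from collections import Counter

def value_counts_sketch(values, sort=True, ascending=False, dropna=False):
    # Count occurrences, optionally ignoring None, optionally sorting by count.
    if dropna:
        values = [v for v in values if v is not None]
    items = list(Counter(values).items())
    if sort:
        items.sort(key=lambda kv: kv[1], reverse=not ascending)
    return items

print(value_counts_sketch(["a", "b", "a", None, "a", "b"]))
# [('a', 3), ('b', 2), (None, 1)]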
2,275
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
trainHMM_fromFile
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):
    '''
    This function trains a HMM model for segmentation-classification using a single annotated audio file
    ARGUMENTS:
     - wav_file:        the path of the audio filename
     - gt_file:         the path of the ground truth filename
                        (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row
     - hmm_model_name:  the name of the HMM model to be stored
     - mt_win:          mid-term window size
     - mt_step:         mid-term window step
    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names:     a list of class_names

    After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
    '''

    [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
    flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)
    [fs, x] = audioBasicIO.readAudioFile(wav_file)
    [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs,
                                       round(fs * 0.050), round(fs * 0.050))
    start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags)
    hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")

    hmm.startprob_ = start_prob
    hmm.transmat_ = transmat
    hmm.means_ = means
    hmm.covars_ = cov

    fo = open(hmm_model_name, "wb")
    cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    fo.close()

    return hmm, class_names
python
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step): ''' This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file ''' [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step) [fs, x] = audioBasicIO.readAudioFile(wav_file) [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * 0.050), round(fs * 0.050)) start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags) hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means hmm.covars_ = cov fo = open(hmm_model_name, "wb") cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() return hmm, class_names
['def', 'trainHMM_fromFile', '(', 'wav_file', ',', 'gt_file', ',', 'hmm_model_name', ',', 'mt_win', ',', 'mt_step', ')', ':', '[', 'seg_start', ',', 'seg_end', ',', 'seg_labs', ']', '=', 'readSegmentGT', '(', 'gt_file', ')', 'flags', ',', 'class_names', '=', 'segs2flags', '(', 'seg_start', ',', 'seg_end', ',', 'seg_labs', ',', 'mt_step', ')', '[', 'fs', ',', 'x', ']', '=', 'audioBasicIO', '.', 'readAudioFile', '(', 'wav_file', ')', '[', 'F', ',', '_', ',', '_', ']', '=', 'aF', '.', 'mtFeatureExtraction', '(', 'x', ',', 'fs', ',', 'mt_win', '*', 'fs', ',', 'mt_step', '*', 'fs', ',', 'round', '(', 'fs', '*', '0.050', ')', ',', 'round', '(', 'fs', '*', '0.050', ')', ')', 'start_prob', ',', 'transmat', ',', 'means', ',', 'cov', '=', 'trainHMM_computeStatistics', '(', 'F', ',', 'flags', ')', 'hmm', '=', 'hmmlearn', '.', 'hmm', '.', 'GaussianHMM', '(', 'start_prob', '.', 'shape', '[', '0', ']', ',', '"diag"', ')', 'hmm', '.', 'startprob_', '=', 'start_prob', 'hmm', '.', 'transmat_', '=', 'transmat', 'hmm', '.', 'means_', '=', 'means', 'hmm', '.', 'covars_', '=', 'cov', 'fo', '=', 'open', '(', 'hmm_model_name', ',', '"wb"', ')', 'cPickle', '.', 'dump', '(', 'hmm', ',', 'fo', ',', 'protocol', '=', 'cPickle', '.', 'HIGHEST_PROTOCOL', ')', 'cPickle', '.', 'dump', '(', 'class_names', ',', 'fo', ',', 'protocol', '=', 'cPickle', '.', 'HIGHEST_PROTOCOL', ')', 'cPickle', '.', 'dump', '(', 'mt_win', ',', 'fo', ',', 'protocol', '=', 'cPickle', '.', 'HIGHEST_PROTOCOL', ')', 'cPickle', '.', 'dump', '(', 'mt_step', ',', 'fo', ',', 'protocol', '=', 'cPickle', '.', 'HIGHEST_PROTOCOL', ')', 'fo', '.', 'close', '(', ')', 'return', 'hmm', ',', 'class_names']
This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
['This', 'function', 'trains', 'a', 'HMM', 'model', 'for', 'segmentation', '-', 'classification', 'using', 'a', 'single', 'annotated', 'audio', 'file', 'ARGUMENTS', ':', '-', 'wav_file', ':', 'the', 'path', 'of', 'the', 'audio', 'filename', '-', 'gt_file', ':', 'the', 'path', 'of', 'the', 'ground', 'truth', 'filename', '(', 'a', 'csv', 'file', 'of', 'the', 'form', '<segment', 'start', 'in', 'seconds', '>', '<segment', 'end', 'in', 'seconds', '>', '<segment', 'label', '>', 'in', 'each', 'row', '-', 'hmm_model_name', ':', 'the', 'name', 'of', 'the', 'HMM', 'model', 'to', 'be', 'stored', '-', 'mt_win', ':', 'mid', '-', 'term', 'window', 'size', '-', 'mt_step', ':', 'mid', '-', 'term', 'window', 'step', 'RETURNS', ':', '-', 'hmm', ':', 'an', 'object', 'to', 'the', 'resulting', 'HMM', '-', 'class_names', ':', 'a', 'list', 'of', 'class_names']
train
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L333-L370
2,276
cuihantao/andes
andes/variables/dae.py
DAE.reset_small
def reset_small(self, eq):
    """Reset numbers smaller than 1e-12 in f and g equations"""
    assert eq in ('f', 'g')
    for idx, var in enumerate(self.__dict__[eq]):
        if abs(var) <= 1e-12:
            self.__dict__[eq][idx] = 0
python
def reset_small(self, eq): """Reset numbers smaller than 1e-12 in f and g equations""" assert eq in ('f', 'g') for idx, var in enumerate(self.__dict__[eq]): if abs(var) <= 1e-12: self.__dict__[eq][idx] = 0
['def', 'reset_small', '(', 'self', ',', 'eq', ')', ':', 'assert', 'eq', 'in', '(', "'f'", ',', "'g'", ')', 'for', 'idx', ',', 'var', 'in', 'enumerate', '(', 'self', '.', '__dict__', '[', 'eq', ']', ')', ':', 'if', 'abs', '(', 'var', ')', '<=', '1e-12', ':', 'self', '.', '__dict__', '[', 'eq', ']', '[', 'idx', ']', '=', '0']
Reset numbers smaller than 1e-12 in f and g equations
['Reset', 'numbers', 'smaller', 'than', '1e', '-', '12', 'in', 'f', 'and', 'g', 'equations']
train
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/dae.py#L700-L705
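The reset_small method above zeroes near-zero entries of the DAE's f or g arrays in place. The same clamping idea on a plain list, for illustration only:

def reset_small_sketch(values, tol=1e-12):
    # Zero out entries whose magnitude is at or below the tolerance.
    return [0 if abs(v) <= tol else v for v in values]

print(reset_small_sketch([1.0, -3e-13, 2.5e-12, 0.0, -7.0]))
# [1.0, 0, 2.5e-12, 0, -7.0]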
2,277
the01/python-floscraper
floscraper/webscraper.py
WebScraper._get_tag_match
def _get_tag_match(self, ele, tree):
    """
    Match tag

    :param ele:
    :type ele:
    :param tree:
    :type tree: None, list
    :return:
    :rtype: None | list
    """
    if tree in [None, []]:
        return [ele]
    res = []
    t = tree[0]
    branch = tree[1:]
    attributes = {}

    for attr in t:
        if isinstance(t[attr], dict):
            if t[attr].get("type", None) == "reg":
                t[attr] = re.compile(t[attr]['reg'])
    attributes.update(t)

    if "name" in attributes:
        del attributes['name']
    if "text" in attributes:
        del attributes['text']
    if "recursive" in attributes:
        del attributes['recursive']
    if "[]" in attributes:
        del attributes['[]']

    possibles = ele.find_all(
        t.get('name', None),
        text=t.get('text', None),
        attrs=attributes,
        recursive=t.get('recursive', True)
    )

    if not possibles:
        return None
    else:
        pass

    if "[]" in t:
        try:
            possibles = eval("possibles[{}]".format(t["[]"]))
        except:
            # no possibles
            return None
        if not isinstance(possibles, list):
            possibles = [possibles]

    for a in possibles:
        match = self._get_tag_match(a, branch)
        if match:
            res.extend(match)

    if not res:
        return None
    else:
        return res
python
def _get_tag_match(self, ele, tree): """ Match tag :param ele: :type ele: :param tree: :type tree: None, list :return: :rtype: None | list """ if tree in [None, []]: return [ele] res = [] t = tree[0] branch = tree[1:] attributes = {} for attr in t: if isinstance(t[attr], dict): if t[attr].get("type", None) == "reg": t[attr] = re.compile(t[attr]['reg']) attributes.update(t) if "name" in attributes: del attributes['name'] if "text" in attributes: del attributes['text'] if "recursive" in attributes: del attributes['recursive'] if "[]" in attributes: del attributes['[]'] possibles = ele.find_all( t.get('name', None), text=t.get('text', None), attrs=attributes, recursive=t.get('recursive', True) ) if not possibles: return None else: pass if "[]" in t: try: possibles = eval("possibles[{}]".format(t["[]"])) except: # no possibles return None if not isinstance(possibles, list): possibles = [possibles] for a in possibles: match = self._get_tag_match(a, branch) if match: res.extend(match) if not res: return None else: return res
['def', '_get_tag_match', '(', 'self', ',', 'ele', ',', 'tree', ')', ':', 'if', 'tree', 'in', '[', 'None', ',', '[', ']', ']', ':', 'return', '[', 'ele', ']', 'res', '=', '[', ']', 't', '=', 'tree', '[', '0', ']', 'branch', '=', 'tree', '[', '1', ':', ']', 'attributes', '=', '{', '}', 'for', 'attr', 'in', 't', ':', 'if', 'isinstance', '(', 't', '[', 'attr', ']', ',', 'dict', ')', ':', 'if', 't', '[', 'attr', ']', '.', 'get', '(', '"type"', ',', 'None', ')', '==', '"reg"', ':', 't', '[', 'attr', ']', '=', 're', '.', 'compile', '(', 't', '[', 'attr', ']', '[', "'reg'", ']', ')', 'attributes', '.', 'update', '(', 't', ')', 'if', '"name"', 'in', 'attributes', ':', 'del', 'attributes', '[', "'name'", ']', 'if', '"text"', 'in', 'attributes', ':', 'del', 'attributes', '[', "'text'", ']', 'if', '"recursive"', 'in', 'attributes', ':', 'del', 'attributes', '[', "'recursive'", ']', 'if', '"[]"', 'in', 'attributes', ':', 'del', 'attributes', '[', "'[]'", ']', 'possibles', '=', 'ele', '.', 'find_all', '(', 't', '.', 'get', '(', "'name'", ',', 'None', ')', ',', 'text', '=', 't', '.', 'get', '(', "'text'", ',', 'None', ')', ',', 'attrs', '=', 'attributes', ',', 'recursive', '=', 't', '.', 'get', '(', "'recursive'", ',', 'True', ')', ')', 'if', 'not', 'possibles', ':', 'return', 'None', 'else', ':', 'pass', 'if', '"[]"', 'in', 't', ':', 'try', ':', 'possibles', '=', 'eval', '(', '"possibles[{}]"', '.', 'format', '(', 't', '[', '"[]"', ']', ')', ')', 'except', ':', '# no possibles', 'return', 'None', 'if', 'not', 'isinstance', '(', 'possibles', ',', 'list', ')', ':', 'possibles', '=', '[', 'possibles', ']', 'for', 'a', 'in', 'possibles', ':', 'match', '=', 'self', '.', '_get_tag_match', '(', 'a', ',', 'branch', ')', 'if', 'match', ':', 'res', '.', 'extend', '(', 'match', ')', 'if', 'not', 'res', ':', 'return', 'None', 'else', ':', 'return', 'res']
Match tag :param ele: :type ele: :param tree: :type tree: None, list :return: :rtype: None | list
['Match', 'tag']
train
https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/webscraper.py#L388-L454
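The _get_tag_match method above walks a list of tag descriptions, narrowing BeautifulSoup matches level by level. The hedged sketch below replays that idea directly with bs4, without the floscraper-specific "[]" indexing or regex handling; the HTML snippet and attribute names are invented.

from bs4 import BeautifulSoup

html = "<div class='post'><span class='title'>hello</span></div>"
soup = BeautifulSoup(html, "html.parser")

# Two-level "tree": first find the matching div, then the span inside it.
tree = [{"name": "div", "class": "post"}, {"name": "span", "class": "title"}]
matches = [soup]
for level in tree:
    attrs = {k: v for k, v in level.items() if k != "name"}
    matches = [hit for node in matches
               for hit in node.find_all(level.get("name"), attrs=attrs)]

print([m.get_text() for m in matches])  # ['hello']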
2,278
incf-nidash/nidmresults
nidmresults/objects/generic.py
NeuroimagingSoftware.export
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.
    """
    if nidm_version['major'] < 1 or \
            (nidm_version['major'] == 1 and nidm_version['minor'] < 3):
        self.type = NLX_OLD_FSL

    atts = (
        (PROV['type'], self.type),
        (PROV['type'], PROV['SoftwareAgent']),
        (PROV['label'], Literal(self.label, datatype=XSD_STRING)),
        (NIDM_SOFTWARE_VERSION, self.version)
    )

    if self.feat_version:
        atts = atts + ((FSL_FEAT_VERSION, self.feat_version),)

    self.add_attributes(atts)
python
def export(self, nidm_version, export_dir): """ Create prov entities and activities. """ if nidm_version['major'] < 1 or \ (nidm_version['major'] == 1 and nidm_version['minor'] < 3): self.type = NLX_OLD_FSL atts = ( (PROV['type'], self.type), (PROV['type'], PROV['SoftwareAgent']), (PROV['label'], Literal(self.label, datatype=XSD_STRING)), (NIDM_SOFTWARE_VERSION, self.version) ) if self.feat_version: atts = atts + ((FSL_FEAT_VERSION, self.feat_version),) self.add_attributes(atts)
['def', 'export', '(', 'self', ',', 'nidm_version', ',', 'export_dir', ')', ':', 'if', 'nidm_version', '[', "'major'", ']', '<', '1', 'or', '(', 'nidm_version', '[', "'major'", ']', '==', '1', 'and', 'nidm_version', '[', "'minor'", ']', '<', '3', ')', ':', 'self', '.', 'type', '=', 'NLX_OLD_FSL', 'atts', '=', '(', '(', 'PROV', '[', "'type'", ']', ',', 'self', '.', 'type', ')', ',', '(', 'PROV', '[', "'type'", ']', ',', 'PROV', '[', "'SoftwareAgent'", ']', ')', ',', '(', 'PROV', '[', "'label'", ']', ',', 'Literal', '(', 'self', '.', 'label', ',', 'datatype', '=', 'XSD_STRING', ')', ')', ',', '(', 'NIDM_SOFTWARE_VERSION', ',', 'self', '.', 'version', ')', ')', 'if', 'self', '.', 'feat_version', ':', 'atts', '=', 'atts', '+', '(', '(', 'FSL_FEAT_VERSION', ',', 'self', '.', 'feat_version', ')', ',', ')', 'self', '.', 'add_attributes', '(', 'atts', ')']
Create prov entities and activities.
['Create', 'prov', 'entities', 'and', 'activities', '.']
train
https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/objects/generic.py#L454-L472
2,279
SHTOOLS/SHTOOLS
pyshtools/shclasses/shwindow.py
SHWindowCap._multitaper_cross_spectrum
def _multitaper_cross_spectrum(self, clm, slm, k, convention='power',
                               unit='per_l', clat=None, clon=None,
                               coord_degrees=True, lmax=None, taper_wt=None):
    """
    Return the multitaper cross-spectrum estimate and standard error for
    two input SHCoeffs class instances.
    """
    if lmax is None:
        lmax = min(clm.lmax, slm.lmax)

    if (clat is not None and clon is not None and clat == self.clat and
            clon == self.clon and coord_degrees is self.coord_degrees and
            k <= self.nwinrot):
        # use the already stored coeffs
        pass
    elif (clat is None and clon is None) and \
            (self.clat is not None and self.clon is not None and
             k <= self.nwinrot):
        # use the already stored coeffs
        pass
    else:
        if clat is None:
            clat = self.clat
        if clon is None:
            clon = self.clon
        if (clat is None and clon is not None) or \
                (clat is not None and clon is None):
            raise ValueError('clat and clon must both be input. ' +
                             'clat = {:s}, clon = {:s}'
                             .format(repr(clat), repr(clon)))
        if clat is None and clon is None:
            self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k)
        else:
            self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees,
                        nwinrot=k)

    sh1 = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
    sh2 = slm.to_array(normalization='4pi', csphase=1, lmax=lmax)

    if taper_wt is None:
        mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                lmax1=lmax, lmax2=lmax, k=k)
    else:
        mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                lmax1=lmax, lmax2=lmax, k=k,
                                                taper_wt=taper_wt)

    if (unit == 'per_l'):
        pass
    elif (unit == 'per_lm'):
        degree_l = _np.arange(len(mtse))
        mtse /= (2.0 * degree_l + 1.0)
        sd /= (2.0 * degree_l + 1.0)
    else:
        raise ValueError(
            "unit must be 'per_l' or 'per_lm'." +
            "Input value was {:s}".format(repr(unit)))

    if (convention == 'power'):
        return mtse, sd
    elif (convention == 'energy'):
        return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
    else:
        raise ValueError(
            "convention must be 'power' or 'energy'." +
            "Input value was {:s}".format(repr(convention)))
python
def _multitaper_cross_spectrum(self, clm, slm, k, convention='power', unit='per_l', clat=None, clon=None, coord_degrees=True, lmax=None, taper_wt=None): """ Return the multitaper cross-spectrum estimate and standard error for two input SHCoeffs class instances. """ if lmax is None: lmax = min(clm.lmax, slm.lmax) if (clat is not None and clon is not None and clat == self.clat and clon == self.clon and coord_degrees is self.coord_degrees and k <= self.nwinrot): # use the already stored coeffs pass elif (clat is None and clon is None) and \ (self.clat is not None and self.clon is not None and k <= self.nwinrot): # use the already stored coeffs pass else: if clat is None: clat = self.clat if clon is None: clon = self.clon if (clat is None and clon is not None) or \ (clat is not None and clon is None): raise ValueError('clat and clon must both be input. ' + 'clat = {:s}, clon = {:s}' .format(repr(clat), repr(clon))) if clat is None and clon is None: self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k) else: self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees, nwinrot=k) sh1 = clm.to_array(normalization='4pi', csphase=1, lmax=lmax) sh2 = slm.to_array(normalization='4pi', csphase=1, lmax=lmax) if taper_wt is None: mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs, lmax1=lmax, lmax2=lmax, k=k) else: mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs, lmax1=lmax, lmax2=lmax, k=k, taper_wt=taper_wt) if (unit == 'per_l'): pass elif (unit == 'per_lm'): degree_l = _np.arange(len(mtse)) mtse /= (2.0 * degree_l + 1.0) sd /= (2.0 * degree_l + 1.0) else: raise ValueError( "unit must be 'per_l' or 'per_lm'." + "Input value was {:s}".format(repr(unit))) if (convention == 'power'): return mtse, sd elif (convention == 'energy'): return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi else: raise ValueError( "convention must be 'power' or 'energy'." + "Input value was {:s}".format(repr(convention)))
['def', '_multitaper_cross_spectrum', '(', 'self', ',', 'clm', ',', 'slm', ',', 'k', ',', 'convention', '=', "'power'", ',', 'unit', '=', "'per_l'", ',', 'clat', '=', 'None', ',', 'clon', '=', 'None', ',', 'coord_degrees', '=', 'True', ',', 'lmax', '=', 'None', ',', 'taper_wt', '=', 'None', ')', ':', 'if', 'lmax', 'is', 'None', ':', 'lmax', '=', 'min', '(', 'clm', '.', 'lmax', ',', 'slm', '.', 'lmax', ')', 'if', '(', 'clat', 'is', 'not', 'None', 'and', 'clon', 'is', 'not', 'None', 'and', 'clat', '==', 'self', '.', 'clat', 'and', 'clon', '==', 'self', '.', 'clon', 'and', 'coord_degrees', 'is', 'self', '.', 'coord_degrees', 'and', 'k', '<=', 'self', '.', 'nwinrot', ')', ':', '# use the already stored coeffs', 'pass', 'elif', '(', 'clat', 'is', 'None', 'and', 'clon', 'is', 'None', ')', 'and', '(', 'self', '.', 'clat', 'is', 'not', 'None', 'and', 'self', '.', 'clon', 'is', 'not', 'None', 'and', 'k', '<=', 'self', '.', 'nwinrot', ')', ':', '# use the already stored coeffs', 'pass', 'else', ':', 'if', 'clat', 'is', 'None', ':', 'clat', '=', 'self', '.', 'clat', 'if', 'clon', 'is', 'None', ':', 'clon', '=', 'self', '.', 'clon', 'if', '(', 'clat', 'is', 'None', 'and', 'clon', 'is', 'not', 'None', ')', 'or', '(', 'clat', 'is', 'not', 'None', 'and', 'clon', 'is', 'None', ')', ':', 'raise', 'ValueError', '(', "'clat and clon must both be input. '", '+', "'clat = {:s}, clon = {:s}'", '.', 'format', '(', 'repr', '(', 'clat', ')', ',', 'repr', '(', 'clon', ')', ')', ')', 'if', 'clat', 'is', 'None', 'and', 'clon', 'is', 'None', ':', 'self', '.', 'rotate', '(', 'clat', '=', '90.', ',', 'clon', '=', '0.', ',', 'coord_degrees', '=', 'True', ',', 'nwinrot', '=', 'k', ')', 'else', ':', 'self', '.', 'rotate', '(', 'clat', '=', 'clat', ',', 'clon', '=', 'clon', ',', 'coord_degrees', '=', 'coord_degrees', ',', 'nwinrot', '=', 'k', ')', 'sh1', '=', 'clm', '.', 'to_array', '(', 'normalization', '=', "'4pi'", ',', 'csphase', '=', '1', ',', 'lmax', '=', 'lmax', ')', 'sh2', '=', 'slm', '.', 'to_array', '(', 'normalization', '=', "'4pi'", ',', 'csphase', '=', '1', ',', 'lmax', '=', 'lmax', ')', 'if', 'taper_wt', 'is', 'None', ':', 'mtse', ',', 'sd', '=', '_shtools', '.', 'SHMultiTaperMaskCSE', '(', 'sh1', ',', 'sh2', ',', 'self', '.', 'coeffs', ',', 'lmax1', '=', 'lmax', ',', 'lmax2', '=', 'lmax', ',', 'k', '=', 'k', ')', 'else', ':', 'mtse', ',', 'sd', '=', '_shtools', '.', 'SHMultiTaperMaskCSE', '(', 'sh1', ',', 'sh2', ',', 'self', '.', 'coeffs', ',', 'lmax1', '=', 'lmax', ',', 'lmax2', '=', 'lmax', ',', 'k', '=', 'k', ',', 'taper_wt', '=', 'taper_wt', ')', 'if', '(', 'unit', '==', "'per_l'", ')', ':', 'pass', 'elif', '(', 'unit', '==', "'per_lm'", ')', ':', 'degree_l', '=', '_np', '.', 'arange', '(', 'len', '(', 'mtse', ')', ')', 'mtse', '/=', '(', '2.0', '*', 'degree_l', '+', '1.0', ')', 'sd', '/=', '(', '2.0', '*', 'degree_l', '+', '1.0', ')', 'else', ':', 'raise', 'ValueError', '(', '"unit must be \'per_l\' or \'per_lm\'."', '+', '"Input value was {:s}"', '.', 'format', '(', 'repr', '(', 'unit', ')', ')', ')', 'if', '(', 'convention', '==', "'power'", ')', ':', 'return', 'mtse', ',', 'sd', 'elif', '(', 'convention', '==', "'energy'", ')', ':', 'return', 'mtse', '*', '4.0', '*', '_np', '.', 'pi', ',', 'sd', '*', '4.0', '*', '_np', '.', 'pi', 'else', ':', 'raise', 'ValueError', '(', '"convention must be \'power\' or \'energy\'."', '+', '"Input value was {:s}"', '.', 'format', '(', 'repr', '(', 'convention', ')', ')', ')']
Return the multitaper cross-spectrum estimate and standard error for two input SHCoeffs class instances.
['Return', 'the', 'multitaper', 'cross', '-', 'spectrum', 'estimate', 'and', 'standard', 'error', 'for', 'two', 'input', 'SHCoeffs', 'class', 'instances', '.']
train
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shwindow.py#L1343-L1410
2,280
RPi-Distro/python-gpiozero
gpiozero/tools.py
post_periodic_filtered
def post_periodic_filtered(values, repeat_after, block):
    """
    After every *repeat_after* items, blocks the next *block* items from
    *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
    can't be 0. For example, to block every tenth item read from an ADC::

        from gpiozero import MCP3008
        from gpiozero.tools import post_periodic_filtered

        adc = MCP3008(channel=0)

        for value in post_periodic_filtered(adc, 9, 1):
            print(value)
    """
    values = _normalize(values)
    if repeat_after < 1:
        raise ValueError("repeat_after must be 1 or larger")
    if block < 1:
        raise ValueError("block must be 1 or larger")
    it = iter(values)
    try:
        while True:
            for _ in range(repeat_after):
                yield next(it)
            for _ in range(block):
                next(it)
    except StopIteration:
        pass
python
def post_periodic_filtered(values, repeat_after, block): """ After every *repeat_after* items, blocks the next *block* items from *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after* can't be 0. For example, to block every tenth item read from an ADC:: from gpiozero import MCP3008 from gpiozero.tools import post_periodic_filtered adc = MCP3008(channel=0) for value in post_periodic_filtered(adc, 9, 1): print(value) """ values = _normalize(values) if repeat_after < 1: raise ValueError("repeat_after must be 1 or larger") if block < 1: raise ValueError("block must be 1 or larger") it = iter(values) try: while True: for _ in range(repeat_after): yield next(it) for _ in range(block): next(it) except StopIteration: pass
['def', 'post_periodic_filtered', '(', 'values', ',', 'repeat_after', ',', 'block', ')', ':', 'values', '=', '_normalize', '(', 'values', ')', 'if', 'repeat_after', '<', '1', ':', 'raise', 'ValueError', '(', '"repeat_after must be 1 or larger"', ')', 'if', 'block', '<', '1', ':', 'raise', 'ValueError', '(', '"block must be 1 or larger"', ')', 'it', '=', 'iter', '(', 'values', ')', 'try', ':', 'while', 'True', ':', 'for', '_', 'in', 'range', '(', 'repeat_after', ')', ':', 'yield', 'next', '(', 'it', ')', 'for', '_', 'in', 'range', '(', 'block', ')', ':', 'next', '(', 'it', ')', 'except', 'StopIteration', ':', 'pass']
After every *repeat_after* items, blocks the next *block* items from *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after* can't be 0. For example, to block every tenth item read from an ADC:: from gpiozero import MCP3008 from gpiozero.tools import post_periodic_filtered adc = MCP3008(channel=0) for value in post_periodic_filtered(adc, 9, 1): print(value)
['After', 'every', '*', 'repeat_after', '*', 'items', 'blocks', 'the', 'next', '*', 'block', '*', 'items', 'from', '*', 'values', '*', '.', 'Note', 'that', 'unlike', ':', 'func', ':', 'pre_periodic_filtered', '*', 'repeat_after', '*', 'can', 't', 'be', '0', '.', 'For', 'example', 'to', 'block', 'every', 'tenth', 'item', 'read', 'from', 'an', 'ADC', '::']
train
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/tools.py#L574-L601
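The docstring above already shows ADC usage; to make the skip pattern visible without hardware, here is a standalone restatement of the generator logic run on a plain range. It mirrors the documented behaviour but is not the gpiozero implementation itself, which first normalizes its input.

def post_periodic_filtered_sketch(values, repeat_after, block):
    # Yield repeat_after items, then silently skip the next block items.
    it = iter(values)
    try:
        while True:
            for _ in range(repeat_after):
                yield next(it)
            for _ in range(block):
                next(it)
    except StopIteration:
        pass

print(list(post_periodic_filtered_sketch(range(12), repeat_after=3, block=1)))
# [0, 1, 2, 4, 5, 6, 8, 9, 10]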
2,281
thombashi/SimpleSQLite
simplesqlite/core.py
SimpleSQLite.has_attr
def has_attr(self, table_name, attr_name):
    """
    :param str table_name: Table name that the attribute exists.
    :param str attr_name: Attribute name to be tested.
    :return: |True| if the table has the attribute.
    :rtype: bool
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|

    :Sample Code:
        .. code:: python

            import simplesqlite

            table_name = "sample_table"
            con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
            con.create_table_from_data_matrix(
                table_name,
                ["attr_a", "attr_b"],
                [[1, "a"], [2, "b"]])

            print(con.has_attr(table_name, "attr_a"))
            print(con.has_attr(table_name, "not_existing"))

            try:
                print(con.has_attr("not_existing", "attr_a"))
            except simplesqlite.TableNotFoundError as e:
                print(e)
    :Output:
        .. parsed-literal::

            True
            False
            'not_existing' table not found in /tmp/sample.sqlite
    """

    self.verify_table_existence(table_name)

    if typepy.is_null_string(attr_name):
        return False

    return attr_name in self.fetch_attr_names(table_name)
python
def has_attr(self, table_name, attr_name): """ :param str table_name: Table name that the attribute exists. :param str attr_name: Attribute name to be tested. :return: |True| if the table has the attribute. :rtype: bool :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_attr(table_name, "attr_a")) print(con.has_attr(table_name, "not_existing")) try: print(con.has_attr("not_existing", "attr_a")) except simplesqlite.TableNotFoundError as e: print(e) :Output: .. parsed-literal:: True False 'not_existing' table not found in /tmp/sample.sqlite """ self.verify_table_existence(table_name) if typepy.is_null_string(attr_name): return False return attr_name in self.fetch_attr_names(table_name)
['def', 'has_attr', '(', 'self', ',', 'table_name', ',', 'attr_name', ')', ':', 'self', '.', 'verify_table_existence', '(', 'table_name', ')', 'if', 'typepy', '.', 'is_null_string', '(', 'attr_name', ')', ':', 'return', 'False', 'return', 'attr_name', 'in', 'self', '.', 'fetch_attr_names', '(', 'table_name', ')']
:param str table_name: Table name that the attribute exists. :param str attr_name: Attribute name to be tested. :return: |True| if the table has the attribute. :rtype: bool :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_attr(table_name, "attr_a")) print(con.has_attr(table_name, "not_existing")) try: print(con.has_attr("not_existing", "attr_a")) except simplesqlite.TableNotFoundError as e: print(e) :Output: .. parsed-literal:: True False 'not_existing' table not found in /tmp/sample.sqlite
[':', 'param', 'str', 'table_name', ':', 'Table', 'name', 'that', 'the', 'attribute', 'exists', '.', ':', 'param', 'str', 'attr_name', ':', 'Attribute', 'name', 'to', 'be', 'tested', '.', ':', 'return', ':', '|True|', 'if', 'the', 'table', 'has', 'the', 'attribute', '.', ':', 'rtype', ':', 'bool', ':', 'raises', 'simplesqlite', '.', 'TableNotFoundError', ':', '|raises_verify_table_existence|']
train
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L955-L995
2,282
gabrielelanaro/chemview
chemview/render.py
render_povray
def render_povray(scene, filename='ipython', width=600, height=600,
                  antialiasing=0.01, extra_opts={}):
    '''Render the scene with povray for publication.

    :param dict scene: The scene to render
    :param string filename: Output filename or 'ipython' to render in the notebook.
    :param int width: Width in pixels.
    :param int height: Height in pixels.
    :param dict extra_opts: Dictionary to merge/override with the passed scene.
    '''
    if not vapory_available:
        raise Exception("To render with povray, you need to have the vapory"
                        " package installed.")

    # Adding extra options
    scene = normalize_scene(scene)
    scene.update(extra_opts)

    # Camera target
    aspect = scene['camera']['aspect']
    up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
    v_fov = scene['camera']['vfov'] / 180.0 * np.pi
    h_fov = 2.0 * np.arctan(np.tan(v_fov/2.0) * aspect) / np.pi * 180

    # Setup camera position
    camera = vp.Camera(
        'location', scene['camera']['location'],
        'direction', [0, 0, -1],
        'sky', up,
        'look_at', scene['camera']['target'],
        'angle', h_fov
    )

    global_settings = []

    # Setup global illumination
    if scene.get('radiosity', False):
        # Global Illumination
        radiosity = vp.Radiosity(
            'brightness', 2.0,
            'count', 100,
            'error_bound', 0.15,
            'gray_threshold', 0.0,
            'low_error_factor', 0.2,
            'minimum_reuse', 0.015,
            'nearest_count', 10,
            'recursion_limit', 1,  #Docs say 1 is enough
            'adc_bailout', 0.01,
            'max_sample', 0.5,
            'media off',
            'normal off',
            'always_sample', 1,
            'pretrace_start', 0.08,
            'pretrace_end', 0.01)

        light_sources = []
        global_settings.append(radiosity)
    else:
        # Lights
        light_sources = [
            vp.LightSource( np.array([2,4,-3]) * 1000, 'color', [1,1,1] ),
            vp.LightSource( np.array([-2,-4,3]) * 1000, 'color', [1,1,1] ),
            vp.LightSource( np.array([-1,2,3]) * 1000, 'color', [1,1,1] ),
            vp.LightSource( np.array([1,-2,-3]) * 1000, 'color', [1,1,1] )
        ]

    # Background -- white for now
    background = vp.Background([1, 1, 1])

    # Things to display
    stuff = _generate_objects(scene['representations'])

    scene = vp.Scene( camera,
                      objects = light_sources + stuff + [background],
                      global_settings=global_settings)

    return scene.render(filename, width=width, height=height,
                        antialiasing = antialiasing)
python
def render_povray(scene, filename='ipython', width=600, height=600, antialiasing=0.01, extra_opts={}): '''Render the scene with povray for publication. :param dict scene: The scene to render :param string filename: Output filename or 'ipython' to render in the notebook. :param int width: Width in pixels. :param int height: Height in pixels. :param dict extra_opts: Dictionary to merge/override with the passed scene. ''' if not vapory_available: raise Exception("To render with povray, you need to have the vapory" " package installed.") # Adding extra options scene = normalize_scene(scene) scene.update(extra_opts) # Camera target aspect = scene['camera']['aspect'] up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0]) v_fov = scene['camera']['vfov'] / 180.0 * np.pi h_fov = 2.0 * np.arctan(np.tan(v_fov/2.0) * aspect) / np.pi * 180 # Setup camera position camera = vp.Camera( 'location', scene['camera']['location'], 'direction', [0, 0, -1], 'sky', up, 'look_at', scene['camera']['target'], 'angle', h_fov ) global_settings = [] # Setup global illumination if scene.get('radiosity', False): # Global Illumination radiosity = vp.Radiosity( 'brightness', 2.0, 'count', 100, 'error_bound', 0.15, 'gray_threshold', 0.0, 'low_error_factor', 0.2, 'minimum_reuse', 0.015, 'nearest_count', 10, 'recursion_limit', 1, #Docs say 1 is enough 'adc_bailout', 0.01, 'max_sample', 0.5, 'media off', 'normal off', 'always_sample', 1, 'pretrace_start', 0.08, 'pretrace_end', 0.01) light_sources = [] global_settings.append(radiosity) else: # Lights light_sources = [ vp.LightSource( np.array([2,4,-3]) * 1000, 'color', [1,1,1] ), vp.LightSource( np.array([-2,-4,3]) * 1000, 'color', [1,1,1] ), vp.LightSource( np.array([-1,2,3]) * 1000, 'color', [1,1,1] ), vp.LightSource( np.array([1,-2,-3]) * 1000, 'color', [1,1,1] ) ] # Background -- white for now background = vp.Background([1, 1, 1]) # Things to display stuff = _generate_objects(scene['representations']) scene = vp.Scene( camera, objects = light_sources + stuff + [background], global_settings=global_settings) return scene.render(filename, width=width, height=height, antialiasing = antialiasing)
['def', 'render_povray', '(', 'scene', ',', 'filename', '=', "'ipython'", ',', 'width', '=', '600', ',', 'height', '=', '600', ',', 'antialiasing', '=', '0.01', ',', 'extra_opts', '=', '{', '}', ')', ':', 'if', 'not', 'vapory_available', ':', 'raise', 'Exception', '(', '"To render with povray, you need to have the vapory"', '" package installed."', ')', '# Adding extra options', 'scene', '=', 'normalize_scene', '(', 'scene', ')', 'scene', '.', 'update', '(', 'extra_opts', ')', '# Camera target', 'aspect', '=', 'scene', '[', "'camera'", ']', '[', "'aspect'", ']', 'up', '=', 'np', '.', 'dot', '(', 'rmatrixquaternion', '(', 'scene', '[', "'camera'", ']', '[', "'quaternion'", ']', ')', ',', '[', '0', ',', '1', ',', '0', ']', ')', 'v_fov', '=', 'scene', '[', "'camera'", ']', '[', "'vfov'", ']', '/', '180.0', '*', 'np', '.', 'pi', 'h_fov', '=', '2.0', '*', 'np', '.', 'arctan', '(', 'np', '.', 'tan', '(', 'v_fov', '/', '2.0', ')', '*', 'aspect', ')', '/', 'np', '.', 'pi', '*', '180', '# Setup camera position', 'camera', '=', 'vp', '.', 'Camera', '(', "'location'", ',', 'scene', '[', "'camera'", ']', '[', "'location'", ']', ',', "'direction'", ',', '[', '0', ',', '0', ',', '-', '1', ']', ',', "'sky'", ',', 'up', ',', "'look_at'", ',', 'scene', '[', "'camera'", ']', '[', "'target'", ']', ',', "'angle'", ',', 'h_fov', ')', 'global_settings', '=', '[', ']', '# Setup global illumination', 'if', 'scene', '.', 'get', '(', "'radiosity'", ',', 'False', ')', ':', '# Global Illumination', 'radiosity', '=', 'vp', '.', 'Radiosity', '(', "'brightness'", ',', '2.0', ',', "'count'", ',', '100', ',', "'error_bound'", ',', '0.15', ',', "'gray_threshold'", ',', '0.0', ',', "'low_error_factor'", ',', '0.2', ',', "'minimum_reuse'", ',', '0.015', ',', "'nearest_count'", ',', '10', ',', "'recursion_limit'", ',', '1', ',', '#Docs say 1 is enough', "'adc_bailout'", ',', '0.01', ',', "'max_sample'", ',', '0.5', ',', "'media off'", ',', "'normal off'", ',', "'always_sample'", ',', '1', ',', "'pretrace_start'", ',', '0.08', ',', "'pretrace_end'", ',', '0.01', ')', 'light_sources', '=', '[', ']', 'global_settings', '.', 'append', '(', 'radiosity', ')', 'else', ':', '# Lights', 'light_sources', '=', '[', 'vp', '.', 'LightSource', '(', 'np', '.', 'array', '(', '[', '2', ',', '4', ',', '-', '3', ']', ')', '*', '1000', ',', "'color'", ',', '[', '1', ',', '1', ',', '1', ']', ')', ',', 'vp', '.', 'LightSource', '(', 'np', '.', 'array', '(', '[', '-', '2', ',', '-', '4', ',', '3', ']', ')', '*', '1000', ',', "'color'", ',', '[', '1', ',', '1', ',', '1', ']', ')', ',', 'vp', '.', 'LightSource', '(', 'np', '.', 'array', '(', '[', '-', '1', ',', '2', ',', '3', ']', ')', '*', '1000', ',', "'color'", ',', '[', '1', ',', '1', ',', '1', ']', ')', ',', 'vp', '.', 'LightSource', '(', 'np', '.', 'array', '(', '[', '1', ',', '-', '2', ',', '-', '3', ']', ')', '*', '1000', ',', "'color'", ',', '[', '1', ',', '1', ',', '1', ']', ')', ']', '# Background -- white for now', 'background', '=', 'vp', '.', 'Background', '(', '[', '1', ',', '1', ',', '1', ']', ')', '# Things to display', 'stuff', '=', '_generate_objects', '(', 'scene', '[', "'representations'", ']', ')', 'scene', '=', 'vp', '.', 'Scene', '(', 'camera', ',', 'objects', '=', 'light_sources', '+', 'stuff', '+', '[', 'background', ']', ',', 'global_settings', '=', 'global_settings', ')', 'return', 'scene', '.', 'render', '(', 'filename', ',', 'width', '=', 'width', ',', 'height', '=', 'height', ',', 'antialiasing', '=', 'antialiasing', ')']
Render the scene with povray for publication. :param dict scene: The scene to render :param string filename: Output filename or 'ipython' to render in the notebook. :param int width: Width in pixels. :param int height: Height in pixels. :param dict extra_opts: Dictionary to merge/override with the passed scene.
['Render', 'the', 'scene', 'with', 'povray', 'for', 'publication', '.']
train
https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/render.py#L21-L93
2,283
openstack/horizon
openstack_dashboard/api/neutron.py
subnetpool_create
def subnetpool_create(request, name, prefixes, **kwargs):
    """Create a subnetpool.

    ip_version is auto-detected in back-end.

    Parameters:
    request -- Request context
    name -- Name for subnetpool
    prefixes -- List of prefixes for pool

    Keyword Arguments (optional):
    min_prefixlen -- Minimum prefix length for allocations from pool
    max_prefixlen -- Maximum prefix length for allocations from pool
    default_prefixlen -- Default prefix length for allocations from pool
    default_quota -- Default quota for allocations from pool
    shared -- Subnetpool should be shared (Admin-only)
    tenant_id -- Owner of subnetpool

    Returns:
    SubnetPool object
    """
    LOG.debug("subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, "
              "kwargs=%(kwargs)s",
              {'name': name, 'prefixes': prefixes, 'kwargs': kwargs})
    body = {'subnetpool':
            {'name': name,
             'prefixes': prefixes,
             }
            }
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['subnetpool'].update(kwargs)
    subnetpool = \
        neutronclient(request).create_subnetpool(body=body).get('subnetpool')
    return SubnetPool(subnetpool)
python
def subnetpool_create(request, name, prefixes, **kwargs): """Create a subnetpool. ip_version is auto-detected in back-end. Parameters: request -- Request context name -- Name for subnetpool prefixes -- List of prefixes for pool Keyword Arguments (optional): min_prefixlen -- Minimum prefix length for allocations from pool max_prefixlen -- Maximum prefix length for allocations from pool default_prefixlen -- Default prefix length for allocations from pool default_quota -- Default quota for allocations from pool shared -- Subnetpool should be shared (Admin-only) tenant_id -- Owner of subnetpool Returns: SubnetPool object """ LOG.debug("subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, " "kwargs=%(kwargs)s", {'name': name, 'prefixes': prefixes, 'kwargs': kwargs}) body = {'subnetpool': {'name': name, 'prefixes': prefixes, } } if 'tenant_id' not in kwargs: kwargs['tenant_id'] = request.user.project_id body['subnetpool'].update(kwargs) subnetpool = \ neutronclient(request).create_subnetpool(body=body).get('subnetpool') return SubnetPool(subnetpool)
['def', 'subnetpool_create', '(', 'request', ',', 'name', ',', 'prefixes', ',', '*', '*', 'kwargs', ')', ':', 'LOG', '.', 'debug', '(', '"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, "', '"kwargs=%(kwargs)s"', ',', '{', "'name'", ':', 'name', ',', "'prefixes'", ':', 'prefixes', ',', "'kwargs'", ':', 'kwargs', '}', ')', 'body', '=', '{', "'subnetpool'", ':', '{', "'name'", ':', 'name', ',', "'prefixes'", ':', 'prefixes', ',', '}', '}', 'if', "'tenant_id'", 'not', 'in', 'kwargs', ':', 'kwargs', '[', "'tenant_id'", ']', '=', 'request', '.', 'user', '.', 'project_id', 'body', '[', "'subnetpool'", ']', '.', 'update', '(', 'kwargs', ')', 'subnetpool', '=', 'neutronclient', '(', 'request', ')', '.', 'create_subnetpool', '(', 'body', '=', 'body', ')', '.', 'get', '(', "'subnetpool'", ')', 'return', 'SubnetPool', '(', 'subnetpool', ')']
Create a subnetpool. ip_version is auto-detected in back-end. Parameters: request -- Request context name -- Name for subnetpool prefixes -- List of prefixes for pool Keyword Arguments (optional): min_prefixlen -- Minimum prefix length for allocations from pool max_prefixlen -- Maximum prefix length for allocations from pool default_prefixlen -- Default prefix length for allocations from pool default_quota -- Default quota for allocations from pool shared -- Subnetpool should be shared (Admin-only) tenant_id -- Owner of subnetpool Returns: SubnetPool object
['Create', 'a', 'subnetpool', '.']
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L1243-L1277
2,284
django-auth-ldap/django-auth-ldap
django_auth_ldap/backend.py
_LDAPUser.populate_user
def populate_user(self):
    """
    Populates the Django user object using the default bind credentials.
    """
    user = None
    try:
        # self.attrs will only be non-None if we were able to load this user
        # from the LDAP directory, so this filters out nonexistent users.
        if self.attrs is not None:
            self._get_or_create_user(force_populate=True)
            user = self._user
    except ldap.LDAPError as e:
        results = ldap_error.send(
            self.backend.__class__,
            context="populate_user",
            user=self._user,
            exception=e,
        )
        if len(results) == 0:
            logger.warning(
                "Caught LDAPError while authenticating {}: {}".format(
                    self._username, pprint.pformat(e)
                )
            )
    except Exception as e:
        logger.warning("{} while authenticating {}".format(e, self._username))
        raise
    return user
python
def populate_user(self): """ Populates the Django user object using the default bind credentials. """ user = None try: # self.attrs will only be non-None if we were able to load this user # from the LDAP directory, so this filters out nonexistent users. if self.attrs is not None: self._get_or_create_user(force_populate=True) user = self._user except ldap.LDAPError as e: results = ldap_error.send( self.backend.__class__, context="populate_user", user=self._user, exception=e, ) if len(results) == 0: logger.warning( "Caught LDAPError while authenticating {}: {}".format( self._username, pprint.pformat(e) ) ) except Exception as e: logger.warning("{} while authenticating {}".format(e, self._username)) raise return user
['def', 'populate_user', '(', 'self', ')', ':', 'user', '=', 'None', 'try', ':', '# self.attrs will only be non-None if we were able to load this user', '# from the LDAP directory, so this filters out nonexistent users.', 'if', 'self', '.', 'attrs', 'is', 'not', 'None', ':', 'self', '.', '_get_or_create_user', '(', 'force_populate', '=', 'True', ')', 'user', '=', 'self', '.', '_user', 'except', 'ldap', '.', 'LDAPError', 'as', 'e', ':', 'results', '=', 'ldap_error', '.', 'send', '(', 'self', '.', 'backend', '.', '__class__', ',', 'context', '=', '"populate_user"', ',', 'user', '=', 'self', '.', '_user', ',', 'exception', '=', 'e', ',', ')', 'if', 'len', '(', 'results', ')', '==', '0', ':', 'logger', '.', 'warning', '(', '"Caught LDAPError while authenticating {}: {}"', '.', 'format', '(', 'self', '.', '_username', ',', 'pprint', '.', 'pformat', '(', 'e', ')', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'warning', '(', '"{} while authenticating {}"', '.', 'format', '(', 'e', ',', 'self', '.', '_username', ')', ')', 'raise', 'return', 'user']
Populates the Django user object using the default bind credentials.
['Populates', 'the', 'Django', 'user', 'object', 'using', 'the', 'default', 'bind', 'credentials', '.']
train
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L402-L432
2,285
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
CustomerStatementExport.create
def create(cls, statement_format, date_start, date_end,
           monetary_account_id=None, regional_format=None,
           custom_headers=None):
    """
    :type user_id: int
    :type monetary_account_id: int
    :param statement_format: The format type of statement. Allowed values:
    MT940, CSV, PDF.
    :type statement_format: str
    :param date_start: The start date for making statements.
    :type date_start: str
    :param date_end: The end date for making statements.
    :type date_end: str
    :param regional_format: Required for CSV exports. The regional format of
    the statement, can be UK_US (comma-separated) or EUROPEAN
    (semicolon-separated).
    :type regional_format: str
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """

    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_STATEMENT_FORMAT: statement_format,
        cls.FIELD_DATE_START: date_start,
        cls.FIELD_DATE_END: date_end,
        cls.FIELD_REGIONAL_FORMAT: regional_format
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(),
                                                   cls._determine_monetary_account_id(
                                                       monetary_account_id))
    response_raw = api_client.post(endpoint_url, request_bytes,
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
python
def create(cls, statement_format, date_start, date_end, monetary_account_id=None, regional_format=None, custom_headers=None): """ :type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_STATEMENT_FORMAT: statement_format, cls.FIELD_DATE_START: date_start, cls.FIELD_DATE_END: date_end, cls.FIELD_REGIONAL_FORMAT: regional_format } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
['def', 'create', '(', 'cls', ',', 'statement_format', ',', 'date_start', ',', 'date_end', ',', 'monetary_account_id', '=', 'None', ',', 'regional_format', '=', 'None', ',', 'custom_headers', '=', 'None', ')', ':', 'if', 'custom_headers', 'is', 'None', ':', 'custom_headers', '=', '{', '}', 'request_map', '=', '{', 'cls', '.', 'FIELD_STATEMENT_FORMAT', ':', 'statement_format', ',', 'cls', '.', 'FIELD_DATE_START', ':', 'date_start', ',', 'cls', '.', 'FIELD_DATE_END', ':', 'date_end', ',', 'cls', '.', 'FIELD_REGIONAL_FORMAT', ':', 'regional_format', '}', 'request_map_string', '=', 'converter', '.', 'class_to_json', '(', 'request_map', ')', 'request_map_string', '=', 'cls', '.', '_remove_field_for_request', '(', 'request_map_string', ')', 'api_client', '=', 'client', '.', 'ApiClient', '(', 'cls', '.', '_get_api_context', '(', ')', ')', 'request_bytes', '=', 'request_map_string', '.', 'encode', '(', ')', 'endpoint_url', '=', 'cls', '.', '_ENDPOINT_URL_CREATE', '.', 'format', '(', 'cls', '.', '_determine_user_id', '(', ')', ',', 'cls', '.', '_determine_monetary_account_id', '(', 'monetary_account_id', ')', ')', 'response_raw', '=', 'api_client', '.', 'post', '(', 'endpoint_url', ',', 'request_bytes', ',', 'custom_headers', ')', 'return', 'BunqResponseInt', '.', 'cast_from_bunq_response', '(', 'cls', '.', '_process_for_id', '(', 'response_raw', ')', ')']
:type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
[':', 'type', 'user_id', ':', 'int', ':', 'type', 'monetary_account_id', ':', 'int', ':', 'param', 'statement_format', ':', 'The', 'format', 'type', 'of', 'statement', '.', 'Allowed', 'values', ':', 'MT940', 'CSV', 'PDF', '.', ':', 'type', 'statement_format', ':', 'str', ':', 'param', 'date_start', ':', 'The', 'start', 'date', 'for', 'making', 'statements', '.', ':', 'type', 'date_start', ':', 'str', ':', 'param', 'date_end', ':', 'The', 'end', 'date', 'for', 'making', 'statements', '.', ':', 'type', 'date_end', ':', 'str', ':', 'param', 'regional_format', ':', 'Required', 'for', 'CSV', 'exports', '.', 'The', 'regional', 'format', 'of', 'the', 'statement', 'can', 'be', 'UK_US', '(', 'comma', '-', 'separated', ')', 'or', 'EUROPEAN', '(', 'semicolon', '-', 'separated', ')', '.', ':', 'type', 'regional_format', ':', 'str', ':', 'type', 'custom_headers', ':', 'dict', '[', 'str', 'str', ']', '|None']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L16500-L16544
2,286
zhanglab/psamm
psamm/commands/duplicatescheck.py
reaction_signature
def reaction_signature(eq, direction=False, stoichiometry=False):
    """Return unique signature object for :class:`Reaction`.

    Signature objects are hashable, and compare equal only if the reactions
    are considered the same according to the specified rules.

    Args:
        direction: Include reaction directionality when considering equality.
        stoichiometry: Include stoichiometry when considering equality.
    """
    def compounds_sig(compounds):
        if stoichiometry:
            return tuple(sorted(compounds))
        else:
            return tuple(sorted(compound for compound, _ in compounds))

    left = compounds_sig(eq.left)
    right = compounds_sig(eq.right)

    if left < right:
        reaction_sig = left, right
        direction_sig = eq.direction
    else:
        reaction_sig = right, left
        direction_sig = eq.direction.flipped()

    if direction:
        return reaction_sig, direction_sig
    return reaction_sig
python
def reaction_signature(eq, direction=False, stoichiometry=False): """Return unique signature object for :class:`Reaction`. Signature objects are hashable, and compare equal only if the reactions are considered the same according to the specified rules. Args: direction: Include reaction directionality when considering equality. stoichiometry: Include stoichiometry when considering equality. """ def compounds_sig(compounds): if stoichiometry: return tuple(sorted(compounds)) else: return tuple(sorted(compound for compound, _ in compounds)) left = compounds_sig(eq.left) right = compounds_sig(eq.right) if left < right: reaction_sig = left, right direction_sig = eq.direction else: reaction_sig = right, left direction_sig = eq.direction.flipped() if direction: return reaction_sig, direction_sig return reaction_sig
['def', 'reaction_signature', '(', 'eq', ',', 'direction', '=', 'False', ',', 'stoichiometry', '=', 'False', ')', ':', 'def', 'compounds_sig', '(', 'compounds', ')', ':', 'if', 'stoichiometry', ':', 'return', 'tuple', '(', 'sorted', '(', 'compounds', ')', ')', 'else', ':', 'return', 'tuple', '(', 'sorted', '(', 'compound', 'for', 'compound', ',', '_', 'in', 'compounds', ')', ')', 'left', '=', 'compounds_sig', '(', 'eq', '.', 'left', ')', 'right', '=', 'compounds_sig', '(', 'eq', '.', 'right', ')', 'if', 'left', '<', 'right', ':', 'reaction_sig', '=', 'left', ',', 'right', 'direction_sig', '=', 'eq', '.', 'direction', 'else', ':', 'reaction_sig', '=', 'right', ',', 'left', 'direction_sig', '=', 'eq', '.', 'direction', '.', 'flipped', '(', ')', 'if', 'direction', ':', 'return', 'reaction_sig', ',', 'direction_sig', 'return', 'reaction_sig']
Return unique signature object for :class:`Reaction`. Signature objects are hashable, and compare equal only if the reactions are considered the same according to the specified rules. Args: direction: Include reaction directionality when considering equality. stoichiometry: Include stoichiometry when considering equality.
['Return', 'unique', 'signature', 'object', 'for', ':', 'class', ':', 'Reaction', '.']
train
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/commands/duplicatescheck.py#L29-L57
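The reaction_signature function above canonicalises a reaction so that mirror-image equations hash and compare equal. The sketch below replays that idea with plain tuples standing in for psamm's Reaction sides; the compound names are made up and directionality handling is omitted.

def signature_sketch(left, right, stoichiometry=False):
    # Reduce each side to a sorted tuple, then order the two sides so that
    # the flipped reaction produces the same hashable signature.
    def side_sig(side):
        if stoichiometry:
            return tuple(sorted(side))
        return tuple(sorted(compound for compound, _ in side))

    l, r = side_sig(left), side_sig(right)
    return (l, r) if l < r else (r, l)

fwd = signature_sketch([("glc", 1)], [("g6p", 1)])
rev = signature_sketch([("g6p", 1)], [("glc", 1)])
print(fwd == rev)  # True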
2,287
scanny/python-pptx
pptx/oxml/shapes/picture.py
CT_Picture.crop_to_fit
def crop_to_fit(self, image_size, view_size):
    """
    Set cropping values in `p:blipFill/a:srcRect` such that an image of
    *image_size* will stretch to exactly fit *view_size* when its aspect
    ratio is preserved.
    """
    self.blipFill.crop(self._fill_cropping(image_size, view_size))
python
def crop_to_fit(self, image_size, view_size): """ Set cropping values in `p:blipFill/a:srcRect` such that an image of *image_size* will stretch to exactly fit *view_size* when its aspect ratio is preserved. """ self.blipFill.crop(self._fill_cropping(image_size, view_size))
['def', 'crop_to_fit', '(', 'self', ',', 'image_size', ',', 'view_size', ')', ':', 'self', '.', 'blipFill', '.', 'crop', '(', 'self', '.', '_fill_cropping', '(', 'image_size', ',', 'view_size', ')', ')']
Set cropping values in `p:blipFill/a:srcRect` such that an image of *image_size* will stretch to exactly fit *view_size* when its aspect ratio is preserved.
['Set', 'cropping', 'values', 'in', 'p', ':', 'blipFill', '/', 'a', ':', 'srcRect', 'such', 'that', 'an', 'image', 'of', '*', 'image_size', '*', 'will', 'stretch', 'to', 'exactly', 'fit', '*', 'view_size', '*', 'when', 'its', 'aspect', 'ratio', 'is', 'preserved', '.']
train
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/shapes/picture.py#L35-L41
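The `_fill_cropping` helper this method delegates to is not included in the record. As a rough, standalone sketch of the arithmetic such a helper has to perform, under the assumption that (as with `a:srcRect`) each crop value is the fraction of the original width/height trimmed from that edge:

```python
def fill_cropping(image_size, view_size):
    # Returns (left, top, right, bottom) crop fractions so that the
    # image, scaled uniformly, exactly covers view_size.
    img_w, img_h = image_size
    view_w, view_h = view_size
    image_aspect = img_w / img_h
    view_aspect = view_w / view_h
    if image_aspect > view_aspect:
        # image is proportionally wider than the view: trim the sides
        crop = (1.0 - view_aspect / image_aspect) / 2.0
        return (crop, 0.0, crop, 0.0)
    # image is proportionally taller than the view: trim top and bottom
    crop = (1.0 - image_aspect / view_aspect) / 2.0
    return (0.0, crop, 0.0, crop)

# A 4:3 image placed into a 2:1 view loses one sixth of its height on
# each edge: (0.0, 0.1666..., 0.0, 0.1666...)
print(fill_cropping((400, 300), (200, 100)))
```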
2,288
mpg-age-bioinformatics/AGEpy
AGEpy/kegg.py
ecs_idsKEGG
def ecs_idsKEGG(organism): """ Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism :param organism: an organisms as listed in organismsKEGG() :returns: a Pandas dataframe of with 'ec' and 'KEGGid'. """ kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read() kegg_ec=kegg_ec.split("\n") final=[] for k in kegg_ec: final.append(k.split("\t")) df=pd.DataFrame(final[0:len(final)-1])[[0,1]] df.columns=['ec','KEGGid'] return df
python
def ecs_idsKEGG(organism): """ Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism :param organism: an organisms as listed in organismsKEGG() :returns: a Pandas dataframe of with 'ec' and 'KEGGid'. """ kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read() kegg_ec=kegg_ec.split("\n") final=[] for k in kegg_ec: final.append(k.split("\t")) df=pd.DataFrame(final[0:len(final)-1])[[0,1]] df.columns=['ec','KEGGid'] return df
['def', 'ecs_idsKEGG', '(', 'organism', ')', ':', 'kegg_ec', '=', 'urlopen', '(', '"http://rest.kegg.jp/link/"', '+', 'organism', '+', '"/enzyme"', ')', '.', 'read', '(', ')', 'kegg_ec', '=', 'kegg_ec', '.', 'split', '(', '"\\n"', ')', 'final', '=', '[', ']', 'for', 'k', 'in', 'kegg_ec', ':', 'final', '.', 'append', '(', 'k', '.', 'split', '(', '"\\t"', ')', ')', 'df', '=', 'pd', '.', 'DataFrame', '(', 'final', '[', '0', ':', 'len', '(', 'final', ')', '-', '1', ']', ')', '[', '[', '0', ',', '1', ']', ']', 'df', '.', 'columns', '=', '[', "'ec'", ',', "'KEGGid'", ']', 'return', 'df']
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism :param organism: an organism as listed in organismsKEGG() :returns: a Pandas dataframe with 'ec' and 'KEGGid'.
['Uses', 'KEGG', 'to', 'retrieve', 'all', 'ids', 'and', 'respective', 'ecs', 'for', 'a', 'given', 'KEGG', 'organism']
train
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L107-L123
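The record above relies on the Python 2 urlopen API. A hedged Python 3 sketch of the same lookup using requests instead; the KEGG endpoint and the 'ec'/'KEGGid' column order are taken from the original, while the example organism code 'eco' is only an illustration.

```python
import pandas as pd
import requests

def ec_ids_for_organism(organism):
    # One tab-separated line per link: "<ec id>\t<organism gene id>"
    resp = requests.get("http://rest.kegg.jp/link/{}/enzyme".format(organism))
    resp.raise_for_status()
    rows = [line.split("\t") for line in resp.text.strip().split("\n")]
    return pd.DataFrame(rows, columns=["ec", "KEGGid"])

# df = ec_ids_for_organism("eco")
# print(df.head())
```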
2,289
ronaldguillen/wave
wave/renderers.py
BrowsableAPIRenderer.get_content
def get_content(self, renderer, data, accepted_media_type, renderer_context): """ Get the content as if it had been rendered by the default non-documenting renderer. """ if not renderer: return '[No renderers were found]' renderer_context['indent'] = 4 content = renderer.render(data, accepted_media_type, renderer_context) render_style = getattr(renderer, 'render_style', 'text') assert render_style in ['text', 'binary'], 'Expected .render_style ' \ '"text" or "binary", but got "%s"' % render_style if render_style == 'binary': return '[%d bytes of binary content]' % len(content) return content
python
def get_content(self, renderer, data, accepted_media_type, renderer_context): """ Get the content as if it had been rendered by the default non-documenting renderer. """ if not renderer: return '[No renderers were found]' renderer_context['indent'] = 4 content = renderer.render(data, accepted_media_type, renderer_context) render_style = getattr(renderer, 'render_style', 'text') assert render_style in ['text', 'binary'], 'Expected .render_style ' \ '"text" or "binary", but got "%s"' % render_style if render_style == 'binary': return '[%d bytes of binary content]' % len(content) return content
['def', 'get_content', '(', 'self', ',', 'renderer', ',', 'data', ',', 'accepted_media_type', ',', 'renderer_context', ')', ':', 'if', 'not', 'renderer', ':', 'return', "'[No renderers were found]'", 'renderer_context', '[', "'indent'", ']', '=', '4', 'content', '=', 'renderer', '.', 'render', '(', 'data', ',', 'accepted_media_type', ',', 'renderer_context', ')', 'render_style', '=', 'getattr', '(', 'renderer', ',', "'render_style'", ',', "'text'", ')', 'assert', 'render_style', 'in', '[', "'text'", ',', "'binary'", ']', ',', "'Expected .render_style '", '\'"text" or "binary", but got "%s"\'', '%', 'render_style', 'if', 'render_style', '==', "'binary'", ':', 'return', "'[%d bytes of binary content]'", '%', 'len', '(', 'content', ')', 'return', 'content']
Get the content as if it had been rendered by the default non-documenting renderer.
['Get', 'the', 'content', 'as', 'if', 'it', 'had', 'been', 'rendered', 'by', 'the', 'default', 'non', '-', 'documenting', 'renderer', '.']
train
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L388-L406
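A small standalone illustration of the text-vs-binary branch above, written outside of Django REST framework; the function name is made up for the example.

```python
def describe_rendered_content(content, render_style="text"):
    # Binary payloads are summarised instead of being dumped into the
    # browsable page, mirroring the renderer's behaviour.
    assert render_style in ("text", "binary"), (
        'Expected render_style "text" or "binary", but got "%s"' % render_style
    )
    if render_style == "binary":
        return "[%d bytes of binary content]" % len(content)
    return content

print(describe_rendered_content("{\n    \"id\": 1\n}"))      # printed as-is
print(describe_rendered_content(b"\x89PNG\r\n", "binary"))   # [6 bytes of binary content]
```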
2,290
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.replace
def replace(self, to_replace, value=_NoValue, subset=None): """Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+ """ if value is _NoValue: if isinstance(to_replace, dict): value = None else: raise TypeError("value argument is required when to_replace is not a dictionary.") # Helper functions def all_of(types): """Given a type or tuple of types and a sequence of xs check if each x is instance of type(s) >>> all_of(bool)([True, False]) True >>> all_of(basestring)(["a", 1]) False """ def all_of_(xs): return all(isinstance(x, types) for x in xs) return all_of_ all_of_bool = all_of(bool) all_of_str = all_of(basestring) all_of_numeric = all_of((float, int, long)) # Validate input types valid_types = (bool, float, int, long, basestring, list, tuple) if not isinstance(to_replace, valid_types + (dict, )): raise ValueError( "to_replace should be a bool, float, int, long, string, list, tuple, or dict. " "Got {0}".format(type(to_replace))) if not isinstance(value, valid_types) and value is not None \ and not isinstance(to_replace, dict): raise ValueError("If to_replace is not a dict, value should be " "a bool, float, int, long, string, list, tuple or None. " "Got {0}".format(type(value))) if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)): if len(to_replace) != len(value): raise ValueError("to_replace and value lists should be of the same length. 
" "Got {0} and {1}".format(len(to_replace), len(value))) if not (subset is None or isinstance(subset, (list, tuple, basestring))): raise ValueError("subset should be a list or tuple of column names, " "column name or None. Got {0}".format(type(subset))) # Reshape input arguments if necessary if isinstance(to_replace, (float, int, long, basestring)): to_replace = [to_replace] if isinstance(to_replace, dict): rep_dict = to_replace if value is not None: warnings.warn("to_replace is a dict and value is not None. value will be ignored.") else: if isinstance(value, (float, int, long, basestring)) or value is None: value = [value for _ in range(len(to_replace))] rep_dict = dict(zip(to_replace, value)) if isinstance(subset, basestring): subset = [subset] # Verify we were not passed in mixed type generics. if not any(all_of_type(rep_dict.keys()) and all_of_type(x for x in rep_dict.values() if x is not None) for all_of_type in [all_of_bool, all_of_str, all_of_numeric]): raise ValueError("Mixed type replacements are not supported") if subset is None: return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx) else: return DataFrame( self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
python
def replace(self, to_replace, value=_NoValue, subset=None): """Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+ """ if value is _NoValue: if isinstance(to_replace, dict): value = None else: raise TypeError("value argument is required when to_replace is not a dictionary.") # Helper functions def all_of(types): """Given a type or tuple of types and a sequence of xs check if each x is instance of type(s) >>> all_of(bool)([True, False]) True >>> all_of(basestring)(["a", 1]) False """ def all_of_(xs): return all(isinstance(x, types) for x in xs) return all_of_ all_of_bool = all_of(bool) all_of_str = all_of(basestring) all_of_numeric = all_of((float, int, long)) # Validate input types valid_types = (bool, float, int, long, basestring, list, tuple) if not isinstance(to_replace, valid_types + (dict, )): raise ValueError( "to_replace should be a bool, float, int, long, string, list, tuple, or dict. " "Got {0}".format(type(to_replace))) if not isinstance(value, valid_types) and value is not None \ and not isinstance(to_replace, dict): raise ValueError("If to_replace is not a dict, value should be " "a bool, float, int, long, string, list, tuple or None. " "Got {0}".format(type(value))) if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)): if len(to_replace) != len(value): raise ValueError("to_replace and value lists should be of the same length. 
" "Got {0} and {1}".format(len(to_replace), len(value))) if not (subset is None or isinstance(subset, (list, tuple, basestring))): raise ValueError("subset should be a list or tuple of column names, " "column name or None. Got {0}".format(type(subset))) # Reshape input arguments if necessary if isinstance(to_replace, (float, int, long, basestring)): to_replace = [to_replace] if isinstance(to_replace, dict): rep_dict = to_replace if value is not None: warnings.warn("to_replace is a dict and value is not None. value will be ignored.") else: if isinstance(value, (float, int, long, basestring)) or value is None: value = [value for _ in range(len(to_replace))] rep_dict = dict(zip(to_replace, value)) if isinstance(subset, basestring): subset = [subset] # Verify we were not passed in mixed type generics. if not any(all_of_type(rep_dict.keys()) and all_of_type(x for x in rep_dict.values() if x is not None) for all_of_type in [all_of_bool, all_of_str, all_of_numeric]): raise ValueError("Mixed type replacements are not supported") if subset is None: return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx) else: return DataFrame( self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
['def', 'replace', '(', 'self', ',', 'to_replace', ',', 'value', '=', '_NoValue', ',', 'subset', '=', 'None', ')', ':', 'if', 'value', 'is', '_NoValue', ':', 'if', 'isinstance', '(', 'to_replace', ',', 'dict', ')', ':', 'value', '=', 'None', 'else', ':', 'raise', 'TypeError', '(', '"value argument is required when to_replace is not a dictionary."', ')', '# Helper functions', 'def', 'all_of', '(', 'types', ')', ':', '"""Given a type or tuple of types and a sequence of xs\n check if each x is instance of type(s)\n\n >>> all_of(bool)([True, False])\n True\n >>> all_of(basestring)(["a", 1])\n False\n """', 'def', 'all_of_', '(', 'xs', ')', ':', 'return', 'all', '(', 'isinstance', '(', 'x', ',', 'types', ')', 'for', 'x', 'in', 'xs', ')', 'return', 'all_of_', 'all_of_bool', '=', 'all_of', '(', 'bool', ')', 'all_of_str', '=', 'all_of', '(', 'basestring', ')', 'all_of_numeric', '=', 'all_of', '(', '(', 'float', ',', 'int', ',', 'long', ')', ')', '# Validate input types', 'valid_types', '=', '(', 'bool', ',', 'float', ',', 'int', ',', 'long', ',', 'basestring', ',', 'list', ',', 'tuple', ')', 'if', 'not', 'isinstance', '(', 'to_replace', ',', 'valid_types', '+', '(', 'dict', ',', ')', ')', ':', 'raise', 'ValueError', '(', '"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "', '"Got {0}"', '.', 'format', '(', 'type', '(', 'to_replace', ')', ')', ')', 'if', 'not', 'isinstance', '(', 'value', ',', 'valid_types', ')', 'and', 'value', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'to_replace', ',', 'dict', ')', ':', 'raise', 'ValueError', '(', '"If to_replace is not a dict, value should be "', '"a bool, float, int, long, string, list, tuple or None. "', '"Got {0}"', '.', 'format', '(', 'type', '(', 'value', ')', ')', ')', 'if', 'isinstance', '(', 'to_replace', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'isinstance', '(', 'value', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'if', 'len', '(', 'to_replace', ')', '!=', 'len', '(', 'value', ')', ':', 'raise', 'ValueError', '(', '"to_replace and value lists should be of the same length. "', '"Got {0} and {1}"', '.', 'format', '(', 'len', '(', 'to_replace', ')', ',', 'len', '(', 'value', ')', ')', ')', 'if', 'not', '(', 'subset', 'is', 'None', 'or', 'isinstance', '(', 'subset', ',', '(', 'list', ',', 'tuple', ',', 'basestring', ')', ')', ')', ':', 'raise', 'ValueError', '(', '"subset should be a list or tuple of column names, "', '"column name or None. Got {0}"', '.', 'format', '(', 'type', '(', 'subset', ')', ')', ')', '# Reshape input arguments if necessary', 'if', 'isinstance', '(', 'to_replace', ',', '(', 'float', ',', 'int', ',', 'long', ',', 'basestring', ')', ')', ':', 'to_replace', '=', '[', 'to_replace', ']', 'if', 'isinstance', '(', 'to_replace', ',', 'dict', ')', ':', 'rep_dict', '=', 'to_replace', 'if', 'value', 'is', 'not', 'None', ':', 'warnings', '.', 'warn', '(', '"to_replace is a dict and value is not None. 
value will be ignored."', ')', 'else', ':', 'if', 'isinstance', '(', 'value', ',', '(', 'float', ',', 'int', ',', 'long', ',', 'basestring', ')', ')', 'or', 'value', 'is', 'None', ':', 'value', '=', '[', 'value', 'for', '_', 'in', 'range', '(', 'len', '(', 'to_replace', ')', ')', ']', 'rep_dict', '=', 'dict', '(', 'zip', '(', 'to_replace', ',', 'value', ')', ')', 'if', 'isinstance', '(', 'subset', ',', 'basestring', ')', ':', 'subset', '=', '[', 'subset', ']', '# Verify we were not passed in mixed type generics.', 'if', 'not', 'any', '(', 'all_of_type', '(', 'rep_dict', '.', 'keys', '(', ')', ')', 'and', 'all_of_type', '(', 'x', 'for', 'x', 'in', 'rep_dict', '.', 'values', '(', ')', 'if', 'x', 'is', 'not', 'None', ')', 'for', 'all_of_type', 'in', '[', 'all_of_bool', ',', 'all_of_str', ',', 'all_of_numeric', ']', ')', ':', 'raise', 'ValueError', '(', '"Mixed type replacements are not supported"', ')', 'if', 'subset', 'is', 'None', ':', 'return', 'DataFrame', '(', 'self', '.', '_jdf', '.', 'na', '(', ')', '.', 'replace', '(', "'*'", ',', 'rep_dict', ')', ',', 'self', '.', 'sql_ctx', ')', 'else', ':', 'return', 'DataFrame', '(', 'self', '.', '_jdf', '.', 'na', '(', ')', '.', 'replace', '(', 'self', '.', '_jseq', '(', 'subset', ')', ',', 'self', '.', '_jmap', '(', 'rep_dict', ')', ')', ',', 'self', '.', 'sql_ctx', ')']
Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+
['Returns', 'a', 'new', ':', 'class', ':', 'DataFrame', 'replacing', 'a', 'value', 'with', 'another', 'value', '.', ':', 'func', ':', 'DataFrame', '.', 'replace', 'and', ':', 'func', ':', 'DataFrameNaFunctions', '.', 'replace', 'are', 'aliases', 'of', 'each', 'other', '.', 'Values', 'to_replace', 'and', 'value', 'must', 'have', 'the', 'same', 'type', 'and', 'can', 'only', 'be', 'numerics', 'booleans', 'or', 'strings', '.', 'Value', 'can', 'have', 'None', '.', 'When', 'replacing', 'the', 'new', 'value', 'will', 'be', 'cast', 'to', 'the', 'type', 'of', 'the', 'existing', 'column', '.', 'For', 'numeric', 'replacements', 'all', 'values', 'to', 'be', 'replaced', 'should', 'have', 'unique', 'floating', 'point', 'representation', '.', 'In', 'case', 'of', 'conflicts', '(', 'for', 'example', 'with', '{', '42', ':', '-', '1', '42', '.', '0', ':', '1', '}', ')', 'and', 'arbitrary', 'replacement', 'will', 'be', 'used', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1668-L1805
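A hedged usage sketch of the method above, assuming a local PySpark installation; the sample rows mirror the df4 examples in the docstring.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("replace-demo").getOrCreate()

df = spark.createDataFrame(
    [(10, 80, "Alice"), (5, None, "Bob"), (None, None, "Tom")],
    ["age", "height", "name"],
)

# Scalar form: replace 10 with 20 wherever the column type matches.
df.na.replace(10, 20).show()

# Dict form: value is omitted, keys map old values to new ones, and
# subset limits the replacement to the 'name' column.
df.na.replace({"Alice": "A", "Bob": "B"}, subset=["name"]).show()

spark.stop()
```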
2,291
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
silenceRemoval
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False): ''' Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optinal) smooth window (in seconds) - weight: (optinal) weight factor (0 < weight < 1) the higher, the more strict - plot: (optinal) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds ''' if weight >= 1: weight = 0.99 if weight <= 0: weight = 0.01 # Step 1: feature extraction x = audioBasicIO.stereo2mono(x) st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs) # Step 2: train binary svm classifier of low vs high energy frames # keep only the energy short-term sequence (2nd feature) st_energy = st_feats[1, :] en = numpy.sort(st_energy) # number of 10% of the total short-term windows l1 = int(len(en) / 10) # compute "lower" 10% energy threshold t1 = numpy.mean(en[0:l1]) + 0.000000000000001 # compute "higher" 10% energy threshold t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001 # get all features that correspond to low energy class1 = st_feats[:, numpy.where(st_energy <= t1)[0]] # get all features that correspond to high energy class2 = st_feats[:, numpy.where(st_energy >= t2)[0]] # form the binary classification task and ... faets_s = [class1.T, class2.T] # normalize and train the respective svm probabilistic model # (ONSET vs SILENCE) [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s) svm = aT.trainSVM(faets_s_norm, 1.0) # Step 3: compute onset probability based on the trained svm prob_on_set = [] for i in range(st_feats.shape[1]): # for each frame cur_fv = (st_feats[:, i] - means_s) / stds_s # get svm probability (that it belongs to the ONSET class) prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1]) prob_on_set = numpy.array(prob_on_set) # smooth probability: prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step) # Step 4A: detect onset frame indices: prog_on_set_sort = numpy.sort(prob_on_set) # find probability Threshold as a weighted average # of top 10% and lower 10% of the values Nt = int(prog_on_set_sort.shape[0] / 10) T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) + weight * numpy.mean(prog_on_set_sort[-Nt::])) max_idx = numpy.where(prob_on_set > T)[0] # get the indices of the frames that satisfy the thresholding i = 0 time_clusters = [] seg_limits = [] # Step 4B: group frame indices to onset segments while i < len(max_idx): # for each of the detected onset indices cur_cluster = [max_idx[i]] if i == len(max_idx)-1: break while max_idx[i+1] - cur_cluster[-1] <= 2: cur_cluster.append(max_idx[i+1]) i += 1 if i == len(max_idx)-1: break i += 1 time_clusters.append(cur_cluster) seg_limits.append([cur_cluster[0] * st_step, cur_cluster[-1] * st_step]) # Step 5: Post process: remove very small segments: min_dur = 0.2 seg_limits_2 = [] for s in seg_limits: if s[1] - s[0] > min_dur: seg_limits_2.append(s) seg_limits = seg_limits_2 if plot: timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs) plt.subplot(2, 1, 1) plt.plot(timeX, x) for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.subplot(2, 1, 2) plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step), prob_on_set) plt.title('Signal') for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.title('svm Probability') plt.show() return seg_limits
python
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False): ''' Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optinal) smooth window (in seconds) - weight: (optinal) weight factor (0 < weight < 1) the higher, the more strict - plot: (optinal) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds ''' if weight >= 1: weight = 0.99 if weight <= 0: weight = 0.01 # Step 1: feature extraction x = audioBasicIO.stereo2mono(x) st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs) # Step 2: train binary svm classifier of low vs high energy frames # keep only the energy short-term sequence (2nd feature) st_energy = st_feats[1, :] en = numpy.sort(st_energy) # number of 10% of the total short-term windows l1 = int(len(en) / 10) # compute "lower" 10% energy threshold t1 = numpy.mean(en[0:l1]) + 0.000000000000001 # compute "higher" 10% energy threshold t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001 # get all features that correspond to low energy class1 = st_feats[:, numpy.where(st_energy <= t1)[0]] # get all features that correspond to high energy class2 = st_feats[:, numpy.where(st_energy >= t2)[0]] # form the binary classification task and ... faets_s = [class1.T, class2.T] # normalize and train the respective svm probabilistic model # (ONSET vs SILENCE) [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s) svm = aT.trainSVM(faets_s_norm, 1.0) # Step 3: compute onset probability based on the trained svm prob_on_set = [] for i in range(st_feats.shape[1]): # for each frame cur_fv = (st_feats[:, i] - means_s) / stds_s # get svm probability (that it belongs to the ONSET class) prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1]) prob_on_set = numpy.array(prob_on_set) # smooth probability: prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step) # Step 4A: detect onset frame indices: prog_on_set_sort = numpy.sort(prob_on_set) # find probability Threshold as a weighted average # of top 10% and lower 10% of the values Nt = int(prog_on_set_sort.shape[0] / 10) T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) + weight * numpy.mean(prog_on_set_sort[-Nt::])) max_idx = numpy.where(prob_on_set > T)[0] # get the indices of the frames that satisfy the thresholding i = 0 time_clusters = [] seg_limits = [] # Step 4B: group frame indices to onset segments while i < len(max_idx): # for each of the detected onset indices cur_cluster = [max_idx[i]] if i == len(max_idx)-1: break while max_idx[i+1] - cur_cluster[-1] <= 2: cur_cluster.append(max_idx[i+1]) i += 1 if i == len(max_idx)-1: break i += 1 time_clusters.append(cur_cluster) seg_limits.append([cur_cluster[0] * st_step, cur_cluster[-1] * st_step]) # Step 5: Post process: remove very small segments: min_dur = 0.2 seg_limits_2 = [] for s in seg_limits: if s[1] - s[0] > min_dur: seg_limits_2.append(s) seg_limits = seg_limits_2 if plot: timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs) plt.subplot(2, 1, 1) plt.plot(timeX, x) for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.subplot(2, 1, 2) plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step), prob_on_set) plt.title('Signal') for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.title('svm Probability') plt.show() return seg_limits
['def', 'silenceRemoval', '(', 'x', ',', 'fs', ',', 'st_win', ',', 'st_step', ',', 'smoothWindow', '=', '0.5', ',', 'weight', '=', '0.5', ',', 'plot', '=', 'False', ')', ':', 'if', 'weight', '>=', '1', ':', 'weight', '=', '0.99', 'if', 'weight', '<=', '0', ':', 'weight', '=', '0.01', '# Step 1: feature extraction', 'x', '=', 'audioBasicIO', '.', 'stereo2mono', '(', 'x', ')', 'st_feats', ',', '_', '=', 'aF', '.', 'stFeatureExtraction', '(', 'x', ',', 'fs', ',', 'st_win', '*', 'fs', ',', 'st_step', '*', 'fs', ')', '# Step 2: train binary svm classifier of low vs high energy frames', '# keep only the energy short-term sequence (2nd feature)', 'st_energy', '=', 'st_feats', '[', '1', ',', ':', ']', 'en', '=', 'numpy', '.', 'sort', '(', 'st_energy', ')', '# number of 10% of the total short-term windows', 'l1', '=', 'int', '(', 'len', '(', 'en', ')', '/', '10', ')', '# compute "lower" 10% energy threshold', 't1', '=', 'numpy', '.', 'mean', '(', 'en', '[', '0', ':', 'l1', ']', ')', '+', '0.000000000000001', '# compute "higher" 10% energy threshold', 't2', '=', 'numpy', '.', 'mean', '(', 'en', '[', '-', 'l1', ':', '-', '1', ']', ')', '+', '0.000000000000001', '# get all features that correspond to low energy', 'class1', '=', 'st_feats', '[', ':', ',', 'numpy', '.', 'where', '(', 'st_energy', '<=', 't1', ')', '[', '0', ']', ']', '# get all features that correspond to high energy', 'class2', '=', 'st_feats', '[', ':', ',', 'numpy', '.', 'where', '(', 'st_energy', '>=', 't2', ')', '[', '0', ']', ']', '# form the binary classification task and ...', 'faets_s', '=', '[', 'class1', '.', 'T', ',', 'class2', '.', 'T', ']', '# normalize and train the respective svm probabilistic model', '# (ONSET vs SILENCE)', '[', 'faets_s_norm', ',', 'means_s', ',', 'stds_s', ']', '=', 'aT', '.', 'normalizeFeatures', '(', 'faets_s', ')', 'svm', '=', 'aT', '.', 'trainSVM', '(', 'faets_s_norm', ',', '1.0', ')', '# Step 3: compute onset probability based on the trained svm', 'prob_on_set', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'st_feats', '.', 'shape', '[', '1', ']', ')', ':', '# for each frame', 'cur_fv', '=', '(', 'st_feats', '[', ':', ',', 'i', ']', '-', 'means_s', ')', '/', 'stds_s', '# get svm probability (that it belongs to the ONSET class)', 'prob_on_set', '.', 'append', '(', 'svm', '.', 'predict_proba', '(', 'cur_fv', '.', 'reshape', '(', '1', ',', '-', '1', ')', ')', '[', '0', ']', '[', '1', ']', ')', 'prob_on_set', '=', 'numpy', '.', 'array', '(', 'prob_on_set', ')', '# smooth probability:', 'prob_on_set', '=', 'smoothMovingAvg', '(', 'prob_on_set', ',', 'smoothWindow', '/', 'st_step', ')', '# Step 4A: detect onset frame indices:', 'prog_on_set_sort', '=', 'numpy', '.', 'sort', '(', 'prob_on_set', ')', '# find probability Threshold as a weighted average', '# of top 10% and lower 10% of the values', 'Nt', '=', 'int', '(', 'prog_on_set_sort', '.', 'shape', '[', '0', ']', '/', '10', ')', 'T', '=', '(', 'numpy', '.', 'mean', '(', '(', '1', '-', 'weight', ')', '*', 'prog_on_set_sort', '[', '0', ':', 'Nt', ']', ')', '+', 'weight', '*', 'numpy', '.', 'mean', '(', 'prog_on_set_sort', '[', '-', 'Nt', ':', ':', ']', ')', ')', 'max_idx', '=', 'numpy', '.', 'where', '(', 'prob_on_set', '>', 'T', ')', '[', '0', ']', '# get the indices of the frames that satisfy the thresholding', 'i', '=', '0', 'time_clusters', '=', '[', ']', 'seg_limits', '=', '[', ']', '# Step 4B: group frame indices to onset segments', 'while', 'i', '<', 'len', '(', 'max_idx', ')', ':', '# for each of the detected onset indices', 'cur_cluster', '=', 
'[', 'max_idx', '[', 'i', ']', ']', 'if', 'i', '==', 'len', '(', 'max_idx', ')', '-', '1', ':', 'break', 'while', 'max_idx', '[', 'i', '+', '1', ']', '-', 'cur_cluster', '[', '-', '1', ']', '<=', '2', ':', 'cur_cluster', '.', 'append', '(', 'max_idx', '[', 'i', '+', '1', ']', ')', 'i', '+=', '1', 'if', 'i', '==', 'len', '(', 'max_idx', ')', '-', '1', ':', 'break', 'i', '+=', '1', 'time_clusters', '.', 'append', '(', 'cur_cluster', ')', 'seg_limits', '.', 'append', '(', '[', 'cur_cluster', '[', '0', ']', '*', 'st_step', ',', 'cur_cluster', '[', '-', '1', ']', '*', 'st_step', ']', ')', '# Step 5: Post process: remove very small segments:', 'min_dur', '=', '0.2', 'seg_limits_2', '=', '[', ']', 'for', 's', 'in', 'seg_limits', ':', 'if', 's', '[', '1', ']', '-', 's', '[', '0', ']', '>', 'min_dur', ':', 'seg_limits_2', '.', 'append', '(', 's', ')', 'seg_limits', '=', 'seg_limits_2', 'if', 'plot', ':', 'timeX', '=', 'numpy', '.', 'arange', '(', '0', ',', 'x', '.', 'shape', '[', '0', ']', '/', 'float', '(', 'fs', ')', ',', '1.0', '/', 'fs', ')', 'plt', '.', 'subplot', '(', '2', ',', '1', ',', '1', ')', 'plt', '.', 'plot', '(', 'timeX', ',', 'x', ')', 'for', 's', 'in', 'seg_limits', ':', 'plt', '.', 'axvline', '(', 'x', '=', 's', '[', '0', ']', ')', 'plt', '.', 'axvline', '(', 'x', '=', 's', '[', '1', ']', ')', 'plt', '.', 'subplot', '(', '2', ',', '1', ',', '2', ')', 'plt', '.', 'plot', '(', 'numpy', '.', 'arange', '(', '0', ',', 'prob_on_set', '.', 'shape', '[', '0', ']', '*', 'st_step', ',', 'st_step', ')', ',', 'prob_on_set', ')', 'plt', '.', 'title', '(', "'Signal'", ')', 'for', 's', 'in', 'seg_limits', ':', 'plt', '.', 'axvline', '(', 'x', '=', 's', '[', '0', ']', ')', 'plt', '.', 'axvline', '(', 'x', '=', 's', '[', '1', ']', ')', 'plt', '.', 'title', '(', "'svm Probability'", ')', 'plt', '.', 'show', '(', ')', 'return', 'seg_limits']
Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optional) smooth window (in seconds) - weight: (optional) weight factor (0 < weight < 1) the higher, the more strict - plot: (optional) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
['Event', 'Detection', '(', 'silence', 'removal', ')', 'ARGUMENTS', ':', '-', 'x', ':', 'the', 'input', 'audio', 'signal', '-', 'fs', ':', 'sampling', 'freq', '-', 'st_win', 'st_step', ':', 'window', 'size', 'and', 'step', 'in', 'seconds', '-', 'smoothWindow', ':', '(', 'optinal', ')', 'smooth', 'window', '(', 'in', 'seconds', ')', '-', 'weight', ':', '(', 'optinal', ')', 'weight', 'factor', '(', '0', '<', 'weight', '<', '1', ')', 'the', 'higher', 'the', 'more', 'strict', '-', 'plot', ':', '(', 'optinal', ')', 'True', 'if', 'results', 'are', 'to', 'be', 'plotted', 'RETURNS', ':', '-', 'seg_limits', ':', 'list', 'of', 'segment', 'limits', 'in', 'seconds', '(', 'e', '.', 'g', '[[', '0', '.', '1', '0', '.', '9', ']', '[', '1', '.', '4', '3', '.', '0', ']]', 'means', 'that', 'the', 'resulting', 'segments', 'are', '(', '0', '.', '1', '-', '0', '.', '9', ')', 'seconds', 'and', '(', '1', '.', '4', '3', '.', '0', ')', 'seconds']
train
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L625-L738
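A hedged usage sketch. The module and function names (audioBasicIO.readAudioFile, audioSegmentation.silenceRemoval) follow the pyAudioAnalysis version this record was taken from and may differ in later releases; 'recording.wav' and the parameter values are placeholders.

```python
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioSegmentation as aS

fs, x = audioBasicIO.readAudioFile("recording.wav")
segments = aS.silenceRemoval(x, fs, 0.020, 0.020,
                             smoothWindow=1.0, weight=0.3, plot=False)
for start, end in segments:
    print("non-silent segment: %.2fs - %.2fs" % (start, end))
```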
2,292
Tanganelli/CoAPthon3
coapthon/layers/messagelayer.py
MessageLayer.receive_empty
def receive_empty(self, message): """ Pair ACKs with requests. :type message: Message :param message: the received message :rtype : Transaction :return: the transaction to which the message belongs to """ logger.debug("receive_empty - " + str(message)) try: host, port = message.source except AttributeError: return key_mid = str_append_hash(host, port, message.mid) key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid) key_token = str_append_hash(host, port, message.token) key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token) if key_mid in list(self._transactions.keys()): transaction = self._transactions[key_mid] elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] elif key_mid_multicast in list(self._transactions.keys()): transaction = self._transactions[key_mid_multicast] elif key_token_multicast in self._transactions_token: transaction = self._transactions_token[key_token_multicast] else: logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port)) return None if message.type == defines.Types["ACK"]: if not transaction.request.acknowledged: transaction.request.acknowledged = True elif (transaction.response is not None) and (not transaction.response.acknowledged): transaction.response.acknowledged = True elif message.type == defines.Types["RST"]: if not transaction.request.acknowledged: transaction.request.rejected = True elif not transaction.response.acknowledged: transaction.response.rejected = True elif message.type == defines.Types["CON"]: #implicit ACK (might have been lost) logger.debug("Implicit ACK on received CON for waiting transaction") transaction.request.acknowledged = True else: logger.warning("Unhandled message type...") if transaction.retransmit_stop is not None: transaction.retransmit_stop.set() return transaction
python
def receive_empty(self, message): """ Pair ACKs with requests. :type message: Message :param message: the received message :rtype : Transaction :return: the transaction to which the message belongs to """ logger.debug("receive_empty - " + str(message)) try: host, port = message.source except AttributeError: return key_mid = str_append_hash(host, port, message.mid) key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid) key_token = str_append_hash(host, port, message.token) key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token) if key_mid in list(self._transactions.keys()): transaction = self._transactions[key_mid] elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] elif key_mid_multicast in list(self._transactions.keys()): transaction = self._transactions[key_mid_multicast] elif key_token_multicast in self._transactions_token: transaction = self._transactions_token[key_token_multicast] else: logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port)) return None if message.type == defines.Types["ACK"]: if not transaction.request.acknowledged: transaction.request.acknowledged = True elif (transaction.response is not None) and (not transaction.response.acknowledged): transaction.response.acknowledged = True elif message.type == defines.Types["RST"]: if not transaction.request.acknowledged: transaction.request.rejected = True elif not transaction.response.acknowledged: transaction.response.rejected = True elif message.type == defines.Types["CON"]: #implicit ACK (might have been lost) logger.debug("Implicit ACK on received CON for waiting transaction") transaction.request.acknowledged = True else: logger.warning("Unhandled message type...") if transaction.retransmit_stop is not None: transaction.retransmit_stop.set() return transaction
['def', 'receive_empty', '(', 'self', ',', 'message', ')', ':', 'logger', '.', 'debug', '(', '"receive_empty - "', '+', 'str', '(', 'message', ')', ')', 'try', ':', 'host', ',', 'port', '=', 'message', '.', 'source', 'except', 'AttributeError', ':', 'return', 'key_mid', '=', 'str_append_hash', '(', 'host', ',', 'port', ',', 'message', '.', 'mid', ')', 'key_mid_multicast', '=', 'str_append_hash', '(', 'defines', '.', 'ALL_COAP_NODES', ',', 'port', ',', 'message', '.', 'mid', ')', 'key_token', '=', 'str_append_hash', '(', 'host', ',', 'port', ',', 'message', '.', 'token', ')', 'key_token_multicast', '=', 'str_append_hash', '(', 'defines', '.', 'ALL_COAP_NODES', ',', 'port', ',', 'message', '.', 'token', ')', 'if', 'key_mid', 'in', 'list', '(', 'self', '.', '_transactions', '.', 'keys', '(', ')', ')', ':', 'transaction', '=', 'self', '.', '_transactions', '[', 'key_mid', ']', 'elif', 'key_token', 'in', 'self', '.', '_transactions_token', ':', 'transaction', '=', 'self', '.', '_transactions_token', '[', 'key_token', ']', 'elif', 'key_mid_multicast', 'in', 'list', '(', 'self', '.', '_transactions', '.', 'keys', '(', ')', ')', ':', 'transaction', '=', 'self', '.', '_transactions', '[', 'key_mid_multicast', ']', 'elif', 'key_token_multicast', 'in', 'self', '.', '_transactions_token', ':', 'transaction', '=', 'self', '.', '_transactions_token', '[', 'key_token_multicast', ']', 'else', ':', 'logger', '.', 'warning', '(', '"Un-Matched incoming empty message "', '+', 'str', '(', 'host', ')', '+', '":"', '+', 'str', '(', 'port', ')', ')', 'return', 'None', 'if', 'message', '.', 'type', '==', 'defines', '.', 'Types', '[', '"ACK"', ']', ':', 'if', 'not', 'transaction', '.', 'request', '.', 'acknowledged', ':', 'transaction', '.', 'request', '.', 'acknowledged', '=', 'True', 'elif', '(', 'transaction', '.', 'response', 'is', 'not', 'None', ')', 'and', '(', 'not', 'transaction', '.', 'response', '.', 'acknowledged', ')', ':', 'transaction', '.', 'response', '.', 'acknowledged', '=', 'True', 'elif', 'message', '.', 'type', '==', 'defines', '.', 'Types', '[', '"RST"', ']', ':', 'if', 'not', 'transaction', '.', 'request', '.', 'acknowledged', ':', 'transaction', '.', 'request', '.', 'rejected', '=', 'True', 'elif', 'not', 'transaction', '.', 'response', '.', 'acknowledged', ':', 'transaction', '.', 'response', '.', 'rejected', '=', 'True', 'elif', 'message', '.', 'type', '==', 'defines', '.', 'Types', '[', '"CON"', ']', ':', '#implicit ACK (might have been lost)', 'logger', '.', 'debug', '(', '"Implicit ACK on received CON for waiting transaction"', ')', 'transaction', '.', 'request', '.', 'acknowledged', '=', 'True', 'else', ':', 'logger', '.', 'warning', '(', '"Unhandled message type..."', ')', 'if', 'transaction', '.', 'retransmit_stop', 'is', 'not', 'None', ':', 'transaction', '.', 'retransmit_stop', '.', 'set', '(', ')', 'return', 'transaction']
Pair ACKs with requests. :type message: Message :param message: the received message :rtype : Transaction :return: the transaction to which the message belongs to
['Pair', 'ACKs', 'with', 'requests', '.']
train
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/messagelayer.py#L140-L190
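A toy sketch of the matching order used above (message-id first, then token, each with a multicast fallback), using plain tuple keys instead of the library's hashed strings; the host, port and multicast address are placeholders.

```python
def match_transaction(by_mid, by_token, host, port, mid, token,
                      all_coap_nodes="224.0.1.187"):
    for key, table in (
        ((host, port, mid), by_mid),                 # exact message-id match
        ((host, port, token), by_token),             # token match
        ((all_coap_nodes, port, mid), by_mid),       # multicast message-id
        ((all_coap_nodes, port, token), by_token),   # multicast token
    ):
        if key in table:
            return table[key]
    return None                                      # un-matched empty message

by_mid = {("192.0.2.1", 5683, 42): "transaction-A"}
print(match_transaction(by_mid, {}, "192.0.2.1", 5683, 42, b"\x01"))  # transaction-A
```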
2,293
andela-sjames/paystack-python
paystackapi/base.py
PayStackRequests.post
def post(self, endpoint, **kwargs): """Create a resource. Args: endpoint: resource endpoint. """ return self._request(requests.post, endpoint, **kwargs)
python
def post(self, endpoint, **kwargs): """Create a resource. Args: endpoint: resource endpoint. """ return self._request(requests.post, endpoint, **kwargs)
['def', 'post', '(', 'self', ',', 'endpoint', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_request', '(', 'requests', '.', 'post', ',', 'endpoint', ',', '*', '*', 'kwargs', ')']
Create a resource. Args: endpoint: resource endpoint.
['Create', 'a', 'resource', '.']
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L70-L76
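A self-contained sketch of the wrapper pattern the one-liner above relies on: each HTTP verb forwards a requests function to a shared _request helper. The base URL, endpoint and auth header here are placeholders, not the real paystackapi configuration.

```python
import requests

class Api:
    def __init__(self, base_url, secret_key):
        self.base_url = base_url.rstrip("/")
        self.headers = {"Authorization": "Bearer %s" % secret_key}

    def _request(self, method, endpoint, **kwargs):
        # 'method' is one of requests.get/post/put/..., passed in by the
        # thin verb wrappers below.
        url = "%s/%s" % (self.base_url, endpoint.lstrip("/"))
        response = method(url, headers=self.headers, **kwargs)
        response.raise_for_status()
        return response.json()

    def post(self, endpoint, **kwargs):
        """Create a resource."""
        return self._request(requests.post, endpoint, **kwargs)

# api = Api("https://api.paystack.co", "sk_test_xxx")
# api.post("transaction/initialize", json={"email": "a@b.co", "amount": 5000})
```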
2,294
boppreh/keyboard
keyboard/__init__.py
get_hotkey_name
def get_hotkey_name(names=None): """ Returns a string representation of hotkey from the given key names, or the currently pressed keys if not given. This function: - normalizes names; - removes "left" and "right" prefixes; - replaces the "+" key name with "plus" to avoid ambiguity; - puts modifier keys first, in a standardized order; - sort remaining keys; - finally, joins everything with "+". Example: get_hotkey_name(['+', 'left ctrl', 'shift']) # "ctrl+shift+plus" """ if names is None: _listener.start_if_necessary() with _pressed_events_lock: names = [e.name for e in _pressed_events.values()] else: names = [normalize_name(name) for name in names] clean_names = set(e.replace('left ', '').replace('right ', '').replace('+', 'plus') for e in names) # https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/ # > List modifier keys in the correct order. If you use more than one modifier key in a # > hotkey, always list them in this order: Control, Option, Shift, Command. modifiers = ['ctrl', 'alt', 'shift', 'windows'] sorting_key = lambda k: (modifiers.index(k) if k in modifiers else 5, str(k)) return '+'.join(sorted(clean_names, key=sorting_key))
python
def get_hotkey_name(names=None): """ Returns a string representation of hotkey from the given key names, or the currently pressed keys if not given. This function: - normalizes names; - removes "left" and "right" prefixes; - replaces the "+" key name with "plus" to avoid ambiguity; - puts modifier keys first, in a standardized order; - sort remaining keys; - finally, joins everything with "+". Example: get_hotkey_name(['+', 'left ctrl', 'shift']) # "ctrl+shift+plus" """ if names is None: _listener.start_if_necessary() with _pressed_events_lock: names = [e.name for e in _pressed_events.values()] else: names = [normalize_name(name) for name in names] clean_names = set(e.replace('left ', '').replace('right ', '').replace('+', 'plus') for e in names) # https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/ # > List modifier keys in the correct order. If you use more than one modifier key in a # > hotkey, always list them in this order: Control, Option, Shift, Command. modifiers = ['ctrl', 'alt', 'shift', 'windows'] sorting_key = lambda k: (modifiers.index(k) if k in modifiers else 5, str(k)) return '+'.join(sorted(clean_names, key=sorting_key))
['def', 'get_hotkey_name', '(', 'names', '=', 'None', ')', ':', 'if', 'names', 'is', 'None', ':', '_listener', '.', 'start_if_necessary', '(', ')', 'with', '_pressed_events_lock', ':', 'names', '=', '[', 'e', '.', 'name', 'for', 'e', 'in', '_pressed_events', '.', 'values', '(', ')', ']', 'else', ':', 'names', '=', '[', 'normalize_name', '(', 'name', ')', 'for', 'name', 'in', 'names', ']', 'clean_names', '=', 'set', '(', 'e', '.', 'replace', '(', "'left '", ',', "''", ')', '.', 'replace', '(', "'right '", ',', "''", ')', '.', 'replace', '(', "'+'", ',', "'plus'", ')', 'for', 'e', 'in', 'names', ')', '# https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/', '# > List modifier keys in the correct order. If you use more than one modifier key in a', '# > hotkey, always list them in this order: Control, Option, Shift, Command.', 'modifiers', '=', '[', "'ctrl'", ',', "'alt'", ',', "'shift'", ',', "'windows'", ']', 'sorting_key', '=', 'lambda', 'k', ':', '(', 'modifiers', '.', 'index', '(', 'k', ')', 'if', 'k', 'in', 'modifiers', 'else', '5', ',', 'str', '(', 'k', ')', ')', 'return', "'+'", '.', 'join', '(', 'sorted', '(', 'clean_names', ',', 'key', '=', 'sorting_key', ')', ')']
Returns a string representation of hotkey from the given key names, or the currently pressed keys if not given. This function: - normalizes names; - removes "left" and "right" prefixes; - replaces the "+" key name with "plus" to avoid ambiguity; - puts modifier keys first, in a standardized order; - sort remaining keys; - finally, joins everything with "+". Example: get_hotkey_name(['+', 'left ctrl', 'shift']) # "ctrl+shift+plus"
['Returns', 'a', 'string', 'representation', 'of', 'hotkey', 'from', 'the', 'given', 'key', 'names', 'or', 'the', 'currently', 'pressed', 'keys', 'if', 'not', 'given', '.', 'This', 'function', ':']
train
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/__init__.py#L886-L915
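A usage sketch; the first call repeats the example from the docstring. Reading the live keyboard state (the no-argument form) typically needs elevated privileges on Linux and macOS.

```python
import keyboard

# Canonicalise an explicit list of key names:
print(keyboard.get_hotkey_name(['+', 'left ctrl', 'shift']))  # ctrl+shift+plus

# With no argument the currently pressed keys are used instead:
# print(keyboard.get_hotkey_name())
```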
2,295
trombastic/PyScada
pyscada/utils/scheduler.py
Scheduler.run
def run(self): """ the main loop """ try: master_process = BackgroundProcess.objects.filter(pk=self.process_id).first() if master_process: master_process.last_update = now() master_process.message = 'init child processes' master_process.save() else: self.delete_pid(force_del=True) self.stderr.write("no such process in BackgroundProcesses") sys.exit(0) self.manage_processes() while True: # handle signals sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None # check the DB connection check_db_connection() # update the P BackgroundProcess.objects.filter(pk=self.process_id).update( last_update=now(), message='running..') if sig is None: self.manage_processes() elif sig not in self.SIGNALS: logger.error('%s, unhandled signal %d' % (self.label, sig)) continue elif sig == signal.SIGTERM: logger.debug('%s, termination signal' % self.label) raise StopIteration elif sig == signal.SIGHUP: # todo handle sighup pass elif sig == signal.SIGUSR1: # restart all child processes logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig)) self.restart() elif sig == signal.SIGUSR2: # write the process status to stdout self.status() pass sleep(5) except StopIteration: self.stop() self.delete_pid() sys.exit(0) except SystemExit: raise except: logger.error('%s(%d), unhandled exception\n%s' % (self.label, getpid(), traceback.format_exc()))
python
def run(self): """ the main loop """ try: master_process = BackgroundProcess.objects.filter(pk=self.process_id).first() if master_process: master_process.last_update = now() master_process.message = 'init child processes' master_process.save() else: self.delete_pid(force_del=True) self.stderr.write("no such process in BackgroundProcesses") sys.exit(0) self.manage_processes() while True: # handle signals sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None # check the DB connection check_db_connection() # update the P BackgroundProcess.objects.filter(pk=self.process_id).update( last_update=now(), message='running..') if sig is None: self.manage_processes() elif sig not in self.SIGNALS: logger.error('%s, unhandled signal %d' % (self.label, sig)) continue elif sig == signal.SIGTERM: logger.debug('%s, termination signal' % self.label) raise StopIteration elif sig == signal.SIGHUP: # todo handle sighup pass elif sig == signal.SIGUSR1: # restart all child processes logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig)) self.restart() elif sig == signal.SIGUSR2: # write the process status to stdout self.status() pass sleep(5) except StopIteration: self.stop() self.delete_pid() sys.exit(0) except SystemExit: raise except: logger.error('%s(%d), unhandled exception\n%s' % (self.label, getpid(), traceback.format_exc()))
['def', 'run', '(', 'self', ')', ':', 'try', ':', 'master_process', '=', 'BackgroundProcess', '.', 'objects', '.', 'filter', '(', 'pk', '=', 'self', '.', 'process_id', ')', '.', 'first', '(', ')', 'if', 'master_process', ':', 'master_process', '.', 'last_update', '=', 'now', '(', ')', 'master_process', '.', 'message', '=', "'init child processes'", 'master_process', '.', 'save', '(', ')', 'else', ':', 'self', '.', 'delete_pid', '(', 'force_del', '=', 'True', ')', 'self', '.', 'stderr', '.', 'write', '(', '"no such process in BackgroundProcesses"', ')', 'sys', '.', 'exit', '(', '0', ')', 'self', '.', 'manage_processes', '(', ')', 'while', 'True', ':', '# handle signals', 'sig', '=', 'self', '.', 'SIG_QUEUE', '.', 'pop', '(', '0', ')', 'if', 'len', '(', 'self', '.', 'SIG_QUEUE', ')', 'else', 'None', '# check the DB connection', 'check_db_connection', '(', ')', '# update the P', 'BackgroundProcess', '.', 'objects', '.', 'filter', '(', 'pk', '=', 'self', '.', 'process_id', ')', '.', 'update', '(', 'last_update', '=', 'now', '(', ')', ',', 'message', '=', "'running..'", ')', 'if', 'sig', 'is', 'None', ':', 'self', '.', 'manage_processes', '(', ')', 'elif', 'sig', 'not', 'in', 'self', '.', 'SIGNALS', ':', 'logger', '.', 'error', '(', "'%s, unhandled signal %d'", '%', '(', 'self', '.', 'label', ',', 'sig', ')', ')', 'continue', 'elif', 'sig', '==', 'signal', '.', 'SIGTERM', ':', 'logger', '.', 'debug', '(', "'%s, termination signal'", '%', 'self', '.', 'label', ')', 'raise', 'StopIteration', 'elif', 'sig', '==', 'signal', '.', 'SIGHUP', ':', '# todo handle sighup', 'pass', 'elif', 'sig', '==', 'signal', '.', 'SIGUSR1', ':', '# restart all child processes', 'logger', '.', 'debug', '(', "'PID %d, processed SIGUSR1 (%d) signal'", '%', '(', 'self', '.', 'pid', ',', 'sig', ')', ')', 'self', '.', 'restart', '(', ')', 'elif', 'sig', '==', 'signal', '.', 'SIGUSR2', ':', '# write the process status to stdout', 'self', '.', 'status', '(', ')', 'pass', 'sleep', '(', '5', ')', 'except', 'StopIteration', ':', 'self', '.', 'stop', '(', ')', 'self', '.', 'delete_pid', '(', ')', 'sys', '.', 'exit', '(', '0', ')', 'except', 'SystemExit', ':', 'raise', 'except', ':', 'logger', '.', 'error', '(', "'%s(%d), unhandled exception\\n%s'", '%', '(', 'self', '.', 'label', ',', 'getpid', '(', ')', ',', 'traceback', '.', 'format_exc', '(', ')', ')', ')']
the main loop
['the', 'main', 'loop']
train
https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L293-L347
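A stripped-down, standalone sketch of the pattern in this main loop: signal handlers only enqueue, and the loop drains the queue between supervision passes. It leaves out the Django model bookkeeping, and SIGUSR1 is Unix-only.

```python
import signal
import time

SIG_QUEUE = []

def _enqueue(signum, frame):
    # Keep the handler trivial; the loop below decides what to do.
    SIG_QUEUE.append(signum)

signal.signal(signal.SIGTERM, _enqueue)
signal.signal(signal.SIGUSR1, _enqueue)

def main_loop(manage, restart, heartbeat, interval=5):
    while True:
        sig = SIG_QUEUE.pop(0) if SIG_QUEUE else None
        heartbeat()                      # e.g. update a last_update timestamp
        if sig is None:
            manage()                     # regular pass over child processes
        elif sig == signal.SIGTERM:
            break                        # clean shutdown
        elif sig == signal.SIGUSR1:
            restart()                    # restart all child processes
        time.sleep(interval)
```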
2,296
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
TcExBatch.write_batch_json
def write_batch_json(self, content): """Write batch json data to a file.""" timestamp = str(time.time()).replace('.', '') batch_json_file = os.path.join( self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp) ) with open(batch_json_file, 'w') as fh: json.dump(content, fh, indent=2)
python
def write_batch_json(self, content): """Write batch json data to a file.""" timestamp = str(time.time()).replace('.', '') batch_json_file = os.path.join( self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp) ) with open(batch_json_file, 'w') as fh: json.dump(content, fh, indent=2)
['def', 'write_batch_json', '(', 'self', ',', 'content', ')', ':', 'timestamp', '=', 'str', '(', 'time', '.', 'time', '(', ')', ')', '.', 'replace', '(', "'.'", ',', "''", ')', 'batch_json_file', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'tcex', '.', 'args', '.', 'tc_temp_path', ',', "'batch-{}.json'", '.', 'format', '(', 'timestamp', ')', ')', 'with', 'open', '(', 'batch_json_file', ',', "'w'", ')', 'as', 'fh', ':', 'json', '.', 'dump', '(', 'content', ',', 'fh', ',', 'indent', '=', '2', ')']
Write batch json data to a file.
['Write', 'batch', 'json', 'data', 'to', 'a', 'file', '.']
train
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L1623-L1630
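A standalone sketch of the same helper with the ThreatConnect-specific temp path replaced by the system temp directory; the sample payload is made up.

```python
import json
import os
import tempfile
import time

def write_batch_json(content, out_dir=None):
    # Name the file after the current timestamp so repeated dumps never
    # overwrite each other.
    out_dir = out_dir or tempfile.gettempdir()
    timestamp = str(time.time()).replace('.', '')
    path = os.path.join(out_dir, 'batch-{}.json'.format(timestamp))
    with open(path, 'w') as fh:
        json.dump(content, fh, indent=2)
    return path

print(write_batch_json({'group': [], 'indicator': [{'summary': '198.51.100.1'}]}))
```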
2,297
ktbyers/netmiko
netmiko/cisco/cisco_nxos_ssh.py
CiscoNxosSSH.session_preparation
def session_preparation(self): """Prepare the session after the connection has been established.""" self._test_channel_read(pattern=r"[>#]") self.ansi_escape_codes = True self.set_base_prompt() self.disable_paging() # Clear the read buffer time.sleep(0.3 * self.global_delay_factor) self.clear_buffer()
python
def session_preparation(self): """Prepare the session after the connection has been established.""" self._test_channel_read(pattern=r"[>#]") self.ansi_escape_codes = True self.set_base_prompt() self.disable_paging() # Clear the read buffer time.sleep(0.3 * self.global_delay_factor) self.clear_buffer()
['def', 'session_preparation', '(', 'self', ')', ':', 'self', '.', '_test_channel_read', '(', 'pattern', '=', 'r"[>#]"', ')', 'self', '.', 'ansi_escape_codes', '=', 'True', 'self', '.', 'set_base_prompt', '(', ')', 'self', '.', 'disable_paging', '(', ')', '# Clear the read buffer', 'time', '.', 'sleep', '(', '0.3', '*', 'self', '.', 'global_delay_factor', ')', 'self', '.', 'clear_buffer', '(', ')']
Prepare the session after the connection has been established.
['Prepare', 'the', 'session', 'after', 'the', 'connection', 'has', 'been', 'established', '.']
train
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/cisco/cisco_nxos_ssh.py#L11-L19
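session_preparation() runs automatically while the SSH connection is being opened, so callers normally never invoke it directly. A hedged usage sketch; host and credentials are placeholders.

```python
from netmiko import ConnectHandler

conn = ConnectHandler(
    device_type="cisco_nxos",  # selects CiscoNxosSSH, including the prep step above
    host="192.0.2.10",
    username="admin",
    password="secret",
)
print(conn.find_prompt())
print(conn.send_command("show version | include uptime"))
conn.disconnect()
```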
2,298
openstack/stacktach-winchester
winchester/config.py
ConfigManager._load_yaml_config
def _load_yaml_config(cls, config_data, filename="(unknown)"): """Load a yaml config file.""" try: config = yaml.safe_load(config_data) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = ("Invalid YAML syntax in Configuration file " "%(file)s at line: %(line)s, column: %(column)s." % dict(file=filename, line=mark.line + 1, column=mark.column + 1)) else: errmsg = ("YAML error reading Configuration file " "%(file)s" % dict(file=filename)) logger.error(errmsg) raise logger.info("Configuration: %s", config) return config
python
def _load_yaml_config(cls, config_data, filename="(unknown)"): """Load a yaml config file.""" try: config = yaml.safe_load(config_data) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = ("Invalid YAML syntax in Configuration file " "%(file)s at line: %(line)s, column: %(column)s." % dict(file=filename, line=mark.line + 1, column=mark.column + 1)) else: errmsg = ("YAML error reading Configuration file " "%(file)s" % dict(file=filename)) logger.error(errmsg) raise logger.info("Configuration: %s", config) return config
['def', '_load_yaml_config', '(', 'cls', ',', 'config_data', ',', 'filename', '=', '"(unknown)"', ')', ':', 'try', ':', 'config', '=', 'yaml', '.', 'safe_load', '(', 'config_data', ')', 'except', 'yaml', '.', 'YAMLError', 'as', 'err', ':', 'if', 'hasattr', '(', 'err', ',', "'problem_mark'", ')', ':', 'mark', '=', 'err', '.', 'problem_mark', 'errmsg', '=', '(', '"Invalid YAML syntax in Configuration file "', '"%(file)s at line: %(line)s, column: %(column)s."', '%', 'dict', '(', 'file', '=', 'filename', ',', 'line', '=', 'mark', '.', 'line', '+', '1', ',', 'column', '=', 'mark', '.', 'column', '+', '1', ')', ')', 'else', ':', 'errmsg', '=', '(', '"YAML error reading Configuration file "', '"%(file)s"', '%', 'dict', '(', 'file', '=', 'filename', ')', ')', 'logger', '.', 'error', '(', 'errmsg', ')', 'raise', 'logger', '.', 'info', '(', '"Configuration: %s"', ',', 'config', ')', 'return', 'config']
Load a yaml config file.
['Load', 'a', 'yaml', 'config', 'file', '.']
train
https://github.com/openstack/stacktach-winchester/blob/54f3ffc4a8fd84b6fb29ad9b65adb018e8927956/winchester/config.py#L129-L150
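The pattern worth noting in `_load_yaml_config` is reporting the parse position from `problem_mark` on failure. A standalone sketch of that pattern (requires PyYAML); the config snippet is made up.

```python
import yaml

config_data = """
pipeline:
  workers: 4
"""

try:
    config = yaml.safe_load(config_data)
except yaml.YAMLError as err:
    if hasattr(err, "problem_mark"):
        mark = err.problem_mark
        print("YAML error at line %d, column %d" % (mark.line + 1, mark.column + 1))
    raise
else:
    print(config)  # {'pipeline': {'workers': 4}}
```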
2,299
horazont/aioxmpp
aioxmpp/callbacks.py
Filter.filter
def filter(self, obj, *args, **kwargs): """ Filter the given object through the filter chain. :param obj: The object to filter :param args: Additional arguments to pass to each filter function. :param kwargs: Additional keyword arguments to pass to each filter function. :return: The filtered object or :data:`None` See the documentation of :class:`Filter` on how filtering operates. Returns the object returned by the last function in the filter chain or :data:`None` if any function returned :data:`None`. """ for _, _, func in self._filter_order: obj = func(obj, *args, **kwargs) if obj is None: return None return obj
python
def filter(self, obj, *args, **kwargs): """ Filter the given object through the filter chain. :param obj: The object to filter :param args: Additional arguments to pass to each filter function. :param kwargs: Additional keyword arguments to pass to each filter function. :return: The filtered object or :data:`None` See the documentation of :class:`Filter` on how filtering operates. Returns the object returned by the last function in the filter chain or :data:`None` if any function returned :data:`None`. """ for _, _, func in self._filter_order: obj = func(obj, *args, **kwargs) if obj is None: return None return obj
['def', 'filter', '(', 'self', ',', 'obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'for', '_', ',', '_', ',', 'func', 'in', 'self', '.', '_filter_order', ':', 'obj', '=', 'func', '(', 'obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'obj', 'is', 'None', ':', 'return', 'None', 'return', 'obj']
Filter the given object through the filter chain. :param obj: The object to filter :param args: Additional arguments to pass to each filter function. :param kwargs: Additional keyword arguments to pass to each filter function. :return: The filtered object or :data:`None` See the documentation of :class:`Filter` on how filtering operates. Returns the object returned by the last function in the filter chain or :data:`None` if any function returned :data:`None`.
['Filter', 'the', 'given', 'object', 'through', 'the', 'filter', 'chain', '.']
train
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/callbacks.py#L790-L809
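The docstring above describes chain semantics where each function receives the previous result and a `None` return short-circuits the chain. The sketch below mimics that behaviour in plain Python rather than calling aioxmpp's own registration API.

```python
def run_filters(obj, funcs):
    """Pass obj through funcs in order; stop and return None if any yields None."""
    for func in funcs:
        obj = func(obj)
        if obj is None:
            return None
    return obj


chain = [
    lambda s: s.strip(),
    lambda s: s or None,   # drop empty strings by returning None
    lambda s: s.upper(),
]

print(run_filters("  hello  ", chain))  # HELLO
print(run_filters("   ", chain))        # None
```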