column                      | type          | range
----------------------------+---------------+-------------
Unnamed: 0                  | int64         | 0 – 10k
repository_name             | stringlengths | 7 – 54
func_path_in_repository     | stringlengths | 5 – 223
func_name                   | stringlengths | 1 – 134
whole_func_string           | stringlengths | 100 – 30.3k
language                    | stringclasses | 1 value
func_code_string            | stringlengths | 100 – 30.3k
func_code_tokens            | stringlengths | 138 – 33.2k
func_documentation_string   | stringlengths | 1 – 15k
func_documentation_tokens   | stringlengths | 5 – 5.14k
split_name                  | stringclasses | 1 value
func_code_url               | stringlengths | 91 – 315
Row 5,600
repo: jeffknupp/sandman2
path: sandman2/scripts/sandman2ctl.py
func: main
```python
def main():
    """Main entry point for script."""
    parser = argparse.ArgumentParser(
        description='Auto-generate a RESTful API service '
                    'from an existing database.'
    )
    parser.add_argument(
        'URI',
        help='Database URI in the format '
             'postgresql+psycopg2://user:password@host/database')
    parser.add_argument(
        '-d', '--debug',
        help='Turn on debug logging',
        action='store_true',
        default=False)
    parser.add_argument(
        '-p', '--port',
        help='Port for service to listen on',
        default=5000)
    parser.add_argument(
        '-l', '--local-only',
        help='Only provide service on localhost (will not be accessible'
             ' from other machines)',
        action='store_true',
        default=False)
    parser.add_argument(
        '-r', '--read-only',
        help='Make all database resources read-only (i.e. only the HTTP GET '
             'method is supported)',
        action='store_true',
        default=False)
    parser.add_argument(
        '-s', '--schema',
        help='Use this named schema instead of default',
        default=None)
    args = parser.parse_args()
    app = get_app(args.URI, read_only=args.read_only, schema=args.schema)
    if args.debug:
        app.config['DEBUG'] = True
    if args.local_only:
        host = '127.0.0.1'
    else:
        host = '0.0.0.0'
    app.config['SECRET_KEY'] = '42'
    app.run(host=host, port=int(args.port))
```
split: train
url: https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/scripts/sandman2ctl.py#L9-L59
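A detail worth noting in `main()` above: argparse converts dashes in long option names to underscores on the parsed namespace, which is why the code reads `args.local_only` and `args.read_only`. A minimal, self-contained check:

```python
import argparse

# Dashes in long option names become underscores on the parsed namespace.
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--local-only', action='store_true', default=False)
args = parser.parse_args(['--local-only'])
print(args.local_only)  # True
```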
Row 5,601
repo: BlockHub/slackbot
path: slackbot/__init__.py
func: Bot.__parse_direct_mention
```python
def __parse_direct_mention(self, message_text):
    """
    Finds a direct mention (a mention that is at the beginning) in message
    text and returns the user ID which was mentioned. If there is no direct
    mention, returns (None, None)
    """
    matches = re.search(MENTION_REGEX, message_text)
    # the first group contains the username, the second group contains the remaining message
    return (matches.group(1), listify(matches.group(2).strip())) if matches else (None, None)
```
split: train
url: https://github.com/BlockHub/slackbot/blob/c37201516841cb322b4943f26e432ae717fe36a3/slackbot/__init__.py#L28-L35
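The excerpt does not include `MENTION_REGEX` or `listify`, but the two-group structure the inline comment describes can be sketched with a plausible Slack-style pattern (an illustrative stand-in, not the module's actual constant):

```python
import re

# Plausible Slack mention pattern: group 1 is the user ID, group 2 the rest.
# This is an assumption standing in for the module's MENTION_REGEX.
MENTION_REGEX = r"^<@(\w+)>(.*)"

m = re.search(MENTION_REGEX, "<@U024BE7LH> deploy staging")
print(m.group(1))          # U024BE7LH
print(m.group(2).strip())  # deploy staging
```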
Row 5,602
repo: Duke-GCB/DukeDSClient
path: ddsc/core/ddsapi.py
func: DataServiceApi.create_activity
```python
def create_activity(self, activity_name, desc=None, started_on=None, ended_on=None):
    """
    Send POST to /activities creating a new activity with the specified name and desc.
    Raises DataServiceError on error.
    :param activity_name: str name of the activity
    :param desc: str description of the activity (optional)
    :param started_on: str datetime when the activity started (optional)
    :param ended_on: str datetime when the activity ended (optional)
    :return: requests.Response containing the successful result
    """
    data = {
        "name": activity_name,
        "description": desc,
        "started_on": started_on,
        "ended_on": ended_on
    }
    return self._post("/activities", data)
```
split: train
url: https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ddsapi.py#L815-L831
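For illustration, a minimal sketch of the request this method delegates to its `_post` helper, assuming a plain requests call and a hypothetical base URL (the real client's endpoint configuration and auth handling are not shown in this excerpt):

```python
import requests

# Hypothetical base URL; the real endpoint and auth come from the client config.
base_url = "https://api.dataservice.example/api/v1"
data = {
    "name": "align-reads",   # activity_name
    "description": None,     # desc
    "started_on": None,
    "ended_on": None,
}
response = requests.post(base_url + "/activities", json=data)
```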
Row 5,603
repo: LogicalDash/LiSE
path: ELiDE/ELiDE/screen.py
func: MainScreen.next_turn
```python
def next_turn(self, *args):
    """Advance time by one turn, if it's not blocked.

    Block time by setting ``engine.universal['block'] = True``
    """
    if self.tmp_block:
        return
    eng = self.app.engine
    dial = self.dialoglayout
    if eng.universal.get('block'):
        Logger.info("MainScreen: next_turn blocked, delete universal['block'] to unblock")
        return
    if dial.idx < len(dial.todo):
        Logger.info("MainScreen: not advancing time while there's a dialog")
        return
    self.tmp_block = True
    self.app.unbind(
        branch=self.app._push_time,
        turn=self.app._push_time,
        tick=self.app._push_time
    )
    eng.next_turn(cb=self._update_from_next_turn)
```
split: train
url: https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/screen.py#L330-L350
Row 5,604
repo: StanfordVL/robosuite
path: robosuite/controllers/baxter_ik_controller.py
func: BaxterIKController.get_control
```python
def get_control(self, right=None, left=None):
    """
    Returns joint velocities to control the robot after the target end
    effector positions and orientations are updated from arguments @left
    and @right. If no arguments are provided, joint velocities will be
    computed based on the previously recorded target.

    Args:
        left (dict): A dictionary to control the left end effector with these keys.
            dpos (numpy array): a 3 dimensional array corresponding to the
                desired change in x, y, and z left end effector position.
            rotation (numpy array): a rotation matrix of shape (3, 3)
                corresponding to the desired orientation of the left end effector.
        right (dict): A dictionary to control the right end effector with these keys.
            dpos (numpy array): a 3 dimensional array corresponding to the
                desired change in x, y, and z right end effector position.
            rotation (numpy array): a rotation matrix of shape (3, 3)
                corresponding to the desired orientation of the right end effector.

    Returns:
        velocities (numpy array): a flat array of joint velocity commands
            to apply to try and achieve the desired input control.
    """
    # Sync joint positions for IK.
    self.sync_ik_robot(self.robot_jpos_getter())

    # Compute new target joint positions if arguments are provided
    if (right is not None) and (left is not None):
        self.commanded_joint_positions = self.joint_positions_for_eef_command(
            right, left
        )

    # P controller from joint positions (from IK) to velocities
    velocities = np.zeros(14)
    deltas = self._get_current_error(
        self.robot_jpos_getter(), self.commanded_joint_positions
    )
    for i, delta in enumerate(deltas):
        velocities[i] = -2 * delta
    velocities = self.clip_joint_velocities(velocities)
    self.commanded_joint_velocities = velocities
    return velocities
```
split: train
url: https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/controllers/baxter_ik_controller.py#L46-L96
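The control step above is a plain proportional controller. A numpy toy of the same arithmetic, with an assumed clip range standing in for `clip_joint_velocities`:

```python
import numpy as np

# Velocity is -2 times the error between current and commanded joint
# positions, then clipped. The clip range here is an assumption.
current = np.array([0.10, -0.30, 0.00])
commanded = np.array([0.00, -0.25, 0.20])
deltas = current - commanded
velocities = np.clip(-2 * deltas, -1.0, 1.0)
print(velocities)  # [-0.2  0.1  0.4]
```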
Row 5,605
repo: wright-group/WrightTools
path: WrightTools/kit/_list.py
func: intersperse
```python
def intersperse(lis, value):
    """Put value between each existing item in list.

    Parameters
    ----------
    lis : list
        List to intersperse.
    value : object
        Value to insert.

    Returns
    -------
    list
        interspersed list
    """
    out = [value] * (len(lis) * 2 - 1)
    out[0::2] = lis
    return out
```
split: train
url: https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/kit/_list.py#L55-L72
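The slice-assignment trick above is compact enough to miss: preallocate the full output filled with the separator, then overwrite every even index with the original items. Reproduced standalone:

```python
# Reproducing the slice-assignment trick from intersperse() above.
lis, value = ['a', 'b', 'c'], '-'
out = [value] * (len(lis) * 2 - 1)  # allocate every output slot up front
out[0::2] = lis                     # original items land on even indices
print(out)  # ['a', '-', 'b', '-', 'c']
```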
Row 5,606
repo: WebarchivCZ/WA-KAT
path: src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py
func: AnalysisRunnerAdapter.start
```python
def start(cls, ev=None):
    """
    Start the analysis.
    """
    ViewController.log_view.add("Beginning AnalysisRunner request..")

    # reset all inputs
    ViewController.reset_bars()

    # read the urlbox
    url = ViewController.url.strip()

    # make sure, that `url` was filled
    if not url:
        # Czech: "The URL must be filled in."
        ViewController.urlbox_error.show("URL musí být vyplněna.")
        return

    if is_issn(url):
        ViewController.url_progressbar.hide()
        ViewController.url = ""
        ViewController.issn = url
        AlephISSNReaderAdapter.start()
        return

    ViewController.urlbox_error.hide()

    # normalize the `url`
    if not (url.startswith("http://") or url.startswith("https://")):
        url = "http://" + url
        ViewController.url = url  # store normalized url back to input

    make_request(
        url=join(settings.API_PATH, "analyze"),
        data={'url': url},
        on_complete=cls.on_complete
    )
```
split: train
url: https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py#L118-L153
Row 5,607
repo: google-research/batch-ppo
path: agents/algorithms/ppo/ppo.py
func: PPO._policy_loss
```python
def _policy_loss(
        self, old_policy, policy, action, advantage, length):
    """Compute the policy loss composed of multiple components.

    1. The policy gradient loss is importance sampled from the
       data-collecting policy at the beginning of training.
    2. The second term is a KL penalty between the policy at the beginning
       of training and the current policy.
    3. Additionally, if this KL already changed more than twice the target
       amount, we activate a strong penalty discouraging further divergence.

    Args:
      old_policy: Action distribution of the behavioral policy.
      policy: Sequences of distribution params of the current policy.
      action: Sequences of actions.
      advantage: Sequences of advantages.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('policy_loss'):
        kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
        # Infinite values in the KL, even for padding frames that we mask
        # out, cause NaN gradients since TensorFlow computes gradients with
        # respect to the whole input tensor.
        kl = tf.check_numerics(kl, 'kl')
        kl = tf.reduce_mean(self._mask(kl, length), 1)
        policy_gradient = tf.exp(
            policy.log_prob(action) - old_policy.log_prob(action))
        surrogate_loss = -tf.reduce_mean(self._mask(
            policy_gradient * tf.stop_gradient(advantage), length), 1)
        surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
        kl_penalty = self._penalty * kl
        cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
        cutoff_count = tf.reduce_sum(
            tf.cast(kl > cutoff_threshold, tf.int32))
        with tf.control_dependencies([tf.cond(
                cutoff_count > 0,
                lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '),
                int)]):
            kl_cutoff = (
                self._config.kl_cutoff_coef *
                tf.cast(kl > cutoff_threshold, tf.float32) *
                (kl - cutoff_threshold) ** 2)
        policy_loss = surrogate_loss + kl_penalty + kl_cutoff
        entropy = tf.reduce_mean(policy.entropy(), axis=1)
        if self._config.entropy_regularization:
            policy_loss -= self._config.entropy_regularization * entropy
        summary = tf.summary.merge([
            tf.summary.histogram('entropy', entropy),
            tf.summary.histogram('kl', kl),
            tf.summary.histogram('surrogate_loss', surrogate_loss),
            tf.summary.histogram('kl_penalty', kl_penalty),
            tf.summary.histogram('kl_cutoff', kl_cutoff),
            tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
            tf.summary.histogram('policy_loss', policy_loss),
            tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
            tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
            tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
        policy_loss = tf.reduce_mean(policy_loss, 0)
        return tf.check_numerics(policy_loss, 'policy_loss'), summary
```
split: train
url: https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L443-L503
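The quadratic KL cutoff is the least standard piece of the loss above. A numpy toy with assumed config values shows its shape: zero until the per-sequence KL exceeds `kl_target * kl_cutoff_factor`, then growing quadratically:

```python
import numpy as np

# Assumed config values, for illustration only.
kl_target, kl_cutoff_factor, kl_cutoff_coef = 0.01, 2.0, 1000.0

kl = np.array([0.005, 0.015, 0.02, 0.05])
threshold = kl_target * kl_cutoff_factor         # 0.02
cutoff = kl_cutoff_coef * (kl > threshold) * (kl - threshold) ** 2
print(cutoff)  # [0.  0.  0.  0.9]
```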
Row 5,608
repo: greenbender/python-fifo
path: fifo/__init__.py
func: Fifo.peekuntil
```python
def peekuntil(self, token, size=0):
    """
    Peeks for token into the FIFO.

    Performs the same function as readuntil() without removing data from
    the FIFO. See readuntil() for further information.
    """
    self.__append()
    i = self.buf.find(token, self.pos)
    if i < 0:
        index = max(len(token) - 1, size)
        newpos = max(len(self.buf) - index, self.pos)
        return False, self.buf[self.pos:newpos]
    newpos = i + len(token)
    return True, self.buf[self.pos:newpos]
```
split: train
url: https://github.com/greenbender/python-fifo/blob/ffabb6c8b844086dd3a490d0b42bbb5aa8fbb932/fifo/__init__.py#L216-L232
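The non-matching branch above is subtle: it holds back the last `len(token) - 1` bytes (or `size`, if larger) so that a token split across two writes is never handed out prematurely. A standalone run of the same arithmetic:

```python
# A token split across writes must stay buffered until it can match.
buf, pos, token = b"hello wor", 0, b"world"
i = buf.find(token, pos)
if i < 0:
    index = max(len(token) - 1, 0)   # size=0 here
    newpos = max(len(buf) - index, pos)
    print((False, buf[pos:newpos]))  # (False, b'hello'); b' wor' stays buffered
```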
Row 5,609
repo: bioidiap/gridtk
path: gridtk/manager.py
func: JobManager._create
```python
def _create(self):
    """Creates a new and empty database."""
    from .tools import makedirs_safe

    # create directory for sql database
    makedirs_safe(os.path.dirname(self._database))

    # create all the tables
    Base.metadata.create_all(self._engine)
    logger.debug("Created new empty database '%s'" % self._database)
```
split: train
url: https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/manager.py#L97-L106
Row 5,610
repo: pantsbuild/pants
path: src/python/pants/engine/struct.py
func: Struct.kwargs
```python
def kwargs(self):
    """Returns a dict of the kwargs for this Struct which were not interpreted by the baseclass.

    This excludes fields like `extends`, `merges`, and `abstract`, which are
    consumed by SerializableFactory.create and Validatable.validate.
    """
    return {k: v for k, v in self._kwargs.items() if k not in self._INTERNAL_FIELDS}
```
split: train
url: https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/struct.py#L95-L101
Row 5,611
repo: pyviz/holoviews
path: holoviews/core/tree.py
func: AttrTree._propagate
```python
def _propagate(self, path, val):
    """
    Propagate the value up to the root node.
    """
    if val == '_DELETE':
        if path in self.data:
            del self.data[path]
        else:
            items = [(key, v) for key, v in self.data.items()
                     if not all(k == p for k, p in zip(key, path))]
            self.data = OrderedDict(items)
    else:
        self.data[path] = val
    if self.parent is not None:
        self.parent._propagate((self.identifier,) + path, val)
```
split: train
url: https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/tree.py#L146-L160
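The fallback deletion branch above prunes not just `path` itself but every stored key that agrees with `path` elementwise over their common prefix (note that `zip` truncates to the shorter tuple). A standalone run of the same comprehension:

```python
from collections import OrderedDict

data = OrderedDict([(('a', 'b'), 1), (('a', 'c'), 2), (('d',), 3)])
path = ('a', 'c')
items = [(key, v) for key, v in data.items()
         if not all(k == p for k, p in zip(key, path))]
print(OrderedDict(items))  # OrderedDict([(('a', 'b'), 1), (('d',), 3)])
```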
Row 5,612
repo: StackStorm/pybind
path: pybind/nos/v6_0_2f/interface/fortygigabitethernet/ipv6/ipv6_config/address/__init__.py
func: address._set_ipv6_address
```python
def _set_ipv6_address(self, v, load=False):
    """
    Setter method for ipv6_address, mapped from YANG variable
    /interface/fortygigabitethernet/ipv6/ipv6_config/address/ipv6_address (list)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_ipv6_address is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_ipv6_address() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType(
                "address", ipv6_address.ipv6_address,
                yang_name="ipv6-address", rest_name="ipv6-address",
                parent=self, is_container='list', user_ordered=True,
                path_helper=self._path_helper, yang_keys='address',
                extensions={u'tailf-common': {
                    u'info': u'Set the IP address of an interface',
                    u'cli-no-key-completion': None,
                    u'cli-suppress-mode': None,
                    u'cli-compact-syntax': None,
                    u'cli-drop-node-name': None,
                    u'cli-no-match-completion': None,
                    u'callpoint': u'phy-intf-ipv6-addr-cp'}}),
            is_container='list',
            yang_name="ipv6-address", rest_name="ipv6-address",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Set the IP address of an interface',
                u'cli-no-key-completion': None,
                u'cli-suppress-mode': None,
                u'cli-compact-syntax': None,
                u'cli-drop-node-name': None,
                u'cli-no-match-completion': None,
                u'callpoint': u'phy-intf-ipv6-addr-cp'}},
            namespace='urn:brocade.com:mgmt:brocade-ipv6-config',
            defining_module='brocade-ipv6-config',
            yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_address must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)""",
        })
    self.__ipv6_address = t
    if hasattr(self, '_set'):
        self._set()
```
split: train
url: https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/fortygigabitethernet/ipv6/ipv6_config/address/__init__.py#L161-L182
Row 5,613
repo: DataONEorg/d1_python
path: lib_client/src/d1_client/cnclient.py
func: CoordinatingNodeClient.setObsoletedBy
```python
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
    """See Also: setObsoletedByResponse()

    Args:
      pid:
      obsoletedByPid:
      serialVersion:
      vendorSpecific:

    Returns:
    """
    response = self.setObsoletedByResponse(
        pid, obsoletedByPid, serialVersion, vendorSpecific
    )
    return self._read_boolean_response(response)
```
split: train
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/cnclient.py#L192-L207
Row 5,614
repo: CalebBell/fluids
path: fluids/fittings.py
func: entrance_beveled
```python
def entrance_beveled(Di, l, angle, method='Rennels'):
    r'''Returns loss coefficient for a beveled or chamfered entrance to a
    pipe flush with the wall of a reservoir. This calculation has two
    methods available.

    The 'Rennels' and 'Idelchik' methods have similar trends, but the
    'Rennels' formulation is centered around a straight loss coefficient of
    0.57, so it is normally at least 0.07 higher.

    The Rennels [1]_ formulas are:

    .. math::
        K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2

    .. math::
        \lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d}
        \right)^{\frac{1-(l/d)^{1/4}}{2}}\right]

    .. math::
        C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90}
        \right)^{\frac{1}{1+l/d}}

    .. figure:: fittings/flush_mounted_beveled_entrance.png
       :scale: 30 %
       :alt: Beveled entrance mounted straight; after [1]_

    Parameters
    ----------
    Di : float
        Inside diameter of pipe, [m]
    l : float
        Length of bevel measured parallel to the pipe length, [m]
    angle : float
        Angle of bevel with respect to the pipe length, [degrees]
    method : str, optional
        One of 'Rennels', or 'Idelchik', [-]

    Returns
    -------
    K : float
        Loss coefficient [-]

    Notes
    -----
    A cheap way of getting a lower pressure drop. Little credible data is
    available.

    The table of data in [2]_ uses the angle for both bevels, so it runs
    from 0 to 180 degrees; this function follows the convention in [1]_
    which uses only one angle, with the angle varying from 0 to 90 degrees.

    .. plot:: plots/entrance_beveled.py

    Examples
    --------
    >>> entrance_beveled(Di=0.1, l=0.003, angle=45)
    0.45086864221916984
    >>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
    0.3995000000000001

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients
       of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
       Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
       Treniya). National Technical Information Service, 1966.
    '''
    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        Cb = (1 - angle/90.)*(angle/90.)**(1./(1 + l/Di))
        lbd = 1 + 0.622*(1 - 1.5*Cb*(l/Di)**((1 - (l/Di)**0.25)/2.))
        return 0.0696*(1 - Cb*l/Di)*lbd**2 + (lbd - 1.)**2
    elif method == 'Idelchik':
        return float(entrance_beveled_Idelchik_obj(angle*2.0, l/Di))
    else:
        raise ValueError('Specified method not recognized; methods are %s'
                         % (entrance_beveled_methods))
```
train
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L654-L733
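The 'Rennels' branch of this record is pure arithmetic and can be checked in isolation. A minimal standalone sketch, reproducing only the Rennels formulas quoted above (no fluids import; the interpolated 'Idelchik' table object is omitted), which reproduces the record's first doctest value:

def entrance_beveled_rennels(Di, l, angle):
    # Rennels & Hudson (2012): bevel coefficient Cb, intermediate lambda, then K
    r = l/Di
    Cb = (1.0 - angle/90.0)*(angle/90.0)**(1.0/(1.0 + r))
    lbd = 1.0 + 0.622*(1.0 - 1.5*Cb*r**((1.0 - r**0.25)/2.0))
    return 0.0696*(1.0 - Cb*r)*lbd**2 + (lbd - 1.0)**2

print(entrance_beveled_rennels(Di=0.1, l=0.003, angle=45))  # ~0.450868...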
5,615
gwastro/pycbc
pycbc/waveform/waveform.py
get_waveform_end_frequency
def get_waveform_end_frequency(template=None, **kwargs): """Return the stop frequency of a template """ input_params = props(template,**kwargs) approximant = kwargs['approximant'] if approximant in _filter_ends: return _filter_ends[approximant](**input_params) else: return None
python
def get_waveform_end_frequency(template=None, **kwargs): """Return the stop frequency of a template """ input_params = props(template,**kwargs) approximant = kwargs['approximant'] if approximant in _filter_ends: return _filter_ends[approximant](**input_params) else: return None
['def', 'get_waveform_end_frequency', '(', 'template', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'input_params', '=', 'props', '(', 'template', ',', '*', '*', 'kwargs', ')', 'approximant', '=', 'kwargs', '[', "'approximant'", ']', 'if', 'approximant', 'in', '_filter_ends', ':', 'return', '_filter_ends', '[', 'approximant', ']', '(', '*', '*', 'input_params', ')', 'else', ':', 'return', 'None']
Return the stop frequency of a template
['Return', 'the', 'stop', 'frequency', 'of', 'a', 'template']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/waveform.py#L1052-L1061
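get_waveform_end_frequency is a thin lookup into a module-level registry (_filter_ends) keyed by approximant name. A hypothetical standalone sketch of the same dispatch pattern — the registry entry and its formula are invented stand-ins, not pycbc's real models:

# Invented registry entry; pycbc populates _filter_ends with real models.
_filter_ends = {
    'ToyApproximant': lambda mass1=0.0, mass2=0.0, **kw: 4000.0/(mass1 + mass2),
}

def get_end_frequency(approximant, **params):
    func = _filter_ends.get(approximant)
    # Like the record, fall back to None when no model is registered.
    return func(**params) if func is not None else None

print(get_end_frequency('ToyApproximant', mass1=1.4, mass2=1.4))  # ~1428.57
print(get_end_frequency('UnknownApproximant'))                    # None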
5,616
remind101/stacker_blueprints
stacker_blueprints/empire/policies.py
logstream_policy
def logstream_policy(): """Policy needed for logspout -> kinesis log streaming.""" p = Policy( Statement=[ Statement( Effect=Allow, Resource=["*"], Action=[ kinesis.CreateStream, kinesis.DescribeStream, Action(kinesis.prefix, "AddTagsToStream"), Action(kinesis.prefix, "PutRecords") ])]) return p
python
def logstream_policy(): """Policy needed for logspout -> kinesis log streaming.""" p = Policy( Statement=[ Statement( Effect=Allow, Resource=["*"], Action=[ kinesis.CreateStream, kinesis.DescribeStream, Action(kinesis.prefix, "AddTagsToStream"), Action(kinesis.prefix, "PutRecords") ])]) return p
['def', 'logstream_policy', '(', ')', ':', 'p', '=', 'Policy', '(', 'Statement', '=', '[', 'Statement', '(', 'Effect', '=', 'Allow', ',', 'Resource', '=', '[', '"*"', ']', ',', 'Action', '=', '[', 'kinesis', '.', 'CreateStream', ',', 'kinesis', '.', 'DescribeStream', ',', 'Action', '(', 'kinesis', '.', 'prefix', ',', '"AddTagsToStream"', ')', ',', 'Action', '(', 'kinesis', '.', 'prefix', ',', '"PutRecords"', ')', ']', ')', ']', ')', 'return', 'p']
Policy needed for logspout -> kinesis log streaming.
['Policy', 'needed', 'for', 'logspout', '-', '>', 'kinesis', 'log', 'streaming', '.']
train
https://github.com/remind101/stacker_blueprints/blob/71624f6e1bd4ea794dc98fb621a04235e1931cae/stacker_blueprints/empire/policies.py#L245-L257
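The Policy/Statement/Action objects above come from the awacs helper library and render down to an ordinary IAM policy document. As a rough sketch of what this record's policy expresses, built with plain dicts so it runs without awacs (no claim is made that Empire needs no further actions):

import json

logstream_policy_doc = {
    "Statement": [{
        "Effect": "Allow",
        "Resource": ["*"],
        "Action": [
            "kinesis:CreateStream",
            "kinesis:DescribeStream",
            "kinesis:AddTagsToStream",
            "kinesis:PutRecords",
        ],
    }]
}
print(json.dumps(logstream_policy_doc, indent=2))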
5,617
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_macros.py
SearchForwardMiniApplet.qteAbort
def qteAbort(self, msgObj): """ Restore the original cursor position because the user hit abort. """ self.qteWidget.setCursorPosition(*self.cursorPosOrig) self.qteMain.qtesigAbort.disconnect(self.qteAbort)
python
def qteAbort(self, msgObj): """ Restore the original cursor position because the user hit abort. """ self.qteWidget.setCursorPosition(*self.cursorPosOrig) self.qteMain.qtesigAbort.disconnect(self.qteAbort)
['def', 'qteAbort', '(', 'self', ',', 'msgObj', ')', ':', 'self', '.', 'qteWidget', '.', 'setCursorPosition', '(', '*', 'self', '.', 'cursorPosOrig', ')', 'self', '.', 'qteMain', '.', 'qtesigAbort', '.', 'disconnect', '(', 'self', '.', 'qteAbort', ')']
Restore the original cursor position because the user hit abort.
['Restore', 'the', 'original', 'cursor', 'position', 'because', 'the', 'user', 'hit', 'abort', '.']
train
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_macros.py#L1581-L1586
5,618
radjkarl/fancyTools
fancytools/geometry/polylines.py
separate
def separate(polylines, f_mx_dist=2, mn_group_len=4): """ split polylines wherever crinkles are found """ s = [] for n in range(len(polylines) - 1, -1, -1): c = polylines[n] separated = False start = 0 for m in range(mn_group_len, len(c) - 1): if m - start < mn_group_len: continue m += 1 group = c[m - mn_group_len:m] x, y = group[:, 0], group[:, 1] asc, offs, _, _, _ = linregress(x, y) yfit = asc * x + offs # check whether next point would fit in: p1 = c[m] l = (x[0], yfit[0], p1[-1], asc * p1[-1] + offs) std = np.mean([line.distance(l, g) for g in group]) dist = line.distance(l, p1) if dist > 2 and dist > f_mx_dist * std: separated = True s.append(c[start:m - 1]) start = m - 1 if separated: if len(c) - start >= 2: s.append(c[start:]) polylines.pop(n) polylines.extend(s) return polylines
python
def separate(polylines, f_mx_dist=2, mn_group_len=4): """ split polylines wherever crinkles are found """ s = [] for n in range(len(polylines) - 1, -1, -1): c = polylines[n] separated = False start = 0 for m in range(mn_group_len, len(c) - 1): if m - start < mn_group_len: continue m += 1 group = c[m - mn_group_len:m] x, y = group[:, 0], group[:, 1] asc, offs, _, _, _ = linregress(x, y) yfit = asc * x + offs # check whether next point would fit in: p1 = c[m] l = (x[0], yfit[0], p1[-1], asc * p1[-1] + offs) std = np.mean([line.distance(l, g) for g in group]) dist = line.distance(l, p1) if dist > 2 and dist > f_mx_dist * std: separated = True s.append(c[start:m - 1]) start = m - 1 if separated: if len(c) - start >= 2: s.append(c[start:]) polylines.pop(n) polylines.extend(s) return polylines
['def', 'separate', '(', 'polylines', ',', 'f_mx_dist', '=', '2', ',', 'mn_group_len', '=', '4', ')', ':', 's', '=', '[', ']', 'for', 'n', 'in', 'range', '(', 'len', '(', 'polylines', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'c', '=', 'polylines', '[', 'n', ']', 'separated', '=', 'False', 'start', '=', '0', 'for', 'm', 'in', 'range', '(', 'mn_group_len', ',', 'len', '(', 'c', ')', '-', '1', ')', ':', 'if', 'm', '-', 'start', '<', 'mn_group_len', ':', 'continue', 'm', '+=', '1', 'group', '=', 'c', '[', 'm', '-', 'mn_group_len', ':', 'm', ']', 'x', ',', 'y', '=', 'group', '[', ':', ',', '0', ']', ',', 'group', '[', ':', ',', '1', ']', 'asc', ',', 'offs', ',', '_', ',', '_', ',', '_', '=', 'linregress', '(', 'x', ',', 'y', ')', 'yfit', '=', 'asc', '*', 'x', '+', 'offs', '# check whether next point would fit in:', 'p1', '=', 'c', '[', 'm', ']', 'l', '=', '(', 'x', '[', '0', ']', ',', 'yfit', '[', '0', ']', ',', 'p1', '[', '-', '1', ']', ',', 'asc', '*', 'p1', '[', '-', '1', ']', '+', 'offs', ')', 'std', '=', 'np', '.', 'mean', '(', '[', 'line', '.', 'distance', '(', 'l', ',', 'g', ')', 'for', 'g', 'in', 'group', ']', ')', 'dist', '=', 'line', '.', 'distance', '(', 'l', ',', 'p1', ')', 'if', 'dist', '>', '2', 'and', 'dist', '>', 'f_mx_dist', '*', 'std', ':', 'separated', '=', 'True', 's', '.', 'append', '(', 'c', '[', 'start', ':', 'm', '-', '1', ']', ')', 'start', '=', 'm', '-', '1', 'if', 'separated', ':', 'if', 'len', '(', 'c', ')', '-', 'start', '>=', '2', ':', 's', '.', 'append', '(', 'c', '[', 'start', ':', ']', ')', 'polylines', '.', 'pop', '(', 'n', ')', 'polylines', '.', 'extend', '(', 's', ')', 'return', 'polylines']
split polylines wherever crinkles are found
['split', 'polylines', 'wherever', 'crinkles', 'are', 'found']
train
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/geometry/polylines.py#L94-L133
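The split test inside separate() fits a line to the trailing group of points with scipy's linregress, measures the group's mean distance to that line, and splits when the next point lies more than f_mx_dist times that far away (and more than 2 units in absolute terms). A standalone sketch of just that test, with an ordinary perpendicular point-to-line distance standing in for fancytools' line.distance helper:

import numpy as np
from scipy.stats import linregress

def point_line_distance(x1, y1, x2, y2, px, py):
    # perpendicular distance from (px, py) to the line through the two points
    return abs((y2 - y1)*px - (x2 - x1)*py + x2*y1 - y2*x1) / np.hypot(x2 - x1, y2 - y1)

group = np.array([[0, 0.0], [1, 0.1], [2, 0.0], [3, 0.1]])  # nearly straight
nxt = (4.0, 3.0)                                            # a sharp kink
asc, offs, _, _, _ = linregress(group[:, 0], group[:, 1])
l = (group[0, 0], asc*group[0, 0] + offs, nxt[0], asc*nxt[0] + offs)
std = np.mean([point_line_distance(*l, gx, gy) for gx, gy in group])
dist = point_line_distance(*l, *nxt)
print(dist > 2 and dist > 2*std)  # True -> crinkle found, split here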
5,619
saltstack/salt
salt/utils/network.py
mac2eui64
def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return
python
def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return
['def', 'mac2eui64', '(', 'mac', ',', 'prefix', '=', 'None', ')', ':', '# http://tools.ietf.org/html/rfc4291#section-2.5.1', 'eui64', '=', 're', '.', 'sub', '(', "r'[.:-]'", ',', "''", ',', 'mac', ')', '.', 'lower', '(', ')', 'eui64', '=', 'eui64', '[', '0', ':', '6', ']', '+', "'fffe'", '+', 'eui64', '[', '6', ':', ']', 'eui64', '=', 'hex', '(', 'int', '(', 'eui64', '[', '0', ':', '2', ']', ',', '16', ')', '|', '2', ')', '[', '2', ':', ']', '.', 'zfill', '(', '2', ')', '+', 'eui64', '[', '2', ':', ']', 'if', 'prefix', 'is', 'None', ':', 'return', "':'", '.', 'join', '(', 're', '.', 'findall', '(', "r'.{4}'", ',', 'eui64', ')', ')', 'else', ':', 'try', ':', 'net', '=', 'ipaddress', '.', 'ip_network', '(', 'prefix', ',', 'strict', '=', 'False', ')', 'euil', '=', 'int', '(', "'0x{0}'", '.', 'format', '(', 'eui64', ')', ',', '16', ')', 'return', "'{0}/{1}'", '.', 'format', '(', 'net', '[', 'euil', ']', ',', 'net', '.', 'prefixlen', ')', 'except', 'Exception', ':', 'return']
Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address
['Convert', 'a', 'MAC', 'address', 'to', 'a', 'EUI64', 'identifier', 'or', 'with', 'prefix', 'provided', 'a', 'full', 'IPv6', 'address']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L1343-L1361
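mac2eui64 is self-contained apart from salt's _urlparse-style aliases, so a stripped-down copy runs against the standard library directly (the original's bare except around the prefix branch is dropped here); the MAC and prefix below are arbitrary documentation values:

import re
import ipaddress

def mac2eui64(mac, prefix=None):
    # RFC 4291 section 2.5.1: splice in fffe and flip the universal/local bit
    eui64 = re.sub(r'[.:-]', '', mac).lower()
    eui64 = eui64[0:6] + 'fffe' + eui64[6:]
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]
    if prefix is None:
        return ':'.join(re.findall(r'.{4}', eui64))
    net = ipaddress.ip_network(prefix, strict=False)
    return '{0}/{1}'.format(net[int(eui64, 16)], net.prefixlen)

print(mac2eui64('00:25:96:12:34:56'))                   # 0225:96ff:fe12:3456
print(mac2eui64('00:25:96:12:34:56', '2001:db8::/64'))  # 2001:db8::225:96ff:fe12:3456/64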
5,620
resonai/ybt
yabt/builders/cpp.py
get_source_files
def get_source_files(target, build_context) -> list: """Return list of source files for `target`.""" all_sources = list(target.props.sources) for proto_dep_name in target.props.protos: proto_dep = build_context.targets[proto_dep_name] all_sources.extend(proto_dep.artifacts.get(AT.gen_cc).keys()) return all_sources
python
def get_source_files(target, build_context) -> list: """Return list of source files for `target`.""" all_sources = list(target.props.sources) for proto_dep_name in target.props.protos: proto_dep = build_context.targets[proto_dep_name] all_sources.extend(proto_dep.artifacts.get(AT.gen_cc).keys()) return all_sources
['def', 'get_source_files', '(', 'target', ',', 'build_context', ')', '->', 'list', ':', 'all_sources', '=', 'list', '(', 'target', '.', 'props', '.', 'sources', ')', 'for', 'proto_dep_name', 'in', 'target', '.', 'props', '.', 'protos', ':', 'proto_dep', '=', 'build_context', '.', 'targets', '[', 'proto_dep_name', ']', 'all_sources', '.', 'extend', '(', 'proto_dep', '.', 'artifacts', '.', 'get', '(', 'AT', '.', 'gen_cc', ')', '.', 'keys', '(', ')', ')', 'return', 'all_sources']
Return list of source files for `target`.
['Return', 'list', 'of', 'source', 'files', 'for', 'target', '.']
train
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/builders/cpp.py#L307-L313
5,621
Esri/ArcREST
src/arcrest/manageags/_system.py
System.registerWebAdaptor
def registerWebAdaptor(self, webAdaptorURL, machineName, machineIP, isAdminEnabled, description, httpPort, httpsPort): """ You can use this operation to register the ArcGIS Web Adaptor from your ArcGIS Server. By registering the Web Adaptor with the server, you are telling the server to trust requests (including security credentials) that have been submitted through this Web Adaptor. Inputs: webAdaptorURL - The URL of the web adaptor through which ArcGIS resources will be accessed. machineName - The machine name on which the web adaptor is installed. machineIP - The local IP address of the machine on which the web adaptor is installed. isAdminEnabled - A boolean flag to indicate if administrative access is allowed through the web adaptor. The default is false. description - An optional description for the web adaptor. httpPort - An optional parameter to indicate the HTTP port of the web adaptor. If this parameter is not provided, it is derived from the URL. httpsPort - An optional parameter to indicate the HTTPS port of the web adaptor. If this parameter is not provided, it is derived from the URL. """ url = self._url + "/webadaptors/register" params = { "f" : "json", "webAdaptorURL" : webAdaptorURL, "machineName" : machineName, "machineIP" : machineIP, "isAdminEnabled" : isAdminEnabled, "description" : description, "httpPort" : httpPort, "httpsPort" : httpsPort } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
python
def registerWebAdaptor(self, webAdaptorURL, machineName, machineIP, isAdminEnabled, description, httpPort, httpsPort): """ You can use this operation to register the ArcGIS Web Adaptor from your ArcGIS Server. By registering the Web Adaptor with the server, you are telling the server to trust requests (including security credentials) that have been submitted through this Web Adaptor. Inputs: webAdaptorURL - The URL of the web adaptor through which ArcGIS resources will be accessed. machineName - The machine name on which the web adaptor is installed. machineIP - The local IP address of the machine on which the web adaptor is installed. isAdminEnabled - A boolean flag to indicate if administrative access is allowed through the web adaptor. The default is false. description - An optional description for the web adaptor. httpPort - An optional parameter to indicate the HTTP port of the web adaptor. If this parameter is not provided, it is derived from the URL. httpsPort - An optional parameter to indicate the HTTPS port of the web adaptor. If this parameter is not provided, it is derived from the URL. """ url = self._url + "/webadaptors/register" params = { "f" : "json", "webAdaptorURL" : webAdaptorURL, "machineName" : machineName, "machineIP" : machineIP, "isAdminEnabled" : isAdminEnabled, "description" : description, "httpPort" : httpPort, "httpsPort" : httpsPort } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
['def', 'registerWebAdaptor', '(', 'self', ',', 'webAdaptorURL', ',', 'machineName', ',', 'machineIP', ',', 'isAdminEnabled', ',', 'description', ',', 'httpPort', ',', 'httpsPort', ')', ':', 'url', '=', 'self', '.', '_url', '+', '"/webadaptors/register"', 'params', '=', '{', '"f"', ':', '"json"', ',', '"webAdaptorURL"', ':', 'webAdaptorURL', ',', '"machineName"', ':', 'machineName', ',', '"machineIP"', ':', 'machineIP', ',', '"isAdminEnabled"', ':', 'isAdminEnabled', ',', '"description"', ':', 'description', ',', '"httpPort"', ':', 'httpPort', ',', '"httpsPort"', ':', 'httpsPort', '}', 'return', 'self', '.', '_post', '(', 'url', '=', 'url', ',', 'param_dict', '=', 'params', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ')']
You can use this operation to register the ArcGIS Web Adaptor from your ArcGIS Server. By registering the Web Adaptor with the server, you are telling the server to trust requests (including security credentials) that have been submitted through this Web Adaptor. Inputs: webAdaptorURL - The URL of the web adaptor through which ArcGIS resources will be accessed. machineName - The machine name on which the web adaptor is installed. machineIP - The local IP address of the machine on which the web adaptor is installed. isAdminEnabled - A boolean flag to indicate if administrative access is allowed through the web adaptor. The default is false. description - An optional description for the web adaptor. httpPort - An optional parameter to indicate the HTTP port of the web adaptor. If this parameter is not provided, it is derived from the URL. httpsPort - An optional parameter to indicate the HTTPS port of the web adaptor. If this parameter is not provided, it is derived from the URL.
['You', 'can', 'use', 'this', 'operation', 'to', 'register', 'the', 'ArcGIS', 'Web', 'Adaptor', 'from', 'your', 'ArcGIS', 'Server', '.', 'By', 'registering', 'the', 'Web', 'Adaptor', 'with', 'the', 'server', 'you', 'are', 'telling', 'the', 'server', 'to', 'trust', 'requests', '(', 'including', 'security', 'credentials', ')', 'that', 'have', 'been', 'submitted', 'through', 'this', 'Web', 'Adaptor', '.']
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L285-L325
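Under the hood this is one form-encoded POST to the site's admin endpoint. A hedged illustration using the requests library — the host, ports, and adaptor values are placeholders, and a real call also needs the admin token that ArcREST's security handler injects:

import requests

params = {
    "f": "json",
    "webAdaptorURL": "https://web.example.com/arcgis",
    "machineName": "GIS01.EXAMPLE.COM",
    "machineIP": "10.0.0.5",
    "isAdminEnabled": False,
    "description": "front-end web adaptor",
    "httpPort": 80,
    "httpsPort": 443,
}
url = "https://gis.example.com:6443/arcgis/admin/system/webadaptors/register"
response = requests.post(url, data=params)  # token/SSL handling omitted
print(response.json())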
5,622
valerymelou/django-active-link
active_link/templatetags/active_link_tags.py
active_link
def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs): """ Renders the given CSS class if the request path matches the path of the view. :param context: The context where the tag was called. Used to access the request object. :param viewnames: The name of the view or views separated by || (include namespaces if any). :param css_class: The CSS class to render. :param strict: If True, the tag will perform an exact match with the request path. :return: """ if css_class is None: css_class = getattr(settings, 'ACTIVE_LINK_CSS_CLASS', 'active') if strict is None: strict = getattr(settings, 'ACTIVE_LINK_STRICT', False) request = context.get('request') if request is None: # Can't work without the request object. return '' active = False views = viewnames.split('||') for viewname in views: path = reverse(viewname.strip(), args=args, kwargs=kwargs) request_path = escape_uri_path(request.path) if strict: active = request_path == path else: active = request_path.find(path) == 0 if active: break if active: return css_class return ''
python
def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs): """ Renders the given CSS class if the request path matches the path of the view. :param context: The context where the tag was called. Used to access the request object. :param viewnames: The name of the view or views separated by || (include namespaces if any). :param css_class: The CSS class to render. :param strict: If True, the tag will perform an exact match with the request path. :return: """ if css_class is None: css_class = getattr(settings, 'ACTIVE_LINK_CSS_CLASS', 'active') if strict is None: strict = getattr(settings, 'ACTIVE_LINK_STRICT', False) request = context.get('request') if request is None: # Can't work without the request object. return '' active = False views = viewnames.split('||') for viewname in views: path = reverse(viewname.strip(), args=args, kwargs=kwargs) request_path = escape_uri_path(request.path) if strict: active = request_path == path else: active = request_path.find(path) == 0 if active: break if active: return css_class return ''
['def', 'active_link', '(', 'context', ',', 'viewnames', ',', 'css_class', '=', 'None', ',', 'strict', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'css_class', 'is', 'None', ':', 'css_class', '=', 'getattr', '(', 'settings', ',', "'ACTIVE_LINK_CSS_CLASS'", ',', "'active'", ')', 'if', 'strict', 'is', 'None', ':', 'strict', '=', 'getattr', '(', 'settings', ',', "'ACTIVE_LINK_STRICT'", ',', 'False', ')', 'request', '=', 'context', '.', 'get', '(', "'request'", ')', 'if', 'request', 'is', 'None', ':', "# Can't work without the request object.", 'return', "''", 'active', '=', 'False', 'views', '=', 'viewnames', '.', 'split', '(', "'||'", ')', 'for', 'viewname', 'in', 'views', ':', 'path', '=', 'reverse', '(', 'viewname', '.', 'strip', '(', ')', ',', 'args', '=', 'args', ',', 'kwargs', '=', 'kwargs', ')', 'request_path', '=', 'escape_uri_path', '(', 'request', '.', 'path', ')', 'if', 'strict', ':', 'active', '=', 'request_path', '==', 'path', 'else', ':', 'active', '=', 'request_path', '.', 'find', '(', 'path', ')', '==', '0', 'if', 'active', ':', 'break', 'if', 'active', ':', 'return', 'css_class', 'return', "''"]
Renders the given CSS class if the request path matches the path of the view. :param context: The context where the tag was called. Used to access the request object. :param viewnames: The name of the view or views separated by || (include namespaces if any). :param css_class: The CSS class to render. :param strict: If True, the tag will perform an exact match with the request path. :return:
['Renders', 'the', 'given', 'CSS', 'class', 'if', 'the', 'request', 'path', 'matches', 'the', 'path', 'of', 'the', 'view', '.', ':', 'param', 'context', ':', 'The', 'context', 'where', 'the', 'tag', 'was', 'called', '.', 'Used', 'to', 'access', 'the', 'request', 'object', '.', ':', 'param', 'viewnames', ':', 'The', 'name', 'of', 'the', 'view', 'or', 'views', 'separated', 'by', '||', '(', 'include', 'namespaces', 'if', 'any', ')', '.', ':', 'param', 'css_class', ':', 'The', 'CSS', 'class', 'to', 'render', '.', ':', 'param', 'strict', ':', 'If', 'True', 'the', 'tag', 'will', 'perform', 'an', 'exact', 'match', 'with', 'the', 'request', 'path', '.', ':', 'return', ':']
train
https://github.com/valerymelou/django-active-link/blob/4791e7af2fb6d77d3ef2a3f3be0241b5c3019beb/active_link/templatetags/active_link_tags.py#L14-L48
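The decision at the heart of the tag is a two-mode path comparison: exact equality when strict, otherwise a prefix match via find(path) == 0. That logic, sketched outside Django:

def is_active(request_path, view_path, strict=False):
    # strict: exact match; otherwise treat the reversed view path as a prefix
    if strict:
        return request_path == view_path
    return request_path.find(view_path) == 0

print(is_active('/blog/2019/', '/blog/'))               # True (prefix match)
print(is_active('/blog/2019/', '/blog/', strict=True))  # False (exact match)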
5,623
ipfs/py-ipfs-api
ipfsapi/client.py
Client.files_rm
def files_rm(self, path, recursive=False, **kwargs): """Removes a file from the MFS. .. code-block:: python >>> c.files_rm("/bla/file") b'' Parameters ---------- path : str Filepath within the MFS recursive : bool Recursively remove directories? """ kwargs.setdefault("opts", {"recursive": recursive}) args = (path,) return self._client.request('/files/rm', args, **kwargs)
python
def files_rm(self, path, recursive=False, **kwargs): """Removes a file from the MFS. .. code-block:: python >>> c.files_rm("/bla/file") b'' Parameters ---------- path : str Filepath within the MFS recursive : bool Recursively remove directories? """ kwargs.setdefault("opts", {"recursive": recursive}) args = (path,) return self._client.request('/files/rm', args, **kwargs)
['def', 'files_rm', '(', 'self', ',', 'path', ',', 'recursive', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '.', 'setdefault', '(', '"opts"', ',', '{', '"recursive"', ':', 'recursive', '}', ')', 'args', '=', '(', 'path', ',', ')', 'return', 'self', '.', '_client', '.', 'request', '(', "'/files/rm'", ',', 'args', ',', '*', '*', 'kwargs', ')']
Removes a file from the MFS. .. code-block:: python >>> c.files_rm("/bla/file") b'' Parameters ---------- path : str Filepath within the MFS recursive : bool Recursively remove directories?
['Removes', 'a', 'file', 'from', 'the', 'MFS', '.']
train
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L1989-L2007
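A hedged usage sketch: assuming a local IPFS daemon on the default API port, the client comes from ipfsapi.connect, and removing a directory from the MFS needs recursive=True:

import ipfsapi

c = ipfsapi.connect('127.0.0.1', 5001)      # requires a running IPFS daemon
c.files_mkdir('/example-dir')
c.files_rm('/example-dir', recursive=True)  # plain files work without the flag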
5,624
mcieslik-mctp/papy
src/papy/core.py
_TeePiper.next
def next(self): """ (internal) returns the next result from the ``itertools.tee`` object for the wrapped ``Piper`` instance or re-raises an ``Exception``. """ # do not acquire lock if NuMap is not finished. if self.finished: raise StopIteration # get per-tee lock self.piper.tee_locks[self.i].acquire() # get result or exception exception = True try: result = self.piper.tees[self.i].next() exception = False except StopIteration, result: self.finished = True except Exception, result: pass # release per-tee lock either self or next if self.s == self.stride or self.finished: self.s = 1 self.piper.tee_locks[(self.i + 1) % len(self.piper.tees)].release() else: self.s += 1 self.piper.tee_locks[self.i].release() if exception: raise result else: return result
python
def next(self): """ (internal) returns the next result from the ``itertools.tee`` object for the wrapped ``Piper`` instance or re-raises an ``Exception``. """ # do not acquire lock if NuMap is not finished. if self.finished: raise StopIteration # get per-tee lock self.piper.tee_locks[self.i].acquire() # get result or exception exception = True try: result = self.piper.tees[self.i].next() exception = False except StopIteration, result: self.finished = True except Exception, result: pass # release per-tee lock either self or next if self.s == self.stride or self.finished: self.s = 1 self.piper.tee_locks[(self.i + 1) % len(self.piper.tees)].release() else: self.s += 1 self.piper.tee_locks[self.i].release() if exception: raise result else: return result
['def', 'next', '(', 'self', ')', ':', '# do not acquire lock if NuMap is not finished.', 'if', 'self', '.', 'finished', ':', 'raise', 'StopIteration', '# get per-tee lock', 'self', '.', 'piper', '.', 'tee_locks', '[', 'self', '.', 'i', ']', '.', 'acquire', '(', ')', '# get result or exception', 'exception', '=', 'True', 'try', ':', 'result', '=', 'self', '.', 'piper', '.', 'tees', '[', 'self', '.', 'i', ']', '.', 'next', '(', ')', 'exception', '=', 'False', 'except', 'StopIteration', ',', 'result', ':', 'self', '.', 'finished', '=', 'True', 'except', 'Exception', ',', 'result', ':', 'pass', '# release per-tee lock either self or next', 'if', 'self', '.', 's', '==', 'self', '.', 'stride', 'or', 'self', '.', 'finished', ':', 'self', '.', 's', '=', '1', 'self', '.', 'piper', '.', 'tee_locks', '[', '(', 'self', '.', 'i', '+', '1', ')', '%', 'len', '(', 'self', '.', 'piper', '.', 'tees', ')', ']', '.', 'release', '(', ')', 'else', ':', 'self', '.', 's', '+=', '1', 'self', '.', 'piper', '.', 'tee_locks', '[', 'self', '.', 'i', ']', '.', 'release', '(', ')', 'if', 'exception', ':', 'raise', 'result', 'else', ':', 'return', 'result']
(internal) returns the next result from the ``itertools.tee`` object for the wrapped ``Piper`` instance or re-raises an ``Exception``.
['(', 'internal', ')', 'returns', 'the', 'next', 'result', 'from', 'the', 'itertools', '.', 'tee', 'object', 'for', 'the', 'wrapped', 'Piper', 'instance', 'or', 're', '-', 'raises', 'an', 'Exception', '.']
train
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L1728-L1759
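The wrapped object is one branch of an itertools.tee; what this record adds is the per-tee lock hand-off that serializes threads so each branch is consumed in stride-sized runs. The underlying tee behaviour, shown standalone:

from itertools import tee

a, b = tee(range(4), 2)  # two branches over one source iterator
print(list(a))           # [0, 1, 2, 3]
print(list(b))           # [0, 1, 2, 3] - each branch replays the stream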
5,625
saltstack/salt
salt/modules/debuild_pkgbuild.py
_get_src
def _get_src(tree_base, source, saltenv='base'): ''' Get the named sources and place them into the tree_base ''' parsed = _urlparse(source) sbase = os.path.basename(source) dest = os.path.join(tree_base, sbase) if parsed.scheme: __salt__['cp.get_url'](source, dest, saltenv=saltenv) else: shutil.copy(source, dest)
python
def _get_src(tree_base, source, saltenv='base'): ''' Get the named sources and place them into the tree_base ''' parsed = _urlparse(source) sbase = os.path.basename(source) dest = os.path.join(tree_base, sbase) if parsed.scheme: __salt__['cp.get_url'](source, dest, saltenv=saltenv) else: shutil.copy(source, dest)
['def', '_get_src', '(', 'tree_base', ',', 'source', ',', 'saltenv', '=', "'base'", ')', ':', 'parsed', '=', '_urlparse', '(', 'source', ')', 'sbase', '=', 'os', '.', 'path', '.', 'basename', '(', 'source', ')', 'dest', '=', 'os', '.', 'path', '.', 'join', '(', 'tree_base', ',', 'sbase', ')', 'if', 'parsed', '.', 'scheme', ':', '__salt__', '[', "'cp.get_url'", ']', '(', 'source', ',', 'dest', ',', 'saltenv', '=', 'saltenv', ')', 'else', ':', 'shutil', '.', 'copy', '(', 'source', ',', 'dest', ')']
Get the named sources and place them into the tree_base
['Get', 'the', 'named', 'sources', 'and', 'place', 'them', 'into', 'the', 'tree_base']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debuild_pkgbuild.py#L305-L315
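The branch hinges on whether urlparse reports a scheme: sources with one go through salt's cp.get_url downloader, bare paths through shutil.copy. The dispatch condition, checked against the standard library (salt imports this as _urlparse):

from urllib.parse import urlparse

for source in ('https://example.com/pkg.tar.gz', '/tmp/pkg.tar.gz'):
    scheme = urlparse(source).scheme
    print(source, '->', 'download' if scheme else 'local copy')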
5,626
ayust/kitnirc
skeleton/main.py
initialize_logging
def initialize_logging(args): """Configure the root logger with some sensible defaults.""" log_handler = logging.StreamHandler() log_formatter = logging.Formatter( "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s") log_handler.setFormatter(log_formatter) root_logger = logging.getLogger() root_logger.addHandler(log_handler) root_logger.setLevel(getattr(logging, args.loglevel))
python
def initialize_logging(args): """Configure the root logger with some sensible defaults.""" log_handler = logging.StreamHandler() log_formatter = logging.Formatter( "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s") log_handler.setFormatter(log_formatter) root_logger = logging.getLogger() root_logger.addHandler(log_handler) root_logger.setLevel(getattr(logging, args.loglevel))
['def', 'initialize_logging', '(', 'args', ')', ':', 'log_handler', '=', 'logging', '.', 'StreamHandler', '(', ')', 'log_formatter', '=', 'logging', '.', 'Formatter', '(', '"%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s"', ')', 'log_handler', '.', 'setFormatter', '(', 'log_formatter', ')', 'root_logger', '=', 'logging', '.', 'getLogger', '(', ')', 'root_logger', '.', 'addHandler', '(', 'log_handler', ')', 'root_logger', '.', 'setLevel', '(', 'getattr', '(', 'logging', ',', 'args', '.', 'loglevel', ')', ')']
Configure the root logger with some sensible defaults.
['Configure', 'the', 'root', 'logger', 'with', 'some', 'sensible', 'defaults', '.']
train
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/skeleton/main.py#L38-L47
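The same root-logger setup, reproduced without the argparse namespace so it runs on its own; the level name is passed directly and one record is emitted in the configured format:

import logging

def initialize_logging(loglevel='INFO'):
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s"))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(getattr(logging, loglevel))

initialize_logging('DEBUG')
logging.getLogger('skeleton').debug('bot starting up')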
5,627
googleapis/google-cloud-python
datastore/google/cloud/datastore/batch.py
Batch._add_partial_key_entity_pb
def _add_partial_key_entity_pb(self): """Adds a new mutation for an entity with a partial key. :rtype: :class:`.entity_pb2.Entity` :returns: The newly created entity protobuf that will be updated and sent with a commit. """ new_mutation = _datastore_pb2.Mutation() self._mutations.append(new_mutation) return new_mutation.insert
python
def _add_partial_key_entity_pb(self): """Adds a new mutation for an entity with a partial key. :rtype: :class:`.entity_pb2.Entity` :returns: The newly created entity protobuf that will be updated and sent with a commit. """ new_mutation = _datastore_pb2.Mutation() self._mutations.append(new_mutation) return new_mutation.insert
['def', '_add_partial_key_entity_pb', '(', 'self', ')', ':', 'new_mutation', '=', '_datastore_pb2', '.', 'Mutation', '(', ')', 'self', '.', '_mutations', '.', 'append', '(', 'new_mutation', ')', 'return', 'new_mutation', '.', 'insert']
Adds a new mutation for an entity with a partial key. :rtype: :class:`.entity_pb2.Entity` :returns: The newly created entity protobuf that will be updated and sent with a commit.
['Adds', 'a', 'new', 'mutation', 'for', 'an', 'entity', 'with', 'a', 'partial', 'key', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/batch.py#L107-L116
5,628
pywavefront/PyWavefront
pywavefront/material.py
MaterialParser.parse_map_Ks
def parse_map_Ks(self): """Specular color map""" Kd = os.path.join(self.dir, " ".join(self.values[1:])) self.this_material.set_texture_specular_color(Kd)
python
def parse_map_Ks(self): """Specular color map""" Kd = os.path.join(self.dir, " ".join(self.values[1:])) self.this_material.set_texture_specular_color(Kd)
['def', 'parse_map_Ks', '(', 'self', ')', ':', 'Kd', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'dir', ',', '" "', '.', 'join', '(', 'self', '.', 'values', '[', '1', ':', ']', ')', ')', 'self', '.', 'this_material', '.', 'set_texture_specular_color', '(', 'Kd', ')']
Specular color map
['Specular', 'color', 'map']
train
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/material.py#L223-L226
5,629
Erotemic/utool
utool/util_grabdata.py
grab_selenium_driver
def grab_selenium_driver(driver_name=None): """ pip install selenium -U """ from selenium import webdriver if driver_name is None: driver_name = 'firefox' if driver_name.lower() == 'chrome': grab_selenium_chromedriver() return webdriver.Chrome() elif driver_name.lower() == 'firefox': # grab_selenium_chromedriver() return webdriver.Firefox() else: raise AssertionError('unknown name = %r' % (driver_name,))
python
def grab_selenium_driver(driver_name=None): """ pip install selenium -U """ from selenium import webdriver if driver_name is None: driver_name = 'firefox' if driver_name.lower() == 'chrome': grab_selenium_chromedriver() return webdriver.Chrome() elif driver_name.lower() == 'firefox': # grab_selenium_chromedriver() return webdriver.Firefox() else: raise AssertionError('unknown name = %r' % (driver_name,))
['def', 'grab_selenium_driver', '(', 'driver_name', '=', 'None', ')', ':', 'from', 'selenium', 'import', 'webdriver', 'if', 'driver_name', 'is', 'None', ':', 'driver_name', '=', "'firefox'", 'if', 'driver_name', '.', 'lower', '(', ')', '==', "'chrome'", ':', 'grab_selenium_chromedriver', '(', ')', 'return', 'webdriver', '.', 'Chrome', '(', ')', 'elif', 'driver_name', '.', 'lower', '(', ')', '==', "'firefox'", ':', '# grab_selenium_chromedriver()', 'return', 'webdriver', '.', 'Firefox', '(', ')', 'else', ':', 'raise', 'AssertionError', '(', "'unknown name = %r'", '%', '(', 'driver_name', ',', ')', ')']
pip install selenium -U
['pip', 'install', 'selenium', '-', 'U']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L678-L692
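A hedged usage sketch for the returned driver, assuming selenium and a matching browser driver binary are already installed (the record's helper only auto-downloads chromedriver):

from selenium import webdriver

driver = webdriver.Firefox()  # needs geckodriver on the PATH
driver.get('https://example.com')
print(driver.title)
driver.quit()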
5,630
StackStorm/pybind
pybind/nos/v6_0_2f/port_profile/__init__.py
port_profile._set_security_profile
def _set_security_profile(self, v, load=False): """ Setter method for security_profile, mapped from YANG variable /port_profile/security_profile (container) If this variable is read-only (config: false) in the source YANG file, then _set_security_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_security_profile() directly. YANG Description: The Security profile. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=security_profile.security_profile, is_container='container', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """security_profile must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=security_profile.security_profile, is_container='container', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)""", }) self.__security_profile = t if hasattr(self, '_set'): self._set()
python
def _set_security_profile(self, v, load=False): """ Setter method for security_profile, mapped from YANG variable /port_profile/security_profile (container) If this variable is read-only (config: false) in the source YANG file, then _set_security_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_security_profile() directly. YANG Description: The Security profile. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=security_profile.security_profile, is_container='container', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """security_profile must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=security_profile.security_profile, is_container='container', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)""", }) self.__security_profile = t if hasattr(self, '_set'): self._set()
['def', '_set_security_profile', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'security_profile', '.', 'security_profile', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'True', ',', 'yang_name', '=', '"security-profile"', ',', 'rest_name', '=', '"security-profile"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'cli-full-command'", ':', 'None', ',', "u'cli-add-mode'", ':', 'None', ',', "u'cli-full-no'", ':', 'None', ',', "u'info'", ':', "u'Security profile'", ',', "u'callpoint'", ':', "u'security-profile-config'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-port-profile'", ',', 'defining_module', '=', "'brocade-port-profile'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""security_profile must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=security_profile.security_profile, is_container=\'container\', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'cli-add-mode\': None, u\'cli-full-no\': None, u\'info\': u\'Security profile\', u\'callpoint\': u\'security-profile-config\'}}, namespace=\'urn:brocade.com:mgmt:brocade-port-profile\', defining_module=\'brocade-port-profile\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__security_profile', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for security_profile, mapped from YANG variable /port_profile/security_profile (container) If this variable is read-only (config: false) in the source YANG file, then _set_security_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_security_profile() directly. YANG Description: The Security profile.
['Setter', 'method', 'for', 'security_profile', 'mapped', 'from', 'YANG', 'variable', '/', 'port_profile', '/', 'security_profile', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_security_profile', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_security_profile', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/port_profile/__init__.py#L298-L321
5,631
formiaczek/multi_key_dict
multi_key_dict.py
multi_key_dict.iterkeys
def iterkeys(self, key_type=None, return_all_keys=False): """ Returns an iterator over the dictionary's keys. @param key_type if specified, iterator for a dictionary of this type will be used. Otherwise (if not specified) tuples containing all (multiple) keys for this dictionary will be generated. @param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type.""" if(key_type is not None): the_key = str(key_type) if the_key in self.__dict__: for key in self.__dict__[the_key].keys(): if return_all_keys: yield self.__dict__[the_key][key] else: yield key else: for keys in self.items_dict.keys(): yield keys
python
def iterkeys(self, key_type=None, return_all_keys=False): """ Returns an iterator over the dictionary's keys. @param key_type if specified, iterator for a dictionary of this type will be used. Otherwise (if not specified) tuples containing all (multiple) keys for this dictionary will be generated. @param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type.""" if(key_type is not None): the_key = str(key_type) if the_key in self.__dict__: for key in self.__dict__[the_key].keys(): if return_all_keys: yield self.__dict__[the_key][key] else: yield key else: for keys in self.items_dict.keys(): yield keys
['def', 'iterkeys', '(', 'self', ',', 'key_type', '=', 'None', ',', 'return_all_keys', '=', 'False', ')', ':', 'if', '(', 'key_type', 'is', 'not', 'None', ')', ':', 'the_key', '=', 'str', '(', 'key_type', ')', 'if', 'the_key', 'in', 'self', '.', '__dict__', ':', 'for', 'key', 'in', 'self', '.', '__dict__', '[', 'the_key', ']', '.', 'keys', '(', ')', ':', 'if', 'return_all_keys', ':', 'yield', 'self', '.', '__dict__', '[', 'the_key', ']', '[', 'key', ']', 'else', ':', 'yield', 'key', 'else', ':', 'for', 'keys', 'in', 'self', '.', 'items_dict', '.', 'keys', '(', ')', ':', 'yield', 'keys']
Returns an iterator over the dictionary's keys. @param key_type if specified, iterator for a dictionary of this type will be used. Otherwise (if not specified) tuples containing all (multiple) keys for this dictionary will be generated. @param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type.
['Returns', 'an', 'iterator', 'over', 'the', 'dictionary', 's', 'keys', '.']
train
https://github.com/formiaczek/multi_key_dict/blob/320826cadad8ae8664042c627fa90f82ecd7b6b7/multi_key_dict.py#L201-L217
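A hedged usage sketch of the per-type iteration, assuming multi_key_dict's documented API in which one value is stored under several keys of different types:

from multi_key_dict import multi_key_dict

m = multi_key_dict()
m[1000, 'kitty'] = 'value1'  # one value reachable via an int and a str key
m[2000, 'tiger'] = 'value2'

print(sorted(m.iterkeys(str)))                        # ['kitty', 'tiger']
print(sorted(m.iterkeys(int, return_all_keys=True)))  # key tuples, e.g. (1000, 'kitty')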
5,632
awslabs/sockeye
sockeye/encoder.py
Embedding.encode
def encode(self, data: mx.sym.Symbol, data_length: Optional[mx.sym.Symbol], seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]: """ Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len). """ factor_embeddings = [] # type: List[mx.sym.Symbol] if self.is_source: data, *data_factors = mx.sym.split(data=data, num_outputs=self.config.num_factors, axis=2, squeeze_axis=True, name=self.prefix + "factor_split") if self.config.factor_configs is not None: for i, (factor_data, factor_config, factor_weight) in enumerate(zip(data_factors, self.config.factor_configs, self.embed_factor_weights)): factor_embeddings.append(mx.sym.Embedding(data=factor_data, input_dim=factor_config.vocab_size, weight=factor_weight, output_dim=factor_config.num_embed, name=self.prefix + "factor%d_embed" % i)) embedding = mx.sym.Embedding(data=data, input_dim=self.config.vocab_size, weight=self.embed_weight, output_dim=self.config.num_embed, name=self.prefix + "embed") if self.config.factor_configs is not None: if self.config.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT: embedding = mx.sym.concat(embedding, *factor_embeddings, dim=2, name=self.prefix + "embed_plus_factors") else: embedding = mx.sym.add_n(embedding, *factor_embeddings, name=self.prefix + "embed_plus_factors") if self.config.dropout > 0: embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout, name="source_embed_dropout") return embedding, data_length, seq_len
python
def encode(self, data: mx.sym.Symbol, data_length: Optional[mx.sym.Symbol], seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]: """ Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len). """ factor_embeddings = [] # type: List[mx.sym.Symbol] if self.is_source: data, *data_factors = mx.sym.split(data=data, num_outputs=self.config.num_factors, axis=2, squeeze_axis=True, name=self.prefix + "factor_split") if self.config.factor_configs is not None: for i, (factor_data, factor_config, factor_weight) in enumerate(zip(data_factors, self.config.factor_configs, self.embed_factor_weights)): factor_embeddings.append(mx.sym.Embedding(data=factor_data, input_dim=factor_config.vocab_size, weight=factor_weight, output_dim=factor_config.num_embed, name=self.prefix + "factor%d_embed" % i)) embedding = mx.sym.Embedding(data=data, input_dim=self.config.vocab_size, weight=self.embed_weight, output_dim=self.config.num_embed, name=self.prefix + "embed") if self.config.factor_configs is not None: if self.config.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT: embedding = mx.sym.concat(embedding, *factor_embeddings, dim=2, name=self.prefix + "embed_plus_factors") else: embedding = mx.sym.add_n(embedding, *factor_embeddings, name=self.prefix + "embed_plus_factors") if self.config.dropout > 0: embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout, name="source_embed_dropout") return embedding, data_length, seq_len
['def', 'encode', '(', 'self', ',', 'data', ':', 'mx', '.', 'sym', '.', 'Symbol', ',', 'data_length', ':', 'Optional', '[', 'mx', '.', 'sym', '.', 'Symbol', ']', ',', 'seq_len', ':', 'int', ')', '->', 'Tuple', '[', 'mx', '.', 'sym', '.', 'Symbol', ',', 'mx', '.', 'sym', '.', 'Symbol', ',', 'int', ']', ':', 'factor_embeddings', '=', '[', ']', '# type: List[mx.sym.Symbol]', 'if', 'self', '.', 'is_source', ':', 'data', ',', '', '*', 'data_factors', '=', 'mx', '.', 'sym', '.', 'split', '(', 'data', '=', 'data', ',', 'num_outputs', '=', 'self', '.', 'config', '.', 'num_factors', ',', 'axis', '=', '2', ',', 'squeeze_axis', '=', 'True', ',', 'name', '=', 'self', '.', 'prefix', '+', '"factor_split"', ')', 'if', 'self', '.', 'config', '.', 'factor_configs', 'is', 'not', 'None', ':', 'for', 'i', ',', '(', 'factor_data', ',', 'factor_config', ',', 'factor_weight', ')', 'in', 'enumerate', '(', 'zip', '(', 'data_factors', ',', 'self', '.', 'config', '.', 'factor_configs', ',', 'self', '.', 'embed_factor_weights', ')', ')', ':', 'factor_embeddings', '.', 'append', '(', 'mx', '.', 'sym', '.', 'Embedding', '(', 'data', '=', 'factor_data', ',', 'input_dim', '=', 'factor_config', '.', 'vocab_size', ',', 'weight', '=', 'factor_weight', ',', 'output_dim', '=', 'factor_config', '.', 'num_embed', ',', 'name', '=', 'self', '.', 'prefix', '+', '"factor%d_embed"', '%', 'i', ')', ')', 'embedding', '=', 'mx', '.', 'sym', '.', 'Embedding', '(', 'data', '=', 'data', ',', 'input_dim', '=', 'self', '.', 'config', '.', 'vocab_size', ',', 'weight', '=', 'self', '.', 'embed_weight', ',', 'output_dim', '=', 'self', '.', 'config', '.', 'num_embed', ',', 'name', '=', 'self', '.', 'prefix', '+', '"embed"', ')', 'if', 'self', '.', 'config', '.', 'factor_configs', 'is', 'not', 'None', ':', 'if', 'self', '.', 'config', '.', 'source_factors_combine', '==', 'C', '.', 'SOURCE_FACTORS_COMBINE_CONCAT', ':', 'embedding', '=', 'mx', '.', 'sym', '.', 'concat', '(', 'embedding', ',', '*', 'factor_embeddings', ',', 'dim', '=', '2', ',', 'name', '=', 'self', '.', 'prefix', '+', '"embed_plus_factors"', ')', 'else', ':', 'embedding', '=', 'mx', '.', 'sym', '.', 'add_n', '(', 'embedding', ',', '*', 'factor_embeddings', ',', 'name', '=', 'self', '.', 'prefix', '+', '"embed_plus_factors"', ')', 'if', 'self', '.', 'config', '.', 'dropout', '>', '0', ':', 'embedding', '=', 'mx', '.', 'sym', '.', 'Dropout', '(', 'data', '=', 'embedding', ',', 'p', '=', 'self', '.', 'config', '.', 'dropout', ',', 'name', '=', '"source_embed_dropout"', ')', 'return', 'embedding', ',', 'data_length', ',', 'seq_len']
Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len).
['Encodes', 'data', 'given', 'sequence', 'lengths', 'of', 'individual', 'examples', 'and', 'maximum', 'sequence', 'length', '.']
train
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L387-L431
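The factor-combination step is easiest to see on concrete arrays. A numpy sketch of the two source_factors_combine modes — embedding lookups are plain table indexing, after which factor embeddings are either summed into or concatenated onto the word embedding (all shapes here are illustrative, not sockeye defaults):

import numpy as np

rng = np.random.default_rng(0)
word_table = rng.normal(size=(10, 4))   # vocab 10, num_embed 4
factor_table = rng.normal(size=(5, 4))  # factor vocab 5, factor num_embed 4

words = np.array([[1, 2, 3]])           # (batch=1, seq_len=3)
factors = np.array([[0, 4, 2]])         # one aligned factor per token

word_emb = word_table[words]            # (1, 3, 4)
factor_emb = factor_table[factors]      # (1, 3, 4)

summed = word_emb + factor_emb                       # 'sum': dims must match
concat = np.concatenate([word_emb, factor_emb], -1)  # 'concat': dims add up
print(summed.shape, concat.shape)                    # (1, 3, 4) (1, 3, 8)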
5,633
OnroerendErfgoed/language-tags
language_tags/Tag.py
Tag.errors
def errors(self): """ Get the errors of the tag. If invalid then the list will consist of errors containing each a code and message explaining the error. Each error also refers to the respective (sub)tag(s). :return: list of errors of the tag. If the tag is valid, it returns an empty list. """ errors = [] data = self.data error = self.error # Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn). if 'record' in data: if 'Deprecated' in data['record']: errors.append(error(self.ERR_DEPRECATED)) # Only check every subtag if the tag is not explicitly listed as grandfathered or redundant. return errors # Check that all subtag codes are meaningful. codes = data['tag'].split('-') for i, code in enumerate(codes): # Ignore anything after a singleton (break) if len(code) < 2: # Check that each private-use subtag is within the maximum allowed length. for code in codes[i + 1:]: if len(code) > 8: errors.append(error(self.ERR_TOO_LONG, code)) break if code not in index: errors.append(error(self.ERR_UNKNOWN, code)) # Continue to the next item. continue # Check that first tag is a language tag. subtags = self.subtags if not len(subtags): errors.append(error(self.ERR_NO_LANGUAGE)) return errors elif subtags[0].type != 'language': errors.append(error(self.ERR_NO_LANGUAGE)) return errors # Check for more than one of some types and for deprecation. found = dict(language=[], extlang=[], variant=[], script=[], region=[]) for subtag in subtags: type = subtag.type if subtag.deprecated: errors.append(error(self.ERR_SUBTAG_DEPRECATED, subtag)) if type in found: found[type].append(subtag) if 'language' == type: if len(found['language']) > 1: errors.append(error(self.ERR_EXTRA_LANGUAGE, subtag)) elif 'region' == type: if len(found['region']) > 1: errors.append(error(self.ERR_EXTRA_REGION, subtag)) elif 'extlang' == type: if len(found['extlang']) > 1: errors.append(error(self.ERR_EXTRA_EXTLANG, subtag)) elif 'script' == type: if len(found['script']) > 1: errors.append(error(self.ERR_EXTRA_SCRIPT, subtag)) # Check if script is same as language suppress-script. else: script = subtags[0].script if script: if script.format == subtag.format: errors.append(error(self.ERR_SUPPRESS_SCRIPT, subtag)) elif 'variant' == type: if len(found['variant']) > 1: for variant in found['variant']: if variant.format == subtag.format: errors.append(error(self.ERR_DUPLICATE_VARIANT, subtag)) break # Check for correct order. if len(subtags) > 1: priority = dict(language=4, extlang=5, script=6, region=7, variant=8) for i, subtag in enumerate(subtags[0:len(subtags)-1]): next = subtags[i + 1] if next: if priority[subtag.type] > priority[next.type]: errors.append(error(self.ERR_WRONG_ORDER, [subtag, next])) return errors
python
def errors(self): """ Get the errors of the tag. If invalid then the list will consist of errors containing each a code and message explaining the error. Each error also refers to the respective (sub)tag(s). :return: list of errors of the tag. If the tag is valid, it returns an empty list. """ errors = [] data = self.data error = self.error # Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn). if 'record' in data: if 'Deprecated' in data['record']: errors.append(error(self.ERR_DEPRECATED)) # Only check every subtag if the tag is not explicitly listed as grandfathered or redundant. return errors # Check that all subtag codes are meaningful. codes = data['tag'].split('-') for i, code in enumerate(codes): # Ignore anything after a singleton (break) if len(code) < 2: # Check that each private-use subtag is within the maximum allowed length. for code in codes[i + 1:]: if len(code) > 8: errors.append(error(self.ERR_TOO_LONG, code)) break if code not in index: errors.append(error(self.ERR_UNKNOWN, code)) # Continue to the next item. continue # Check that first tag is a language tag. subtags = self.subtags if not len(subtags): errors.append(error(self.ERR_NO_LANGUAGE)) return errors elif subtags[0].type != 'language': errors.append(error(self.ERR_NO_LANGUAGE)) return errors # Check for more than one of some types and for deprecation. found = dict(language=[], extlang=[], variant=[], script=[], region=[]) for subtag in subtags: type = subtag.type if subtag.deprecated: errors.append(error(self.ERR_SUBTAG_DEPRECATED, subtag)) if type in found: found[type].append(subtag) if 'language' == type: if len(found['language']) > 1: errors.append(error(self.ERR_EXTRA_LANGUAGE, subtag)) elif 'region' == type: if len(found['region']) > 1: errors.append(error(self.ERR_EXTRA_REGION, subtag)) elif 'extlang' == type: if len(found['extlang']) > 1: errors.append(error(self.ERR_EXTRA_EXTLANG, subtag)) elif 'script' == type: if len(found['script']) > 1: errors.append(error(self.ERR_EXTRA_SCRIPT, subtag)) # Check if script is same as language suppress-script. else: script = subtags[0].script if script: if script.format == subtag.format: errors.append(error(self.ERR_SUPPRESS_SCRIPT, subtag)) elif 'variant' == type: if len(found['variant']) > 1: for variant in found['variant']: if variant.format == subtag.format: errors.append(error(self.ERR_DUPLICATE_VARIANT, subtag)) break # Check for correct order. if len(subtags) > 1: priority = dict(language=4, extlang=5, script=6, region=7, variant=8) for i, subtag in enumerate(subtags[0:len(subtags)-1]): next = subtags[i + 1] if next: if priority[subtag.type] > priority[next.type]: errors.append(error(self.ERR_WRONG_ORDER, [subtag, next])) return errors
['def', 'errors', '(', 'self', ')', ':', 'errors', '=', '[', ']', 'data', '=', 'self', '.', 'data', 'error', '=', 'self', '.', 'error', '# Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn).', 'if', "'record'", 'in', 'data', ':', 'if', "'Deprecated'", 'in', 'data', '[', "'record'", ']', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_DEPRECATED', ')', ')', '# Only check every subtag if the tag is not explicitly listed as grandfathered or redundant.', 'return', 'errors', '# Check that all subtag codes are meaningful.', 'codes', '=', 'data', '[', "'tag'", ']', '.', 'split', '(', "'-'", ')', 'for', 'i', ',', 'code', 'in', 'enumerate', '(', 'codes', ')', ':', '# Ignore anything after a singleton (break)', 'if', 'len', '(', 'code', ')', '<', '2', ':', '# Check that each private-use subtag is within the maximum allowed length.', 'for', 'code', 'in', 'codes', '[', 'i', '+', '1', ':', ']', ':', 'if', 'len', '(', 'code', ')', '>', '8', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_TOO_LONG', ',', 'code', ')', ')', 'break', 'if', 'code', 'not', 'in', 'index', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_UNKNOWN', ',', 'code', ')', ')', '# Continue to the next item.', 'continue', '# Check that first tag is a language tag.', 'subtags', '=', 'self', '.', 'subtags', 'if', 'not', 'len', '(', 'subtags', ')', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_NO_LANGUAGE', ')', ')', 'return', 'errors', 'elif', 'subtags', '[', '0', ']', '.', 'type', '!=', "'language'", ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_NO_LANGUAGE', ')', ')', 'return', 'errors', '# Check for more than one of some types and for deprecation.', 'found', '=', 'dict', '(', 'language', '=', '[', ']', ',', 'extlang', '=', '[', ']', ',', 'variant', '=', '[', ']', ',', 'script', '=', '[', ']', ',', 'region', '=', '[', ']', ')', 'for', 'subtag', 'in', 'subtags', ':', 'type', '=', 'subtag', '.', 'type', 'if', 'subtag', '.', 'deprecated', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_SUBTAG_DEPRECATED', ',', 'subtag', ')', ')', 'if', 'type', 'in', 'found', ':', 'found', '[', 'type', ']', '.', 'append', '(', 'subtag', ')', 'if', "'language'", '==', 'type', ':', 'if', 'len', '(', 'found', '[', "'language'", ']', ')', '>', '1', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_EXTRA_LANGUAGE', ',', 'subtag', ')', ')', 'elif', "'region'", '==', 'type', ':', 'if', 'len', '(', 'found', '[', "'region'", ']', ')', '>', '1', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_EXTRA_REGION', ',', 'subtag', ')', ')', 'elif', "'extlang'", '==', 'type', ':', 'if', 'len', '(', 'found', '[', "'extlang'", ']', ')', '>', '1', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_EXTRA_EXTLANG', ',', 'subtag', ')', ')', 'elif', "'script'", '==', 'type', ':', 'if', 'len', '(', 'found', '[', "'script'", ']', ')', '>', '1', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_EXTRA_SCRIPT', ',', 'subtag', ')', ')', '# Check if script is same as language suppress-script.', 'else', ':', 'script', '=', 'subtags', '[', '0', ']', '.', 'script', 'if', 'script', ':', 'if', 'script', '.', 'format', '==', 'subtag', '.', 'format', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_SUPPRESS_SCRIPT', ',', 'subtag', ')', ')', 'elif', "'variant'", '==', 'type', ':', 'if', 'len', '(', 'found', '[', "'variant'", ']', ')', '>', '1', ':', 'for', 'variant', 'in', 'found', '[', "'variant'", ']', ':', 'if', 'variant', '.', 'format', '==', 'subtag', '.', 'format', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_DUPLICATE_VARIANT', ',', 'subtag', ')', ')', 'break', '# Check for correct order.', 'if', 'len', '(', 'subtags', ')', '>', '1', ':', 'priority', '=', 'dict', '(', 'language', '=', '4', ',', 'extlang', '=', '5', ',', 'script', '=', '6', ',', 'region', '=', '7', ',', 'variant', '=', '8', ')', 'for', 'i', ',', 'subtag', 'in', 'enumerate', '(', 'subtags', '[', '0', ':', 'len', '(', 'subtags', ')', '-', '1', ']', ')', ':', 'next', '=', 'subtags', '[', 'i', '+', '1', ']', 'if', 'next', ':', 'if', 'priority', '[', 'subtag', '.', 'type', ']', '>', 'priority', '[', 'next', '.', 'type', ']', ':', 'errors', '.', 'append', '(', 'error', '(', 'self', '.', 'ERR_WRONG_ORDER', ',', '[', 'subtag', ',', 'next', ']', ')', ')', 'return', 'errors']
Get the errors of the tag. If invalid then the list will consist of errors containing each a code and message explaining the error. Each error also refers to the respective (sub)tag(s). :return: list of errors of the tag. If the tag is valid, it returns an empty list.
['Get', 'the', 'errors', 'of', 'the', 'tag', '.', 'If', 'invalid', 'then', 'the', 'list', 'will', 'consist', 'of', 'errors', 'containing', 'each', 'a', 'code', 'and', 'message', 'explaining', 'the', 'error', '.', 'Each', 'error', 'also', 'refers', 'to', 'the', 'respective', '(', 'sub', ')', 'tag', '(', 's', ')', '.']
train
https://github.com/OnroerendErfgoed/language-tags/blob/acb91e5458d22617f344e2eefaba9a9865373fdd/language_tags/Tag.py#L263-L352
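The final ordering check in Tag.errors above reduces to a fixed priority table over subtag types. A minimal, runnable sketch of just that step, assuming subtags arrive as plain (code, type) pairs instead of the library's Subtag objects:

PRIORITY = dict(language=4, extlang=5, script=6, region=7, variant=8)

def out_of_order_pairs(subtags):
    # Compare each subtag with its successor; report pairs in the wrong order.
    return [(a, b) for (a, ta), (b, tb) in zip(subtags, subtags[1:])
            if PRIORITY[ta] > PRIORITY[tb]]

# 'US' (region) must not precede 'Latn' (script):
print(out_of_order_pairs([('en', 'language'), ('US', 'region'), ('Latn', 'script')]))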
5,634
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
HorizonFrame.createHeadingPointer
def createHeadingPointer(self): '''Creates the pointer for the current heading.''' self.headingTri = patches.RegularPolygon((0.0,0.80),3,0.05,color='k',zorder=4) self.axes.add_patch(self.headingTri) self.headingText = self.axes.text(0.0,0.675,'0',color='k',size=self.fontSize,horizontalalignment='center',verticalalignment='center',zorder=4)
python
def createHeadingPointer(self): '''Creates the pointer for the current heading.''' self.headingTri = patches.RegularPolygon((0.0,0.80),3,0.05,color='k',zorder=4) self.axes.add_patch(self.headingTri) self.headingText = self.axes.text(0.0,0.675,'0',color='k',size=self.fontSize,horizontalalignment='center',verticalalignment='center',zorder=4)
['def', 'createHeadingPointer', '(', 'self', ')', ':', 'self', '.', 'headingTri', '=', 'patches', '.', 'RegularPolygon', '(', '(', '0.0', ',', '0.80', ')', ',', '3', ',', '0.05', ',', 'color', '=', "'k'", ',', 'zorder', '=', '4', ')', 'self', '.', 'axes', '.', 'add_patch', '(', 'self', '.', 'headingTri', ')', 'self', '.', 'headingText', '=', 'self', '.', 'axes', '.', 'text', '(', '0.0', ',', '0.675', ',', "'0'", ',', 'color', '=', "'k'", ',', 'size', '=', 'self', '.', 'fontSize', ',', 'horizontalalignment', '=', "'center'", ',', 'verticalalignment', '=', "'center'", ',', 'zorder', '=', '4', ')']
Creates the pointer for the current heading.
['Creates', 'the', 'pointer', 'for', 'the', 'current', 'heading', '.']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L174-L178
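Outside MAVProxy, the same heading pointer can be reproduced with plain matplotlib; this sketch uses arbitrary axis limits and a hard-coded font size in place of self.fontSize:

import matplotlib.pyplot as plt
from matplotlib import patches

fig, axes = plt.subplots()
# Three-sided patch used as the heading marker, drawn above other artists.
axes.add_patch(patches.RegularPolygon((0.0, 0.80), numVertices=3,
                                      radius=0.05, color='k', zorder=4))
axes.text(0.0, 0.675, '0', color='k', size=10,
          horizontalalignment='center', verticalalignment='center', zorder=4)
axes.set_xlim(-1, 1)
axes.set_ylim(0, 1)
plt.show()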
5,635
google/python-gflags
gflags/__init__.py
register_multi_flags_validator
def register_multi_flags_validator(flag_names, multi_flags_checker, message='Flags validation failed', flag_values=FLAGS): """Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name. """ v = gflags_validators.MultiFlagsValidator( flag_names, multi_flags_checker, message) _add_validator(flag_values, v)
python
def register_multi_flags_validator(flag_names, multi_flags_checker, message='Flags validation failed', flag_values=FLAGS): """Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name. """ v = gflags_validators.MultiFlagsValidator( flag_names, multi_flags_checker, message) _add_validator(flag_values, v)
['def', 'register_multi_flags_validator', '(', 'flag_names', ',', 'multi_flags_checker', ',', 'message', '=', "'Flags validation failed'", ',', 'flag_values', '=', 'FLAGS', ')', ':', 'v', '=', 'gflags_validators', '.', 'MultiFlagsValidator', '(', 'flag_names', ',', 'multi_flags_checker', ',', 'message', ')', '_add_validator', '(', 'flag_values', ',', 'v', ')']
Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name.
['Adds', 'a', 'constraint', 'to', 'multiple', 'flags', '.']
train
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L187-L214
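A hedged usage sketch for the function above; the flag names and the bound check are invented for illustration:

import gflags

gflags.DEFINE_integer('min_val', 0, 'Lower bound.')
gflags.DEFINE_integer('max_val', 10, 'Upper bound.')

def _bounds_ok(flags_dict):
    # flags_dict maps each watched flag name to its current value.
    return flags_dict['min_val'] <= flags_dict['max_val']

gflags.register_multi_flags_validator(
    ['min_val', 'max_val'], _bounds_ok,
    message='--min_val must not exceed --max_val')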
5,636
rene-aguirre/pywinusb
pywinusb/hid/core.py
hid_device_path_exists
def hid_device_path_exists(device_path, guid = None): """Test if required device_path is still valid (HID device connected to host) """ # expecting HID devices if not guid: guid = winapi.GetHidGuid() info_data = winapi.SP_DEVINFO_DATA() info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA) with winapi.DeviceInterfaceSetInfo(guid) as h_info: for interface_data in winapi.enum_device_interfaces(h_info, guid): test_device_path = winapi.get_device_path(h_info, interface_data, byref(info_data)) if test_device_path == device_path: return True # Not any device now with that path return False
python
def hid_device_path_exists(device_path, guid = None): """Test if required device_path is still valid (HID device connected to host) """ # expecting HID devices if not guid: guid = winapi.GetHidGuid() info_data = winapi.SP_DEVINFO_DATA() info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA) with winapi.DeviceInterfaceSetInfo(guid) as h_info: for interface_data in winapi.enum_device_interfaces(h_info, guid): test_device_path = winapi.get_device_path(h_info, interface_data, byref(info_data)) if test_device_path == device_path: return True # Not any device now with that path return False
['def', 'hid_device_path_exists', '(', 'device_path', ',', 'guid', '=', 'None', ')', ':', '# expecting HID devices\r', 'if', 'not', 'guid', ':', 'guid', '=', 'winapi', '.', 'GetHidGuid', '(', ')', 'info_data', '=', 'winapi', '.', 'SP_DEVINFO_DATA', '(', ')', 'info_data', '.', 'cb_size', '=', 'sizeof', '(', 'winapi', '.', 'SP_DEVINFO_DATA', ')', 'with', 'winapi', '.', 'DeviceInterfaceSetInfo', '(', 'guid', ')', 'as', 'h_info', ':', 'for', 'interface_data', 'in', 'winapi', '.', 'enum_device_interfaces', '(', 'h_info', ',', 'guid', ')', ':', 'test_device_path', '=', 'winapi', '.', 'get_device_path', '(', 'h_info', ',', 'interface_data', ',', 'byref', '(', 'info_data', ')', ')', 'if', 'test_device_path', '==', 'device_path', ':', 'return', 'True', '# Not any device now with that path\r', 'return', 'False']
Test if required device_path is still valid (HID device connected to host)
['Test', 'if', 'required', 'device_path', 'is', 'still', 'valid', '(', 'HID', 'device', 'connected', 'to', 'host', ')']
train
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L67-L86
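A Windows-only usage sketch: capture a device path from an enumerated device, then poll whether it is still attached (the function lives in pywinusb.hid.core per the path above):

from pywinusb import hid

devices = hid.find_all_hid_devices()
if devices:
    path = devices[0].device_path
    print(hid.core.hid_device_path_exists(path))  # True while still plugged in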
5,637
LeastAuthority/txkube
src/txkube/_authentication.py
pick_cert_for_twisted
def pick_cert_for_twisted(netloc, possible): """ Pick the right client key/certificate to use for the given server and return it in the form Twisted wants. :param NetLocation netloc: The location of the server to consider. :param dict[TLSCredentials] possible: The available credentials from which to choose. :return: A two-tuple. If no credentials were found, the elements are ``None`` and ``[]``. Otherwise, the first element is a ``twisted.internet.ssl.PrivateCertificate`` instance representing the client certificate to use and the second element is a ``tuple`` of ``twisted.internet.ssl.Certificate`` instances representing the rest of the chain necessary to validate the client certificate. """ try: creds = possible[netloc] except KeyError: return (None, ()) key = ssl.KeyPair.load(creds.key.as_bytes(), FILETYPE_PEM) return ( ssl.PrivateCertificate.load( creds.chain.certificates[0].as_bytes(), key, FILETYPE_PEM, ), tuple( ssl.Certificate.load(cert.as_bytes(), FILETYPE_PEM) for cert in creds.chain.certificates[1:] ), )
python
def pick_cert_for_twisted(netloc, possible): """ Pick the right client key/certificate to use for the given server and return it in the form Twisted wants. :param NetLocation netloc: The location of the server to consider. :param dict[TLSCredentials] possible: The available credentials from which to choose. :return: A two-tuple. If no credentials were found, the elements are ``None`` and ``[]``. Otherwise, the first element is a ``twisted.internet.ssl.PrivateCertificate`` instance representing the client certificate to use and the second element is a ``tuple`` of ``twisted.internet.ssl.Certificate`` instances representing the rest of the chain necessary to validate the client certificate. """ try: creds = possible[netloc] except KeyError: return (None, ()) key = ssl.KeyPair.load(creds.key.as_bytes(), FILETYPE_PEM) return ( ssl.PrivateCertificate.load( creds.chain.certificates[0].as_bytes(), key, FILETYPE_PEM, ), tuple( ssl.Certificate.load(cert.as_bytes(), FILETYPE_PEM) for cert in creds.chain.certificates[1:] ), )
['def', 'pick_cert_for_twisted', '(', 'netloc', ',', 'possible', ')', ':', 'try', ':', 'creds', '=', 'possible', '[', 'netloc', ']', 'except', 'KeyError', ':', 'return', '(', 'None', ',', '(', ')', ')', 'key', '=', 'ssl', '.', 'KeyPair', '.', 'load', '(', 'creds', '.', 'key', '.', 'as_bytes', '(', ')', ',', 'FILETYPE_PEM', ')', 'return', '(', 'ssl', '.', 'PrivateCertificate', '.', 'load', '(', 'creds', '.', 'chain', '.', 'certificates', '[', '0', ']', '.', 'as_bytes', '(', ')', ',', 'key', ',', 'FILETYPE_PEM', ',', ')', ',', 'tuple', '(', 'ssl', '.', 'Certificate', '.', 'load', '(', 'cert', '.', 'as_bytes', '(', ')', ',', 'FILETYPE_PEM', ')', 'for', 'cert', 'in', 'creds', '.', 'chain', '.', 'certificates', '[', '1', ':', ']', ')', ',', ')']
Pick the right client key/certificate to use for the given server and return it in the form Twisted wants. :param NetLocation netloc: The location of the server to consider. :param dict[TLSCredentials] possible: The available credentials from which to choose. :return: A two-tuple. If no credentials were found, the elements are ``None`` and ``[]``. Otherwise, the first element is a ``twisted.internet.ssl.PrivateCertificate`` instance representing the client certificate to use and the second element is a ``tuple`` of ``twisted.internet.ssl.Certificate`` instances representing the rest of the chain necessary to validate the client certificate.
['Pick', 'the', 'right', 'client', 'key', '/', 'certificate', 'to', 'use', 'for', 'the', 'given', 'server', 'and', 'return', 'it', 'in', 'the', 'form', 'Twisted', 'wants', '.']
train
https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_authentication.py#L122-L153
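The two-tuple returned above is shaped for Twisted's TLS machinery. A hedged sketch of the kind of call site it feeds, with a hypothetical PEM file containing both the key and the certificate:

from OpenSSL.crypto import FILETYPE_PEM
from twisted.internet import ssl

with open('client.pem', 'rb') as f:   # hypothetical path
    pem = f.read()
key = ssl.KeyPair.load(pem, FILETYPE_PEM)
client_cert = ssl.PrivateCertificate.load(pem, key, FILETYPE_PEM)
options = ssl.optionsForClientTLS(u'example.com', clientCertificate=client_cert)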
5,638
deepmind/pysc2
pysc2/bin/gen_actions.py
get_data
def get_data(): """Retrieve static data from the game.""" run_config = run_configs.get() with run_config.start(want_rgb=False) as controller: m = maps.get("Sequencer") # Arbitrary ladder map. create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap( map_path=m.path, map_data=m.data(run_config))) create.player_setup.add(type=sc_pb.Participant) create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryEasy) join = sc_pb.RequestJoinGame(race=sc_common.Random, options=sc_pb.InterfaceOptions(raw=True)) controller.create_game(create) controller.join_game(join) return controller.data()
python
def get_data(): """Retrieve static data from the game.""" run_config = run_configs.get() with run_config.start(want_rgb=False) as controller: m = maps.get("Sequencer") # Arbitrary ladder map. create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap( map_path=m.path, map_data=m.data(run_config))) create.player_setup.add(type=sc_pb.Participant) create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryEasy) join = sc_pb.RequestJoinGame(race=sc_common.Random, options=sc_pb.InterfaceOptions(raw=True)) controller.create_game(create) controller.join_game(join) return controller.data()
['def', 'get_data', '(', ')', ':', 'run_config', '=', 'run_configs', '.', 'get', '(', ')', 'with', 'run_config', '.', 'start', '(', 'want_rgb', '=', 'False', ')', 'as', 'controller', ':', 'm', '=', 'maps', '.', 'get', '(', '"Sequencer"', ')', '# Arbitrary ladder map.', 'create', '=', 'sc_pb', '.', 'RequestCreateGame', '(', 'local_map', '=', 'sc_pb', '.', 'LocalMap', '(', 'map_path', '=', 'm', '.', 'path', ',', 'map_data', '=', 'm', '.', 'data', '(', 'run_config', ')', ')', ')', 'create', '.', 'player_setup', '.', 'add', '(', 'type', '=', 'sc_pb', '.', 'Participant', ')', 'create', '.', 'player_setup', '.', 'add', '(', 'type', '=', 'sc_pb', '.', 'Computer', ',', 'race', '=', 'sc_common', '.', 'Random', ',', 'difficulty', '=', 'sc_pb', '.', 'VeryEasy', ')', 'join', '=', 'sc_pb', '.', 'RequestJoinGame', '(', 'race', '=', 'sc_common', '.', 'Random', ',', 'options', '=', 'sc_pb', '.', 'InterfaceOptions', '(', 'raw', '=', 'True', ')', ')', 'controller', '.', 'create_game', '(', 'create', ')', 'controller', '.', 'join_game', '(', 'join', ')', 'return', 'controller', '.', 'data', '(', ')']
Retrieve static data from the game.
['Retrieve', 'static', 'data', 'from', 'the', 'game', '.']
train
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/bin/gen_actions.py#L39-L55
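get_data above needs a local StarCraft II install; the module is a script, so the usual entry point is the command line. A hedged sanity check that the run config resolves before launching:

# Typically invoked as: python -m pysc2.bin.gen_actions
from pysc2 import run_configs

print(run_configs.get())  # raises if no SC2 installation can be located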
5,639
romanz/trezor-agent
libagent/device/interface.py
identity_to_string
def identity_to_string(identity_dict): """Dump Identity dictionary into its string representation.""" result = [] if identity_dict.get('proto'): result.append(identity_dict['proto'] + '://') if identity_dict.get('user'): result.append(identity_dict['user'] + '@') result.append(identity_dict['host']) if identity_dict.get('port'): result.append(':' + identity_dict['port']) if identity_dict.get('path'): result.append(identity_dict['path']) log.debug('identity parts: %s', result) return ''.join(result)
python
def identity_to_string(identity_dict): """Dump Identity dictionary into its string representation.""" result = [] if identity_dict.get('proto'): result.append(identity_dict['proto'] + '://') if identity_dict.get('user'): result.append(identity_dict['user'] + '@') result.append(identity_dict['host']) if identity_dict.get('port'): result.append(':' + identity_dict['port']) if identity_dict.get('path'): result.append(identity_dict['path']) log.debug('identity parts: %s', result) return ''.join(result)
['def', 'identity_to_string', '(', 'identity_dict', ')', ':', 'result', '=', '[', ']', 'if', 'identity_dict', '.', 'get', '(', "'proto'", ')', ':', 'result', '.', 'append', '(', 'identity_dict', '[', "'proto'", ']', '+', "'://'", ')', 'if', 'identity_dict', '.', 'get', '(', "'user'", ')', ':', 'result', '.', 'append', '(', 'identity_dict', '[', "'user'", ']', '+', "'@'", ')', 'result', '.', 'append', '(', 'identity_dict', '[', "'host'", ']', ')', 'if', 'identity_dict', '.', 'get', '(', "'port'", ')', ':', 'result', '.', 'append', '(', "':'", '+', 'identity_dict', '[', "'port'", ']', ')', 'if', 'identity_dict', '.', 'get', '(', "'path'", ')', ':', 'result', '.', 'append', '(', 'identity_dict', '[', "'path'", ']', ')', 'log', '.', 'debug', '(', "'identity parts: %s'", ',', 'result', ')', 'return', "''", '.', 'join', '(', 'result', ')']
Dump Identity dictionary into its string representation.
['Dump', 'Identity', 'dictionary', 'into', 'its', 'string', 'representation', '.']
train
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L34-L47
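A runnable round trip for the function above, using the module path given in the record:

from libagent.device.interface import identity_to_string

print(identity_to_string({'proto': 'ssh', 'user': 'roman',
                          'host': 'example.com', 'port': '2222',
                          'path': '/home'}))
# -> ssh://roman@example.com:2222/home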
5,640
dustin/twitty-twister
twittytwister/twitter.py
TwitterMonitor._state_stopped
def _state_stopped(self): """ The service is not running. This is the initial state, and the state after L{stopService} was called. To get out of this state, call L{startService}. If there is a current connection, we disconnect. """ if self._reconnectDelayedCall: self._reconnectDelayedCall.cancel() self._reconnectDelayedCall = None self.loseConnection()
python
def _state_stopped(self): """ The service is not running. This is the initial state, and the state after L{stopService} was called. To get out of this state, call L{startService}. If there is a current connection, we disconnect. """ if self._reconnectDelayedCall: self._reconnectDelayedCall.cancel() self._reconnectDelayedCall = None self.loseConnection()
['def', '_state_stopped', '(', 'self', ')', ':', 'if', 'self', '.', '_reconnectDelayedCall', ':', 'self', '.', '_reconnectDelayedCall', '.', 'cancel', '(', ')', 'self', '.', '_reconnectDelayedCall', '=', 'None', 'self', '.', 'loseConnection', '(', ')']
The service is not running. This is the initial state, and the state after L{stopService} was called. To get out of this state, call L{startService}. If there is a current connection, we disconnect.
['The', 'service', 'is', 'not', 'running', '.']
train
https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1048-L1059
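The cancel-and-clear idiom used above, shown in isolation with a plain Twisted delayed call (the 30-second delay is arbitrary):

from twisted.internet import reactor

delayed = reactor.callLater(30, lambda: None)  # stand-in for a pending reconnect
if delayed.active():
    delayed.cancel()
delayed = None  # drop the reference so a later start can schedule a fresh call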
5,641
jalanb/pysyte
pysyte/bash/git.py
log
def log(args, number=None, oneline=False, quiet=False): """Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option """ options = ' '.join([ number and str('-n %s' % number) or '', oneline and '--oneline' or '' ]) try: return run('log %s %s' % (options, args), quiet=quiet) except UnknownRevision: return ''
python
def log(args, number=None, oneline=False, quiet=False): """Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option """ options = ' '.join([ number and str('-n %s' % number) or '', oneline and '--oneline' or '' ]) try: return run('log %s %s' % (options, args), quiet=quiet) except UnknownRevision: return ''
['def', 'log', '(', 'args', ',', 'number', '=', 'None', ',', 'oneline', '=', 'False', ',', 'quiet', '=', 'False', ')', ':', 'options', '=', "' '", '.', 'join', '(', '[', 'number', 'and', 'str', '(', "'-n %s'", '%', 'number', ')', 'or', "''", ',', 'oneline', 'and', "'--oneline'", 'or', "''", ']', ')', 'try', ':', 'return', 'run', '(', "'log %s %s'", '%', '(', 'options', ',', 'args', ')', ',', 'quiet', '=', 'quiet', ')', 'except', 'UnknownRevision', ':', 'return', "''"]
Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option
['Run', 'a', 'git', 'log', '...', 'command', 'and', 'return', 'stdout']
train
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/git.py#L217-L232
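How the options string above is assembled, as a runnable fragment (the extra '--author' argument is made up):

number, oneline = 5, True
options = ' '.join([
    number and str('-n %s' % number) or '',
    oneline and '--oneline' or '',
])
print('log %s %s' % (options, '--author=alice'))
# -> log -n 5 --oneline --author=alice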
5,642
tchellomello/python-arlo
pyarlo/__init__.py
PyArlo.update
def update(self, update_cameras=False, update_base_station=False): """Refresh object.""" self._authenticate() # update attributes in all cameras to avoid duped queries if update_cameras: url = DEVICES_ENDPOINT response = self.query(url) if not response or not isinstance(response, dict): return for camera in self.cameras: for dev_info in response.get('data'): if dev_info.get('deviceName') == camera.name: _LOGGER.debug("Refreshing %s attributes", camera.name) camera.attrs = dev_info # preload cached videos # the user is still able to force a new query by # calling the Arlo.video() camera.make_video_cache() # force update base_station if update_base_station: for base in self.base_stations: base.update()
python
def update(self, update_cameras=False, update_base_station=False): """Refresh object.""" self._authenticate() # update attributes in all cameras to avoid duped queries if update_cameras: url = DEVICES_ENDPOINT response = self.query(url) if not response or not isinstance(response, dict): return for camera in self.cameras: for dev_info in response.get('data'): if dev_info.get('deviceName') == camera.name: _LOGGER.debug("Refreshing %s attributes", camera.name) camera.attrs = dev_info # preload cached videos # the user is still able to force a new query by # calling the Arlo.video() camera.make_video_cache() # force update base_station if update_base_station: for base in self.base_stations: base.update()
['def', 'update', '(', 'self', ',', 'update_cameras', '=', 'False', ',', 'update_base_station', '=', 'False', ')', ':', 'self', '.', '_authenticate', '(', ')', '# update attributes in all cameras to avoid duped queries', 'if', 'update_cameras', ':', 'url', '=', 'DEVICES_ENDPOINT', 'response', '=', 'self', '.', 'query', '(', 'url', ')', 'if', 'not', 'response', 'or', 'not', 'isinstance', '(', 'response', ',', 'dict', ')', ':', 'return', 'for', 'camera', 'in', 'self', '.', 'cameras', ':', 'for', 'dev_info', 'in', 'response', '.', 'get', '(', "'data'", ')', ':', 'if', 'dev_info', '.', 'get', '(', "'deviceName'", ')', '==', 'camera', '.', 'name', ':', '_LOGGER', '.', 'debug', '(', '"Refreshing %s attributes"', ',', 'camera', '.', 'name', ')', 'camera', '.', 'attrs', '=', 'dev_info', '# preload cached videos', '# the user is still able to force a new query by', '# calling the Arlo.video()', 'camera', '.', 'make_video_cache', '(', ')', '# force update base_station', 'if', 'update_base_station', ':', 'for', 'base', 'in', 'self', '.', 'base_stations', ':', 'base', '.', 'update', '(', ')']
Refresh object.
['Refresh', 'object', '.']
train
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L249-L274
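A hedged call-site sketch for the method above; the credentials are placeholders:

from pyarlo import PyArlo

arlo = PyArlo('user@example.com', 'secret')  # authenticates on creation
arlo.update(update_cameras=True, update_base_station=True)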
5,643
tomi77/python-t77-date
t77_date/datetime.py
end_of_month
def end_of_month(val): """ Return a new datetime.datetime object with values that represent the end of a month. :param val: Date to ... :type val: datetime.datetime | datetime.date :rtype: datetime.datetime """ if type(val) == date: val = datetime.fromordinal(val.toordinal()) if val.month == 12: return start_of_month(val).replace(year=val.year + 1, month=1) \ - timedelta(microseconds=1) else: return start_of_month(val).replace(month=val.month + 1) \ - timedelta(microseconds=1)
python
def end_of_month(val): """ Return a new datetime.datetime object with values that represent the end of a month. :param val: Date to ... :type val: datetime.datetime | datetime.date :rtype: datetime.datetime """ if type(val) == date: val = datetime.fromordinal(val.toordinal()) if val.month == 12: return start_of_month(val).replace(year=val.year + 1, month=1) \ - timedelta(microseconds=1) else: return start_of_month(val).replace(month=val.month + 1) \ - timedelta(microseconds=1)
['def', 'end_of_month', '(', 'val', ')', ':', 'if', 'type', '(', 'val', ')', '==', 'date', ':', 'val', '=', 'datetime', '.', 'fromordinal', '(', 'val', '.', 'toordinal', '(', ')', ')', 'if', 'val', '.', 'month', '==', '12', ':', 'return', 'start_of_month', '(', 'val', ')', '.', 'replace', '(', 'year', '=', 'val', '.', 'year', '+', '1', ',', 'month', '=', '1', ')', '-', 'timedelta', '(', 'microseconds', '=', '1', ')', 'else', ':', 'return', 'start_of_month', '(', 'val', ')', '.', 'replace', '(', 'month', '=', 'val', '.', 'month', '+', '1', ')', '-', 'timedelta', '(', 'microseconds', '=', '1', ')']
Return a new datetime.datetime object with values that represent the end of a month. :param val: Date to ... :type val: datetime.datetime | datetime.date :rtype: datetime.datetime
['Return', 'a', 'new', 'datetime', '.', 'datetime', 'object', 'with', 'values', 'that', 'represent', 'the', 'end', 'of', 'a', 'month', '.', ':', 'param', 'val', ':', 'Date', 'to', '...', ':', 'type', 'val', ':', 'datetime', '.', 'datetime', '|', 'datetime', '.', 'date', ':', 'rtype', ':', 'datetime', '.', 'datetime']
train
https://github.com/tomi77/python-t77-date/blob/b4b12ce6a02884fb62460f6b9068e7fa28979fce/t77_date/datetime.py#L45-L60
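A self-contained check of the same month-end arithmetic, inlining the start_of_month helper so the snippet runs on its own:

from datetime import datetime, timedelta

def end_of_month(val):
    first = val.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    if val.month == 12:
        return first.replace(year=val.year + 1, month=1) - timedelta(microseconds=1)
    return first.replace(month=val.month + 1) - timedelta(microseconds=1)

print(end_of_month(datetime(2024, 2, 10)))  # 2024-02-29 23:59:59.999999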
5,644
fstab50/metal
metal/cli.py
SetLogging.set
def set(self, mode, disable): """ create logger object, enable or disable logging """ global logger try: if logger: if disable: logger.disabled = True else: if mode in ('STREAM', 'FILE'): logger = logd.getLogger(mode, __version__) except Exception as e: logger.exception( '%s: Problem incurred during logging setup' % inspect.stack()[0][3] ) return False return True
python
def set(self, mode, disable): """ create logger object, enable or disable logging """ global logger try: if logger: if disable: logger.disabled = True else: if mode in ('STREAM', 'FILE'): logger = logd.getLogger(mode, __version__) except Exception as e: logger.exception( '%s: Problem incurred during logging setup' % inspect.stack()[0][3] ) return False return True
['def', 'set', '(', 'self', ',', 'mode', ',', 'disable', ')', ':', 'global', 'logger', 'try', ':', 'if', 'logger', ':', 'if', 'disable', ':', 'logger', '.', 'disabled', '=', 'True', 'else', ':', 'if', 'mode', 'in', '(', "'STREAM'", ',', "'FILE'", ')', ':', 'logger', '=', 'logd', '.', 'getLogger', '(', 'mode', ',', '__version__', ')', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'exception', '(', "'%s: Problem incurred during logging setup'", '%', 'inspect', '.', 'stack', '(', ')', '[', '0', ']', '[', '3', ']', ')', 'return', 'False', 'return', 'True']
create logger object, enable or disable logging
['create', 'logger', 'object', 'enable', 'or', 'disable', 'logging']
train
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/cli.py#L123-L138
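The enable/disable switch above rests on the standard library's logger.disabled flag; in isolation:

import logging

logger = logging.getLogger('metal')
logging.basicConfig()
logger.disabled = True
logger.warning('suppressed')  # emits nothing while disabled
logger.disabled = False
logger.warning('visible')     # emits normally again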
5,645
mitsei/dlkit
dlkit/records/osid/base_records.py
FilesFormRecord._init_metadata
def _init_metadata(self): """stub""" self._files_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'files'), 'element_label': 'Files', 'instructions': 'enter a file id with optional label', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] } self._file_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'file'), 'element_label': 'File', 'instructions': 'accepts an Asset Id', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [] } self._label_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'label'), 'element_label': 'Label', 'instructions': 'enter a string', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [str(ObjectId())], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 128, 'string_set': [] }
python
def _init_metadata(self): """stub""" self._files_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'files'), 'element_label': 'Files', 'instructions': 'enter a file id with optional label', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] } self._file_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'file'), 'element_label': 'File', 'instructions': 'accepts an Asset Id', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [] } self._label_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'label'), 'element_label': 'Label', 'instructions': 'enter a string', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [str(ObjectId())], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 128, 'string_set': [] }
['def', '_init_metadata', '(', 'self', ')', ':', 'self', '.', '_files_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'files'", ')', ',', "'element_label'", ':', "'Files'", ',', "'instructions'", ':', "'enter a file id with optional label'", ',', "'required'", ':', 'False', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'False', ',', "'default_object_values'", ':', '[', '{', '}', ']', ',', "'syntax'", ':', "'OBJECT'", ',', "'object_set'", ':', '[', ']', '}', 'self', '.', '_file_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'file'", ')', ',', "'element_label'", ':', "'File'", ',', "'instructions'", ':', "'accepts an Asset Id'", ',', "'required'", ':', 'True', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'False', ',', "'default_id_values'", ':', '[', "''", ']', ',', "'syntax'", ':', "'ID'", ',', "'id_set'", ':', '[', ']', '}', 'self', '.', '_label_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'label'", ')', ',', "'element_label'", ':', "'Label'", ',', "'instructions'", ':', "'enter a string'", ',', "'required'", ':', 'False', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'False', ',', "'default_string_values'", ':', '[', 'str', '(', 'ObjectId', '(', ')', ')', ']', ',', "'syntax'", ':', "'STRING'", ',', "'minimum_string_length'", ':', '0', ',', "'maximum_string_length'", ':', '128', ',', "'string_set'", ':', '[', ']', '}']
stub
['stub']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L1949-L1994
5,646
PMBio/limix-backup
limix/mtSet/core/preprocessCore.py
computePCsPlink
def computePCsPlink(plink_path,k,out_dir,bfile,ffile): """ computing the principal components via plink """ print("Using plink to compute principal components") cmd = '%s --bfile %s --pca %d '%(plink_path,bfile,k) cmd+= '--out %s'%(os.path.join(out_dir,'plink')) subprocess.call(cmd,shell=True) plink_fn = os.path.join(out_dir, 'plink.eigenvec') M = sp.loadtxt(plink_fn,dtype=str) U = sp.array(M[:,2:],dtype=float) U-= U.mean(0) U/= U.std(0) sp.savetxt(ffile,U)
python
def computePCsPlink(plink_path,k,out_dir,bfile,ffile): """ computing the principal components via plink """ print("Using plink to compute principal components") cmd = '%s --bfile %s --pca %d '%(plink_path,bfile,k) cmd+= '--out %s'%(os.path.join(out_dir,'plink')) subprocess.call(cmd,shell=True) plink_fn = os.path.join(out_dir, 'plink.eigenvec') M = sp.loadtxt(plink_fn,dtype=str) U = sp.array(M[:,2:],dtype=float) U-= U.mean(0) U/= U.std(0) sp.savetxt(ffile,U)
['def', 'computePCsPlink', '(', 'plink_path', ',', 'k', ',', 'out_dir', ',', 'bfile', ',', 'ffile', ')', ':', 'print', '(', '"Using plink to compute principal components"', ')', 'cmd', '=', "'%s --bfile %s --pca %d '", '%', '(', 'plink_path', ',', 'bfile', ',', 'k', ')', 'cmd', '+=', "'--out %s'", '%', '(', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', "'plink'", ')', ')', 'subprocess', '.', 'call', '(', 'cmd', ',', 'shell', '=', 'True', ')', 'plink_fn', '=', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', "'plink.eigenvec'", ')', 'M', '=', 'sp', '.', 'loadtxt', '(', 'plink_fn', ',', 'dtype', '=', 'str', ')', 'U', '=', 'sp', '.', 'array', '(', 'M', '[', ':', ',', '2', ':', ']', ',', 'dtype', '=', 'float', ')', 'U', '-=', 'U', '.', 'mean', '(', '0', ')', 'U', '/=', 'U', '.', 'std', '(', '0', ')', 'sp', '.', 'savetxt', '(', 'ffile', ',', 'U', ')']
computing the principal components via plink
['computing', 'the', 'principal', 'components', 'via', 'plink']
train
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/mtSet/core/preprocessCore.py#L56-L69
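The exact shell command the helper builds, reproduced as a runnable fragment with placeholder paths:

import os

plink_path, k, out_dir, bfile = 'plink', 10, '/tmp/pcs', 'mydata'
cmd = '%s --bfile %s --pca %d ' % (plink_path, bfile, k)
cmd += '--out %s' % (os.path.join(out_dir, 'plink'))
print(cmd)  # plink --bfile mydata --pca 10 --out /tmp/pcs/plink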
5,647
sassoftware/saspy
saspy/sasml.py
SASml.hpforest
def hpforest(self, data: ['SASdata', str] = None, freq: str = None, id: str = None, input: [str, list, dict] = None, save: str = None, score: [str, bool, 'SASdata'] = True, target: [str, list, dict] = None, procopts: str = None, stmtpassthrough: str = None, **kwargs: dict) -> 'SASresults': """ Python method to call the HPFOREST procedure Documentation link: https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf :param data: SASdata object or string. This parameter is required. :param freq: The freq variable can only be a string type. :param id: The id variable can only be a string type. :param input: The input variable can be a string, list or dict type. It refers to the independent, x, or predictor variables. This parameter is required :param save: The save variable can only be a string type. :param score: The score variable can only be a string type. :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
python
def hpforest(self, data: ['SASdata', str] = None, freq: str = None, id: str = None, input: [str, list, dict] = None, save: str = None, score: [str, bool, 'SASdata'] = True, target: [str, list, dict] = None, procopts: str = None, stmtpassthrough: str = None, **kwargs: dict) -> 'SASresults': """ Python method to call the HPFOREST procedure Documentation link: https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf :param data: SASdata object or string. This parameter is required. :param freq: The freq variable can only be a string type. :param id: The id variable can only be a string type. :param input: The input variable can be a string, list or dict type. It refers to the independent, x, or predictor variables. This parameter is required :param save: The save variable can only be a string type. :param score: The score variable can only be a string type. :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
['def', 'hpforest', '(', 'self', ',', 'data', ':', '[', "'SASdata'", ',', 'str', ']', '=', 'None', ',', 'freq', ':', 'str', '=', 'None', ',', 'id', ':', 'str', '=', 'None', ',', 'input', ':', '[', 'str', ',', 'list', ',', 'dict', ']', '=', 'None', ',', 'save', ':', 'str', '=', 'None', ',', 'score', ':', '[', 'str', ',', 'bool', ',', "'SASdata'", ']', '=', 'True', ',', 'target', ':', '[', 'str', ',', 'list', ',', 'dict', ']', '=', 'None', ',', 'procopts', ':', 'str', '=', 'None', ',', 'stmtpassthrough', ':', 'str', '=', 'None', ',', '*', '*', 'kwargs', ':', 'dict', ')', '->', "'SASresults'", ':']
Python method to call the HPFOREST procedure Documentation link: https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf :param data: SASdata object or string. This parameter is required. :param freq: The freq variable can only be a string type. :param id: The id variable can only be a string type. :param input: The input variable can be a string, list or dict type. It refers to the independent, x, or predictor variables. This parameter is required :param save: The save variable can only be a string type. :param score: The score variable can only be a string type. :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object
['Python', 'method', 'to', 'call', 'the', 'HPFOREST', 'procedure']
train
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasml.py#L89-L115
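A hedged usage sketch for the method above; it assumes a configured SAS session, and the sashelp.cars columns are plausible stand-ins rather than values from the record:

import saspy

sas = saspy.SASsession()
ml = sas.sasml()
cars = sas.sasdata('cars', libref='sashelp')
result = ml.hpforest(data=cars, target='Origin',
                     input=['MPG_City', 'Horsepower'])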
5,648
Jaza/flask-thumbnails-s3
flask_thumbnails_s3/__init__.py
Thumbnail._thumbnail_s3
def _thumbnail_s3(self, original_filename, thumb_filename, thumb_size, thumb_url, bucket_name, crop=None, bg=None, quality=85): """Finds or creates a thumbnail for the specified image on Amazon S3.""" scheme = self.app.config.get('THUMBNAIL_S3_USE_HTTPS') and 'https' or 'http' thumb_url_full = url_for_s3( 'static', bucket_name=self.app.config.get('THUMBNAIL_S3_BUCKET_NAME'), filename=thumb_url, scheme=scheme) original_url_full = url_for_s3( 'static', bucket_name=bucket_name, filename=self._get_s3_path(original_filename).replace('static/', ''), scheme=scheme) # Return the thumbnail URL now if it already exists on S3. # HTTP HEAD request saves us actually downloading the image # for this check. # Thanks to: # http://stackoverflow.com/a/16778749/2066849 try: resp = httplib2.Http().request(thumb_url_full, 'HEAD') resp_status = int(resp[0]['status']) assert(resp_status < 400) return thumb_url_full except Exception: pass # Thanks to: # http://stackoverflow.com/a/12020860/2066849 try: fd = urllib.urlopen(original_url_full) temp_file = BytesIO(fd.read()) image = Image.open(temp_file) except Exception: return '' img = self._thumbnail_resize(image, thumb_size, crop=crop, bg=bg) temp_file = BytesIO() img.save(temp_file, image.format, quality=quality) conn = S3Connection(self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_ID'), self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_SECRET')) bucket = conn.get_bucket(self.app.config.get('THUMBNAIL_S3_BUCKET_NAME')) path = self._get_s3_path(thumb_filename) k = bucket.new_key(path) try: k.set_contents_from_string(temp_file.getvalue()) k.set_acl(self.app.config.get('THUMBNAIL_S3_ACL', 'public-read')) except S3ResponseError: return '' return thumb_url_full
python
def _thumbnail_s3(self, original_filename, thumb_filename, thumb_size, thumb_url, bucket_name, crop=None, bg=None, quality=85): """Finds or creates a thumbnail for the specified image on Amazon S3.""" scheme = self.app.config.get('THUMBNAIL_S3_USE_HTTPS') and 'https' or 'http' thumb_url_full = url_for_s3( 'static', bucket_name=self.app.config.get('THUMBNAIL_S3_BUCKET_NAME'), filename=thumb_url, scheme=scheme) original_url_full = url_for_s3( 'static', bucket_name=bucket_name, filename=self._get_s3_path(original_filename).replace('static/', ''), scheme=scheme) # Return the thumbnail URL now if it already exists on S3. # HTTP HEAD request saves us actually downloading the image # for this check. # Thanks to: # http://stackoverflow.com/a/16778749/2066849 try: resp = httplib2.Http().request(thumb_url_full, 'HEAD') resp_status = int(resp[0]['status']) assert(resp_status < 400) return thumb_url_full except Exception: pass # Thanks to: # http://stackoverflow.com/a/12020860/2066849 try: fd = urllib.urlopen(original_url_full) temp_file = BytesIO(fd.read()) image = Image.open(temp_file) except Exception: return '' img = self._thumbnail_resize(image, thumb_size, crop=crop, bg=bg) temp_file = BytesIO() img.save(temp_file, image.format, quality=quality) conn = S3Connection(self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_ID'), self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_SECRET')) bucket = conn.get_bucket(self.app.config.get('THUMBNAIL_S3_BUCKET_NAME')) path = self._get_s3_path(thumb_filename) k = bucket.new_key(path) try: k.set_contents_from_string(temp_file.getvalue()) k.set_acl(self.app.config.get('THUMBNAIL_S3_ACL', 'public-read')) except S3ResponseError: return '' return thumb_url_full
['def', '_thumbnail_s3', '(', 'self', ',', 'original_filename', ',', 'thumb_filename', ',', 'thumb_size', ',', 'thumb_url', ',', 'bucket_name', ',', 'crop', '=', 'None', ',', 'bg', '=', 'None', ',', 'quality', '=', '85', ')', ':', 'scheme', '=', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_USE_HTTPS'", ')', 'and', "'https'", 'or', "'http'", 'thumb_url_full', '=', 'url_for_s3', '(', "'static'", ',', 'bucket_name', '=', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_BUCKET_NAME'", ')', ',', 'filename', '=', 'thumb_url', ',', 'scheme', '=', 'scheme', ')', 'original_url_full', '=', 'url_for_s3', '(', "'static'", ',', 'bucket_name', '=', 'bucket_name', ',', 'filename', '=', 'self', '.', '_get_s3_path', '(', 'original_filename', ')', '.', 'replace', '(', "'static/'", ',', "''", ')', ',', 'scheme', '=', 'scheme', ')', '# Return the thumbnail URL now if it already exists on S3.', '# HTTP HEAD request saves us actually downloading the image', '# for this check.', '# Thanks to:', '# http://stackoverflow.com/a/16778749/2066849', 'try', ':', 'resp', '=', 'httplib2', '.', 'Http', '(', ')', '.', 'request', '(', 'thumb_url_full', ',', "'HEAD'", ')', 'resp_status', '=', 'int', '(', 'resp', '[', '0', ']', '[', "'status'", ']', ')', 'assert', '(', 'resp_status', '<', '400', ')', 'return', 'thumb_url_full', 'except', 'Exception', ':', 'pass', '# Thanks to:', '# http://stackoverflow.com/a/12020860/2066849', 'try', ':', 'fd', '=', 'urllib', '.', 'urlopen', '(', 'original_url_full', ')', 'temp_file', '=', 'BytesIO', '(', 'fd', '.', 'read', '(', ')', ')', 'image', '=', 'Image', '.', 'open', '(', 'temp_file', ')', 'except', 'Exception', ':', 'return', "''", 'img', '=', 'self', '.', '_thumbnail_resize', '(', 'image', ',', 'thumb_size', ',', 'crop', '=', 'crop', ',', 'bg', '=', 'bg', ')', 'temp_file', '=', 'BytesIO', '(', ')', 'img', '.', 'save', '(', 'temp_file', ',', 'image', '.', 'format', ',', 'quality', '=', 'quality', ')', 'conn', '=', 'S3Connection', '(', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_ACCESS_KEY_ID'", ')', ',', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_ACCESS_KEY_SECRET'", ')', ')', 'bucket', '=', 'conn', '.', 'get_bucket', '(', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_BUCKET_NAME'", ')', ')', 'path', '=', 'self', '.', '_get_s3_path', '(', 'thumb_filename', ')', 'k', '=', 'bucket', '.', 'new_key', '(', 'path', ')', 'try', ':', 'k', '.', 'set_contents_from_string', '(', 'temp_file', '.', 'getvalue', '(', ')', ')', 'k', '.', 'set_acl', '(', 'self', '.', 'app', '.', 'config', '.', 'get', '(', "'THUMBNAIL_S3_ACL'", ',', "'public-read'", ')', ')', 'except', 'S3ResponseError', ':', 'return', "''", 'return', 'thumb_url_full']
Finds or creates a thumbnail for the specified image on Amazon S3.
['Finds', 'or', 'creates', 'a', 'thumbnail', 'for', 'the', 'specified', 'image', 'on', 'Amazon', 'S3', '.']
train
https://github.com/Jaza/flask-thumbnails-s3/blob/a4f20fa643cea175f7b7c22315f4ae8a3edc7636/flask_thumbnails_s3/__init__.py#L90-L147
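The HEAD-request existence probe used above, in isolation (the URL is a placeholder):

import httplib2

resp = httplib2.Http().request('https://example.com/thumb.jpg', 'HEAD')
exists = int(resp[0]['status']) < 400  # headers only, no body download
print(exists)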
5,649
sdispater/eloquent
eloquent/orm/scopes/soft_deleting.py
SoftDeletingScope.remove
def remove(self, builder, model): """ Remove the scope from a given query builder. :param builder: The query builder :type builder: eloquent.orm.builder.Builder :param model: The model :type model: eloquent.orm.Model """ column = model.get_qualified_deleted_at_column() query = builder.get_query() wheres = [] for where in query.wheres: # If the where clause is a soft delete date constraint, # we will remove it from the query and reset the keys # on the wheres. This allows the developer to include # deleted model in a relationship result set that is lazy loaded. if not self._is_soft_delete_constraint(where, column): wheres.append(where) query.wheres = wheres
python
def remove(self, builder, model): """ Remove the scope from a given query builder. :param builder: The query builder :type builder: eloquent.orm.builder.Builder :param model: The model :type model: eloquent.orm.Model """ column = model.get_qualified_deleted_at_column() query = builder.get_query() wheres = [] for where in query.wheres: # If the where clause is a soft delete date constraint, # we will remove it from the query and reset the keys # on the wheres. This allows the developer to include # deleted model in a relationship result set that is lazy loaded. if not self._is_soft_delete_constraint(where, column): wheres.append(where) query.wheres = wheres
['def', 'remove', '(', 'self', ',', 'builder', ',', 'model', ')', ':', 'column', '=', 'model', '.', 'get_qualified_deleted_at_column', '(', ')', 'query', '=', 'builder', '.', 'get_query', '(', ')', 'wheres', '=', '[', ']', 'for', 'where', 'in', 'query', '.', 'wheres', ':', '# If the where clause is a soft delete date constraint,', '# we will remove it from the query and reset the keys', '# on the wheres. This allows the developer to include', '# deleted model in a relationship result set that is lazy loaded.', 'if', 'not', 'self', '.', '_is_soft_delete_constraint', '(', 'where', ',', 'column', ')', ':', 'wheres', '.', 'append', '(', 'where', ')', 'query', '.', 'wheres', '=', 'wheres']
Remove the scope from a given query builder. :param builder: The query builder :type builder: eloquent.orm.builder.Builder :param model: The model :type model: eloquent.orm.Model
['Remove', 'the', 'scope', 'from', 'a', 'given', 'query', 'builder', '.']
train
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/scopes/soft_deleting.py#L24-L47
5,650
AlexandreDecan/python-intervals
intervals.py
AtomicInterval.replace
def replace(self, left=None, lower=None, upper=None, right=None, ignore_inf=True): """ Create a new interval based on the current one and the provided values. Callable can be passed instead of values. In that case, it is called with the current corresponding value except if ignore_inf is set (default) and the corresponding bound is an infinity. :param left: (a function of) left boundary. :param lower: (a function of) value of the lower bound. :param upper: (a function of) value of the upper bound. :param right: (a function of) right boundary. :param ignore_inf: ignore infinities if functions are provided (default is True). :return: an Interval instance """ if callable(left): left = left(self._left) else: left = self._left if left is None else left if callable(lower): lower = self._lower if ignore_inf and self._lower in [-inf, inf] else lower(self._lower) else: lower = self._lower if lower is None else lower if callable(upper): upper = self._upper if ignore_inf and self._upper in [-inf, inf] else upper(self._upper) else: upper = self._upper if upper is None else upper if callable(right): right = right(self._right) else: right = self._right if right is None else right return AtomicInterval(left, lower, upper, right)
python
def replace(self, left=None, lower=None, upper=None, right=None, ignore_inf=True): """ Create a new interval based on the current one and the provided values. Callable can be passed instead of values. In that case, it is called with the current corresponding value except if ignore_inf is set (default) and the corresponding bound is an infinity. :param left: (a function of) left boundary. :param lower: (a function of) value of the lower bound. :param upper: (a function of) value of the upper bound. :param right: (a function of) right boundary. :param ignore_inf: ignore infinities if functions are provided (default is True). :return: an Interval instance """ if callable(left): left = left(self._left) else: left = self._left if left is None else left if callable(lower): lower = self._lower if ignore_inf and self._lower in [-inf, inf] else lower(self._lower) else: lower = self._lower if lower is None else lower if callable(upper): upper = self._upper if ignore_inf and self._upper in [-inf, inf] else upper(self._upper) else: upper = self._upper if upper is None else upper if callable(right): right = right(self._right) else: right = self._right if right is None else right return AtomicInterval(left, lower, upper, right)
['def', 'replace', '(', 'self', ',', 'left', '=', 'None', ',', 'lower', '=', 'None', ',', 'upper', '=', 'None', ',', 'right', '=', 'None', ',', 'ignore_inf', '=', 'True', ')', ':', 'if', 'callable', '(', 'left', ')', ':', 'left', '=', 'left', '(', 'self', '.', '_left', ')', 'else', ':', 'left', '=', 'self', '.', '_left', 'if', 'left', 'is', 'None', 'else', 'left', 'if', 'callable', '(', 'lower', ')', ':', 'lower', '=', 'self', '.', '_lower', 'if', 'ignore_inf', 'and', 'self', '.', '_lower', 'in', '[', '-', 'inf', ',', 'inf', ']', 'else', 'lower', '(', 'self', '.', '_lower', ')', 'else', ':', 'lower', '=', 'self', '.', '_lower', 'if', 'lower', 'is', 'None', 'else', 'lower', 'if', 'callable', '(', 'upper', ')', ':', 'upper', '=', 'self', '.', '_upper', 'if', 'ignore_inf', 'and', 'self', '.', '_upper', 'in', '[', '-', 'inf', ',', 'inf', ']', 'else', 'upper', '(', 'self', '.', '_upper', ')', 'else', ':', 'upper', '=', 'self', '.', '_upper', 'if', 'upper', 'is', 'None', 'else', 'upper', 'if', 'callable', '(', 'right', ')', ':', 'right', '=', 'right', '(', 'self', '.', '_right', ')', 'else', ':', 'right', '=', 'self', '.', '_right', 'if', 'right', 'is', 'None', 'else', 'right', 'return', 'AtomicInterval', '(', 'left', ',', 'lower', ',', 'upper', ',', 'right', ')']
Create a new interval based on the current one and the provided values. Callable can be passed instead of values. In that case, it is called with the current corresponding value except if ignore_inf is set (default) and the corresponding bound is an infinity. :param left: (a function of) left boundary. :param lower: (a function of) value of the lower bound. :param upper: (a function of) value of the upper bound. :param right: (a function of) right boundary. :param ignore_inf: ignore infinities if functions are provided (default is True). :return: an Interval instance
['Create', 'a', 'new', 'interval', 'based', 'on', 'the', 'current', 'one', 'and', 'the', 'provided', 'values', '.']
train
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L366-L401
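The "value, callable, or None" parameter convention that replace implements, distilled into a runnable helper (pick is a name invented here):

def pick(new, current):
    if callable(new):
        return new(current)
    return current if new is None else new

print(pick(None, 5))             # 5  (None keeps the current value)
print(pick(7, 5))                # 7  (a plain value replaces it)
print(pick(lambda v: v + 1, 5))  # 6  (a callable transforms it)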
5,651
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py
addPharLapPaths
def addPharLapPaths(env): """This function adds the path to the Phar Lap binaries, includes, and libraries, if they are not already there.""" ph_path = getPharLapPath() try: env_dict = env['ENV'] except KeyError: env_dict = {} env['ENV'] = env_dict SCons.Util.AddPathIfNotExists(env_dict, 'PATH', os.path.join(ph_path, 'bin')) SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE', os.path.join(ph_path, 'include')) SCons.Util.AddPathIfNotExists(env_dict, 'LIB', os.path.join(ph_path, 'lib')) SCons.Util.AddPathIfNotExists(env_dict, 'LIB', os.path.join(ph_path, os.path.normpath('lib/vclib'))) env['PHARLAP_PATH'] = getPharLapPath() env['PHARLAP_VERSION'] = str(getPharLapVersion())
python
def addPharLapPaths(env): """This function adds the path to the Phar Lap binaries, includes, and libraries, if they are not already there.""" ph_path = getPharLapPath() try: env_dict = env['ENV'] except KeyError: env_dict = {} env['ENV'] = env_dict SCons.Util.AddPathIfNotExists(env_dict, 'PATH', os.path.join(ph_path, 'bin')) SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE', os.path.join(ph_path, 'include')) SCons.Util.AddPathIfNotExists(env_dict, 'LIB', os.path.join(ph_path, 'lib')) SCons.Util.AddPathIfNotExists(env_dict, 'LIB', os.path.join(ph_path, os.path.normpath('lib/vclib'))) env['PHARLAP_PATH'] = getPharLapPath() env['PHARLAP_VERSION'] = str(getPharLapVersion())
['def', 'addPharLapPaths', '(', 'env', ')', ':', 'ph_path', '=', 'getPharLapPath', '(', ')', 'try', ':', 'env_dict', '=', 'env', '[', "'ENV'", ']', 'except', 'KeyError', ':', 'env_dict', '=', '{', '}', 'env', '[', "'ENV'", ']', '=', 'env_dict', 'SCons', '.', 'Util', '.', 'AddPathIfNotExists', '(', 'env_dict', ',', "'PATH'", ',', 'os', '.', 'path', '.', 'join', '(', 'ph_path', ',', "'bin'", ')', ')', 'SCons', '.', 'Util', '.', 'AddPathIfNotExists', '(', 'env_dict', ',', "'INCLUDE'", ',', 'os', '.', 'path', '.', 'join', '(', 'ph_path', ',', "'include'", ')', ')', 'SCons', '.', 'Util', '.', 'AddPathIfNotExists', '(', 'env_dict', ',', "'LIB'", ',', 'os', '.', 'path', '.', 'join', '(', 'ph_path', ',', "'lib'", ')', ')', 'SCons', '.', 'Util', '.', 'AddPathIfNotExists', '(', 'env_dict', ',', "'LIB'", ',', 'os', '.', 'path', '.', 'join', '(', 'ph_path', ',', 'os', '.', 'path', '.', 'normpath', '(', "'lib/vclib'", ')', ')', ')', 'env', '[', "'PHARLAP_PATH'", ']', '=', 'getPharLapPath', '(', ')', 'env', '[', "'PHARLAP_VERSION'", ']', '=', 'str', '(', 'getPharLapVersion', '(', ')', ')']
This function adds the path to the Phar Lap binaries, includes, and libraries, if they are not already there.
['This', 'function', 'adds', 'the', 'path', 'to', 'the', 'Phar', 'Lap', 'binaries', 'includes', 'and', 'libraries', 'if', 'they', 'are', 'not', 'already', 'there', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py#L88-L108
5,652
macbre/sql-metadata
sql_metadata.py
generalize_sql
def generalize_sql(sql): """ Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql str|None :rtype: str """ if sql is None: return None # multiple spaces sql = re.sub(r'\s{2,}', ' ', sql) # MW comments # e.g. /* CategoryDataService::getMostVisited N.N.N.N */ sql = remove_comments_from_sql(sql) # handle LIKE statements sql = normalize_likes(sql) sql = re.sub(r"\\\\", '', sql) sql = re.sub(r"\\'", '', sql) sql = re.sub(r'\\"', '', sql) sql = re.sub(r"'[^\']*'", 'X', sql) sql = re.sub(r'"[^\"]*"', 'X', sql) # All newlines, tabs, etc replaced by single space sql = re.sub(r'\s+', ' ', sql) # All numbers => N sql = re.sub(r'-?[0-9]+', 'N', sql) # WHERE foo IN ('880987','882618','708228','522330') sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE) return sql.strip()
python
def generalize_sql(sql): """ Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql str|None :rtype: str """ if sql is None: return None # multiple spaces sql = re.sub(r'\s{2,}', ' ', sql) # MW comments # e.g. /* CategoryDataService::getMostVisited N.N.N.N */ sql = remove_comments_from_sql(sql) # handle LIKE statements sql = normalize_likes(sql) sql = re.sub(r"\\\\", '', sql) sql = re.sub(r"\\'", '', sql) sql = re.sub(r'\\"', '', sql) sql = re.sub(r"'[^\']*'", 'X', sql) sql = re.sub(r'"[^\"]*"', 'X', sql) # All newlines, tabs, etc replaced by single space sql = re.sub(r'\s+', ' ', sql) # All numbers => N sql = re.sub(r'-?[0-9]+', 'N', sql) # WHERE foo IN ('880987','882618','708228','522330') sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE) return sql.strip()
['def', 'generalize_sql', '(', 'sql', ')', ':', 'if', 'sql', 'is', 'None', ':', 'return', 'None', '# multiple spaces', 'sql', '=', 're', '.', 'sub', '(', "r'\\s{2,}'", ',', "' '", ',', 'sql', ')', '# MW comments', '# e.g. /* CategoryDataService::getMostVisited N.N.N.N */', 'sql', '=', 'remove_comments_from_sql', '(', 'sql', ')', '# handle LIKE statements', 'sql', '=', 'normalize_likes', '(', 'sql', ')', 'sql', '=', 're', '.', 'sub', '(', 'r"\\\\\\\\"', ',', "''", ',', 'sql', ')', 'sql', '=', 're', '.', 'sub', '(', 'r"\\\\\'"', ',', "''", ',', 'sql', ')', 'sql', '=', 're', '.', 'sub', '(', 'r\'\\\\"\'', ',', "''", ',', 'sql', ')', 'sql', '=', 're', '.', 'sub', '(', 'r"\'[^\\\']*\'"', ',', "'X'", ',', 'sql', ')', 'sql', '=', 're', '.', 'sub', '(', 'r\'"[^\\"]*"\'', ',', "'X'", ',', 'sql', ')', '# All newlines, tabs, etc replaced by single space', 'sql', '=', 're', '.', 'sub', '(', "r'\\s+'", ',', "' '", ',', 'sql', ')', '# All numbers => N', 'sql', '=', 're', '.', 'sub', '(', "r'-?[0-9]+'", ',', "'N'", ',', 'sql', ')', "# WHERE foo IN ('880987','882618','708228','522330')", 'sql', '=', 're', '.', 'sub', '(', "r' (IN|VALUES)\\s*\\([^,]+,[^)]+\\)'", ',', "' \\\\1 (XYZ)'", ',', 'sql', ',', 'flags', '=', 're', '.', 'IGNORECASE', ')', 'return', 'sql', '.', 'strip', '(', ')']
Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql str|None :rtype: str
['Removes', 'most', 'variables', 'from', 'an', 'SQL', 'query', 'and', 'replaces', 'them', 'with', 'X', 'or', 'N', 'for', 'numbers', '.']
train
https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L269-L306
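A hypothetical usage sketch for generalize_sql(), assuming an older sql-metadata release in which the function is exposed at module level (later releases reorganized the API):

from sql_metadata import generalize_sql

sql = "SELECT * FROM users WHERE id = 42 AND name = 'bob'"
print(generalize_sql(sql))
# expected, per the rules above: SELECT * FROM users WHERE id = N AND name = X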
5,653
InQuest/python-sandboxapi
sandboxapi/falcon.py
FalconAPI.analyze
def analyze(self, handle, filename): """Submit a file for analysis. :type handle: File handle :param handle: Handle to file to upload for analysis. :type filename: str :param filename: File name. :rtype: str :return: File hash as a string """ # multipart post files. files = {"file" : (filename, handle)} # ensure the handle is at offset 0. handle.seek(0) response = self._request("/submit/file", method='POST', files=files) try: if response.status_code == 201: # good response return response.json()['job_id'] else: raise sandboxapi.SandboxError("api error in analyze: {r}".format(r=response.content.decode('utf-8'))) except (ValueError, KeyError) as e: raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e))
python
def analyze(self, handle, filename): """Submit a file for analysis. :type handle: File handle :param handle: Handle to file to upload for analysis. :type filename: str :param filename: File name. :rtype: str :return: File hash as a string """ # multipart post files. files = {"file" : (filename, handle)} # ensure the handle is at offset 0. handle.seek(0) response = self._request("/submit/file", method='POST', files=files) try: if response.status_code == 201: # good response return response.json()['job_id'] else: raise sandboxapi.SandboxError("api error in analyze: {r}".format(r=response.content.decode('utf-8'))) except (ValueError, KeyError) as e: raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e))
['def', 'analyze', '(', 'self', ',', 'handle', ',', 'filename', ')', ':', '# multipart post files.', 'files', '=', '{', '"file"', ':', '(', 'filename', ',', 'handle', ')', '}', '# ensure the handle is at offset 0.', 'handle', '.', 'seek', '(', '0', ')', 'response', '=', 'self', '.', '_request', '(', '"/submit/file"', ',', 'method', '=', "'POST'", ',', 'files', '=', 'files', ')', 'try', ':', 'if', 'response', '.', 'status_code', '==', '201', ':', '# good response', 'return', 'response', '.', 'json', '(', ')', '[', "'job_id'", ']', 'else', ':', 'raise', 'sandboxapi', '.', 'SandboxError', '(', '"api error in analyze: {r}"', '.', 'format', '(', 'r', '=', 'response', '.', 'content', '.', 'decode', '(', "'utf-8'", ')', ')', ')', 'except', '(', 'ValueError', ',', 'KeyError', ')', 'as', 'e', ':', 'raise', 'sandboxapi', '.', 'SandboxError', '(', '"error in analyze: {e}"', '.', 'format', '(', 'e', '=', 'e', ')', ')']
Submit a file for analysis. :type handle: File handle :param handle: Handle to file to upload for analysis. :type filename: str :param filename: File name. :rtype: str :return: File hash as a string
['Submit', 'a', 'file', 'for', 'analysis', '.']
train
https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/falcon.py#L45-L71
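The multipart upload pattern in analyze() can be sketched standalone with requests; the URL here is a placeholder, not the real Falcon endpoint:

import requests

def submit_file(handle, filename, url="https://sandbox.example/api/submit/file"):
    handle.seek(0)  # ensure the handle is at offset 0, as analyze() does
    files = {"file": (filename, handle)}
    response = requests.post(url, files=files)
    if response.status_code != 201:  # the API above signals success with 201
        raise RuntimeError("api error: " + response.text)
    return response.json()["job_id"]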
5,654
olls/graphics
graphics/funcs.py
rotateImage
def rotateImage(image, angle): """ rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw """ image = [list(row) for row in image] for n in range(angle % 4): image = list(zip(*image[::-1])) return image
python
def rotateImage(image, angle): """ rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw """ image = [list(row) for row in image] for n in range(angle % 4): image = list(zip(*image[::-1])) return image
['def', 'rotateImage', '(', 'image', ',', 'angle', ')', ':', 'image', '=', '[', 'list', '(', 'row', ')', 'for', 'row', 'in', 'image', ']', 'for', 'n', 'in', 'range', '(', 'angle', '%', '4', ')', ':', 'image', '=', 'list', '(', 'zip', '(', '*', 'image', '[', ':', ':', '-', '1', ']', ')', ')', 'return', 'image']
rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw
['rotates', 'a', '2d', 'array', 'to', 'a', 'multiple', 'of', '90', 'deg', '.', '0', '=', 'default', '1', '=', '90', 'deg', '.', 'cw', '2', '=', '180', 'deg', '.', '3', '=', '90', 'deg', '.', 'ccw']
train
https://github.com/olls/graphics/blob/a302e9fe648d2d44603b52ac5bb80df4863b2a7d/graphics/funcs.py#L7-L20
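rotateImage() is self-contained, so its behavior can be checked directly, assuming the function above is in scope (note the rotated rows come back as tuples, since zip() produces tuples):

image = [[1, 2],
         [3, 4]]
print(rotateImage(image, 1))  # [(3, 1), (4, 2)] -- 90 deg. cw
print(rotateImage(image, 2))  # [(4, 3), (2, 1)] -- 180 deg.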
5,655
konomae/lastpass-python
lastpass/parser.py
extract_chunks
def extract_chunks(blob): """Splits the blob into chunks grouped by kind.""" chunks = [] stream = BytesIO(blob.bytes) current_pos = stream.tell() stream.seek(0, 2) length = stream.tell() stream.seek(current_pos, 0) while stream.tell() < length: chunks.append(read_chunk(stream)) return chunks
python
def extract_chunks(blob): """Splits the blob into chunks grouped by kind.""" chunks = [] stream = BytesIO(blob.bytes) current_pos = stream.tell() stream.seek(0, 2) length = stream.tell() stream.seek(current_pos, 0) while stream.tell() < length: chunks.append(read_chunk(stream)) return chunks
['def', 'extract_chunks', '(', 'blob', ')', ':', 'chunks', '=', '[', ']', 'stream', '=', 'BytesIO', '(', 'blob', '.', 'bytes', ')', 'current_pos', '=', 'stream', '.', 'tell', '(', ')', 'stream', '.', 'seek', '(', '0', ',', '2', ')', 'length', '=', 'stream', '.', 'tell', '(', ')', 'stream', '.', 'seek', '(', 'current_pos', ',', '0', ')', 'while', 'stream', '.', 'tell', '(', ')', '<', 'length', ':', 'chunks', '.', 'append', '(', 'read_chunk', '(', 'stream', ')', ')', 'return', 'chunks']
Splits the blob into chunks grouped by kind.
['Splits', 'the', 'blob', 'into', 'chunks', 'grouped', 'by', 'kind', '.']
train
https://github.com/konomae/lastpass-python/blob/5063911b789868a1fd9db9922db82cdf156b938a/lastpass/parser.py#L26-L37
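The remaining-length computation in extract_chunks() (seek to the end, record the length, seek back) is a generic BytesIO pattern; a standalone sketch with a stubbed read_chunk:

from io import BytesIO

def read_chunk(stream):
    # stub: the real parser reads a typed, length-prefixed chunk here
    return stream.read(4)

stream = BytesIO(b"ABCDEFGH")
current_pos = stream.tell()
stream.seek(0, 2)              # seek to end to learn the total length
length = stream.tell()
stream.seek(current_pos, 0)    # return to where we started

chunks = []
while stream.tell() < length:
    chunks.append(read_chunk(stream))
print(chunks)  # [b'ABCD', b'EFGH']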
5,656
Esri/ArcREST
src/arcrest/ags/mapservice.py
MapService.getExtensions
def getExtensions(self): """returns objects for all map service extensions""" extensions = [] if isinstance(self.supportedExtensions, list): for ext in self.supportedExtensions: extensionURL = self._url + "/exts/%s" % ext if ext == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions else: extensionURL = self._url + "/exts/%s" % self.supportedExtensions if self.supportedExtensions == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions
python
def getExtensions(self): """returns objects for all map service extensions""" extensions = [] if isinstance(self.supportedExtensions, list): for ext in self.supportedExtensions: extensionURL = self._url + "/exts/%s" % ext if ext == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions else: extensionURL = self._url + "/exts/%s" % self.supportedExtensions if self.supportedExtensions == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions
['def', 'getExtensions', '(', 'self', ')', ':', 'extensions', '=', '[', ']', 'if', 'isinstance', '(', 'self', '.', 'supportedExtensions', ',', 'list', ')', ':', 'for', 'ext', 'in', 'self', '.', 'supportedExtensions', ':', 'extensionURL', '=', 'self', '.', '_url', '+', '"/exts/%s"', '%', 'ext', 'if', 'ext', '==', '"SchematicsServer"', ':', 'extensions', '.', 'append', '(', 'SchematicsService', '(', 'url', '=', 'extensionURL', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ')', ')', 'return', 'extensions', 'else', ':', 'extensionURL', '=', 'self', '.', '_url', '+', '"/exts/%s"', '%', 'self', '.', 'supportedExtensions', 'if', 'self', '.', 'supportedExtensions', '==', '"SchematicsServer"', ':', 'extensions', '.', 'append', '(', 'SchematicsService', '(', 'url', '=', 'extensionURL', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ')', ')', 'return', 'extensions']
returns objects for all map service extensions
['returns', 'objects', 'for', 'all', 'map', 'service', 'extensions']
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/mapservice.py#L423-L442
5,657
CivicSpleen/ambry
ambry/bundle/bundle.py
Bundle.sync_out
def sync_out(self, file_name=None, force=False): """Synchronize from objects to records""" self.log('---- Sync Out ----') from ambry.bundle.files import BuildSourceFile self.dstate = self.STATES.BUILDING for f in self.build_source_files.list_records(): if (f.sync_dir() == BuildSourceFile.SYNC_DIR.RECORD_TO_FILE or f.record.path == file_name) or force: self.log('Sync: {}'.format(f.record.path)) f.record_to_fs() self.commit()
python
def sync_out(self, file_name=None, force=False): """Synchronize from objects to records""" self.log('---- Sync Out ----') from ambry.bundle.files import BuildSourceFile self.dstate = self.STATES.BUILDING for f in self.build_source_files.list_records(): if (f.sync_dir() == BuildSourceFile.SYNC_DIR.RECORD_TO_FILE or f.record.path == file_name) or force: self.log('Sync: {}'.format(f.record.path)) f.record_to_fs() self.commit()
['def', 'sync_out', '(', 'self', ',', 'file_name', '=', 'None', ',', 'force', '=', 'False', ')', ':', 'self', '.', 'log', '(', "'---- Sync Out ----'", ')', 'from', 'ambry', '.', 'bundle', '.', 'files', 'import', 'BuildSourceFile', 'self', '.', 'dstate', '=', 'self', '.', 'STATES', '.', 'BUILDING', 'for', 'f', 'in', 'self', '.', 'build_source_files', '.', 'list_records', '(', ')', ':', 'if', '(', 'f', '.', 'sync_dir', '(', ')', '==', 'BuildSourceFile', '.', 'SYNC_DIR', '.', 'RECORD_TO_FILE', 'or', 'f', '.', 'record', '.', 'path', '==', 'file_name', ')', 'or', 'force', ':', 'self', '.', 'log', '(', "'Sync: {}'", '.', 'format', '(', 'f', '.', 'record', '.', 'path', ')', ')', 'f', '.', 'record_to_fs', '(', ')', 'self', '.', 'commit', '(', ')']
Synchronize from objects to records
['Synchronize', 'from', 'objects', 'to', 'records']
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1472-L1485
5,658
apache/incubator-mxnet
python/mxnet/metric.py
EvalMetric.reset
def reset(self): """Resets the internal evaluation result to initial state.""" self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0
python
def reset(self): """Resets the internal evaluation result to initial state.""" self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0
['def', 'reset', '(', 'self', ')', ':', 'self', '.', 'num_inst', '=', '0', 'self', '.', 'sum_metric', '=', '0.0', 'self', '.', 'global_num_inst', '=', '0', 'self', '.', 'global_sum_metric', '=', '0.0']
Resets the internal evaluation result to initial state.
['Resets', 'the', 'internal', 'evaluation', 'result', 'to', 'initial', 'state', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L148-L153
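A usage sketch, assuming MXNet is installed; Accuracy is one concrete EvalMetric, and reset() clears the counters shown above:

import mxnet as mx

acc = mx.metric.Accuracy()
acc.update(labels=[mx.nd.array([0, 1])],
           preds=[mx.nd.array([[0.9, 0.1], [0.2, 0.8]])])
print(acc.get())   # ('accuracy', 1.0)
acc.reset()
print(acc.get())   # ('accuracy', nan) -- no instances seen since the reset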
5,659
ejeschke/ginga
ginga/misc/Task.py
ThreadPool.register_dn
def register_dn(self): """Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Decrement the running-thread count. If we are the last thread to terminate, release the ThreadPool thread, which is stuck in stop() """ with self.regcond: self.runningcount -= 1 tid = thread.get_ident() self.tids.remove(tid) self.logger.debug("register_dn: count_dn is %d" % self.runningcount) self.logger.debug("register_dn: remaining: %s" % str(self.tids)) if self.runningcount == 0: self.status = 'down' self.regcond.notify()
python
def register_dn(self): """Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Decrement the running-thread count. If we are the last thread to terminate, release the ThreadPool thread, which is stuck in stop() """ with self.regcond: self.runningcount -= 1 tid = thread.get_ident() self.tids.remove(tid) self.logger.debug("register_dn: count_dn is %d" % self.runningcount) self.logger.debug("register_dn: remaining: %s" % str(self.tids)) if self.runningcount == 0: self.status = 'down' self.regcond.notify()
['def', 'register_dn', '(', 'self', ')', ':', 'with', 'self', '.', 'regcond', ':', 'self', '.', 'runningcount', '-=', '1', 'tid', '=', 'thread', '.', 'get_ident', '(', ')', 'self', '.', 'tids', '.', 'remove', '(', 'tid', ')', 'self', '.', 'logger', '.', 'debug', '(', '"register_dn: count_dn is %d"', '%', 'self', '.', 'runningcount', ')', 'self', '.', 'logger', '.', 'debug', '(', '"register_dn: remaining: %s"', '%', 'str', '(', 'self', '.', 'tids', ')', ')', 'if', 'self', '.', 'runningcount', '==', '0', ':', 'self', '.', 'status', '=', "'down'", 'self', '.', 'regcond', '.', 'notify', '(', ')']
Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Decrement the running-thread count. If we are the last thread to terminate, release the ThreadPool thread, which is stuck in stop()
['Called', 'by', 'WorkerThread', 'objects', 'to', 'register', 'themselves', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L1130-L1145
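The countdown-and-notify pattern in register_dn() (decrement a shared counter under a condition variable, notify once it hits zero) can be sketched standalone:

import threading

regcond = threading.Condition()
runningcount = 3

def worker_done():
    global runningcount
    with regcond:
        runningcount -= 1
        if runningcount == 0:
            regcond.notify()   # wake whoever is waiting for full shutdown

threads = [threading.Thread(target=worker_done) for _ in range(3)]
with regcond:
    for t in threads:
        t.start()
    while runningcount > 0:
        regcond.wait()         # releases the lock while waiting
print("all workers down")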
5,660
sosy-lab/benchexec
benchexec/result.py
get_result_category
def get_result_category(expected_results, result, properties): ''' This function determines the relation between actual result and expected result for the given file and properties. @param expected_results: The expected results, keyed by property file name. @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized). @param properties: The list of property names to check. @return One of the CATEGORY_* strings. ''' result_class = get_result_classification(result) if result_class == RESULT_CLASS_OTHER: if result == RESULT_UNKNOWN: return CATEGORY_UNKNOWN elif result == RESULT_DONE: return CATEGORY_MISSING else: return CATEGORY_ERROR if not properties: # Without property we cannot return correct or wrong results. return CATEGORY_MISSING # For now, we have at most one property assert len(properties) == 1, properties prop = properties[0] expected_result = expected_results.get(prop.filename) if not expected_result or expected_result.result is None: # expected result of task is unknown return CATEGORY_MISSING if prop.is_well_known: # for well-known properties, only support hard-coded results is_valid_result = result in _VALID_RESULTS_PER_PROPERTY[prop.name] elif expected_result.subproperty: is_valid_result = result in { RESULT_TRUE_PROP, RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")"} else: is_valid_result = (result == RESULT_TRUE_PROP) or result.startswith(RESULT_FALSE_PROP) if not is_valid_result: return CATEGORY_UNKNOWN # result does not match property if expected_result.result: return CATEGORY_CORRECT if result_class == RESULT_CLASS_TRUE else CATEGORY_WRONG else: if expected_result.subproperty: return CATEGORY_CORRECT if result == RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")" else CATEGORY_WRONG else: return CATEGORY_CORRECT if result_class == RESULT_CLASS_FALSE else CATEGORY_WRONG
python
def get_result_category(expected_results, result, properties): ''' This function determines the relation between actual result and expected result for the given file and properties. @param expected_results: The expected results, keyed by property file name. @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized). @param properties: The list of property names to check. @return One of the CATEGORY_* strings. ''' result_class = get_result_classification(result) if result_class == RESULT_CLASS_OTHER: if result == RESULT_UNKNOWN: return CATEGORY_UNKNOWN elif result == RESULT_DONE: return CATEGORY_MISSING else: return CATEGORY_ERROR if not properties: # Without property we cannot return correct or wrong results. return CATEGORY_MISSING # For now, we have at most one property assert len(properties) == 1, properties prop = properties[0] expected_result = expected_results.get(prop.filename) if not expected_result or expected_result.result is None: # expected result of task is unknown return CATEGORY_MISSING if prop.is_well_known: # for well-known properties, only support hard-coded results is_valid_result = result in _VALID_RESULTS_PER_PROPERTY[prop.name] elif expected_result.subproperty: is_valid_result = result in { RESULT_TRUE_PROP, RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")"} else: is_valid_result = (result == RESULT_TRUE_PROP) or result.startswith(RESULT_FALSE_PROP) if not is_valid_result: return CATEGORY_UNKNOWN # result does not match property if expected_result.result: return CATEGORY_CORRECT if result_class == RESULT_CLASS_TRUE else CATEGORY_WRONG else: if expected_result.subproperty: return CATEGORY_CORRECT if result == RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")" else CATEGORY_WRONG else: return CATEGORY_CORRECT if result_class == RESULT_CLASS_FALSE else CATEGORY_WRONG
['def', 'get_result_category', '(', 'expected_results', ',', 'result', ',', 'properties', ')', ':', 'result_class', '=', 'get_result_classification', '(', 'result', ')', 'if', 'result_class', '==', 'RESULT_CLASS_OTHER', ':', 'if', 'result', '==', 'RESULT_UNKNOWN', ':', 'return', 'CATEGORY_UNKNOWN', 'elif', 'result', '==', 'RESULT_DONE', ':', 'return', 'CATEGORY_MISSING', 'else', ':', 'return', 'CATEGORY_ERROR', 'if', 'not', 'properties', ':', '# Without property we cannot return correct or wrong results.', 'return', 'CATEGORY_MISSING', '# For now, we have at most one property', 'assert', 'len', '(', 'properties', ')', '==', '1', ',', 'properties', 'prop', '=', 'properties', '[', '0', ']', 'expected_result', '=', 'expected_results', '.', 'get', '(', 'prop', '.', 'filename', ')', 'if', 'not', 'expected_result', 'or', 'expected_result', '.', 'result', 'is', 'None', ':', '# expected result of task is unknown', 'return', 'CATEGORY_MISSING', 'if', 'prop', '.', 'is_well_known', ':', '# for well-known properties, only support hard-coded results', 'is_valid_result', '=', 'result', 'in', '_VALID_RESULTS_PER_PROPERTY', '[', 'prop', '.', 'name', ']', 'elif', 'expected_result', '.', 'subproperty', ':', 'is_valid_result', '=', 'result', 'in', '{', 'RESULT_TRUE_PROP', ',', 'RESULT_FALSE_PROP', '+', '"("', '+', 'expected_result', '.', 'subproperty', '+', '")"', '}', 'else', ':', 'is_valid_result', '=', '(', 'result', '==', 'RESULT_TRUE_PROP', ')', 'or', 'result', '.', 'startswith', '(', 'RESULT_FALSE_PROP', ')', 'if', 'not', 'is_valid_result', ':', 'return', 'CATEGORY_UNKNOWN', '# result does not match property', 'if', 'expected_result', '.', 'result', ':', 'return', 'CATEGORY_CORRECT', 'if', 'result_class', '==', 'RESULT_CLASS_TRUE', 'else', 'CATEGORY_WRONG', 'else', ':', 'if', 'expected_result', '.', 'subproperty', ':', 'return', 'CATEGORY_CORRECT', 'if', 'result', '==', 'RESULT_FALSE_PROP', '+', '"("', '+', 'expected_result', '.', 'subproperty', '+', '")"', 'else', 'CATEGORY_WRONG', 'else', ':', 'return', 'CATEGORY_CORRECT', 'if', 'result_class', '==', 'RESULT_CLASS_FALSE', 'else', 'CATEGORY_WRONG']
This function determines the relation between actual result and expected result for the given file and properties. @param expected_results: The expected results, keyed by property file name. @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized). @param properties: The list of property names to check. @return One of the CATEGORY_* strings.
['This', 'function', 'determines', 'the', 'relation', 'between', 'actual', 'result', 'and', 'expected', 'result', 'for', 'the', 'given', 'file', 'and', 'properties', '.']
train
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/result.py#L440-L489
5,661
valohai/valohai-yaml
valohai_yaml/utils/__init__.py
listify
def listify(value): """ Wrap the given value into a list, with the below provisions: * If the value is a list or a tuple, it's coerced into a new list. * If the value is None, an empty list is returned. * Otherwise, a single-element list is returned, containing the value. :param value: A value. :return: a list! :rtype: list """ if value is None: return [] if isinstance(value, (list, tuple)): return list(value) return [value]
python
def listify(value): """ Wrap the given value into a list, with the below provisions: * If the value is a list or a tuple, it's coerced into a new list. * If the value is None, an empty list is returned. * Otherwise, a single-element list is returned, containing the value. :param value: A value. :return: a list! :rtype: list """ if value is None: return [] if isinstance(value, (list, tuple)): return list(value) return [value]
['def', 'listify', '(', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', '[', ']', 'if', 'isinstance', '(', 'value', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'return', 'list', '(', 'value', ')', 'return', '[', 'value', ']']
Wrap the given value into a list, with the below provisions: * If the value is a list or a tuple, it's coerced into a new list. * If the value is None, an empty list is returned. * Otherwise, a single-element list is returned, containing the value. :param value: A value. :return: a list! :rtype: list
['Wrap', 'the', 'given', 'value', 'into', 'a', 'list', 'with', 'the', 'below', 'provisions', ':']
train
https://github.com/valohai/valohai-yaml/blob/3d2e92381633d84cdba039f6905df34c9633a2e1/valohai_yaml/utils/__init__.py#L12-L28
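listify() is self-contained; a quick demonstration covering all three branches, assuming the function above is in scope:

print(listify(None))       # []
print(listify((1, 2)))     # [1, 2] -- tuple coerced into a new list
print(listify("value"))    # ['value'] -- wrapped in a single-element list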
5,662
GNS3/gns3-server
gns3server/compute/base_node.py
BaseNode.name
def name(self, new_name): """ Sets the name of this node. :param new_name: name """ log.info("{module}: {name} [{id}] renamed to {new_name}".format(module=self.manager.module_name, name=self.name, id=self.id, new_name=new_name)) self._name = new_name
python
def name(self, new_name): """ Sets the name of this node. :param new_name: name """ log.info("{module}: {name} [{id}] renamed to {new_name}".format(module=self.manager.module_name, name=self.name, id=self.id, new_name=new_name)) self._name = new_name
['def', 'name', '(', 'self', ',', 'new_name', ')', ':', 'log', '.', 'info', '(', '"{module}: {name} [{id}] renamed to {new_name}"', '.', 'format', '(', 'module', '=', 'self', '.', 'manager', '.', 'module_name', ',', 'name', '=', 'self', '.', 'name', ',', 'id', '=', 'self', '.', 'id', ',', 'new_name', '=', 'new_name', ')', ')', 'self', '.', '_name', '=', 'new_name']
Sets the name of this node. :param new_name: name
['Sets', 'the', 'name', 'of', 'this', 'node', '.']
train
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L176-L187
5,663
jopohl/urh
src/urh/awre/components/Address.py
Address.find_candidates
def find_candidates(candidates): """ Find candidate addresses using the LCS algorithm and perform a scoring based on how often a candidate appears in a longer candidate Input is something like ------------------------ ['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62', '1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000'] Output like ----------- {'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1, '57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1, '78e289': 4, '1b603300': 3} :type candidates: list of CommonRange :return: """ result = defaultdict(int) for i, c_i in enumerate(candidates): for j in range(i, len(candidates)): lcs = util.longest_common_substring(c_i.hex_value, candidates[j].hex_value) if lcs: result[lcs] += 1 return result
python
def find_candidates(candidates): """ Find candidate addresses using the LCS algorithm and perform a scoring based on how often a candidate appears in a longer candidate Input is something like ------------------------ ['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62', '1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000'] Output like ----------- {'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1, '57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1, '78e289': 4, '1b603300': 3} :type candidates: list of CommonRange :return: """ result = defaultdict(int) for i, c_i in enumerate(candidates): for j in range(i, len(candidates)): lcs = util.longest_common_substring(c_i.hex_value, candidates[j].hex_value) if lcs: result[lcs] += 1 return result
['def', 'find_candidates', '(', 'candidates', ')', ':', 'result', '=', 'defaultdict', '(', 'int', ')', 'for', 'i', ',', 'c_i', 'in', 'enumerate', '(', 'candidates', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', ',', 'len', '(', 'candidates', ')', ')', ':', 'lcs', '=', 'util', '.', 'longest_common_substring', '(', 'c_i', '.', 'hex_value', ',', 'candidates', '[', 'j', ']', '.', 'hex_value', ')', 'if', 'lcs', ':', 'result', '[', 'lcs', ']', '+=', '1', 'return', 'result']
Find candidate addresses using the LCS algorithm and perform a scoring based on how often a candidate appears in a longer candidate Input is something like ------------------------ ['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62', '1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000'] Output like ----------- {'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1, '57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1, '78e289': 4, '1b603300': 3} :type candidates: list of CommonRange :return:
['Find', 'candidate', 'addresses', 'using', 'the', 'LCS', 'algorithm', 'and', 'perform', 'a', 'scoring', 'based', 'on', 'how', 'often', 'a', 'candidate', 'appears', 'in', 'a', 'longer', 'candidate']
train
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/awre/components/Address.py#L190-L217
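A standalone sketch of the scoring idea, using plain hex strings instead of CommonRange objects and a naive longest-common-substring helper (the real code delegates to urh's util.longest_common_substring):

from collections import defaultdict

def longest_common_substring(a, b):
    # naive dynamic-programming variant; fine for short hex strings
    best, best_end = 0, 0
    table = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
                if table[i][j] > best:
                    best, best_end = table[i][j], i
    return a[best_end - best:best_end]

def score_candidates(hex_values):
    result = defaultdict(int)
    for i, h_i in enumerate(hex_values):
        for j in range(i, len(hex_values)):
            lcs = longest_common_substring(h_i, hex_values[j])
            if lcs:
                result[lcs] += 1
    return result

print(dict(score_candidates(["1b6033", "1b6033fd57", "78e289757e"])))
# {'1b6033': 2, '1b6033fd57': 1, '57': 1, '78e289757e': 1}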
5,664
dead-beef/markovchain
markovchain/text/util.py
CharCase.convert
def convert(self, string): """Return a copy of string converted to case. Parameters ---------- string : `str` Returns ------- `str` Examples -------- >>> CharCase.LOWER.convert('sTr InG') 'str ing' >>> CharCase.UPPER.convert('sTr InG') 'STR ING' >>> CharCase.TITLE.convert('sTr InG') 'Str ing' >>> CharCase.PRESERVE.convert('sTr InG') 'sTr InG' """ if self == self.__class__.TITLE: return capitalize(string) if self == self.__class__.UPPER: return string.upper() if self == self.__class__.LOWER: return string.lower() return string
python
def convert(self, string): """Return a copy of string converted to case. Parameters ---------- string : `str` Returns ------- `str` Examples -------- >>> CharCase.LOWER.convert('sTr InG') 'str ing' >>> CharCase.UPPER.convert('sTr InG') 'STR ING' >>> CharCase.TITLE.convert('sTr InG') 'Str ing' >>> CharCase.PRESERVE.convert('sTr InG') 'sTr InG' """ if self == self.__class__.TITLE: return capitalize(string) if self == self.__class__.UPPER: return string.upper() if self == self.__class__.LOWER: return string.lower() return string
['def', 'convert', '(', 'self', ',', 'string', ')', ':', 'if', 'self', '==', 'self', '.', '__class__', '.', 'TITLE', ':', 'return', 'capitalize', '(', 'string', ')', 'if', 'self', '==', 'self', '.', '__class__', '.', 'UPPER', ':', 'return', 'string', '.', 'upper', '(', ')', 'if', 'self', '==', 'self', '.', '__class__', '.', 'LOWER', ':', 'return', 'string', '.', 'lower', '(', ')', 'return', 'string']
Return a copy of string converted to case. Parameters ---------- string : `str` Returns ------- `str` Examples -------- >>> CharCase.LOWER.convert('sTr InG') 'str ing' >>> CharCase.UPPER.convert('sTr InG') 'STR ING' >>> CharCase.TITLE.convert('sTr InG') 'Str ing' >>> CharCase.PRESERVE.convert('sTr InG') 'sTr InG'
['Return', 'a', 'copy', 'of', 'string', 'converted', 'to', 'case', '.']
train
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/text/util.py#L27-L55
5,665
gem/oq-engine
openquake/hazardlib/gsim/abrahamson_silva_1997.py
AbrahamsonSilva1997._compute_f5
def _compute_f5(self, C, pga_rock): """ Compute f5 term (non-linear soil response) """ return C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
python
def _compute_f5(self, C, pga_rock): """ Compute f5 term (non-linear soil response) """ return C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
['def', '_compute_f5', '(', 'self', ',', 'C', ',', 'pga_rock', ')', ':', 'return', 'C', '[', "'a10'", ']', '+', 'C', '[', "'a11'", ']', '*', 'np', '.', 'log', '(', 'pga_rock', '+', 'C', '[', "'c5'", ']', ')']
Compute f5 term (non-linear soil response)
['Compute', 'f5', 'term', '(', 'non', '-', 'linear', 'soil', 'response', ')']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_silva_1997.py#L228-L232
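The f5 term is just a10 + a11 * ln(pga_rock + c5); a standalone numeric sketch with hypothetical coefficient values (the real values come from the GSIM's per-period coefficient table):

import numpy as np

C = {'a10': -0.417, 'a11': -0.230, 'c5': 0.03}   # hypothetical coefficients
pga_rock = np.array([0.1, 0.3, 0.6])
f5 = C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
print(f5)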
5,666
ModisWorks/modis
modis/discord_modis/gui.py
BotControl.stop
def stop(self): """Stop Modis and log it out of Discord.""" self.button_toggle_text.set("Start Modis") self.state = "off" logger.info("Stopping Discord Modis") from ._client import client asyncio.run_coroutine_threadsafe(client.logout(), client.loop) self.status_bar.set_status(0)
python
def stop(self): """Stop Modis and log it out of Discord.""" self.button_toggle_text.set("Start Modis") self.state = "off" logger.info("Stopping Discord Modis") from ._client import client asyncio.run_coroutine_threadsafe(client.logout(), client.loop) self.status_bar.set_status(0)
['def', 'stop', '(', 'self', ')', ':', 'self', '.', 'button_toggle_text', '.', 'set', '(', '"Start Modis"', ')', 'self', '.', 'state', '=', '"off"', 'logger', '.', 'info', '(', '"Stopping Discord Modis"', ')', 'from', '.', '_client', 'import', 'client', 'asyncio', '.', 'run_coroutine_threadsafe', '(', 'client', '.', 'logout', '(', ')', ',', 'client', '.', 'loop', ')', 'self', '.', 'status_bar', '.', 'set_status', '(', '0', ')']
Stop Modis and log it out of Discord.
['Stop', 'Modis', 'and', 'log', 'it', 'out', 'of', 'Discord', '.']
train
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/gui.py#L350-L359
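The key pattern in stop() is scheduling a coroutine onto an event loop owned by another thread via asyncio.run_coroutine_threadsafe; a standalone sketch:

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def logout():
    return "logged out"

# submit the coroutine from this thread to the loop's thread
future = asyncio.run_coroutine_threadsafe(logout(), loop)
print(future.result(timeout=5))   # logged out
loop.call_soon_threadsafe(loop.stop)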
5,667
exa-analytics/exa
exa/core/container.py
Container.slice_cardinal
def slice_cardinal(self, key): """ Slice the container according to its (primary) cardinal axis. The "cardinal" axis can have any name so long as the name matches a data object attached to the container. The index name for this object should also match the value of the cardinal axis. The algorithm builds a network graph representing the data relationships (including information about the type of relationship) and then traverses the edge tree (starting from the cardinal table). Each subsequent child object in the tree is sliced based on its relationship with its parent. Note: Breadth first traversal is performed. Warning: This function does not make a copy (if possible): to ensure a new object is created (a copy) use :func:`~exa.core.container.Container.copy` after slicing. .. code-block:: Python myslice = mycontainer[::2].copy() See Also: For data network generation, see :func:`~exa.core.container.Container.network`. For information about relationships between data objects see :mod:`~exa.core.numerical`. """ if self._cardinal: cls = self.__class__ key = check_key(self[self._cardinal], key, cardinal=True) g = self.network(fig=False) kwargs = {self._cardinal: self[self._cardinal].ix[key], 'name': self.name, 'description': self.description, 'meta': self.meta} # Next traverse, breadth first, all data objects for parent, child in nx.bfs_edges(g, self._cardinal): if child in kwargs: continue typ = g.edge_types[(parent, child)] if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'): kwargs[child] = self[child].slice_cardinal(key) elif typ == 'index-index': # Select from the child on the parent's index (the parent is # in the kwargs already). kwargs[child] = self[child].ix[kwargs[parent].index.values] elif typ == 'index-column': # Select from the child where the column (of the same name as # the parent) is in the parent's index values cdf = self[child] kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)] elif typ == 'column-index': # Select from the child where the child's index is in the # column of the parent. Note that this relationship cdf = self[child] cin = cdf.index.name cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())] index = kwargs[parent][cols].stack().astype(np.int64).values kwargs[child] = cdf[cdf.index.isin(index)] return cls(**kwargs)
python
def slice_cardinal(self, key): """ Slice the container according to its (primary) cardinal axis. The "cardinal" axis can have any name so long as the name matches a data object attached to the container. The index name for this object should also match the value of the cardinal axis. The algorithm builds a network graph representing the data relationships (including information about the type of relationship) and then traverses the edge tree (starting from the cardinal table). Each subsequent child object in the tree is sliced based on its relationship with its parent. Note: Breadth first traversal is performed. Warning: This function does not make a copy (if possible): to ensure a new object is created (a copy) use :func:`~exa.core.container.Container.copy` after slicing. .. code-block:: Python myslice = mycontainer[::2].copy() See Also: For data network generation, see :func:`~exa.core.container.Container.network`. For information about relationships between data objects see :mod:`~exa.core.numerical`. """ if self._cardinal: cls = self.__class__ key = check_key(self[self._cardinal], key, cardinal=True) g = self.network(fig=False) kwargs = {self._cardinal: self[self._cardinal].ix[key], 'name': self.name, 'description': self.description, 'meta': self.meta} # Next traverse, breadth first, all data objects for parent, child in nx.bfs_edges(g, self._cardinal): if child in kwargs: continue typ = g.edge_types[(parent, child)] if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'): kwargs[child] = self[child].slice_cardinal(key) elif typ == 'index-index': # Select from the child on the parent's index (the parent is # in the kwargs already). kwargs[child] = self[child].ix[kwargs[parent].index.values] elif typ == 'index-column': # Select from the child where the column (of the same name as # the parent) is in the parent's index values cdf = self[child] kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)] elif typ == 'column-index': # Select from the child where the child's index is in the # column of the parent. Note that this relationship cdf = self[child] cin = cdf.index.name cols = [col for col in kwargs[parent] if cin == col or (cin == col[:-1] and col[-1].isdigit())] index = kwargs[parent][cols].stack().astype(np.int64).values kwargs[child] = cdf[cdf.index.isin(index)] return cls(**kwargs)
['def', 'slice_cardinal', '(', 'self', ',', 'key', ')', ':', 'if', 'self', '.', '_cardinal', ':', 'cls', '=', 'self', '.', '__class__', 'key', '=', 'check_key', '(', 'self', '[', 'self', '.', '_cardinal', ']', ',', 'key', ',', 'cardinal', '=', 'True', ')', 'g', '=', 'self', '.', 'network', '(', 'fig', '=', 'False', ')', 'kwargs', '=', '{', 'self', '.', '_cardinal', ':', 'self', '[', 'self', '.', '_cardinal', ']', '.', 'ix', '[', 'key', ']', ',', "'name'", ':', 'self', '.', 'name', ',', "'description'", ':', 'self', '.', 'description', ',', "'meta'", ':', 'self', '.', 'meta', '}', '# Next traverse, breadth first, all data objects', 'for', 'parent', ',', 'child', 'in', 'nx', '.', 'bfs_edges', '(', 'g', ',', 'self', '.', '_cardinal', ')', ':', 'if', 'child', 'in', 'kwargs', ':', 'continue', 'typ', '=', 'g', '.', 'edge_types', '[', '(', 'parent', ',', 'child', ')', ']', 'if', 'self', '.', '_cardinal', 'in', 'self', '[', 'child', ']', '.', 'columns', 'and', 'hasattr', '(', 'self', '[', 'child', ']', ',', "'slice_cardinal'", ')', ':', 'kwargs', '[', 'child', ']', '=', 'self', '[', 'child', ']', '.', 'slice_cardinal', '(', 'key', ')', 'elif', 'typ', '==', "'index-index'", ':', "# Select from the child on the parent's index (the parent is", '# in the kwargs already).', 'kwargs', '[', 'child', ']', '=', 'self', '[', 'child', ']', '.', 'ix', '[', 'kwargs', '[', 'parent', ']', '.', 'index', '.', 'values', ']', 'elif', 'typ', '==', "'index-column'", ':', '# Select from the child where the column (of the same name as', "# the parent) is in the parent's index values", 'cdf', '=', 'self', '[', 'child', ']', 'kwargs', '[', 'child', ']', '=', 'cdf', '[', 'cdf', '[', 'parent', ']', '.', 'isin', '(', 'kwargs', '[', 'parent', ']', '.', 'index', '.', 'values', ')', ']', 'elif', 'typ', '==', "'column-index'", ':', "# Select from the child where the child's index is in the", '# column of the parent. Note that this relationship', 'cdf', '=', 'self', '[', 'child', ']', 'cin', '=', 'cdf', '.', 'index', '.', 'name', 'cols', '=', '[', 'col', 'for', 'col', 'in', 'kwargs', '[', 'parent', ']', 'if', 'cin', '==', 'col', 'or', '(', 'cin', '==', 'col', '[', ':', '-', '1', ']', 'and', 'col', '[', '-', '1', ']', '.', 'isdigit', '(', ')', ')', ']', 'index', '=', 'kwargs', '[', 'parent', ']', '[', 'cols', ']', '.', 'stack', '(', ')', '.', 'astype', '(', 'np', '.', 'int64', ')', '.', 'values', 'kwargs', '[', 'child', ']', '=', 'cdf', '[', 'cdf', '.', 'index', '.', 'isin', '(', 'index', ')', ']', 'return', 'cls', '(', '*', '*', 'kwargs', ')']
Slice the container according to its (primary) cardinal axis. The "cardinal" axis can have any name so long as the name matches a data object attached to the container. The index name for this object should also match the value of the cardinal axis. The algorithm builds a network graph representing the data relationships (including information about the type of relationship) and then traverses the edge tree (starting from the cardinal table). Each subsequent child object in the tree is sliced based on its relationship with its parent. Note: Breadth first traversal is performed. Warning: This function does not make a copy (if possible): to ensure a new object is created (a copy) use :func:`~exa.core.container.Container.copy` after slicing. .. code-block:: Python myslice = mycontainer[::2].copy() See Also: For data network generation, see :func:`~exa.core.container.Container.network`. For information about relationships between data objects see :mod:`~exa.core.numerical`.
['Slice', 'the', 'container', 'according', 'to', 'its', '(', 'primary', ')', 'cardinal', 'axis', '.']
train
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/container.py#L84-L144
5,668
benmoran56/esper
esper.py
World._get_component
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]: """Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples. """ entity_db = self._entities for entity in self._components.get(component_type, []): yield entity, entity_db[entity][component_type]
python
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]: """Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples. """ entity_db = self._entities for entity in self._components.get(component_type, []): yield entity, entity_db[entity][component_type]
['def', '_get_component', '(', 'self', ',', 'component_type', ':', 'Type', '[', 'C', ']', ')', '->', 'Iterable', '[', 'Tuple', '[', 'int', ',', 'C', ']', ']', ':', 'entity_db', '=', 'self', '.', '_entities', 'for', 'entity', 'in', 'self', '.', '_components', '.', 'get', '(', 'component_type', ',', '[', ']', ')', ':', 'yield', 'entity', ',', 'entity_db', '[', 'entity', ']', '[', 'component_type', ']']
Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples.
['Get', 'an', 'iterator', 'for', 'Entity', 'Component', 'pairs', '.']
train
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L224-L233
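_get_component backs esper's public get_component(); a usage sketch assuming the esper package and its World-based API:

import esper

class Position:
    def __init__(self, x, y):
        self.x, self.y = x, y

world = esper.World()
world.create_entity(Position(1, 2))

for entity, pos in world.get_component(Position):
    print(entity, pos.x, pos.y)   # e.g. 1 1 2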
5,669
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
XDockToolbar.rebuild
def rebuild(self): """ Rebuilds the widget based on the position and current size/location of its parent. """ if not self.isVisible(): return self.raise_() max_size = self.maximumPixmapSize() min_size = self.minimumPixmapSize() widget = self.window() rect = widget.rect() rect.setBottom(rect.bottom() - widget.statusBar().height()) rect.setTop(widget.menuBar().height()) offset = self.padding() # align this widget to the north if self.position() == XDockToolbar.Position.North: self.move(rect.left(), rect.top()) self.resize(rect.width(), min_size.height() + offset) # align this widget to the east elif self.position() == XDockToolbar.Position.East: self.move(rect.left(), rect.top()) self.resize(min_size.width() + offset, rect.height()) # align this widget to the south elif self.position() == XDockToolbar.Position.South: self.move(rect.left(), rect.top() - min_size.height() - offset) self.resize(rect.width(), min_size.height() + offset) # align this widget to the west else: self.move(rect.right() - min_size.width() - offset, rect.top()) self.resize(min_size.width() + offset, rect.height())
python
def rebuild(self): """ Rebuilds the widget based on the position and current size/location of its parent. """ if not self.isVisible(): return self.raise_() max_size = self.maximumPixmapSize() min_size = self.minimumPixmapSize() widget = self.window() rect = widget.rect() rect.setBottom(rect.bottom() - widget.statusBar().height()) rect.setTop(widget.menuBar().height()) offset = self.padding() # align this widget to the north if self.position() == XDockToolbar.Position.North: self.move(rect.left(), rect.top()) self.resize(rect.width(), min_size.height() + offset) # align this widget to the east elif self.position() == XDockToolbar.Position.East: self.move(rect.left(), rect.top()) self.resize(min_size.width() + offset, rect.height()) # align this widget to the south elif self.position() == XDockToolbar.Position.South: self.move(rect.left(), rect.top() - min_size.height() - offset) self.resize(rect.width(), min_size.height() + offset) # align this widget to the west else: self.move(rect.right() - min_size.width() - offset, rect.top()) self.resize(min_size.width() + offset, rect.height())
['def', 'rebuild', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'isVisible', '(', ')', ':', 'return', 'self', '.', 'raise_', '(', ')', 'max_size', '=', 'self', '.', 'maximumPixmapSize', '(', ')', 'min_size', '=', 'self', '.', 'minimumPixmapSize', '(', ')', 'widget', '=', 'self', '.', 'window', '(', ')', 'rect', '=', 'widget', '.', 'rect', '(', ')', 'rect', '.', 'setBottom', '(', 'rect', '.', 'bottom', '(', ')', '-', 'widget', '.', 'statusBar', '(', ')', '.', 'height', '(', ')', ')', 'rect', '.', 'setTop', '(', 'widget', '.', 'menuBar', '(', ')', '.', 'height', '(', ')', ')', 'offset', '=', 'self', '.', 'padding', '(', ')', '# align this widget to the north\r', 'if', 'self', '.', 'position', '(', ')', '==', 'XDockToolbar', '.', 'Position', '.', 'North', ':', 'self', '.', 'move', '(', 'rect', '.', 'left', '(', ')', ',', 'rect', '.', 'top', '(', ')', ')', 'self', '.', 'resize', '(', 'rect', '.', 'width', '(', ')', ',', 'min_size', '.', 'height', '(', ')', '+', 'offset', ')', '# align this widget to the east\r', 'elif', 'self', '.', 'position', '(', ')', '==', 'XDockToolbar', '.', 'Position', '.', 'East', ':', 'self', '.', 'move', '(', 'rect', '.', 'left', '(', ')', ',', 'rect', '.', 'top', '(', ')', ')', 'self', '.', 'resize', '(', 'min_size', '.', 'width', '(', ')', '+', 'offset', ',', 'rect', '.', 'height', '(', ')', ')', '# align this widget to the south\r', 'elif', 'self', '.', 'position', '(', ')', '==', 'XDockToolbar', '.', 'Position', '.', 'South', ':', 'self', '.', 'move', '(', 'rect', '.', 'left', '(', ')', ',', 'rect', '.', 'top', '(', ')', '-', 'min_size', '.', 'height', '(', ')', '-', 'offset', ')', 'self', '.', 'resize', '(', 'rect', '.', 'width', '(', ')', ',', 'min_size', '.', 'height', '(', ')', '+', 'offset', ')', '# align this widget to the west\r', 'else', ':', 'self', '.', 'move', '(', 'rect', '.', 'right', '(', ')', '-', 'min_size', '.', 'width', '(', ')', '-', 'offset', ',', 'rect', '.', 'top', '(', ')', ')', 'self', '.', 'resize', '(', 'min_size', '.', 'width', '(', ')', '+', 'offset', ',', 'rect', '.', 'height', '(', ')', ')']
Rebuilds the widget based on the position and current size/location of its parent.
['Rebuilds', 'the', 'widget', 'based', 'on', 'the', 'position', 'and', 'current', 'size', '/', 'location', 'of', 'its', 'parent', '.']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L452-L488
5,670
pypa/pipenv
pipenv/vendor/tomlkit/source.py
Source.inc
def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool """ Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance. """ try: self._idx, self._current = next(self._chars) return True except StopIteration: self._idx = len(self) self._current = self.EOF if exception: raise self.parse_error(exception) return False
python
def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool """ Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance. """ try: self._idx, self._current = next(self._chars) return True except StopIteration: self._idx = len(self) self._current = self.EOF if exception: raise self.parse_error(exception) return False
['def', 'inc', '(', 'self', ',', 'exception', '=', 'None', ')', ':', '# type: (Optional[ParseError.__class__]) -> bool', 'try', ':', 'self', '.', '_idx', ',', 'self', '.', '_current', '=', 'next', '(', 'self', '.', '_chars', ')', 'return', 'True', 'except', 'StopIteration', ':', 'self', '.', '_idx', '=', 'len', '(', 'self', ')', 'self', '.', '_current', '=', 'self', '.', 'EOF', 'if', 'exception', ':', 'raise', 'self', '.', 'parse_error', '(', 'exception', ')', 'return', 'False']
Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance.
['Increments', 'the', 'parser', 'if', 'the', 'end', 'of', 'the', 'input', 'has', 'not', 'been', 'reached', '.', 'Returns', 'whether', 'or', 'not', 'it', 'was', 'able', 'to', 'advance', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/tomlkit/source.py#L117-L132
5,671
abilian/abilian-core
abilian/web/attachments/extension.py
AttachmentExtension.manager
def manager(self, obj): """Returns the :class:`AttachmentsManager` instance for this object.""" manager = getattr(obj, _MANAGER_ATTR, None) if manager is None: manager = AttachmentsManager() setattr(obj.__class__, _MANAGER_ATTR, manager) return manager
python
def manager(self, obj): """Returns the :class:`AttachmentsManager` instance for this object.""" manager = getattr(obj, _MANAGER_ATTR, None) if manager is None: manager = AttachmentsManager() setattr(obj.__class__, _MANAGER_ATTR, manager) return manager
['def', 'manager', '(', 'self', ',', 'obj', ')', ':', 'manager', '=', 'getattr', '(', 'obj', ',', '_MANAGER_ATTR', ',', 'None', ')', 'if', 'manager', 'is', 'None', ':', 'manager', '=', 'AttachmentsManager', '(', ')', 'setattr', '(', 'obj', '.', '__class__', ',', '_MANAGER_ATTR', ',', 'manager', ')', 'return', 'manager']
Returns the :class:`AttachmentsManager` instance for this object.
['Returns', 'the', ':', 'class', ':', 'AttachmentsManager', 'instance', 'for', 'this', 'object', '.']
train
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/attachments/extension.py#L28-L35
5,672
foremast/foremast
src/foremast/awslambda/api_gateway_event/api_gateway_event.py
APIGateway.find_api_id
def find_api_id(self): """Given API name, find API ID.""" allapis = self.client.get_rest_apis() api_name = self.trigger_settings['api_name'] api_id = None for api in allapis['items']: if api['name'] == api_name: api_id = api['id'] self.log.info("Found API for: %s", api_name) break else: api_id = self.create_api() return api_id
python
def find_api_id(self): """Given API name, find API ID.""" allapis = self.client.get_rest_apis() api_name = self.trigger_settings['api_name'] api_id = None for api in allapis['items']: if api['name'] == api_name: api_id = api['id'] self.log.info("Found API for: %s", api_name) break else: api_id = self.create_api() return api_id
['def', 'find_api_id', '(', 'self', ')', ':', 'allapis', '=', 'self', '.', 'client', '.', 'get_rest_apis', '(', ')', 'api_name', '=', 'self', '.', 'trigger_settings', '[', "'api_name'", ']', 'api_id', '=', 'None', 'for', 'api', 'in', 'allapis', '[', "'items'", ']', ':', 'if', 'api', '[', "'name'", ']', '==', 'api_name', ':', 'api_id', '=', 'api', '[', "'id'", ']', 'self', '.', 'log', '.', 'info', '(', '"Found API for: %s"', ',', 'api_name', ')', 'break', 'else', ':', 'api_id', '=', 'self', '.', 'create_api', '(', ')', 'return', 'api_id']
Given API name, find API ID.
['Given', 'API', 'name', 'find', 'API', 'ID', '.']
train
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/api_gateway_event/api_gateway_event.py#L59-L72
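find_api_id() relies on Python's for/else: the else branch runs only when the loop finishes without hitting break. A standalone sketch of this search-or-create pattern:

def find_or_create(items, name, create):
    for item in items:
        if item["name"] == name:
            found_id = item["id"]
            break
    else:
        found_id = create()   # no match: the loop never hit break
    return found_id

apis = [{"name": "billing", "id": "a1"}, {"name": "users", "id": "b2"}]
print(find_or_create(apis, "users", create=lambda: "new-id"))     # b2
print(find_or_create(apis, "payments", create=lambda: "new-id"))  # new-id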
5,673
bokeh/bokeh
bokeh/colors/rgb.py
RGB.to_css
def to_css(self): ''' Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"`` ''' if self.a == 1.0: return "rgb(%d, %d, %d)" % (self.r, self.g, self.b) else: return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
python
def to_css(self): ''' Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"`` ''' if self.a == 1.0: return "rgb(%d, %d, %d)" % (self.r, self.g, self.b) else: return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
['def', 'to_css', '(', 'self', ')', ':', 'if', 'self', '.', 'a', '==', '1.0', ':', 'return', '"rgb(%d, %d, %d)"', '%', '(', 'self', '.', 'r', ',', 'self', '.', 'g', ',', 'self', '.', 'b', ')', 'else', ':', 'return', '"rgba(%d, %d, %d, %s)"', '%', '(', 'self', '.', 'r', ',', 'self', '.', 'g', ',', 'self', '.', 'b', ',', 'self', '.', 'a', ')']
Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"``
['Generate', 'the', 'CSS', 'representation', 'of', 'this', 'RGB', 'color', '.']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/colors/rgb.py#L110-L120
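A usage sketch, assuming bokeh is installed (RGB lives in bokeh.colors and takes an optional alpha):

from bokeh.colors import RGB

print(RGB(255, 0, 0).to_css())         # rgb(255, 0, 0)
print(RGB(255, 0, 0, 0.5).to_css())    # rgba(255, 0, 0, 0.5)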
5,674
twitterdev/tweet_parser
tweet_parser/tweet_checking.py
get_all_keys
def get_all_keys(tweet, parent_key=''): """ Takes a tweet object and recursively returns a list of all keys contained in this level and all nested levels of the tweet. Args: tweet (Tweet): the tweet dict parent_key (str): key from which this process will start, e.g., you can get keys only under some key that is not the top-level key. Returns: list of all keys in nested dicts. Example: >>> import tweet_parser.tweet_checking as tc >>> tweet = {"created_at": 124125125125, "text": "just setting up my twttr", ... "nested_field": {"nested_1": "field", "nested_2": "field2"}} >>> tc.get_all_keys(tweet) ['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2'] """ items = [] for k, v in tweet.items(): new_key = parent_key + " " + k if isinstance(v, dict): items.extend(get_all_keys(v, parent_key=new_key)) else: items.append(new_key.strip(" ")) return items
python
def get_all_keys(tweet, parent_key=''): """ Takes a tweet object and recursively returns a list of all keys contained in this level and all nexstted levels of the tweet. Args: tweet (Tweet): the tweet dict parent_key (str): key from which this process will start, e.g., you can get keys only under some key that is not the top-level key. Returns: list of all keys in nested dicts. Example: >>> import tweet_parser.tweet_checking as tc >>> tweet = {"created_at": 124125125125, "text": "just setting up my twttr", ... "nested_field": {"nested_1": "field", "nested_2": "field2"}} >>> tc.get_all_keys(tweet) ['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2'] """ items = [] for k, v in tweet.items(): new_key = parent_key + " " + k if isinstance(v, dict): items.extend(get_all_keys(v, parent_key=new_key)) else: items.append(new_key.strip(" ")) return items
['def', 'get_all_keys', '(', 'tweet', ',', 'parent_key', '=', "''", ')', ':', 'items', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'tweet', '.', 'items', '(', ')', ':', 'new_key', '=', 'parent_key', '+', '" "', '+', 'k', 'if', 'isinstance', '(', 'v', ',', 'dict', ')', ':', 'items', '.', 'extend', '(', 'get_all_keys', '(', 'v', ',', 'parent_key', '=', 'new_key', ')', ')', 'else', ':', 'items', '.', 'append', '(', 'new_key', '.', 'strip', '(', '" "', ')', ')', 'return', 'items']
Takes a tweet object and recursively returns a list of all keys contained
in this level and all nested levels of the tweet.

Args:
    tweet (Tweet): the tweet dict
    parent_key (str): key from which this process will start, e.g.,
        you can get keys only under some key that is not the top-level key.

Returns:
    list of all keys in nested dicts.

Example:
    >>> import tweet_parser.tweet_checking as tc
    >>> tweet = {"created_at": 124125125125, "text": "just setting up my twttr",
    ...          "nested_field": {"nested_1": "field", "nested_2": "field2"}}
    >>> tc.get_all_keys(tweet)
    ['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2']
['Takes', 'a', 'tweet', 'object', 'and', 'recursively', 'returns', 'a', 'list', 'of', 'all', 'keys', 'contained', 'in', 'this', 'level', 'and', 'all', 'nested', 'levels', 'of', 'the', 'tweet', '.']
train
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/tweet_checking.py#L46-L73
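Because nested keys are joined with spaces, the flat key list can be checked against required fields without walking the dict again; a small sketch assuming get_all_keys is in scope (the required field names are illustrative):

required = {'created_at', 'text'}
tweet = {'created_at': 124125125125, 'text': 'just setting up my twttr'}
missing = required - set(get_all_keys(tweet))
if missing:
    raise KeyError('tweet is missing fields: {}'.format(missing))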
5,675
pantsbuild/pants
src/python/pants/option/options.py
Options._check_and_apply_deprecations
def _check_and_apply_deprecations(self, scope, values):
    """Checks whether a ScopeInfo has options specified in a deprecated scope.

    There are two related cases here. Either:
      1) The ScopeInfo has an associated deprecated_scope that was replaced with a
         non-deprecated scope, meaning that the options temporarily live in two locations.
      2) The entire ScopeInfo is deprecated (as in the case of deprecated
         SubsystemDependencies), meaning that the options live in one location.

    In the first case, this method has the side effect of merging options values
    from deprecated scopes into the given values.
    """
    si = self.known_scope_to_info[scope]

    # If this Scope is itself deprecated, report that.
    if si.removal_version:
        explicit_keys = self.for_scope(scope, inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            warn_or_error(
                removal_version=si.removal_version,
                deprecated_entity_description='scope {}'.format(scope),
                hint=si.removal_hint,
            )

    # Check if we're the new name of a deprecated scope, and clone values from that scope.
    # Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's
    # Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must
    # check that scope != deprecated_scope to prevent infinite recursion.
    deprecated_scope = si.deprecated_scope
    if deprecated_scope is not None and scope != deprecated_scope:
        # Do the deprecation check only on keys that were explicitly set on the deprecated scope
        # (and not on its enclosing scopes).
        explicit_keys = self.for_scope(deprecated_scope, inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            # Update our values with those of the deprecated scope (now including values inherited
            # from its enclosing scope).
            # Note that a deprecated val will take precedence over a val of equal rank.
            # This makes the code a bit neater.
            values.update(self.for_scope(deprecated_scope))
            warn_or_error(
                removal_version=self.known_scope_to_info[scope].deprecated_scope_removal_version,
                deprecated_entity_description='scope {}'.format(deprecated_scope),
                hint='Use scope {} instead (options: {})'.format(scope, ', '.join(explicit_keys))
            )
python
def _check_and_apply_deprecations(self, scope, values):
    """Checks whether a ScopeInfo has options specified in a deprecated scope.

    There are two related cases here. Either:
      1) The ScopeInfo has an associated deprecated_scope that was replaced with a
         non-deprecated scope, meaning that the options temporarily live in two locations.
      2) The entire ScopeInfo is deprecated (as in the case of deprecated
         SubsystemDependencies), meaning that the options live in one location.

    In the first case, this method has the side effect of merging options values
    from deprecated scopes into the given values.
    """
    si = self.known_scope_to_info[scope]

    # If this Scope is itself deprecated, report that.
    if si.removal_version:
        explicit_keys = self.for_scope(scope, inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            warn_or_error(
                removal_version=si.removal_version,
                deprecated_entity_description='scope {}'.format(scope),
                hint=si.removal_hint,
            )

    # Check if we're the new name of a deprecated scope, and clone values from that scope.
    # Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's
    # Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must
    # check that scope != deprecated_scope to prevent infinite recursion.
    deprecated_scope = si.deprecated_scope
    if deprecated_scope is not None and scope != deprecated_scope:
        # Do the deprecation check only on keys that were explicitly set on the deprecated scope
        # (and not on its enclosing scopes).
        explicit_keys = self.for_scope(deprecated_scope, inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            # Update our values with those of the deprecated scope (now including values inherited
            # from its enclosing scope).
            # Note that a deprecated val will take precedence over a val of equal rank.
            # This makes the code a bit neater.
            values.update(self.for_scope(deprecated_scope))
            warn_or_error(
                removal_version=self.known_scope_to_info[scope].deprecated_scope_removal_version,
                deprecated_entity_description='scope {}'.format(deprecated_scope),
                hint='Use scope {} instead (options: {})'.format(scope, ', '.join(explicit_keys))
            )
['def', '_check_and_apply_deprecations', '(', 'self', ',', 'scope', ',', 'values', ')', ':', 'si', '=', 'self', '.', 'known_scope_to_info', '[', 'scope', ']', '# If this Scope is itself deprecated, report that.', 'if', 'si', '.', 'removal_version', ':', 'explicit_keys', '=', 'self', '.', 'for_scope', '(', 'scope', ',', 'inherit_from_enclosing_scope', '=', 'False', ')', '.', 'get_explicit_keys', '(', ')', 'if', 'explicit_keys', ':', 'warn_or_error', '(', 'removal_version', '=', 'si', '.', 'removal_version', ',', 'deprecated_entity_description', '=', "'scope {}'", '.', 'format', '(', 'scope', ')', ',', 'hint', '=', 'si', '.', 'removal_hint', ',', ')', "# Check if we're the new name of a deprecated scope, and clone values from that scope.", "# Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's", '# Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must', '# check that scope != deprecated_scope to prevent infinite recursion.', 'deprecated_scope', '=', 'si', '.', 'deprecated_scope', 'if', 'deprecated_scope', 'is', 'not', 'None', 'and', 'scope', '!=', 'deprecated_scope', ':', '# Do the deprecation check only on keys that were explicitly set on the deprecated scope', '# (and not on its enclosing scopes).', 'explicit_keys', '=', 'self', '.', 'for_scope', '(', 'deprecated_scope', ',', 'inherit_from_enclosing_scope', '=', 'False', ')', '.', 'get_explicit_keys', '(', ')', 'if', 'explicit_keys', ':', '# Update our values with those of the deprecated scope (now including values inherited', '# from its enclosing scope).', '# Note that a deprecated val will take precedence over a val of equal rank.', '# This makes the code a bit neater.', 'values', '.', 'update', '(', 'self', '.', 'for_scope', '(', 'deprecated_scope', ')', ')', 'warn_or_error', '(', 'removal_version', '=', 'self', '.', 'known_scope_to_info', '[', 'scope', ']', '.', 'deprecated_scope_removal_version', ',', 'deprecated_entity_description', '=', "'scope {}'", '.', 'format', '(', 'deprecated_scope', ')', ',', 'hint', '=', "'Use scope {} instead (options: {})'", '.', 'format', '(', 'scope', ',', "', '", '.', 'join', '(', 'explicit_keys', ')', ')', ')']
Checks whether a ScopeInfo has options specified in a deprecated scope.

There are two related cases here. Either:
  1) The ScopeInfo has an associated deprecated_scope that was replaced with a
     non-deprecated scope, meaning that the options temporarily live in two locations.
  2) The entire ScopeInfo is deprecated (as in the case of deprecated
     SubsystemDependencies), meaning that the options live in one location.

In the first case, this method has the side effect of merging options values
from deprecated scopes into the given values.
['Checks', 'whether', 'a', 'ScopeInfo', 'has', 'options', 'specified', 'in', 'a', 'deprecated', 'scope', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/option/options.py#L323-L368
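The deprecated-scope handling reduces to "copy values from the old location, then warn"; a stripped-down sketch of that pattern with plain dicts (names and message format are hypothetical, not pants' API):

import warnings

def merge_deprecated(new_values, old_values, old_name, new_name):
    # Old (deprecated) settings win over new ones of equal rank,
    # mirroring the values.update() call in the method above.
    explicit = list(old_values)
    if explicit:
        new_values.update(old_values)
        warnings.warn('scope {} is deprecated; use {} instead (options: {})'
                      .format(old_name, new_name, ', '.join(explicit)))
    return new_values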
5,676
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/workspace_statistics.py
WorkspaceStatisticsInstance._proxy
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
    """
    if self._context is None:
        self._context = WorkspaceStatisticsContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
        )
    return self._context
python
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
    """
    if self._context is None:
        self._context = WorkspaceStatisticsContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
        )
    return self._context
['def', '_proxy', '(', 'self', ')', ':', 'if', 'self', '.', '_context', 'is', 'None', ':', 'self', '.', '_context', '=', 'WorkspaceStatisticsContext', '(', 'self', '.', '_version', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', ')', 'return', 'self', '.', '_context']
Generate an instance context for the instance; the context is capable of
performing various actions. All instance actions are proxied to the context.

:returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
['Generate', 'an', 'instance', 'context', 'for', 'the', 'instance', 'the', 'context', 'is', 'capable', 'of', 'performing', 'various', 'actions', '.', 'All', 'instance', 'actions', 'are', 'proxied', 'to', 'the', 'context']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/workspace_statistics.py#L198-L211
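The same lazy, cached-context idiom in isolation (generic names, not Twilio's API; a minimal sketch of the build-once-on-first-use pattern):

class LazyProxy:
    def __init__(self, factory):
        self._factory = factory
        self._context = None

    @property
    def context(self):
        if self._context is None:      # build once, on first access
            self._context = self._factory()
        return self._context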
5,677
Robpol86/Flask-Statics-Helper
flask_statics/helpers.py
get_resources
def get_resources(minify=False):
    """Find all resources which subclass ResourceBase.

    Keyword arguments:
    minify -- select minified resources if available.

    Returns:
    Dictionary of available resources. Keys are resource names (part of the config
    variable names), values are dicts with css and js keys, and tuples of resources
    as values.
    """
    all_resources = dict()
    subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
    for resource in subclasses:
        obj = resource(minify)
        all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
    return all_resources
python
def get_resources(minify=False):
    """Find all resources which subclass ResourceBase.

    Keyword arguments:
    minify -- select minified resources if available.

    Returns:
    Dictionary of available resources. Keys are resource names (part of the config
    variable names), values are dicts with css and js keys, and tuples of resources
    as values.
    """
    all_resources = dict()
    subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
    for resource in subclasses:
        obj = resource(minify)
        all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
    return all_resources
['def', 'get_resources', '(', 'minify', '=', 'False', ')', ':', 'all_resources', '=', 'dict', '(', ')', 'subclasses', '=', 'resource_base', '.', 'ResourceBase', '.', '__subclasses__', '(', ')', '+', 'resource_definitions', '.', 'ResourceAngular', '.', '__subclasses__', '(', ')', 'for', 'resource', 'in', 'subclasses', ':', 'obj', '=', 'resource', '(', 'minify', ')', 'all_resources', '[', 'resource', '.', 'RESOURCE_NAME', ']', '=', 'dict', '(', 'css', '=', 'tuple', '(', 'obj', '.', 'resources_css', ')', ',', 'js', '=', 'tuple', '(', 'obj', '.', 'resources_js', ')', ')', 'return', 'all_resources']
Find all resources which subclass ResourceBase.

Keyword arguments:
minify -- select minified resources if available.

Returns:
Dictionary of available resources. Keys are resource names (part of the config
variable names), values are dicts with css and js keys, and tuples of resources
as values.
['Find', 'all', 'resources', 'which', 'subclass', 'ResourceBase', '.']
train
https://github.com/Robpol86/Flask-Statics-Helper/blob/b1771e65225f62b760b3ef841b710ff23ef6f83c/flask_statics/helpers.py#L23-L38
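The discovery trick here is `__subclasses__()`; a self-contained sketch of the same registry-by-subclassing idea (class names hypothetical):

class ResourceBase:
    RESOURCE_NAME = None

class ResourceJQuery(ResourceBase):
    RESOURCE_NAME = 'JQUERY'

print([cls.RESOURCE_NAME for cls in ResourceBase.__subclasses__()])
# ['JQUERY']

Note that `__subclasses__()` only returns direct subclasses, which is presumably why the original also walks ResourceAngular's subclasses explicitly.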
5,678
tanghaibao/goatools
goatools/grouper/sorter.py
Sorter.get_desc2nts
def get_desc2nts(self, **kws_usr):
    """Return grouped, sorted namedtuples in either format: flat, sections."""
    # desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
    # keys_nts: hdrgo_prt section_prt top_n use_sections
    kws_nts = {k: v for k, v in kws_usr.items() if k in self.keys_nts}
    return self.get_desc2nts_fnc(**kws_nts)
python
def get_desc2nts(self, **kws_usr):
    """Return grouped, sorted namedtuples in either format: flat, sections."""
    # desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
    # keys_nts: hdrgo_prt section_prt top_n use_sections
    kws_nts = {k: v for k, v in kws_usr.items() if k in self.keys_nts}
    return self.get_desc2nts_fnc(**kws_nts)
['def', 'get_desc2nts', '(', 'self', ',', '*', '*', 'kws_usr', ')', ':', '# desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)', '# keys_nts: hdrgo_prt section_prt top_n use_sections', 'kws_nts', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'kws_usr', '.', 'items', '(', ')', 'if', 'k', 'in', 'self', '.', 'keys_nts', '}', 'return', 'self', '.', 'get_desc2nts_fnc', '(', '*', '*', 'kws_nts', ')']
Return grouped, sorted namedtuples in either format: flat, sections.
['Return', 'grouped', 'sorted', 'namedtuples', 'in', 'either', 'format', ':', 'flat', 'sections', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/sorter.py#L87-L92
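The kwargs-whitelisting idiom on its own; a minimal sketch (the allowed keys are taken from the comment in the function above, the helper name is hypothetical):

keys_nts = {'hdrgo_prt', 'section_prt', 'top_n', 'use_sections'}

def filter_kwargs(allowed, **kws):
    # Keep only the keyword arguments the downstream call understands.
    return {k: v for k, v in kws.items() if k in allowed}

print(filter_kwargs(keys_nts, top_n=5, unrelated=True))  # {'top_n': 5}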
5,679
saltstack/salt
salt/proxy/marathon.py
ping
def ping():
    '''
    Is the marathon api responding?
    '''
    try:
        response = salt.utils.http.query(
            "{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
            decode_type='plain',
            decode=True,
        )
        log.debug(
            'marathon.info returned successfully: %s',
            response,
        )
        if 'text' in response and response['text'].strip() == 'pong':
            return True
    except Exception as ex:
        log.error(
            'error calling marathon.info with base_url %s: %s',
            CONFIG[CONFIG_BASE_URL],
            ex,
        )
    return False
python
def ping():
    '''
    Is the marathon api responding?
    '''
    try:
        response = salt.utils.http.query(
            "{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
            decode_type='plain',
            decode=True,
        )
        log.debug(
            'marathon.info returned successfully: %s',
            response,
        )
        if 'text' in response and response['text'].strip() == 'pong':
            return True
    except Exception as ex:
        log.error(
            'error calling marathon.info with base_url %s: %s',
            CONFIG[CONFIG_BASE_URL],
            ex,
        )
    return False
['def', 'ping', '(', ')', ':', 'try', ':', 'response', '=', 'salt', '.', 'utils', '.', 'http', '.', 'query', '(', '"{0}/ping"', '.', 'format', '(', 'CONFIG', '[', 'CONFIG_BASE_URL', ']', ')', ',', 'decode_type', '=', "'plain'", ',', 'decode', '=', 'True', ',', ')', 'log', '.', 'debug', '(', "'marathon.info returned successfully: %s'", ',', 'response', ',', ')', 'if', "'text'", 'in', 'response', 'and', 'response', '[', "'text'", ']', '.', 'strip', '(', ')', '==', "'pong'", ':', 'return', 'True', 'except', 'Exception', 'as', 'ex', ':', 'log', '.', 'error', '(', "'error calling marathon.info with base_url %s: %s'", ',', 'CONFIG', '[', 'CONFIG_BASE_URL', ']', ',', 'ex', ',', ')', 'return', 'False']
Is the marathon api responding?
['Is', 'the', 'marathon', 'api', 'responding?']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/marathon.py#L54-L76
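A minimal standalone equivalent of the health check using requests instead of salt.utils.http (illustrative only, not salt's API):

import requests

def marathon_ping(base_url):
    try:
        resp = requests.get('{0}/ping'.format(base_url), timeout=5)
        return resp.text.strip() == 'pong'
    except requests.RequestException:
        return False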
5,680
coin-or/GiMPy
src/gimpy/graph.py
Graph.minimum_spanning_tree_kruskal
def minimum_spanning_tree_kruskal(self, display = None, components = None):
    '''
    API: minimum_spanning_tree_kruskal(self, display = None, components = None)
    Description:
    Determines a minimum spanning tree using Kruskal's Algorithm.
    Input:
        display: Display method.
        component: component number.
    Post:
        'color' attribute of nodes and edges may change.
    Return:
        Returns list of edges where edges are tuples in (source,sink) format.
    '''
    if display == None:
        display = self.attr['display']
    else:
        self.set_display_mode(display)
    if components is None:
        components = DisjointSet(display = display, layout = 'dot',
                                 optimize = False)
    sorted_edge_list = sorted(self.get_edge_list(), key=self.get_edge_cost)
    edges = []
    for n in self.get_node_list():
        components.add([n])
    components.display()
    for e in sorted_edge_list:
        if len(edges) == len(self.get_node_list()) - 1:
            break
        self.set_edge_attr(e[0], e[1], 'color', 'yellow')
        self.display()
        if components.union(e[0], e[1]):
            self.set_edge_attr(e[0], e[1], 'color', 'green')
            self.display()
            edges.append(e)
        else:
            self.set_edge_attr(e[0], e[1], 'color', 'black')
            self.display()
        components.display()
    return edges
python
def minimum_spanning_tree_kruskal(self, display = None, components = None):
    '''
    API: minimum_spanning_tree_kruskal(self, display = None, components = None)
    Description:
    Determines a minimum spanning tree using Kruskal's Algorithm.
    Input:
        display: Display method.
        component: component number.
    Post:
        'color' attribute of nodes and edges may change.
    Return:
        Returns list of edges where edges are tuples in (source,sink) format.
    '''
    if display == None:
        display = self.attr['display']
    else:
        self.set_display_mode(display)
    if components is None:
        components = DisjointSet(display = display, layout = 'dot',
                                 optimize = False)
    sorted_edge_list = sorted(self.get_edge_list(), key=self.get_edge_cost)
    edges = []
    for n in self.get_node_list():
        components.add([n])
    components.display()
    for e in sorted_edge_list:
        if len(edges) == len(self.get_node_list()) - 1:
            break
        self.set_edge_attr(e[0], e[1], 'color', 'yellow')
        self.display()
        if components.union(e[0], e[1]):
            self.set_edge_attr(e[0], e[1], 'color', 'green')
            self.display()
            edges.append(e)
        else:
            self.set_edge_attr(e[0], e[1], 'color', 'black')
            self.display()
        components.display()
    return edges
['def', 'minimum_spanning_tree_kruskal', '(', 'self', ',', 'display', '=', 'None', ',', 'components', '=', 'None', ')', ':', 'if', 'display', '==', 'None', ':', 'display', '=', 'self', '.', 'attr', '[', "'display'", ']', 'else', ':', 'self', '.', 'set_display_mode', '(', 'display', ')', 'if', 'components', 'is', 'None', ':', 'components', '=', 'DisjointSet', '(', 'display', '=', 'display', ',', 'layout', '=', "'dot'", ',', 'optimize', '=', 'False', ')', 'sorted_edge_list', '=', 'sorted', '(', 'self', '.', 'get_edge_list', '(', ')', ',', 'key', '=', 'self', '.', 'get_edge_cost', ')', 'edges', '=', '[', ']', 'for', 'n', 'in', 'self', '.', 'get_node_list', '(', ')', ':', 'components', '.', 'add', '(', '[', 'n', ']', ')', 'components', '.', 'display', '(', ')', 'for', 'e', 'in', 'sorted_edge_list', ':', 'if', 'len', '(', 'edges', ')', '==', 'len', '(', 'self', '.', 'get_node_list', '(', ')', ')', '-', '1', ':', 'break', 'self', '.', 'set_edge_attr', '(', 'e', '[', '0', ']', ',', 'e', '[', '1', ']', ',', "'color'", ',', "'yellow'", ')', 'self', '.', 'display', '(', ')', 'if', 'components', '.', 'union', '(', 'e', '[', '0', ']', ',', 'e', '[', '1', ']', ')', ':', 'self', '.', 'set_edge_attr', '(', 'e', '[', '0', ']', ',', 'e', '[', '1', ']', ',', "'color'", ',', "'green'", ')', 'self', '.', 'display', '(', ')', 'edges', '.', 'append', '(', 'e', ')', 'else', ':', 'self', '.', 'set_edge_attr', '(', 'e', '[', '0', ']', ',', 'e', '[', '1', ']', ',', "'color'", ',', "'black'", ')', 'self', '.', 'display', '(', ')', 'components', '.', 'display', '(', ')', 'return', 'edges']
API: minimum_spanning_tree_kruskal(self, display = None, components = None)
Description:
Determines a minimum spanning tree using Kruskal's Algorithm.
Input:
    display: Display method.
    component: component number.
Post:
    'color' attribute of nodes and edges may change.
Return:
    Returns list of edges where edges are tuples in (source,sink) format.
['API', ':', 'minimum_spanning_tree_kruskal', '(', 'self', 'display', '=', 'None', 'components', '=', 'None', ')', 'Description', ':', 'Determines', 'a', 'minimum', 'spanning', 'tree', 'using', 'Kruskal', 's', 'Algorithm', '.', 'Input', ':', 'display', ':', 'Display', 'method', '.', 'component', ':', 'component', 'number', '.', 'Post', ':', 'color', 'attribute', 'of', 'nodes', 'and', 'edges', 'may', 'change', '.', 'Return', ':', 'Returns', 'list', 'of', 'edges', 'where', 'edges', 'are', 'tuples', 'in', '(', 'source', 'sink', ')', 'format', '.']
train
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L1217-L1257
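Stripped of the display calls, the algorithm is sort-edges-then-union; a compact standalone Kruskal sketch with a minimal union-find (the (cost, u, v) edge format is illustrative, not GiMPy's):

def kruskal(nodes, edges):
    parent = {n: n for n in nodes}

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    tree = []
    for cost, u, v in sorted(edges):       # cheapest edges first
        ru, rv = find(u), find(v)
        if ru != rv:                       # edge joins two components
            parent[ru] = rv
            tree.append((u, v))
    return tree

print(kruskal(['a', 'b', 'c'], [(1, 'a', 'b'), (2, 'b', 'c'), (3, 'a', 'c')]))
# [('a', 'b'), ('b', 'c')]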
5,681
hobson/pug-invest
pug/invest/util.py
clean_dataframes
def clean_dataframes(dfs):
    """Fill NaNs with the previous value, the next value or if all are NaN then 1.0

    TODO:
        Linear interpolation and extrapolation

    Arguments:
        dfs (list of dataframes): list of dataframes that contain NaNs to be removed

    Returns:
        list of dataframes: list of dataframes with NaNs replaced by interpolated values
    """
    if isinstance(dfs, (list)):
        for df in dfs:
            df = clean_dataframe(df)
        return dfs
    else:
        return [clean_dataframe(dfs)]
python
def clean_dataframes(dfs):
    """Fill NaNs with the previous value, the next value or if all are NaN then 1.0

    TODO:
        Linear interpolation and extrapolation

    Arguments:
        dfs (list of dataframes): list of dataframes that contain NaNs to be removed

    Returns:
        list of dataframes: list of dataframes with NaNs replaced by interpolated values
    """
    if isinstance(dfs, (list)):
        for df in dfs:
            df = clean_dataframe(df)
        return dfs
    else:
        return [clean_dataframe(dfs)]
['def', 'clean_dataframes', '(', 'dfs', ')', ':', 'if', 'isinstance', '(', 'dfs', ',', '(', 'list', ')', ')', ':', 'for', 'df', 'in', 'dfs', ':', 'df', '=', 'clean_dataframe', '(', 'df', ')', 'return', 'dfs', 'else', ':', 'return', '[', 'clean_dataframe', '(', 'dfs', ')', ']']
Fill NaNs with the previous value, the next value or if all are NaN then 1.0

TODO:
    Linear interpolation and extrapolation

Arguments:
    dfs (list of dataframes): list of dataframes that contain NaNs to be removed

Returns:
    list of dataframes: list of dataframes with NaNs replaced by interpolated values
['Fill', 'NaNs', 'with', 'the', 'previous', 'value', 'the', 'next', 'value', 'or', 'if', 'all', 'are', 'NaN', 'then', '1', '.', '0']
train
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/util.py#L113-L130
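Note that the loop rebinds the local name `df`, so the input list is only updated if `clean_dataframe` mutates its argument in place. A pandas sketch of a helper matching the docstring's fill order (previous value, then next value, then 1.0) -- an assumed implementation, not the package's own:

import pandas as pd

def clean_dataframe(df):
    # Forward-fill, then back-fill, then fill anything still NaN with 1.0.
    return df.ffill().bfill().fillna(1.0)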
5,682
mrstephenneal/mysql-toolkit
mysql/toolkit/components/connector.py
Connector._fetch
def _fetch(self, statement, commit, max_attempts=5):
    """
    Execute a SQL query and return a result.

    Recursively disconnect and reconnect to the database
    if an error occurs.
    """
    if self._auto_reconnect:
        attempts = 0
        while attempts < max_attempts:
            try:
                # Execute statement
                self._cursor.execute(statement)
                fetch = self._cursor.fetchall()
                rows = self._fetch_rows(fetch)
                if commit:
                    self._commit()

                # Return a single item if the list only has one item
                return rows[0] if len(rows) == 1 else rows
            except Exception as e:
                if attempts >= max_attempts:
                    raise e
                else:
                    attempts += 1
                    self.reconnect()
                    continue
    else:
        # Execute statement
        self._cursor.execute(statement)
        fetch = self._cursor.fetchall()
        rows = self._fetch_rows(fetch)
        if commit:
            self._commit()

        # Return a single item if the list only has one item
        return rows[0] if len(rows) == 1 else rows
python
def _fetch(self, statement, commit, max_attempts=5):
    """
    Execute a SQL query and return a result.

    Recursively disconnect and reconnect to the database
    if an error occurs.
    """
    if self._auto_reconnect:
        attempts = 0
        while attempts < max_attempts:
            try:
                # Execute statement
                self._cursor.execute(statement)
                fetch = self._cursor.fetchall()
                rows = self._fetch_rows(fetch)
                if commit:
                    self._commit()

                # Return a single item if the list only has one item
                return rows[0] if len(rows) == 1 else rows
            except Exception as e:
                if attempts >= max_attempts:
                    raise e
                else:
                    attempts += 1
                    self.reconnect()
                    continue
    else:
        # Execute statement
        self._cursor.execute(statement)
        fetch = self._cursor.fetchall()
        rows = self._fetch_rows(fetch)
        if commit:
            self._commit()

        # Return a single item if the list only has one item
        return rows[0] if len(rows) == 1 else rows
['def', '_fetch', '(', 'self', ',', 'statement', ',', 'commit', ',', 'max_attempts', '=', '5', ')', ':', 'if', 'self', '.', '_auto_reconnect', ':', 'attempts', '=', '0', 'while', 'attempts', '<', 'max_attempts', ':', 'try', ':', '# Execute statement', 'self', '.', '_cursor', '.', 'execute', '(', 'statement', ')', 'fetch', '=', 'self', '.', '_cursor', '.', 'fetchall', '(', ')', 'rows', '=', 'self', '.', '_fetch_rows', '(', 'fetch', ')', 'if', 'commit', ':', 'self', '.', '_commit', '(', ')', '# Return a single item if the list only has one item', 'return', 'rows', '[', '0', ']', 'if', 'len', '(', 'rows', ')', '==', '1', 'else', 'rows', 'except', 'Exception', 'as', 'e', ':', 'if', 'attempts', '>=', 'max_attempts', ':', 'raise', 'e', 'else', ':', 'attempts', '+=', '1', 'self', '.', 'reconnect', '(', ')', 'continue', 'else', ':', '# Execute statement', 'self', '.', '_cursor', '.', 'execute', '(', 'statement', ')', 'fetch', '=', 'self', '.', '_cursor', '.', 'fetchall', '(', ')', 'rows', '=', 'self', '.', '_fetch_rows', '(', 'fetch', ')', 'if', 'commit', ':', 'self', '.', '_commit', '(', ')', '# Return a single item if the list only has one item', 'return', 'rows', '[', '0', ']', 'if', 'len', '(', 'rows', ')', '==', '1', 'else', 'rows']
Execute a SQL query and return a result. Recursively disconnect and reconnect to the database if an error occurs.
['Execute', 'a', 'SQL', 'query', 'and', 'return', 'a', 'result', '.']
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/connector.py#L113-L149
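The reconnect-and-retry wrapper reduced to its skeleton (generic names, not tied to any MySQL driver). Note that the original's bookkeeping can fall off the end of the while loop and silently return None after exhausting attempts; this sketch re-raises instead:

def with_retries(action, reconnect, max_attempts=5):
    for attempt in range(1, max_attempts + 1):
        try:
            return action()
        except Exception:
            if attempt == max_attempts:
                raise                # give up after the last attempt
            reconnect()              # re-establish the connection, then retry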
5,683
candango/firenado
firenado/config.py
process_config
def process_config(config, config_data):
    """ Populates config with data from the configuration data dict. It
    handles components, data, log, management and session sections from the
    configuration data.

    :param config: The config reference of the object that will hold the
        configuration data from the config_data.
    :param config_data: The configuration data loaded from a configuration
        file.
    """
    if 'components' in config_data:
        process_components_config_section(config, config_data['components'])
    if 'data' in config_data:
        process_data_config_section(config, config_data['data'])
    if 'log' in config_data:
        process_log_config_section(config, config_data['log'])
    if 'management' in config_data:
        process_management_config_section(config, config_data['management'])
    if 'session' in config_data:
        process_session_config_section(config, config_data['session'])
python
def process_config(config, config_data):
    """ Populates config with data from the configuration data dict. It
    handles components, data, log, management and session sections from the
    configuration data.

    :param config: The config reference of the object that will hold the
        configuration data from the config_data.
    :param config_data: The configuration data loaded from a configuration
        file.
    """
    if 'components' in config_data:
        process_components_config_section(config, config_data['components'])
    if 'data' in config_data:
        process_data_config_section(config, config_data['data'])
    if 'log' in config_data:
        process_log_config_section(config, config_data['log'])
    if 'management' in config_data:
        process_management_config_section(config, config_data['management'])
    if 'session' in config_data:
        process_session_config_section(config, config_data['session'])
['def', 'process_config', '(', 'config', ',', 'config_data', ')', ':', 'if', "'components'", 'in', 'config_data', ':', 'process_components_config_section', '(', 'config', ',', 'config_data', '[', "'components'", ']', ')', 'if', "'data'", 'in', 'config_data', ':', 'process_data_config_section', '(', 'config', ',', 'config_data', '[', "'data'", ']', ')', 'if', "'log'", 'in', 'config_data', ':', 'process_log_config_section', '(', 'config', ',', 'config_data', '[', "'log'", ']', ')', 'if', "'management'", 'in', 'config_data', ':', 'process_management_config_section', '(', 'config', ',', 'config_data', '[', "'management'", ']', ')', 'if', "'session'", 'in', 'config_data', ':', 'process_session_config_section', '(', 'config', ',', 'config_data', '[', "'session'", ']', ')']
Populates config with data from the configuration data dict. It handles
components, data, log, management and session sections from the
configuration data.

:param config: The config reference of the object that will hold the
    configuration data from the config_data.
:param config_data: The configuration data loaded from a configuration file.
['Populates', 'config', 'with', 'data', 'from', 'the', 'configuration', 'data', 'dict', '.', 'It', 'handles', 'components', 'data', 'log', 'management', 'and', 'session', 'sections', 'from', 'the', 'configuration', 'data', '.']
train
https://github.com/candango/firenado/blob/4b1f628e485b521e161d64169c46a9818f26949f/firenado/config.py#L124-L143
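The same section dispatch can be table-driven; an equivalent sketch (handler names are those from the function above and are assumed to be in scope):

SECTION_HANDLERS = {
    'components': process_components_config_section,
    'data': process_data_config_section,
    'log': process_log_config_section,
    'management': process_management_config_section,
    'session': process_session_config_section,
}

def process_config_table_driven(config, config_data):
    for section, handler in SECTION_HANDLERS.items():
        if section in config_data:
            handler(config, config_data[section])

A table makes adding a new section a one-line change, at the cost of a fixed iteration order that differs from the explicit if-chain.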
5,684
fishtown-analytics/dbt
core/dbt/clients/system.py
make_symlink
def make_symlink(source, link_path):
    """
    Create a symlink at `link_path` referring to `source`.
    """
    if not supports_symlinks():
        dbt.exceptions.system_error('create a symbolic link')

    return os.symlink(source, link_path)
python
def make_symlink(source, link_path):
    """
    Create a symlink at `link_path` referring to `source`.
    """
    if not supports_symlinks():
        dbt.exceptions.system_error('create a symbolic link')

    return os.symlink(source, link_path)
['def', 'make_symlink', '(', 'source', ',', 'link_path', ')', ':', 'if', 'not', 'supports_symlinks', '(', ')', ':', 'dbt', '.', 'exceptions', '.', 'system_error', '(', "'create a symbolic link'", ')', 'return', 'os', '.', 'symlink', '(', 'source', ',', 'link_path', ')']
Create a symlink at `link_path` referring to `source`.
['Create', 'a', 'symlink', 'at', 'link_path', 'referring', 'to', 'source', '.']
train
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/clients/system.py#L102-L109
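Direct usage of the underlying standard-library call, platform permitting (paths are hypothetical):

import os

os.symlink('/tmp/source_dir', '/tmp/link_name')
assert os.path.islink('/tmp/link_name')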
5,685
dcos/shakedown
shakedown/dcos/package.py
uninstall_package_and_data
def uninstall_package_and_data(
        package_name,
        service_name=None,
        role=None,
        principal=None,
        zk_node=None,
        timeout_sec=600):
    """ Uninstall a package via the DC/OS library, wait for completion,
    and delete any persistent data

    :param package_name: name of the package
    :type package_name: str
    :param service_name: unique service name for the package
    :type service_name: str
    :param role: role to use when deleting data, or <service_name>-role if unset
    :type role: str, or None
    :param principal: principal to use when deleting data, or <service_name>-principal if unset
    :type principal: str, or None
    :param zk_node: zk node to delete, or dcos-service-<service_name> if unset
    :type zk_node: str, or None
    :param wait_for_completion: whether or not to wait for task completion before returning
    :type wait_for_completion: bool
    :param timeout_sec: number of seconds to wait for task completion
    :type timeout_sec: int
    """
    start = time.time()

    if service_name is None:
        pkg = _get_package_manager().get_package_version(package_name, None)
        service_name = _get_service_name(package_name, pkg)
    print('\n{}uninstalling/deleting {}'.format(shakedown.cli.helpers.fchr('>>'), service_name))

    try:
        uninstall_package_and_wait(package_name, service_name=service_name, timeout_sec=timeout_sec)
    except (errors.DCOSException, ValueError) as e:
        print('Got exception when uninstalling package, ' +
              'continuing with janitor anyway: {}'.format(e))

    data_start = time.time()

    if (not role or not principal or not zk_node) and service_name is None:
        raise DCOSException('service_name must be provided when data params are missing AND the package isn\'t installed')
    if not role:
        role = '{}-role'.format(service_name)
    if not zk_node:
        zk_node = 'dcos-service-{}'.format(service_name)
    delete_persistent_data(role, zk_node)

    finish = time.time()

    print('\n{}uninstall/delete done after pkg({}) + data({}) = total({})\n'.format(
        shakedown.cli.helpers.fchr('>>'),
        pretty_duration(data_start - start),
        pretty_duration(finish - data_start),
        pretty_duration(finish - start)))
python
def uninstall_package_and_data(
        package_name,
        service_name=None,
        role=None,
        principal=None,
        zk_node=None,
        timeout_sec=600):
    """ Uninstall a package via the DC/OS library, wait for completion,
    and delete any persistent data

    :param package_name: name of the package
    :type package_name: str
    :param service_name: unique service name for the package
    :type service_name: str
    :param role: role to use when deleting data, or <service_name>-role if unset
    :type role: str, or None
    :param principal: principal to use when deleting data, or <service_name>-principal if unset
    :type principal: str, or None
    :param zk_node: zk node to delete, or dcos-service-<service_name> if unset
    :type zk_node: str, or None
    :param wait_for_completion: whether or not to wait for task completion before returning
    :type wait_for_completion: bool
    :param timeout_sec: number of seconds to wait for task completion
    :type timeout_sec: int
    """
    start = time.time()

    if service_name is None:
        pkg = _get_package_manager().get_package_version(package_name, None)
        service_name = _get_service_name(package_name, pkg)
    print('\n{}uninstalling/deleting {}'.format(shakedown.cli.helpers.fchr('>>'), service_name))

    try:
        uninstall_package_and_wait(package_name, service_name=service_name, timeout_sec=timeout_sec)
    except (errors.DCOSException, ValueError) as e:
        print('Got exception when uninstalling package, ' +
              'continuing with janitor anyway: {}'.format(e))

    data_start = time.time()

    if (not role or not principal or not zk_node) and service_name is None:
        raise DCOSException('service_name must be provided when data params are missing AND the package isn\'t installed')
    if not role:
        role = '{}-role'.format(service_name)
    if not zk_node:
        zk_node = 'dcos-service-{}'.format(service_name)
    delete_persistent_data(role, zk_node)

    finish = time.time()

    print('\n{}uninstall/delete done after pkg({}) + data({}) = total({})\n'.format(
        shakedown.cli.helpers.fchr('>>'),
        pretty_duration(data_start - start),
        pretty_duration(finish - data_start),
        pretty_duration(finish - start)))
['def', 'uninstall_package_and_data', '(', 'package_name', ',', 'service_name', '=', 'None', ',', 'role', '=', 'None', ',', 'principal', '=', 'None', ',', 'zk_node', '=', 'None', ',', 'timeout_sec', '=', '600', ')', ':', 'start', '=', 'time', '.', 'time', '(', ')', 'if', 'service_name', 'is', 'None', ':', 'pkg', '=', '_get_package_manager', '(', ')', '.', 'get_package_version', '(', 'package_name', ',', 'None', ')', 'service_name', '=', '_get_service_name', '(', 'package_name', ',', 'pkg', ')', 'print', '(', "'\\n{}uninstalling/deleting {}'", '.', 'format', '(', 'shakedown', '.', 'cli', '.', 'helpers', '.', 'fchr', '(', "'>>'", ')', ',', 'service_name', ')', ')', 'try', ':', 'uninstall_package_and_wait', '(', 'package_name', ',', 'service_name', '=', 'service_name', ',', 'timeout_sec', '=', 'timeout_sec', ')', 'except', '(', 'errors', '.', 'DCOSException', ',', 'ValueError', ')', 'as', 'e', ':', 'print', '(', "'Got exception when uninstalling package, '", '+', "'continuing with janitor anyway: {}'", '.', 'format', '(', 'e', ')', ')', 'data_start', '=', 'time', '.', 'time', '(', ')', 'if', '(', 'not', 'role', 'or', 'not', 'principal', 'or', 'not', 'zk_node', ')', 'and', 'service_name', 'is', 'None', ':', 'raise', 'DCOSException', '(', "'service_name must be provided when data params are missing AND the package isn\\'t installed'", ')', 'if', 'not', 'role', ':', 'role', '=', "'{}-role'", '.', 'format', '(', 'service_name', ')', 'if', 'not', 'zk_node', ':', 'zk_node', '=', "'dcos-service-{}'", '.', 'format', '(', 'service_name', ')', 'delete_persistent_data', '(', 'role', ',', 'zk_node', ')', 'finish', '=', 'time', '.', 'time', '(', ')', 'print', '(', "'\\n{}uninstall/delete done after pkg({}) + data({}) = total({})\\n'", '.', 'format', '(', 'shakedown', '.', 'cli', '.', 'helpers', '.', 'fchr', '(', "'>>'", ')', ',', 'pretty_duration', '(', 'data_start', '-', 'start', ')', ',', 'pretty_duration', '(', 'finish', '-', 'data_start', ')', ',', 'pretty_duration', '(', 'finish', '-', 'start', ')', ')', ')']
Uninstall a package via the DC/OS library, wait for completion, and delete
any persistent data

:param package_name: name of the package
:type package_name: str
:param service_name: unique service name for the package
:type service_name: str
:param role: role to use when deleting data, or <service_name>-role if unset
:type role: str, or None
:param principal: principal to use when deleting data, or <service_name>-principal if unset
:type principal: str, or None
:param zk_node: zk node to delete, or dcos-service-<service_name> if unset
:type zk_node: str, or None
:param wait_for_completion: whether or not to wait for task completion before returning
:type wait_for_completion: bool
:param timeout_sec: number of seconds to wait for task completion
:type timeout_sec: int
['Uninstall', 'a', 'package', 'via', 'the', 'DC', '/', 'OS', 'library', 'wait', 'for', 'completion', 'and', 'delete', 'any', 'persistent', 'data']
train
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/package.py#L282-L335
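The default-naming fallbacks in isolation (the service name is illustrative):

service_name = 'cassandra'
role = '{}-role'.format(service_name)             # 'cassandra-role'
zk_node = 'dcos-service-{}'.format(service_name)  # 'dcos-service-cassandra'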
5,686
IndicoDataSolutions/IndicoIo-python
indicoio/utils/preprocessing.py
get_element_type
def get_element_type(_list, dimens):
    """
    Given the dimensions of a nested list and the list, returns the type of the
    elements in the inner list.
    """
    elem = _list
    for _ in range(len(dimens)):
        elem = elem[0]
    return type(elem)
python
def get_element_type(_list, dimens):
    """
    Given the dimensions of a nested list and the list, returns the type of the
    elements in the inner list.
    """
    elem = _list
    for _ in range(len(dimens)):
        elem = elem[0]
    return type(elem)
['def', 'get_element_type', '(', '_list', ',', 'dimens', ')', ':', 'elem', '=', '_list', 'for', '_', 'in', 'range', '(', 'len', '(', 'dimens', ')', ')', ':', 'elem', '=', 'elem', '[', '0', ']', 'return', 'type', '(', 'elem', ')']
Given the dimensions of a nested list and the list, returns the type of the elements in the inner list.
['Given', 'the', 'dimensions', 'of', 'a', 'nested', 'list', 'and', 'the', 'list', 'returns', 'the', 'type', 'of', 'the', 'elements', 'in', 'the', 'inner', 'list', '.']
train
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/utils/preprocessing.py#L109-L117
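A quick check of the descent logic, assuming get_element_type is in scope: with a 2-D list, two index steps reach a scalar:

nested = [[1, 2], [3, 4]]
print(get_element_type(nested, (2, 2)))  # <class 'int'>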
5,687
gregreen/dustmaps
dustmaps/bayestar.py
BayestarQuery.query
def query(self, coords, mode='random_sample', return_flags=False, pct=None):
    """
    Returns reddening at the requested coordinates. There are several different query
    modes, which handle the probabilistic nature of the map differently.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
        mode (Optional[:obj:`str`]): Seven different query modes are available:
            'random_sample', 'random_sample_per_pix', 'samples', 'median', 'mean',
            'best' and 'percentile'. The :obj:`mode` determines how the output will
            reflect the probabilistic nature of the Bayestar dust maps.
        return_flags (Optional[:obj:`bool`]): If :obj:`True`, then QA flags will be
            returned in a second numpy structured array. That is, the query will return
            :obj:`ret`, :obj:`flags`, where :obj:`ret` is the normal return value,
            containing reddening. Defaults to :obj:`False`.
        pct (Optional[:obj:`float` or list/array of :obj:`float`]): If the mode is
            :obj:`percentile`, then :obj:`pct` specifies which percentile(s) is (are)
            returned.

    Returns:
        Reddening at the specified coordinates, in magnitudes of reddening.

        The conversion to E(B-V) (or other reddening units) depends on whether
        :obj:`version='bayestar2017'` (the default) or :obj:`'bayestar2015'` was
        selected when the :obj:`BayestarQuery` object was created. To convert
        Bayestar2017 to Pan-STARRS 1 extinctions, multiply by the coefficients given
        in Table 1 of Green et al. (2018). Conversion to extinction in non-PS1
        passbands depends on the choice of extinction law. To convert Bayestar2015 to
        extinction in various passbands, multiply by the coefficients in Table 6 of
        Schlafly & Finkbeiner (2011). See Green et al. (2015, 2018) for more detailed
        discussion of how to convert the Bayestar dust maps into reddenings or
        extinctions in different passbands.

        The shape of the output depends on the :obj:`mode`, and on whether
        :obj:`coords` contains distances.

        If :obj:`coords` does not specify distance(s), then the shape of the output
        begins with :obj:`coords.shape`. If :obj:`coords` does specify distance(s),
        then the shape of the output begins with
        :obj:`coords.shape + ([number of distance bins],)`.

        If :obj:`mode` is :obj:`'random_sample'`, then at each coordinate/distance, a
        random sample of reddening is given.

        If :obj:`mode` is :obj:`'random_sample_per_pix'`, then the sample chosen for
        each angular pixel of the map will be consistent. For example, if two query
        coordinates lie in the same map pixel, then the same random sample will be
        chosen from the map for both query coordinates.

        If :obj:`mode` is :obj:`'median'`, then at each coordinate/distance, the
        median reddening is returned.

        If :obj:`mode` is :obj:`'mean'`, then at each coordinate/distance, the mean
        reddening is returned.

        If :obj:`mode` is :obj:`'best'`, then at each coordinate/distance, the maximum
        posterior density reddening is returned (the "best fit").

        If :obj:`mode` is :obj:`'percentile'`, then an additional keyword argument,
        :obj:`pct`, must be specified. At each coordinate/distance, the requested
        percentiles (in :obj:`pct`) will be returned. If :obj:`pct` is a list/array,
        then the last axis of the output will correspond to different percentiles.

        Finally, if :obj:`mode` is :obj:`'samples'`, then at each coordinate/distance,
        all samples are returned. The last axis of the output will correspond to
        different samples.

        If :obj:`return_flags` is :obj:`True`, then in addition to reddening, a
        structured array containing QA flags will be returned. If the input
        coordinates include distances, the QA flags will be :obj:`"converged"`
        (whether or not the line-of-sight fit converged in a given pixel) and
        :obj:`"reliable_dist"` (whether or not the requested distance is within the
        range considered reliable, based on the inferred stellar distances). If the
        input coordinates do not include distances, then instead of
        :obj:`"reliable_dist"`, the flags will include :obj:`"min_reliable_distmod"`
        and :obj:`"max_reliable_distmod"`, the minimum and maximum reliable distance
        moduli in the given pixel.
    """
    # Check that the query mode is supported
    self._raise_on_mode(mode)

    # Validate percentile specification
    pct, scalar_pct = self._interpret_percentile(mode, pct)

    # Get number of coordinates requested
    n_coords_ret = coords.shape[0]

    # Determine if distance has been requested
    has_dist = hasattr(coords.distance, 'kpc')
    d = coords.distance.kpc if has_dist else None

    # Extract the correct angular pixel(s)
    # t0 = time.time()
    pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
    in_bounds_idx = (pix_idx != -1)
    # t1 = time.time()

    # Extract the correct samples
    if mode == 'random_sample':
        # A different sample in each queried coordinate
        samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
        n_samp_ret = 1
    elif mode == 'random_sample_per_pix':
        # Choose same sample in all coordinates that fall in same angular
        # HEALPix pixel
        samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
        n_samp_ret = 1
    elif mode == 'best':
        samp_idx = slice(None)
        n_samp_ret = 1
    else:
        # Return all samples in each queried coordinate
        samp_idx = slice(None)
        n_samp_ret = self._n_samples

    # t2 = time.time()

    if mode == 'best':
        val = self._best_fit
    else:
        val = self._samples

    # Create empty array to store flags
    if return_flags:
        if has_dist:
            # If distances are provided in query, return only convergence and
            # whether or not this distance is reliable
            dtype = [('converged', 'bool'),
                     ('reliable_dist', 'bool')]
            # shape = (n_coords_ret)
        else:
            # Return convergence and reliable distance ranges
            dtype = [('converged', 'bool'),
                     ('min_reliable_distmod', 'f4'),
                     ('max_reliable_distmod', 'f4')]
        flags = np.empty(n_coords_ret, dtype=dtype)
        # samples = self._samples[pix_idx, samp_idx]
        # samples[pix_idx == -1] = np.nan

    # t3 = time.time()

    # Extract the correct distance bin (possibly using linear interpolation)
    if has_dist:  # Distance has been provided
        # Determine ceiling bin index for each coordinate
        dm = 5. * (np.log10(d) + 2.)
        bin_idx_ceil = np.searchsorted(self._DM_bin_edges, dm)

        # Create NaN-filled return arrays
        if isinstance(samp_idx, slice):
            ret = np.full((n_coords_ret, n_samp_ret), np.nan, dtype='f4')
        else:
            ret = np.full((n_coords_ret,), np.nan, dtype='f4')

        # d < d(nearest distance slice)
        idx_near = (bin_idx_ceil == 0) & in_bounds_idx
        if np.any(idx_near):
            a = 10.**(0.2 * (dm[idx_near] - self._DM_bin_edges[0]))
            if isinstance(samp_idx, slice):
                ret[idx_near] = (
                    a[:,None] * val[pix_idx[idx_near], samp_idx, 0])
            else:
                # print('idx_near: {} true'.format(np.sum(idx_near)))
                # print('ret[idx_near].shape = {}'.format(ret[idx_near].shape))
                # print('val.shape = {}'.format(val.shape))
                # print('pix_idx[idx_near].shape = {}'.format(pix_idx[idx_near].shape))
                ret[idx_near] = (
                    a * val[pix_idx[idx_near], samp_idx[idx_near], 0])

        # d > d(farthest distance slice)
        idx_far = (bin_idx_ceil == self._n_distances) & in_bounds_idx
        if np.any(idx_far):
            # print('idx_far: {} true'.format(np.sum(idx_far)))
            # print('pix_idx[idx_far].shape = {}'.format(pix_idx[idx_far].shape))
            # print('ret[idx_far].shape = {}'.format(ret[idx_far].shape))
            # print('val.shape = {}'.format(val.shape))
            if isinstance(samp_idx, slice):
                ret[idx_far] = val[pix_idx[idx_far], samp_idx, -1]
            else:
                ret[idx_far] = val[pix_idx[idx_far], samp_idx[idx_far], -1]

        # d(nearest distance slice) < d < d(farthest distance slice)
        idx_btw = ~idx_near & ~idx_far & in_bounds_idx
        if np.any(idx_btw):
            DM_ceil = self._DM_bin_edges[bin_idx_ceil[idx_btw]]
            DM_floor = self._DM_bin_edges[bin_idx_ceil[idx_btw]-1]
            a = (DM_ceil - dm[idx_btw]) / (DM_ceil - DM_floor)
            if isinstance(samp_idx, slice):
                ret[idx_btw] = (
                    (1.-a[:,None]) * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]]
                    + a[:,None] * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]-1]
                )
            else:
                ret[idx_btw] = (
                    (1.-a) * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]]
                    + a * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]-1]
                )

        # Flag: distance in reliable range?
        if return_flags:
            dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
            dm_max = self._pixel_info['DM_reliable_max'][pix_idx]

            flags['reliable_dist'] = (
                (dm >= dm_min) &
                (dm <= dm_max) &
                np.isfinite(dm_min) &
                np.isfinite(dm_max))
            flags['reliable_dist'][~in_bounds_idx] = False
    else:  # No distances provided
        ret = val[pix_idx, samp_idx, :]  # Return all distances
        ret[~in_bounds_idx] = np.nan

        # Flag: reliable distance bounds
        if return_flags:
            dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
            dm_max = self._pixel_info['DM_reliable_max'][pix_idx]

            flags['min_reliable_distmod'] = dm_min
            flags['max_reliable_distmod'] = dm_max
            flags['min_reliable_distmod'][~in_bounds_idx] = np.nan
            flags['max_reliable_distmod'][~in_bounds_idx] = np.nan

    # t4 = time.time()

    # Flag: convergence
    if return_flags:
        flags['converged'] = (
            self._pixel_info['converged'][pix_idx].astype(np.bool))
        flags['converged'][~in_bounds_idx] = False

    # t5 = time.time()

    # Reduce the samples in the requested manner
    if mode == 'median':
        ret = np.median(ret, axis=1)
    elif mode == 'mean':
        ret = np.mean(ret, axis=1)
    elif mode == 'percentile':
        ret = np.nanpercentile(ret, pct, axis=1)
        if not scalar_pct:
            # (percentile, pixel) -> (pixel, percentile)
            # (pctile, pixel, distance) -> (pixel, distance, pctile)
            ret = np.moveaxis(ret, 0, -1)
    elif mode == 'best':
        # Remove "samples" axis
        s = ret.shape
        ret.shape = s[:1] + s[2:]
    elif mode == 'samples':
        # Swap sample and distance axes to be consistent with other 3D dust
        # maps. The output shape will be (pixel, distance, sample).
        if not has_dist:
            np.swapaxes(ret, 1, 2)

    # t6 = time.time()
    #
    # print('')
    # print('time inside bayestar.query: {:.4f} s'.format(t6-t0))
    # print('{: >7.4f} s : {: >6.4f} s : _find_data_idx'.format(t1-t0, t1-t0))
    # print('{: >7.4f} s : {: >6.4f} s : sample slice spec'.format(t2-t0, t2-t1))
    # print('{: >7.4f} s : {: >6.4f} s : create empty return flag array'.format(t3-t0, t3-t2))
    # print('{: >7.4f} s : {: >6.4f} s : extract results'.format(t4-t0, t4-t3))
    # print('{: >7.4f} s : {: >6.4f} s : convergence flag'.format(t5-t0, t5-t4))
    # print('{: >7.4f} s : {: >6.4f} s : reduce'.format(t6-t0, t6-t5))
    # print('')

    if return_flags:
        return ret, flags

    return ret
python
def query(self, coords, mode='random_sample', return_flags=False, pct=None):
    """
    Returns reddening at the requested coordinates. There are several different query
    modes, which handle the probabilistic nature of the map differently. See the
    whole_func_string field above for the full docstring; the code body below is
    identical.
    """
    # Check that the query mode is supported
    self._raise_on_mode(mode)

    # Validate percentile specification
    pct, scalar_pct = self._interpret_percentile(mode, pct)

    # Get number of coordinates requested
    n_coords_ret = coords.shape[0]

    # Determine if distance has been requested
    has_dist = hasattr(coords.distance, 'kpc')
    d = coords.distance.kpc if has_dist else None

    # Extract the correct angular pixel(s)
    pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
    in_bounds_idx = (pix_idx != -1)

    # Extract the correct samples
    if mode == 'random_sample':
        # A different sample in each queried coordinate
        samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
        n_samp_ret = 1
    elif mode == 'random_sample_per_pix':
        # Choose same sample in all coordinates that fall in same angular
        # HEALPix pixel
        samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
        n_samp_ret = 1
    elif mode == 'best':
        samp_idx = slice(None)
        n_samp_ret = 1
    else:
        # Return all samples in each queried coordinate
        samp_idx = slice(None)
        n_samp_ret = self._n_samples

    if mode == 'best':
        val = self._best_fit
    else:
        val = self._samples

    # Create empty array to store flags
    if return_flags:
        if has_dist:
            # If distances are provided in query, return only convergence and
            # whether or not this distance is reliable
            dtype = [('converged', 'bool'),
                     ('reliable_dist', 'bool')]
        else:
            # Return convergence and reliable distance ranges
            dtype = [('converged', 'bool'),
                     ('min_reliable_distmod', 'f4'),
                     ('max_reliable_distmod', 'f4')]
        flags = np.empty(n_coords_ret, dtype=dtype)

    # Extract the correct distance bin (possibly using linear interpolation)
    if has_dist:  # Distance has been provided
        # Determine ceiling bin index for each coordinate
        dm = 5. * (np.log10(d) + 2.)
        bin_idx_ceil = np.searchsorted(self._DM_bin_edges, dm)

        # Create NaN-filled return arrays
        if isinstance(samp_idx, slice):
            ret = np.full((n_coords_ret, n_samp_ret), np.nan, dtype='f4')
        else:
            ret = np.full((n_coords_ret,), np.nan, dtype='f4')

        # d < d(nearest distance slice)
        idx_near = (bin_idx_ceil == 0) & in_bounds_idx
        if np.any(idx_near):
            a = 10.**(0.2 * (dm[idx_near] - self._DM_bin_edges[0]))
            if isinstance(samp_idx, slice):
                ret[idx_near] = (
                    a[:,None] * val[pix_idx[idx_near], samp_idx, 0])
            else:
                ret[idx_near] = (
                    a * val[pix_idx[idx_near], samp_idx[idx_near], 0])

        # d > d(farthest distance slice)
        idx_far = (bin_idx_ceil == self._n_distances) & in_bounds_idx
        if np.any(idx_far):
            if isinstance(samp_idx, slice):
                ret[idx_far] = val[pix_idx[idx_far], samp_idx, -1]
            else:
                ret[idx_far] = val[pix_idx[idx_far], samp_idx[idx_far], -1]

        # d(nearest distance slice) < d < d(farthest distance slice)
        idx_btw = ~idx_near & ~idx_far & in_bounds_idx
        if np.any(idx_btw):
            DM_ceil = self._DM_bin_edges[bin_idx_ceil[idx_btw]]
            DM_floor = self._DM_bin_edges[bin_idx_ceil[idx_btw]-1]
            a = (DM_ceil - dm[idx_btw]) / (DM_ceil - DM_floor)
            if isinstance(samp_idx, slice):
                ret[idx_btw] = (
                    (1.-a[:,None]) * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]]
                    + a[:,None] * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]-1]
                )
            else:
                ret[idx_btw] = (
                    (1.-a) * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]]
                    + a * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]-1]
                )

        # Flag: distance in reliable range?
        if return_flags:
            dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
            dm_max = self._pixel_info['DM_reliable_max'][pix_idx]

            flags['reliable_dist'] = (
                (dm >= dm_min) &
                (dm <= dm_max) &
                np.isfinite(dm_min) &
                np.isfinite(dm_max))
            flags['reliable_dist'][~in_bounds_idx] = False
    else:  # No distances provided
        ret = val[pix_idx, samp_idx, :]  # Return all distances
        ret[~in_bounds_idx] = np.nan

        # Flag: reliable distance bounds
        if return_flags:
            dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
            dm_max = self._pixel_info['DM_reliable_max'][pix_idx]

            flags['min_reliable_distmod'] = dm_min
            flags['max_reliable_distmod'] = dm_max
            flags['min_reliable_distmod'][~in_bounds_idx] = np.nan
            flags['max_reliable_distmod'][~in_bounds_idx] = np.nan

    # Flag: convergence
    if return_flags:
        flags['converged'] = (
            self._pixel_info['converged'][pix_idx].astype(np.bool))
        flags['converged'][~in_bounds_idx] = False

    # Reduce the samples in the requested manner
    if mode == 'median':
        ret = np.median(ret, axis=1)
    elif mode == 'mean':
        ret = np.mean(ret, axis=1)
    elif mode == 'percentile':
        ret = np.nanpercentile(ret, pct, axis=1)
        if not scalar_pct:
            # (percentile, pixel) -> (pixel, percentile)
            # (pctile, pixel, distance) -> (pixel, distance, pctile)
            ret = np.moveaxis(ret, 0, -1)
    elif mode == 'best':
        # Remove "samples" axis
        s = ret.shape
        ret.shape = s[:1] + s[2:]
    elif mode == 'samples':
        # Swap sample and distance axes to be consistent with other 3D dust
        # maps. The output shape will be (pixel, distance, sample).
        if not has_dist:
            np.swapaxes(ret, 1, 2)

    if return_flags:
        return ret, flags

    return ret
['def', 'query', '(', 'self', ',', 'coords', ',', 'mode', '=', "'random_sample'", ',', 'return_flags', '=', 'False', ',', 'pct', '=', 'None', ')', ':', '# Check that the query mode is supported', 'self', '.', '_raise_on_mode', '(', 'mode', ')', '# Validate percentile specification', 'pct', ',', 'scalar_pct', '=', 'self', '.', '_interpret_percentile', '(', 'mode', ',', 'pct', ')', '# Get number of coordinates requested', 'n_coords_ret', '=', 'coords', '.', 'shape', '[', '0', ']', '# Determine if distance has been requested', 'has_dist', '=', 'hasattr', '(', 'coords', '.', 'distance', ',', "'kpc'", ')', 'd', '=', 'coords', '.', 'distance', '.', 'kpc', 'if', 'has_dist', 'else', 'None', '# Extract the correct angular pixel(s)', '# t0 = time.time()', 'pix_idx', '=', 'self', '.', '_find_data_idx', '(', 'coords', '.', 'l', '.', 'deg', ',', 'coords', '.', 'b', '.', 'deg', ')', 'in_bounds_idx', '=', '(', 'pix_idx', '!=', '-', '1', ')', '# t1 = time.time()', '# Extract the correct samples', 'if', 'mode', '==', "'random_sample'", ':', '# A different sample in each queried coordinate', 'samp_idx', '=', 'np', '.', 'random', '.', 'randint', '(', '0', ',', 'self', '.', '_n_samples', ',', 'pix_idx', '.', 'size', ')', 'n_samp_ret', '=', '1', 'elif', 'mode', '==', "'random_sample_per_pix'", ':', '# Choose same sample in all coordinates that fall in same angular', '# HEALPix pixel', 'samp_idx', '=', 'np', '.', 'random', '.', 'randint', '(', '0', ',', 'self', '.', '_n_samples', ',', 'self', '.', '_n_pix', ')', '[', 'pix_idx', ']', 'n_samp_ret', '=', '1', 'elif', 'mode', '==', "'best'", ':', 'samp_idx', '=', 'slice', '(', 'None', ')', 'n_samp_ret', '=', '1', 'else', ':', '# Return all samples in each queried coordinate', 'samp_idx', '=', 'slice', '(', 'None', ')', 'n_samp_ret', '=', 'self', '.', '_n_samples', '# t2 = time.time()', 'if', 'mode', '==', "'best'", ':', 'val', '=', 'self', '.', '_best_fit', 'else', ':', 'val', '=', 'self', '.', '_samples', '# Create empty array to store flags', 'if', 'return_flags', ':', 'if', 'has_dist', ':', '# If distances are provided in query, return only covergence and', '# whether or not this distance is reliable', 'dtype', '=', '[', '(', "'converged'", ',', "'bool'", ')', ',', '(', "'reliable_dist'", ',', "'bool'", ')', ']', '# shape = (n_coords_ret)', 'else', ':', '# Return convergence and reliable distance ranges', 'dtype', '=', '[', '(', "'converged'", ',', "'bool'", ')', ',', '(', "'min_reliable_distmod'", ',', "'f4'", ')', ',', '(', "'max_reliable_distmod'", ',', "'f4'", ')', ']', 'flags', '=', 'np', '.', 'empty', '(', 'n_coords_ret', ',', 'dtype', '=', 'dtype', ')', '# samples = self._samples[pix_idx, samp_idx]', '# samples[pix_idx == -1] = np.nan', '# t3 = time.time()', '# Extract the correct distance bin (possibly using linear interpolation)', 'if', 'has_dist', ':', '# Distance has been provided', '# Determine ceiling bin index for each coordinate', 'dm', '=', '5.', '*', '(', 'np', '.', 'log10', '(', 'd', ')', '+', '2.', ')', 'bin_idx_ceil', '=', 'np', '.', 'searchsorted', '(', 'self', '.', '_DM_bin_edges', ',', 'dm', ')', '# Create NaN-filled return arrays', 'if', 'isinstance', '(', 'samp_idx', ',', 'slice', ')', ':', 'ret', '=', 'np', '.', 'full', '(', '(', 'n_coords_ret', ',', 'n_samp_ret', ')', ',', 'np', '.', 'nan', ',', 'dtype', '=', "'f4'", ')', 'else', ':', 'ret', '=', 'np', '.', 'full', '(', '(', 'n_coords_ret', ',', ')', ',', 'np', '.', 'nan', ',', 'dtype', '=', "'f4'", ')', '# d < d(nearest distance slice)', 'idx_near', '=', '(', 'bin_idx_ceil', '==', 
'0', ')', '&', 'in_bounds_idx', 'if', 'np', '.', 'any', '(', 'idx_near', ')', ':', 'a', '=', '10.', '**', '(', '0.2', '*', '(', 'dm', '[', 'idx_near', ']', '-', 'self', '.', '_DM_bin_edges', '[', '0', ']', ')', ')', 'if', 'isinstance', '(', 'samp_idx', ',', 'slice', ')', ':', 'ret', '[', 'idx_near', ']', '=', '(', 'a', '[', ':', ',', 'None', ']', '*', 'val', '[', 'pix_idx', '[', 'idx_near', ']', ',', 'samp_idx', ',', '0', ']', ')', 'else', ':', "# print('idx_near: {} true'.format(np.sum(idx_near)))", "# print('ret[idx_near].shape = {}'.format(ret[idx_near].shape))", "# print('val.shape = {}'.format(val.shape))", "# print('pix_idx[idx_near].shape = {}'.format(pix_idx[idx_near].shape))", 'ret', '[', 'idx_near', ']', '=', '(', 'a', '*', 'val', '[', 'pix_idx', '[', 'idx_near', ']', ',', 'samp_idx', '[', 'idx_near', ']', ',', '0', ']', ')', '# d > d(farthest distance slice)', 'idx_far', '=', '(', 'bin_idx_ceil', '==', 'self', '.', '_n_distances', ')', '&', 'in_bounds_idx', 'if', 'np', '.', 'any', '(', 'idx_far', ')', ':', "# print('idx_far: {} true'.format(np.sum(idx_far)))", "# print('pix_idx[idx_far].shape = {}'.format(pix_idx[idx_far].shape))", "# print('ret[idx_far].shape = {}'.format(ret[idx_far].shape))", "# print('val.shape = {}'.format(val.shape))", 'if', 'isinstance', '(', 'samp_idx', ',', 'slice', ')', ':', 'ret', '[', 'idx_far', ']', '=', 'val', '[', 'pix_idx', '[', 'idx_far', ']', ',', 'samp_idx', ',', '-', '1', ']', 'else', ':', 'ret', '[', 'idx_far', ']', '=', 'val', '[', 'pix_idx', '[', 'idx_far', ']', ',', 'samp_idx', '[', 'idx_far', ']', ',', '-', '1', ']', '# d(nearest distance slice) < d < d(farthest distance slice)', 'idx_btw', '=', '~', 'idx_near', '&', '~', 'idx_far', '&', 'in_bounds_idx', 'if', 'np', '.', 'any', '(', 'idx_btw', ')', ':', 'DM_ceil', '=', 'self', '.', '_DM_bin_edges', '[', 'bin_idx_ceil', '[', 'idx_btw', ']', ']', 'DM_floor', '=', 'self', '.', '_DM_bin_edges', '[', 'bin_idx_ceil', '[', 'idx_btw', ']', '-', '1', ']', 'a', '=', '(', 'DM_ceil', '-', 'dm', '[', 'idx_btw', ']', ')', '/', '(', 'DM_ceil', '-', 'DM_floor', ')', 'if', 'isinstance', '(', 'samp_idx', ',', 'slice', ')', ':', 'ret', '[', 'idx_btw', ']', '=', '(', '(', '1.', '-', 'a', '[', ':', ',', 'None', ']', ')', '*', 'val', '[', 'pix_idx', '[', 'idx_btw', ']', ',', 'samp_idx', ',', 'bin_idx_ceil', '[', 'idx_btw', ']', ']', '+', 'a', '[', ':', ',', 'None', ']', '*', 'val', '[', 'pix_idx', '[', 'idx_btw', ']', ',', 'samp_idx', ',', 'bin_idx_ceil', '[', 'idx_btw', ']', '-', '1', ']', ')', 'else', ':', 'ret', '[', 'idx_btw', ']', '=', '(', '(', '1.', '-', 'a', ')', '*', 'val', '[', 'pix_idx', '[', 'idx_btw', ']', ',', 'samp_idx', '[', 'idx_btw', ']', ',', 'bin_idx_ceil', '[', 'idx_btw', ']', ']', '+', 'a', '*', 'val', '[', 'pix_idx', '[', 'idx_btw', ']', ',', 'samp_idx', '[', 'idx_btw', ']', ',', 'bin_idx_ceil', '[', 'idx_btw', ']', '-', '1', ']', ')', '# Flag: distance in reliable range?', 'if', 'return_flags', ':', 'dm_min', '=', 'self', '.', '_pixel_info', '[', "'DM_reliable_min'", ']', '[', 'pix_idx', ']', 'dm_max', '=', 'self', '.', '_pixel_info', '[', "'DM_reliable_max'", ']', '[', 'pix_idx', ']', 'flags', '[', "'reliable_dist'", ']', '=', '(', '(', 'dm', '>=', 'dm_min', ')', '&', '(', 'dm', '<=', 'dm_max', ')', '&', 'np', '.', 'isfinite', '(', 'dm_min', ')', '&', 'np', '.', 'isfinite', '(', 'dm_max', ')', ')', 'flags', '[', "'reliable_dist'", ']', '[', '~', 'in_bounds_idx', ']', '=', 'False', 'else', ':', '# No distances provided', 'ret', '=', 'val', '[', 'pix_idx', ',', 'samp_idx', ',', ':', 
']', '# Return all distances', 'ret', '[', '~', 'in_bounds_idx', ']', '=', 'np', '.', 'nan', '# Flag: reliable distance bounds', 'if', 'return_flags', ':', 'dm_min', '=', 'self', '.', '_pixel_info', '[', "'DM_reliable_min'", ']', '[', 'pix_idx', ']', 'dm_max', '=', 'self', '.', '_pixel_info', '[', "'DM_reliable_max'", ']', '[', 'pix_idx', ']', 'flags', '[', "'min_reliable_distmod'", ']', '=', 'dm_min', 'flags', '[', "'max_reliable_distmod'", ']', '=', 'dm_max', 'flags', '[', "'min_reliable_distmod'", ']', '[', '~', 'in_bounds_idx', ']', '=', 'np', '.', 'nan', 'flags', '[', "'max_reliable_distmod'", ']', '[', '~', 'in_bounds_idx', ']', '=', 'np', '.', 'nan', '# t4 = time.time()', '# Flag: convergence', 'if', 'return_flags', ':', 'flags', '[', "'converged'", ']', '=', '(', 'self', '.', '_pixel_info', '[', "'converged'", ']', '[', 'pix_idx', ']', '.', 'astype', '(', 'np', '.', 'bool', ')', ')', 'flags', '[', "'converged'", ']', '[', '~', 'in_bounds_idx', ']', '=', 'False', '# t5 = time.time()', '# Reduce the samples in the requested manner', 'if', 'mode', '==', "'median'", ':', 'ret', '=', 'np', '.', 'median', '(', 'ret', ',', 'axis', '=', '1', ')', 'elif', 'mode', '==', "'mean'", ':', 'ret', '=', 'np', '.', 'mean', '(', 'ret', ',', 'axis', '=', '1', ')', 'elif', 'mode', '==', "'percentile'", ':', 'ret', '=', 'np', '.', 'nanpercentile', '(', 'ret', ',', 'pct', ',', 'axis', '=', '1', ')', 'if', 'not', 'scalar_pct', ':', '# (percentile, pixel) -> (pixel, percentile)', '# (pctile, pixel, distance) -> (pixel, distance, pctile)', 'ret', '=', 'np', '.', 'moveaxis', '(', 'ret', ',', '0', ',', '-', '1', ')', 'elif', 'mode', '==', "'best'", ':', '# Remove "samples" axis', 's', '=', 'ret', '.', 'shape', 'ret', '.', 'shape', '=', 's', '[', ':', '1', ']', '+', 's', '[', '2', ':', ']', 'elif', 'mode', '==', "'samples'", ':', '# Swap sample and distance axes to be consistent with other 3D dust', '# maps. The output shape will be (pixel, distance, sample).', 'if', 'not', 'has_dist', ':', 'np', '.', 'swapaxes', '(', 'ret', ',', '1', ',', '2', ')', '# t6 = time.time()', '#', "# print('')", "# print('time inside bayestar.query: {:.4f} s'.format(t6-t0))", "# print('{: >7.4f} s : {: >6.4f} s : _find_data_idx'.format(t1-t0, t1-t0))", "# print('{: >7.4f} s : {: >6.4f} s : sample slice spec'.format(t2-t0, t2-t1))", "# print('{: >7.4f} s : {: >6.4f} s : create empty return flag array'.format(t3-t0, t3-t2))", "# print('{: >7.4f} s : {: >6.4f} s : extract results'.format(t4-t0, t4-t3))", "# print('{: >7.4f} s : {: >6.4f} s : convergence flag'.format(t5-t0, t5-t4))", "# print('{: >7.4f} s : {: >6.4f} s : reduce'.format(t6-t0, t6-t5))", "# print('')", 'if', 'return_flags', ':', 'return', 'ret', ',', 'flags', 'return', 'ret']
Returns reddening at the requested coordinates. There are several
different query modes, which handle the probabilistic nature of the map
differently.

Args:
    coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to
        query.
    mode (Optional[:obj:`str`]): Seven different query modes are
        available: 'random_sample', 'random_sample_per_pix', 'samples',
        'median', 'mean', 'best' and 'percentile'. The :obj:`mode`
        determines how the output will reflect the probabilistic nature
        of the Bayestar dust maps.
    return_flags (Optional[:obj:`bool`]): If :obj:`True`, then QA flags
        will be returned in a second numpy structured array. That is,
        the query will return :obj:`ret`, :obj:`flags`, where :obj:`ret`
        is the normal return value, containing reddening. Defaults to
        :obj:`False`.
    pct (Optional[:obj:`float` or list/array of :obj:`float`]): If the
        mode is :obj:`percentile`, then :obj:`pct` specifies which
        percentile(s) is (are) returned.

Returns:
    Reddening at the specified coordinates, in magnitudes of reddening.

    The conversion to E(B-V) (or other reddening units) depends on
    whether :obj:`version='bayestar2017'` (the default) or
    :obj:`'bayestar2015'` was selected when the :obj:`BayestarQuery`
    object was created. To convert Bayestar2017 to Pan-STARRS 1
    extinctions, multiply by the coefficients given in Table 1 of
    Green et al. (2018). Conversion to extinction in non-PS1 passbands
    depends on the choice of extinction law. To convert Bayestar2015 to
    extinction in various passbands, multiply by the coefficients in
    Table 6 of Schlafly & Finkbeiner (2011). See Green et al.
    (2015, 2018) for more detailed discussion of how to convert the
    Bayestar dust maps into reddenings or extinctions in different
    passbands.

    The shape of the output depends on the :obj:`mode`, and on whether
    :obj:`coords` contains distances.

    If :obj:`coords` does not specify distance(s), then the shape of the
    output begins with :obj:`coords.shape`. If :obj:`coords` does
    specify distance(s), then the shape of the output begins with
    :obj:`coords.shape + ([number of distance bins],)`.

    If :obj:`mode` is :obj:`'random_sample'`, then at each
    coordinate/distance, a random sample of reddening is given.

    If :obj:`mode` is :obj:`'random_sample_per_pix'`, then the sample
    chosen for each angular pixel of the map will be consistent. For
    example, if two query coordinates lie in the same map pixel, then
    the same random sample will be chosen from the map for both query
    coordinates.

    If :obj:`mode` is :obj:`'median'`, then at each coordinate/distance,
    the median reddening is returned.

    If :obj:`mode` is :obj:`'mean'`, then at each coordinate/distance,
    the mean reddening is returned.

    If :obj:`mode` is :obj:`'best'`, then at each coordinate/distance,
    the maximum posterior density reddening is returned (the
    "best fit").

    If :obj:`mode` is :obj:`'percentile'`, then an additional keyword
    argument, :obj:`pct`, must be specified. At each
    coordinate/distance, the requested percentiles (in :obj:`pct`) will
    be returned. If :obj:`pct` is a list/array, then the last axis of
    the output will correspond to different percentiles.

    Finally, if :obj:`mode` is :obj:`'samples'`, then at each
    coordinate/distance, all samples are returned. The last axis of the
    output will correspond to different samples.

    If :obj:`return_flags` is :obj:`True`, then in addition to
    reddening, a structured array containing QA flags will be returned.
    If the input coordinates include distances, the QA flags will be
    :obj:`"converged"` (whether or not the line-of-sight fit converged
    in a given pixel) and :obj:`"reliable_dist"` (whether or not the
    requested distance is within the range considered reliable, based on
    the inferred stellar distances). If the input coordinates do not
    include distances, then instead of :obj:`"reliable_dist"`, the flags
    will include :obj:`"min_reliable_distmod"` and
    :obj:`"max_reliable_distmod"`, the minimum and maximum reliable
    distance moduli in the given pixel.
['Returns', 'reddening', 'at', 'the', 'requested', 'coordinates', '.', 'There', 'are', 'several', 'different', 'query', 'modes', 'which', 'handle', 'the', 'probabilistic', 'nature', 'of', 'the', 'map', 'differently', '.']
train
https://github.com/gregreen/dustmaps/blob/c8f571a71da0d951bf8ea865621bee14492bdfd9/dustmaps/bayestar.py#L255-L533
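The `query` method above is normally reached through a `BayestarQuery` instance. A minimal usage sketch, assuming the Bayestar data files have already been downloaded via `dustmaps` (the coordinate values below are arbitrary illustrations):

from astropy.coordinates import SkyCoord
import astropy.units as u
from dustmaps.bayestar import BayestarQuery

# Assumes the Bayestar data file has already been fetched
bayestar = BayestarQuery(version='bayestar2017')

# Galactic coordinates with distances: one reddening value per coordinate
coords = SkyCoord(l=[90., 150.] * u.deg, b=[10., 12.] * u.deg,
                  distance=[500., 2000.] * u.pc, frame='galactic')
reddening, flags = bayestar.query(coords, mode='median', return_flags=True)
print(reddening.shape)         # (2,)
print(flags['converged'])      # line-of-sight fit convergence per pixel
print(flags['reliable_dist'])  # requested distance within the reliable range?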
5,688
spacetelescope/stsci.tools
lib/stsci/tools/editpar.py
EditParDialog.checkSetSaveChildren
def checkSetSaveChildren(self, doSave=True):
    """ Check, then set, then save the parameter settings for
    all child (pset) windows.

    Prompts if any problems are found.  Returns None
    on success, list of bad entries on failure.
    """
    if self.isChild:
        return

    # Need to get all the entries and verify them.
    # Save the children in backwards order to coincide with the
    # display of the dialogs (LIFO)
    for n in range(len(self.top.childList)-1, -1, -1):
        self.badEntriesList = self.top.childList[n]. \
                              checkSetSaveEntries(doSave=doSave)
        if self.badEntriesList:
            ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                          self.top.childList[n].taskName)
            if not ansOKCANCEL:
                return self.badEntriesList
        # If there were no invalid entries or the user says OK,
        # close down the child and increment to the next child
        self.top.childList[n].top.focus_set()
        self.top.childList[n].top.withdraw()
        del self.top.childList[n]

    # all windows saved successfully
    return
python
['def', 'checkSetSaveChildren', '(', 'self', ',', 'doSave', '=', 'True', ')', ':', 'if', 'self', '.', 'isChild', ':', 'return', '# Need to get all the entries and verify them.', '# Save the children in backwards order to coincide with the', '# display of the dialogs (LIFO)', 'for', 'n', 'in', 'range', '(', 'len', '(', 'self', '.', 'top', '.', 'childList', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'self', '.', 'badEntriesList', '=', 'self', '.', 'top', '.', 'childList', '[', 'n', ']', '.', 'checkSetSaveEntries', '(', 'doSave', '=', 'doSave', ')', 'if', 'self', '.', 'badEntriesList', ':', 'ansOKCANCEL', '=', 'self', '.', 'processBadEntries', '(', 'self', '.', 'badEntriesList', ',', 'self', '.', 'top', '.', 'childList', '[', 'n', ']', '.', 'taskName', ')', 'if', 'not', 'ansOKCANCEL', ':', 'return', 'self', '.', 'badEntriesList', '# If there were no invalid entries or the user says OK,', '# close down the child and increment to the next child', 'self', '.', 'top', '.', 'childList', '[', 'n', ']', '.', 'top', '.', 'focus_set', '(', ')', 'self', '.', 'top', '.', 'childList', '[', 'n', ']', '.', 'top', '.', 'withdraw', '(', ')', 'del', 'self', '.', 'top', '.', 'childList', '[', 'n', ']', '# all windows saved successfully', 'return']
Check, then set, then save the parameter settings for all child (pset) windows. Prompts if any problems are found. Returns None on success, list of bad entries on failure.
['Check', 'then', 'set', 'then', 'save', 'the', 'parameter', 'settings', 'for', 'all', 'child', '(', 'pset', ')', 'windows', '.']
train
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/editpar.py#L1658-L1685
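The backwards iteration above is load-bearing: deleting `childList[n]` while walking from the end leaves the indices still to be visited untouched. A standalone sketch of the same check-then-close pattern, with `validate` and `close` as hypothetical stand-ins for `checkSetSaveEntries()` and the Tk `withdraw()` call:

def close_children_lifo(children, validate, close):
    # Walk from the last-opened child back to the first (LIFO order)
    for n in range(len(children) - 1, -1, -1):
        bad_entries = validate(children[n])
        if bad_entries:
            return bad_entries   # abort; remaining children stay open
        close(children[n])
        del children[n]          # safe: only lower indices remain unvisited
    return None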
5,689
IdentityPython/pysaml2
src/saml2/argtree.py
add_path
def add_path(tdict, path):
    """
    Create or extend an argument tree `tdict` from `path`.

    :param tdict: a dictionary representing an argument tree
    :param path: a path list
    :return: a dictionary

    Convert a list of items in a 'path' into a nested dict, where the
    second to last item becomes the key for the final item. The remaining
    items in the path become keys in the nested dict around that final
    pair of items.

    For example, for input values of:

        tdict={}
        path=['assertion', 'subject', 'subject_confirmation', 'method',
              'urn:oasis:names:tc:SAML:2.0:cm:bearer']

    Returns an output value of:

        {'assertion': {'subject': {'subject_confirmation':
            {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}}

    Another example, this time with a non-empty tdict input:

        tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'},
        path=['subject_confirmation_data', 'in_response_to', '_012345']

    Returns an output value of:

        {'subject_confirmation_data': {'in_response_to': '_012345'},
         'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}
    """
    t = tdict
    for step in path[:-2]:
        try:
            t = t[step]
        except KeyError:
            t[step] = {}
            t = t[step]
    t[path[-2]] = path[-1]
    return tdict
python
['def', 'add_path', '(', 'tdict', ',', 'path', ')', ':', 't', '=', 'tdict', 'for', 'step', 'in', 'path', '[', ':', '-', '2', ']', ':', 'try', ':', 't', '=', 't', '[', 'step', ']', 'except', 'KeyError', ':', 't', '[', 'step', ']', '=', '{', '}', 't', '=', 't', '[', 'step', ']', 't', '[', 'path', '[', '-', '2', ']', ']', '=', 'path', '[', '-', '1', ']', 'return', 'tdict']
Create or extend an argument tree `tdict` from `path`.

:param tdict: a dictionary representing an argument tree
:param path: a path list
:return: a dictionary

Convert a list of items in a 'path' into a nested dict, where the second to last item becomes the key for the final item. The remaining items in the path become keys in the nested dict around that final pair of items.

For example, for input values of:

    tdict={}
    path=['assertion', 'subject', 'subject_confirmation', 'method',
          'urn:oasis:names:tc:SAML:2.0:cm:bearer']

Returns an output value of:

    {'assertion': {'subject': {'subject_confirmation':
        {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}}

Another example, this time with a non-empty tdict input:

    tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'},
    path=['subject_confirmation_data', 'in_response_to', '_012345']

Returns an output value of:

    {'subject_confirmation_data': {'in_response_to': '_012345'},
     'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}
['Create', 'or', 'extend', 'an', 'argument', 'tree', 'tdict', 'from', 'path', '.']
train
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/argtree.py#L54-L94
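The docstring's two examples can be reproduced directly; a quick sketch (the values are taken from the examples above):

from saml2.argtree import add_path

# Build a nested argument tree from scratch
t1 = add_path({}, ['assertion', 'subject', 'subject_confirmation', 'method',
                   'urn:oasis:names:tc:SAML:2.0:cm:bearer'])
# {'assertion': {'subject': {'subject_confirmation':
#     {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}}

# Extend an existing tree in place
t2 = add_path({'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'},
              ['subject_confirmation_data', 'in_response_to', '_012345'])
# {'subject_confirmation_data': {'in_response_to': '_012345'},
#  'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}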
5,690
UCSBarchlab/PyRTL
pyrtl/rtllib/multipliers.py
complex_mult
def complex_mult(A, B, shifts, start):
    """ Generate a shift-and-add multiplier that can shift and add multiple
    bits per clock cycle. Uses substantially more space than `simple_mult()`
    but is much faster.

    :param WireVector A, B: two input wires for the multiplication
    :param int shifts: number of spaces Register is to be shifted per clk
        cycle (cannot be greater than the length of `A` or `B`)
    :param bool start: start signal
    :returns: Register containing the product; the "done" signal
    """
    alen = len(A)
    blen = len(B)
    areg = pyrtl.Register(alen)
    breg = pyrtl.Register(alen + blen)
    accum = pyrtl.Register(alen + blen)
    done = (areg == 0)  # Multiplication is finished when a becomes 0

    if (shifts > alen) or (shifts > blen):
        raise pyrtl.PyrtlError("shift is larger than one or both of the "
                               "parameters A or B, please choose a smaller shift")

    # During multiplication, shift a right every cycle 'shift' times,
    # shift b left every cycle 'shift' times
    with pyrtl.conditional_assignment:
        with start:  # initialization
            areg.next |= A
            breg.next |= B
            accum.next |= 0
        with ~done:  # don't run when there's no work to do
            # "Multiply" shifted breg by LSB of areg by cond. adding
            areg.next |= libutils._shifted_reg_next(areg, 'r', shifts)  # right shift
            breg.next |= libutils._shifted_reg_next(breg, 'l', shifts)  # left shift
            accum.next |= accum + _one_cycle_mult(areg, breg, shifts)

    return accum, done
python
['def', 'complex_mult', '(', 'A', ',', 'B', ',', 'shifts', ',', 'start', ')', ':', 'alen', '=', 'len', '(', 'A', ')', 'blen', '=', 'len', '(', 'B', ')', 'areg', '=', 'pyrtl', '.', 'Register', '(', 'alen', ')', 'breg', '=', 'pyrtl', '.', 'Register', '(', 'alen', '+', 'blen', ')', 'accum', '=', 'pyrtl', '.', 'Register', '(', 'alen', '+', 'blen', ')', 'done', '=', '(', 'areg', '==', '0', ')', '# Multiplication is finished when a becomes 0', 'if', '(', 'shifts', '>', 'alen', ')', 'or', '(', 'shifts', '>', 'blen', ')', ':', 'raise', 'pyrtl', '.', 'PyrtlError', '(', '"shift is larger than one or both of the parameters A or B,"', '"please choose smaller shift"', ')', "# During multiplication, shift a right every cycle 'shift' times,", "# shift b left every cycle 'shift' times", 'with', 'pyrtl', '.', 'conditional_assignment', ':', 'with', 'start', ':', '# initialization', 'areg', '.', 'next', '|=', 'A', 'breg', '.', 'next', '|=', 'B', 'accum', '.', 'next', '|=', '0', 'with', '~', 'done', ':', "# don't run when there's no work to do", '# "Multiply" shifted breg by LSB of areg by cond. adding', 'areg', '.', 'next', '|=', 'libutils', '.', '_shifted_reg_next', '(', 'areg', ',', "'r'", ',', 'shifts', ')', '# right shift', 'breg', '.', 'next', '|=', 'libutils', '.', '_shifted_reg_next', '(', 'breg', ',', "'l'", ',', 'shifts', ')', '# left shift', 'accum', '.', 'next', '|=', 'accum', '+', '_one_cycle_mult', '(', 'areg', ',', 'breg', ',', 'shifts', ')', 'return', 'accum', ',', 'done']
Generate shift-and-add multiplier that can shift and add multiple bits per clock cycle. Uses substantially more space than `simple_mult()` but is much faster. :param WireVector A, B: two input wires for the multiplication :param int shifts: number of spaces Register is to be shifted per clk cycle (cannot be greater than the length of `A` or `B`) :param bool start: start signal :returns: Register containing the product; the "done" signal
['Generate', 'shift', '-', 'and', '-', 'add', 'multiplier', 'that', 'can', 'shift', 'and', 'add', 'multiple', 'bits', 'per', 'clock', 'cycle', '.', 'Uses', 'substantially', 'more', 'space', 'than', 'simple_mult', '()', 'but', 'is', 'much', 'faster', '.']
train
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/multipliers.py#L67-L102
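A sketch of wiring this multiplier up and driving it in simulation. The operand values, bit widths, and the expectation that the product settles at 30 are illustrative, not taken from the repository:

import pyrtl
from pyrtl.rtllib import multipliers

a, b = pyrtl.Input(4, 'a'), pyrtl.Input(4, 'b')
start = pyrtl.Input(1, 'start')
product, done = pyrtl.Output(8, 'product'), pyrtl.Output(1, 'done')

# Shift 2 bit positions per clock cycle
accum_reg, done_wire = multipliers.complex_mult(a, b, 2, start)
product <<= accum_reg
done <<= done_wire

sim = pyrtl.Simulation()
sim.step({'a': 5, 'b': 6, 'start': 1})   # load the operands
while sim.inspect('done') == 0:          # run until areg has shifted to zero
    sim.step({'a': 0, 'b': 0, 'start': 0})
print(sim.inspect('product'))            # should settle at 5 * 6 = 30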
5,691
noahbenson/neuropythy
neuropythy/commands/surface_to_image.py
main
def main(args):
    '''
    surface_to_image.main(args) can be given a list of arguments, such as
    sys.argv[1:]; these arguments may include any options and must include
    exactly one subject id and one output filename. Additionally one or two
    surface input filenames must be given. The surface files are projected
    into the ribbon and written to the output filename. For more information
    see the string stored in surface_to_image.info.
    '''
    # Parse the arguments
    (args, opts) = _surface_to_ribbon_parser(args)
    # First, help?
    if opts['help']:
        print(info, file=sys.stdout)
        return 1
    # and if we are verbose, lets setup a note function
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s, file=sys.stdout)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # figure out our arguments:
    (lhfl, rhfl) = (opts['lh_file'], opts['rh_file'])
    if len(args) == 0:
        raise ValueError('Not enough arguments provided!')
    elif len(args) == 1:
        # must be that the subject is in the env?
        sub = find_subject_path(os.getenv('SUBJECT'))
        outfl = args[0]
    elif len(args) == 2:
        sbpth = find_subject_path(args[0])
        if sbpth is not None:
            sub = sbpth
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Given arg is not a subject: %s' % args[0])
        outfl = args[1]
    elif len(args) == 3:
        sbpth0 = find_subject_path(args[0])
        sbpth1 = find_subject_path(args[1])
        if sbpth0 is not None:
            sub = sbpth0
            if lhfl is not None: rhfl = args[1]
            elif rhfl is not None: lhfl = args[1]
            else: raise ValueError('Too many arguments given: %s' % args[1])
        elif sbpth1 is not None:
            sub = sbpth1
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Too many arguments given: %s' % args[0])
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None or rhfl is not None:
                raise ValueError('Too many arguments and no subject given')
            (lhfl, rhfl) = args
        outfl = args[2]
    elif len(args) == 4:
        if lhfl is not None or rhfl is not None:
            raise ValueError('Too many arguments and no subject given')
        subidx = next((i for (i,a) in enumerate(args)
                       if find_subject_path(a) is not None),
                      None)
        if subidx is None: raise ValueError('No subject given')
        sub = find_subject_path(args[subidx])
        del args[subidx]
        (lhfl, rhfl, outfl) = args
    else:
        raise ValueError('Too many arguments provided!')
    if sub is None: raise ValueError('No subject specified or found in $SUBJECT')
    if lhfl is None and rhfl is None: raise ValueError('No surfaces provided')
    # check the method
    method = opts['method'].lower()
    if method not in ['linear', 'lines', 'nearest', 'auto']:
        raise ValueError('Unsupported method: %s' % method)
    # and the datatype
    if opts['dtype'] is None: dtyp = None
    elif opts['dtype'].lower() == 'float': dtyp = np.float32
    elif opts['dtype'].lower() == 'int': dtyp = np.int32
    else: raise ValueError('Type argument must be float or int')
    if method == 'auto':
        if dtyp is np.float32: method = 'linear'
        elif dtyp is np.int32: method = 'nearest'
        else: method = 'linear'
    # Now, load the data:
    note('Reading surfaces...')
    (lhdat, rhdat) = (None, None)
    if lhfl is not None:
        note(' - Reading LH file: %s' % lhfl)
        lhdat = read_surf_file(lhfl)
    if rhfl is not None:
        note(' - Reading RH file: %s' % rhfl)
        rhdat = read_surf_file(rhfl)
    (dat, hemi) = (rhdat, 'rh')   if lhdat is None else \
                  (lhdat, 'lh')   if rhdat is None else \
                  ((lhdat, rhdat), None)
    sub = subject(sub)
    # okay, make the volume...
    note('Generating volume...')
    vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts['fill'],
                              dtype=dtyp)
    # and write out the file
    note('Exporting volume file: %s' % outfl)
    save(outfl, vol, affine=sub.voxel_to_native_matrix)
    note('surface_to_image complete!')
    return 0
python
['def', 'main', '(', 'args', ')', ':', '# Parse the arguments', '(', 'args', ',', 'opts', ')', '=', '_surface_to_ribbon_parser', '(', 'args', ')', '# First, help?', 'if', 'opts', '[', "'help'", ']', ':', 'print', '(', 'info', ',', 'file', '=', 'sys', '.', 'stdout', ')', 'return', '1', '# and if we are verbose, lets setup a note function', 'verbose', '=', 'opts', '[', "'verbose'", ']', 'def', 'note', '(', 's', ')', ':', 'if', 'verbose', ':', 'print', '(', 's', ',', 'file', '=', 'sys', '.', 'stdout', ')', 'return', 'verbose', '# Add the subjects directory, if there is one', 'if', "'subjects_dir'", 'in', 'opts', 'and', 'opts', '[', "'subjects_dir'", ']', 'is', 'not', 'None', ':', 'add_subject_path', '(', 'opts', '[', "'subjects_dir'", ']', ')', '# figure out our arguments:', '(', 'lhfl', ',', 'rhfl', ')', '=', '(', 'opts', '[', "'lh_file'", ']', ',', 'opts', '[', "'rh_file'", ']', ')', 'if', 'len', '(', 'args', ')', '==', '0', ':', 'raise', 'ValueError', '(', "'Not enough arguments provided!'", ')', 'elif', 'len', '(', 'args', ')', '==', '1', ':', '# must be that the subject is in the env?', 'sub', '=', 'find_subject_path', '(', 'os', '.', 'getenv', '(', "'SUBJECT'", ')', ')', 'outfl', '=', 'args', '[', '0', ']', 'elif', 'len', '(', 'args', ')', '==', '2', ':', 'sbpth', '=', 'find_subject_path', '(', 'args', '[', '0', ']', ')', 'if', 'sbpth', 'is', 'not', 'None', ':', 'sub', '=', 'sbpth', 'else', ':', 'sub', '=', 'find_subject_path', '(', 'os', '.', 'getenv', '(', "'SUBJECT'", ')', ')', 'if', 'lhfl', 'is', 'not', 'None', ':', 'rhfl', '=', 'args', '[', '0', ']', 'elif', 'rhfl', 'is', 'not', 'None', ':', 'lhfl', '=', 'args', '[', '0', ']', 'else', ':', 'raise', 'ValueError', '(', "'Given arg is not a subject: %s'", '%', 'args', '[', '0', ']', ')', 'outfl', '=', 'args', '[', '1', ']', 'elif', 'len', '(', 'args', ')', '==', '3', ':', 'sbpth0', '=', 'find_subject_path', '(', 'args', '[', '0', ']', ')', 'sbpth1', '=', 'find_subject_path', '(', 'args', '[', '1', ']', ')', 'if', 'sbpth0', 'is', 'not', 'None', ':', 'sub', '=', 'sbpth0', 'if', 'lhfl', 'is', 'not', 'None', ':', 'rhfl', '=', 'args', '[', '1', ']', 'elif', 'rhfl', 'is', 'not', 'None', ':', 'lhfl', '=', 'args', '[', '1', ']', 'else', ':', 'raise', 'ValueError', '(', "'Too many arguments given: %s'", '%', 'args', '[', '1', ']', ')', 'elif', 'sbpth1', 'is', 'not', 'None', ':', 'sub', '=', 'sbpth1', 'if', 'lhfl', 'is', 'not', 'None', ':', 'rhfl', '=', 'args', '[', '0', ']', 'elif', 'rhfl', 'is', 'not', 'None', ':', 'lhfl', '=', 'args', '[', '0', ']', 'else', ':', 'raise', 'ValueError', '(', "'Too many arguments given: %s'", '%', 'args', '[', '0', ']', ')', 'else', ':', 'sub', '=', 'find_subject_path', '(', 'os', '.', 'getenv', '(', "'SUBJECT'", ')', ')', 'if', 'lhfl', 'is', 'not', 'None', 'or', 'rhfl', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'Too many arguments and no subject given'", ')', '(', 'lhfl', ',', 'rhfl', ')', '=', 'args', 'outfl', '=', 'args', '[', '2', ']', 'elif', 'len', '(', 'args', ')', '==', '4', ':', 'if', 'lhfl', 'is', 'not', 'None', 'or', 'rhfl', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'Too many arguments and no subject given'", ')', 'subidx', '=', 'next', '(', '(', 'i', 'for', '(', 'i', ',', 'a', ')', 'in', 'enumerate', '(', 'args', ')', 'if', 'find_subject_path', '(', 'a', ')', 'is', 'not', 'None', ')', ',', 'None', ')', 'if', 'subidx', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No subject given'", ')', 'sub', '=', 'find_subject_path', '(', 'args', '[', 'subidx', ']', ')', 'del', 
'args', '[', 'subidx', ']', '(', 'lhfl', ',', 'rhfl', ',', 'outfl', ')', '=', 'args', 'else', ':', 'raise', 'ValueError', '(', "'Too many arguments provided!'", ')', 'if', 'sub', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No subject specified or found in $SUBJECT'", ')', 'if', 'lhfl', 'is', 'None', 'and', 'rhfl', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No surfaces provided'", ')', '# check the method', 'method', '=', 'opts', '[', "'method'", ']', '.', 'lower', '(', ')', 'if', 'method', 'not', 'in', '[', "'linear'", ',', "'lines'", ',', "'nearest'", ',', "'auto'", ']', ':', 'raise', 'ValueError', '(', "'Unsupported method: %s'", '%', 'method', ')', '# and the datatype', 'if', 'opts', '[', "'dtype'", ']', 'is', 'None', ':', 'dtyp', '=', 'None', 'elif', 'opts', '[', "'dtype'", ']', '.', 'lower', '(', ')', '==', "'float'", ':', 'dtyp', '=', 'np', '.', 'float32', 'elif', 'opts', '[', "'dtype'", ']', '.', 'lower', '(', ')', '==', "'int'", ':', 'dtyp', '=', 'np', '.', 'int32', 'else', ':', 'raise', 'ValueError', '(', "'Type argument must be float or int'", ')', 'if', 'method', '==', "'auto'", ':', 'if', 'dtyp', 'is', 'np', '.', 'float32', ':', 'method', '=', "'linear'", 'elif', 'dtyp', 'is', 'np', '.', 'int32', ':', 'method', '=', "'nearest'", 'else', ':', 'method', '=', "'linear'", '# Now, load the data:', 'note', '(', "'Reading surfaces...'", ')', '(', 'lhdat', ',', 'rhdat', ')', '=', '(', 'None', ',', 'None', ')', 'if', 'lhfl', 'is', 'not', 'None', ':', 'note', '(', "' - Reading LH file: %s'", '%', 'lhfl', ')', 'lhdat', '=', 'read_surf_file', '(', 'lhfl', ')', 'if', 'rhfl', 'is', 'not', 'None', ':', 'note', '(', "' - Reading RH file: %s'", '%', 'rhfl', ')', 'rhdat', '=', 'read_surf_file', '(', 'rhfl', ')', '(', 'dat', ',', 'hemi', ')', '=', '(', 'rhdat', ',', "'rh'", ')', 'if', 'lhdat', 'is', 'None', 'else', '(', 'lhdat', ',', "'lh'", ')', 'if', 'rhdat', 'is', 'None', 'else', '(', '(', 'lhdat', ',', 'rhdat', ')', ',', 'None', ')', 'sub', '=', 'subject', '(', 'sub', ')', '# okay, make the volume...', 'note', '(', "'Generating volume...'", ')', 'vol', '=', 'sub', '.', 'cortex_to_image', '(', 'dat', ',', 'hemi', '=', 'hemi', ',', 'method', '=', 'method', ',', 'fill', '=', 'opts', '[', "'fill'", ']', ',', 'dtype', '=', 'dtyp', ')', '# and write out the file', 'note', '(', "'Exporting volume file: %s'", '%', 'outfl', ')', 'save', '(', 'outfl', ',', 'vol', ',', 'affine', '=', 'sub', '.', 'voxel_to_native_matrix', ')', 'note', '(', "'surface_to_image complete!'", ')', 'return', '0']
surface_to_image.main(args) can be given a list of arguments, such as sys.argv[1:]; these arguments may include any options and must include exactly one subject id and one output filename. Additionally one or two surface input filenames must be given. The surface files are projected into the ribbon and written to the output filename. For more information see the string stored in surface_to_image.info.
['surface_to_rubbon', '.', 'main', '(', 'args', ')', 'can', 'be', 'given', 'a', 'list', 'of', 'arguments', 'such', 'as', 'sys', '.', 'argv', '[', '1', ':', ']', ';', 'these', 'arguments', 'may', 'include', 'any', 'options', 'and', 'must', 'include', 'exactly', 'one', 'subject', 'id', 'and', 'one', 'output', 'filename', '.', 'Additionally', 'one', 'or', 'two', 'surface', 'input', 'filenames', 'must', 'be', 'given', '.', 'The', 'surface', 'files', 'are', 'projected', 'into', 'the', 'ribbon', 'and', 'written', 'to', 'the', 'output', 'filename', '.', 'For', 'more', 'information', 'see', 'the', 'string', 'stored', 'in', 'surface_to_image', '.', 'info', '.']
train
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/surface_to_image.py#L83-L187
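Per its docstring, `main` takes a `sys.argv[1:]`-style list, so it can be driven from Python as well as from a console script. A sketch with hypothetical inputs ('bert' must resolve to a subject on the FreeSurfer subjects path, and both surface files must exist):

from neuropythy.commands import surface_to_image

exit_code = surface_to_image.main(
    ['bert',                       # subject id (hypothetical)
     'lh.prf_angle.mgz',           # left-hemisphere surface data (hypothetical)
     'rh.prf_angle.mgz',           # right-hemisphere surface data (hypothetical)
     'prf_angle_ribbon.nii.gz'])   # output volume filename (hypothetical)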
5,692
pricingassistant/mongokat
mongokat/collection.py
Collection.find_one
def find_one(self, *args, **kwargs):
    """
    Get a single document from the database.
    """
    doc = self._collection_with_options(kwargs).find_one(*args, **kwargs)
    if doc is None:
        return None

    return doc
python
['def', 'find_one', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'doc', '=', 'self', '.', '_collection_with_options', '(', 'kwargs', ')', '.', 'find_one', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'doc', 'is', 'None', ':', 'return', 'None', 'return', 'doc']
Get a single document from the database.
['Get', 'a', 'single', 'document', 'from', 'the', 'database', '.']
train
https://github.com/pricingassistant/mongokat/blob/61eaf4bc1c4cc359c6f9592ec97b9a04d9561411/mongokat/collection.py#L203-L211
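This wrapper is normally reached through a MongoKat collection object; a sketch following the library's documented Collection/Document pattern (the database, collection, and field names are illustrative):

from pymongo import MongoClient
from mongokat import Collection, Document

class UserDocument(Document):
    pass

class UserCollection(Collection):
    document_class = UserDocument

db = MongoClient()["example_db"]               # illustrative database name
Users = UserCollection(collection=db["users"])

# Delegates to pymongo's find_one via _collection_with_options;
# returns the matching document, or None when nothing matches.
user = Users.find_one({"email": "a@example.com"})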
5,693
IzunaDevs/SnekChek
snekchek/config_gen.py
ConfigGenerator.main
def main(self) -> None:
    """The main function for generating the config file"""
    path = ask_path("where should the config be stored?", ".snekrc")

    conf = configobj.ConfigObj()

    tools = self.get_tools()
    for tool in tools:
        conf[tool] = getattr(self, tool)()  # pylint: disable=assignment-from-no-return

    conf.filename = path
    conf.write()

    print("Written config file!")

    if "pylint" in tools:
        print(
            "Please also run `pylint --generate-rcfile` to complete setup")
python
['def', 'main', '(', 'self', ')', '->', 'None', ':', 'path', '=', 'ask_path', '(', '"where should the config be stored?"', ',', '".snekrc"', ')', 'conf', '=', 'configobj', '.', 'ConfigObj', '(', ')', 'tools', '=', 'self', '.', 'get_tools', '(', ')', 'for', 'tool', 'in', 'tools', ':', 'conf', '[', 'tool', ']', '=', 'getattr', '(', 'self', ',', 'tool', ')', '(', ')', '# pylint: disable=assignment-from-no-return', 'conf', '.', 'filename', '=', 'path', 'conf', '.', 'write', '(', ')', 'print', '(', '"Written config file!"', ')', 'if', '"pylint"', 'in', 'tools', ':', 'print', '(', '"Please also run `pylint --generate-rcfile` to complete setup"', ')']
The main function for generating the config file
['The', 'main', 'function', 'for', 'generating', 'the', 'config', 'file']
train
https://github.com/IzunaDevs/SnekChek/blob/fdb01bdf1ec8e79d9aae2a11d96bfb27e53a97a9/snekchek/config_gen.py#L68-L84
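The write-out step in the function above is plain `configobj`; the same pattern in isolation, with illustrative section and option names (snekchek's real sections come from its tool-specific prompts):

import configobj

conf = configobj.ConfigObj()
conf["flake8"] = {"max-line-length": 100}  # one section per enabled tool
conf.filename = ".snekrc"
conf.write()                               # serializes INI-style text to .snekrc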
5,694
apple/turicreate
src/unity/python/turicreate/util/__init__.py
_assert_sframe_equal
def _assert_sframe_equal(sf1, sf2,
                         check_column_names=True,
                         check_column_order=True,
                         check_row_order=True,
                         float_column_delta=None):
    """
    Assert the two SFrames are equal.

    The default behavior of this function uses the strictest possible
    definition of equality, where all columns must be in the same order, with
    the same names and have the same data in the same order.  Each of these
    stipulations can be relaxed individually and in concert with another, with
    the exception of `check_column_order` and `check_column_names`, we must
    use one of these to determine which columns to compare with one another.

    Parameters
    ----------
    sf1 : SFrame

    sf2 : SFrame

    check_column_names : bool
        If true, assert if the data values in two columns are the same, but
        they have different names.  If False, column order is used to
        determine which columns to compare.

    check_column_order : bool
        If true, assert if the data values in two columns are the same, but
        are not in the same column position (one is the i-th column and the
        other is the j-th column, i != j).  If False, column names are used
        to determine which columns to compare.

    check_row_order : bool
        If true, assert if all rows in the first SFrame exist in the second
        SFrame, but they are not in the same order.

    float_column_delta : float
        The acceptable delta that two float values can be and still be
        considered "equal". When this is None, only exact equality is
        accepted. This is the default behavior since columns of all Nones are
        often of float type. Applies to all float columns.
    """
    from .. import SFrame as _SFrame
    if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame):
        raise TypeError("Cannot function on types other than SFrames.")

    if not check_column_order and not check_column_names:
        raise ValueError("Cannot ignore both column order and column names.")

    sf1.__materialize__()
    sf2.__materialize__()

    if sf1.num_columns() != sf2.num_columns():
        raise AssertionError("Number of columns mismatched: " +
            str(sf1.num_columns()) + " != " + str(sf2.num_columns()))

    s1_names = sf1.column_names()
    s2_names = sf2.column_names()

    sorted_s1_names = sorted(s1_names)
    sorted_s2_names = sorted(s2_names)

    if check_column_names:
        if (check_column_order and (s1_names != s2_names)) or \
                (sorted_s1_names != sorted_s2_names):
            raise AssertionError("SFrame does not have same column names: " +
                str(sf1.column_names()) + " != " + str(sf2.column_names()))

    if sf1.num_rows() != sf2.num_rows():
        raise AssertionError("Number of rows mismatched: " +
            str(sf1.num_rows()) + " != " + str(sf2.num_rows()))

    if not check_row_order and (sf1.num_rows() > 1):
        sf1 = sf1.sort(s1_names)
        sf2 = sf2.sort(s2_names)

    names_to_check = None
    if check_column_names:
        names_to_check = list(zip(sorted_s1_names, sorted_s2_names))
    else:
        names_to_check = list(zip(s1_names, s2_names))
    for i in names_to_check:
        col1 = sf1[i[0]]
        col2 = sf2[i[1]]
        if col1.dtype != col2.dtype:
            raise AssertionError("Columns " + str(i) + " types mismatched.")

        compare_ary = None
        if col1.dtype == float and float_column_delta is not None:
            dt = float_column_delta
            compare_ary = ((col1 > col2-dt) & (col1 < col2+dt))
        else:
            compare_ary = (sf1[i[0]] == sf2[i[1]])
        if not compare_ary.all():
            count = 0
            for j in compare_ary:
                if not j:
                    first_row = count
                    break
                count += 1
            raise AssertionError("Columns " + str(i) +
                " are not equal! First differing element is at row " +
                str(first_row) + ": " +
                str((col1[first_row], col2[first_row])))
python
['def', '_assert_sframe_equal', '(', 'sf1', ',', 'sf2', ',', 'check_column_names', '=', 'True', ',', 'check_column_order', '=', 'True', ',', 'check_row_order', '=', 'True', ',', 'float_column_delta', '=', 'None', ')', ':', 'from', '.', '.', 'import', 'SFrame', 'as', '_SFrame', 'if', '(', 'type', '(', 'sf1', ')', 'is', 'not', '_SFrame', ')', 'or', '(', 'type', '(', 'sf2', ')', 'is', 'not', '_SFrame', ')', ':', 'raise', 'TypeError', '(', '"Cannot function on types other than SFrames."', ')', 'if', 'not', 'check_column_order', 'and', 'not', 'check_column_names', ':', 'raise', 'ValueError', '(', '"Cannot ignore both column order and column names."', ')', 'sf1', '.', '__materialize__', '(', ')', 'sf2', '.', '__materialize__', '(', ')', 'if', 'sf1', '.', 'num_columns', '(', ')', '!=', 'sf2', '.', 'num_columns', '(', ')', ':', 'raise', 'AssertionError', '(', '"Number of columns mismatched: "', '+', 'str', '(', 'sf1', '.', 'num_columns', '(', ')', ')', '+', '" != "', '+', 'str', '(', 'sf2', '.', 'num_columns', '(', ')', ')', ')', 's1_names', '=', 'sf1', '.', 'column_names', '(', ')', 's2_names', '=', 'sf2', '.', 'column_names', '(', ')', 'sorted_s1_names', '=', 'sorted', '(', 's1_names', ')', 'sorted_s2_names', '=', 'sorted', '(', 's2_names', ')', 'if', 'check_column_names', ':', 'if', '(', 'check_column_order', 'and', '(', 's1_names', '!=', 's2_names', ')', ')', 'or', '(', 'sorted_s1_names', '!=', 'sorted_s2_names', ')', ':', 'raise', 'AssertionError', '(', '"SFrame does not have same column names: "', '+', 'str', '(', 'sf1', '.', 'column_names', '(', ')', ')', '+', '" != "', '+', 'str', '(', 'sf2', '.', 'column_names', '(', ')', ')', ')', 'if', 'sf1', '.', 'num_rows', '(', ')', '!=', 'sf2', '.', 'num_rows', '(', ')', ':', 'raise', 'AssertionError', '(', '"Number of rows mismatched: "', '+', 'str', '(', 'sf1', '.', 'num_rows', '(', ')', ')', '+', '" != "', '+', 'str', '(', 'sf2', '.', 'num_rows', '(', ')', ')', ')', 'if', 'not', 'check_row_order', 'and', '(', 'sf1', '.', 'num_rows', '(', ')', '>', '1', ')', ':', 'sf1', '=', 'sf1', '.', 'sort', '(', 's1_names', ')', 'sf2', '=', 'sf2', '.', 'sort', '(', 's2_names', ')', 'names_to_check', '=', 'None', 'if', 'check_column_names', ':', 'names_to_check', '=', 'list', '(', 'zip', '(', 'sorted_s1_names', ',', 'sorted_s2_names', ')', ')', 'else', ':', 'names_to_check', '=', 'list', '(', 'zip', '(', 's1_names', ',', 's2_names', ')', ')', 'for', 'i', 'in', 'names_to_check', ':', 'col1', '=', 'sf1', '[', 'i', '[', '0', ']', ']', 'col2', '=', 'sf2', '[', 'i', '[', '1', ']', ']', 'if', 'col1', '.', 'dtype', '!=', 'col2', '.', 'dtype', ':', 'raise', 'AssertionError', '(', '"Columns "', '+', 'str', '(', 'i', ')', '+', '" types mismatched."', ')', 'compare_ary', '=', 'None', 'if', 'col1', '.', 'dtype', '==', 'float', 'and', 'float_column_delta', 'is', 'not', 'None', ':', 'dt', '=', 'float_column_delta', 'compare_ary', '=', '(', '(', 'col1', '>', 'col2', '-', 'dt', ')', '&', '(', 'col1', '<', 'col2', '+', 'dt', ')', ')', 'else', ':', 'compare_ary', '=', '(', 'sf1', '[', 'i', '[', '0', ']', ']', '==', 'sf2', '[', 'i', '[', '1', ']', ']', ')', 'if', 'not', 'compare_ary', '.', 'all', '(', ')', ':', 'count', '=', '0', 'for', 'j', 'in', 'compare_ary', ':', 'if', 'not', 'j', ':', 'first_row', '=', 'count', 'break', 'count', '+=', '1', 'raise', 'AssertionError', '(', '"Columns "', '+', 'str', '(', 'i', ')', '+', '" are not equal! 
First differing element is at row "', '+', 'str', '(', 'first_row', ')', '+', '": "', '+', 'str', '(', '(', 'col1', '[', 'first_row', ']', ',', 'col2', '[', 'first_row', ']', ')', ')', ')']
Assert the two SFrames are equal.

The default behavior of this function uses the strictest possible definition of equality, where all columns must be in the same order, with the same names and have the same data in the same order. Each of these stipulations can be relaxed individually and in concert with another, with the exception of `check_column_order` and `check_column_names`, we must use one of these to determine which columns to compare with one another.

Parameters
----------
sf1 : SFrame

sf2 : SFrame

check_column_names : bool
    If true, assert if the data values in two columns are the same, but they
    have different names. If False, column order is used to determine which
    columns to compare.

check_column_order : bool
    If true, assert if the data values in two columns are the same, but are
    not in the same column position (one is the i-th column and the other is
    the j-th column, i != j). If False, column names are used to determine
    which columns to compare.

check_row_order : bool
    If true, assert if all rows in the first SFrame exist in the second
    SFrame, but they are not in the same order.

float_column_delta : float
    The acceptable delta that two float values can be and still be considered
    "equal". When this is None, only exact equality is accepted. This is the
    default behavior since columns of all Nones are often of float type.
    Applies to all float columns.
['Assert', 'the', 'two', 'SFrames', 'are', 'equal', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/__init__.py#L263-L365
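A minimal usage sketch for the _assert_sframe_equal record above (hypothetical data; assumes the turicreate package is installed, with the import path taken from the record's func_path_in_repository):

import turicreate as tc
from turicreate.util import _assert_sframe_equal

# Same data, but columns in a different order and floats off by less than 0.001.
sf1 = tc.SFrame({'id': [1, 2, 3], 'score': [0.10, 0.20, 0.30]})
sf2 = tc.SFrame({'score': [0.1001, 0.2001, 0.3001], 'id': [1, 2, 3]})

# Relax column order (names still decide which columns are paired) and allow
# a per-value delta on float columns; this call returns without raising.
_assert_sframe_equal(sf1, sf2, check_column_order=False, float_column_delta=0.001)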
5,695
wbond/asn1crypto
asn1crypto/core.py
Asn1Value._copy
def _copy(self, other, copy_func): """ Copies the contents of another Asn1Value object to itself :param other: Another instance of the same class :param copy_func: A reference to copy.copy() or copy.deepcopy() to use when copying lists, dicts and objects """ if self.__class__ != other.__class__: raise TypeError(unwrap( ''' Can not copy values from %s object to %s object ''', type_name(other), type_name(self) )) self.contents = other.contents self._native = copy_func(other._native)
python
def _copy(self, other, copy_func): """ Copies the contents of another Asn1Value object to itself :param other: Another instance of the same class :param copy_func: A reference to copy.copy() or copy.deepcopy() to use when copying lists, dicts and objects """ if self.__class__ != other.__class__: raise TypeError(unwrap( ''' Can not copy values from %s object to %s object ''', type_name(other), type_name(self) )) self.contents = other.contents self._native = copy_func(other._native)
['def', '_copy', '(', 'self', ',', 'other', ',', 'copy_func', ')', ':', 'if', 'self', '.', '__class__', '!=', 'other', '.', '__class__', ':', 'raise', 'TypeError', '(', 'unwrap', '(', "'''\n Can not copy values from %s object to %s object\n '''", ',', 'type_name', '(', 'other', ')', ',', 'type_name', '(', 'self', ')', ')', ')', 'self', '.', 'contents', '=', 'other', '.', 'contents', 'self', '.', '_native', '=', 'copy_func', '(', 'other', '.', '_native', ')']
Copies the contents of another Asn1Value object to itself :param other: Another instance of the same class :param copy_func: A reference to copy.copy() or copy.deepcopy() to use when copying lists, dicts and objects
['Copies', 'the', 'contents', 'of', 'another', 'Asn1Value', 'object', 'to', 'itself']
train
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L546-L568
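_copy() is internal plumbing; callers normally reach it through the copy protocol, which supplies copy.copy or copy.deepcopy as copy_func. A hedged sketch, assuming asn1crypto is installed and that the linked revision wires _copy into the __copy__/__deepcopy__ hooks:

import copy
from asn1crypto.core import Integer

original = Integer(42)
duplicate = copy.deepcopy(original)  # should end up in _copy with copy.deepcopy

assert duplicate is not original
assert duplicate.native == original.native == 42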
5,696
bram85/topydo
topydo/ui/columns/TodoListWidget.py
TodoListWidget.execute_builtin_action
def execute_builtin_action(self, p_action_str, p_size=None): """ Executes built-in action specified in p_action_str. Currently supported actions are: 'up', 'down', 'home', 'end', 'first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s', 'pri', 'mark', 'mark_all', 'reset' and 'repeat'. """ column_actions = ['first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_left', 'swap_right', 'reset', ] if p_action_str in column_actions: urwid.emit_signal(self, 'column_action', p_action_str) elif p_action_str in ['up', 'down']: self.listbox.keypress(p_size, p_action_str) elif p_action_str == 'home': self._scroll_to_top(p_size) elif p_action_str == 'end': self._scroll_to_bottom(p_size) elif p_action_str in ['postpone', 'postpone_s']: pass elif p_action_str == 'pri': pass elif p_action_str == 'mark': self._toggle_marked_status() elif p_action_str == 'mark_all': self._mark_all() elif p_action_str == 'repeat': self._repeat_cmd()
python
def execute_builtin_action(self, p_action_str, p_size=None): """ Executes built-in action specified in p_action_str. Currently supported actions are: 'up', 'down', 'home', 'end', 'first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s', 'pri', 'mark', 'mark_all', 'reset' and 'repeat'. """ column_actions = ['first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_left', 'swap_right', 'reset', ] if p_action_str in column_actions: urwid.emit_signal(self, 'column_action', p_action_str) elif p_action_str in ['up', 'down']: self.listbox.keypress(p_size, p_action_str) elif p_action_str == 'home': self._scroll_to_top(p_size) elif p_action_str == 'end': self._scroll_to_bottom(p_size) elif p_action_str in ['postpone', 'postpone_s']: pass elif p_action_str == 'pri': pass elif p_action_str == 'mark': self._toggle_marked_status() elif p_action_str == 'mark_all': self._mark_all() elif p_action_str == 'repeat': self._repeat_cmd()
['def', 'execute_builtin_action', '(', 'self', ',', 'p_action_str', ',', 'p_size', '=', 'None', ')', ':', 'column_actions', '=', '[', "'first_column'", ',', "'last_column'", ',', "'prev_column'", ',', "'next_column'", ',', "'append_column'", ',', "'insert_column'", ',', "'edit_column'", ',', "'delete_column'", ',', "'copy_column'", ',', "'swap_left'", ',', "'swap_right'", ',', "'reset'", ',', ']', 'if', 'p_action_str', 'in', 'column_actions', ':', 'urwid', '.', 'emit_signal', '(', 'self', ',', "'column_action'", ',', 'p_action_str', ')', 'elif', 'p_action_str', 'in', '[', "'up'", ',', "'down'", ']', ':', 'self', '.', 'listbox', '.', 'keypress', '(', 'p_size', ',', 'p_action_str', ')', 'elif', 'p_action_str', '==', "'home'", ':', 'self', '.', '_scroll_to_top', '(', 'p_size', ')', 'elif', 'p_action_str', '==', "'end'", ':', 'self', '.', '_scroll_to_bottom', '(', 'p_size', ')', 'elif', 'p_action_str', 'in', '[', "'postpone'", ',', "'postpone_s'", ']', ':', 'pass', 'elif', 'p_action_str', '==', "'pri'", ':', 'pass', 'elif', 'p_action_str', '==', "'mark'", ':', 'self', '.', '_toggle_marked_status', '(', ')', 'elif', 'p_action_str', '==', "'mark_all'", ':', 'self', '.', '_mark_all', '(', ')', 'elif', 'p_action_str', '==', "'repeat'", ':', 'self', '.', '_repeat_cmd', '(', ')']
Executes built-in action specified in p_action_str. Currently supported actions are: 'up', 'down', 'home', 'end', 'first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s', 'pri', 'mark', 'mark_all', 'reset' and 'repeat'.
['Executes', 'built', '-', 'in', 'action', 'specified', 'in', 'p_action_str', '.']
train
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/TodoListWidget.py#L277-L318
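TodoListWidget needs a live urwid layout, so the record's method cannot run standalone; the sketch below is not topydo itself, just a self-contained illustration of the same dispatch pattern: column-level actions are forwarded to a parent handler (urwid.emit_signal in the original), everything else is handled locally.

class MiniListWidget:
    # Abridged version of the column_actions list in the record above.
    COLUMN_ACTIONS = {'first_column', 'last_column', 'prev_column',
                      'next_column', 'swap_left', 'swap_right', 'reset'}

    def __init__(self, emit_column_action):
        self._emit = emit_column_action  # stand-in for urwid.emit_signal
        self.marked = False

    def execute_builtin_action(self, action, size=None):
        if action in self.COLUMN_ACTIONS:
            self._emit(action)           # parent layout handles column moves
        elif action == 'mark':
            self.marked = not self.marked
        else:
            raise ValueError('unsupported action: %s' % action)

widget = MiniListWidget(emit_column_action=print)
widget.execute_builtin_action('mark')        # toggles widget.marked
widget.execute_builtin_action('swap_right')  # prints 'swap_right'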
5,697
nephila/python-taiga
taiga/models/models.py
Project.add_webhook
def add_webhook(self, name, url, key, **attrs): """ Add a new Webhook and return a :class:`Webhook` object. :param name: name of the :class:`Webhook` :param url: payload url of the :class:`Webhook` :param key: secret key of the :class:`Webhook` :param attrs: optional attributes for :class:`Webhook` """ return Webhooks(self.requester).create( self.id, name, url, key, **attrs )
python
def add_webhook(self, name, url, key, **attrs): """ Add a new Webhook and return a :class:`Webhook` object. :param name: name of the :class:`Webhook` :param url: payload url of the :class:`Webhook` :param key: secret key of the :class:`Webhook` :param attrs: optional attributes for :class:`Webhook` """ return Webhooks(self.requester).create( self.id, name, url, key, **attrs )
['def', 'add_webhook', '(', 'self', ',', 'name', ',', 'url', ',', 'key', ',', '*', '*', 'attrs', ')', ':', 'return', 'Webhooks', '(', 'self', '.', 'requester', ')', '.', 'create', '(', 'self', '.', 'id', ',', 'name', ',', 'url', ',', 'key', ',', '*', '*', 'attrs', ')']
Add a new Webhook and return a :class:`Webhook` object. :param name: name of the :class:`Webhook` :param url: payload url of the :class:`Webhook` :param key: secret key of the :class:`Webhook` :param attrs: optional attributes for :class:`Webhook`
['Add', 'a', 'new', 'Webhook', 'and', 'return', 'a', ':', 'class', ':', 'Webhook', 'object', '.']
train
https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L1500-L1511
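A usage sketch for add_webhook, assuming the python-taiga package and network access to a Taiga instance; the host, credentials, project id, URL and key are all placeholders:

from taiga import TaigaAPI

api = TaigaAPI(host='https://tree.taiga.io')          # any Taiga instance
api.auth(username='my-user', password='my-password')  # placeholder credentials

project = api.projects.get(1)              # a project you administer
webhook = project.add_webhook(
    name='ci-notify',
    url='https://example.com/taiga-hook',  # where Taiga POSTs payloads
    key='shared-secret',                   # secret used to sign payloads
)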
5,698
saltstack/salt
salt/modules/file.py
extract_hash
def extract_hash(hash_fn, hash_type='sha256', file_name='', source='', source_hash_name=None): ''' .. versionchanged:: 2016.3.5 Prior to this version, only the ``file_name`` argument was considered for filename matches in the hash file. This would be problematic for cases in which the user was relying on a remote checksum file that they do not control, and they wished to use a different name for that file on the minion from the filename on the remote server (and in the checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the remote file was at ``https://mydomain.tld/different_name.tar.gz``. The :py:func:`file.managed <salt.states.file.managed>` state now also passes this function the source URI as well as the ``source_hash_name`` (if specified). In cases where ``source_hash_name`` is specified, it takes precedence over both the ``file_name`` and ``source``. When it is not specified, ``file_name`` takes precedence over ``source``. This allows for better capability for matching hashes. .. versionchanged:: 2016.11.0 File name and source URI matches are no longer disregarded when ``source_hash_name`` is specified. They will be used as fallback matches if there is no match to the ``source_hash_name`` value. This routine is called from the :mod:`file.managed <salt.states.file.managed>` state to pull a hash from a remote file. Regular expressions are used line by line on the ``source_hash`` file, to find a potential candidate of the indicated hash type. This avoids many problems of arbitrary file layout rules. It specifically permits pulling hash codes from debian ``*.dsc`` files. If no exact match of a hash and filename are found, then the first hash found (if any) will be returned. If no hashes at all are found, then ``None`` will be returned. For example: .. code-block:: yaml openerp_7.0-latest-1.tar.gz: file.managed: - name: /tmp/openerp_7.0-20121227-075624-1_all.deb - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc CLI Example: .. code-block:: bash salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo ''' hash_len = HASHES.get(hash_type) if hash_len is None: if hash_type: log.warning( 'file.extract_hash: Unsupported hash_type \'%s\', falling ' 'back to matching any supported hash_type', hash_type ) hash_type = '' hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP)) else: hash_len_expr = six.text_type(hash_len) filename_separators = string.whitespace + r'\/' if source_hash_name: if not isinstance(source_hash_name, six.string_types): source_hash_name = six.text_type(source_hash_name) source_hash_name_idx = (len(source_hash_name) + 1) * -1 log.debug( 'file.extract_hash: Extracting %s hash for file matching ' 'source_hash_name \'%s\'', 'any supported' if not hash_type else hash_type, source_hash_name ) if file_name: if not isinstance(file_name, six.string_types): file_name = six.text_type(file_name) file_name_basename = os.path.basename(file_name) file_name_idx = (len(file_name_basename) + 1) * -1 if source: if not isinstance(source, six.string_types): source = six.text_type(source) urlparsed_source = _urlparse(source) source_basename = os.path.basename( urlparsed_source.path or urlparsed_source.netloc ) source_idx = (len(source_basename) + 1) * -1 basename_searches = [x for x in (file_name, source) if x] if basename_searches: log.debug( 'file.extract_hash: %s %s hash for file matching%s: %s', 'If no source_hash_name match found, will extract' if source_hash_name else 'Extracting', 'any supported' if not hash_type else hash_type, '' if len(basename_searches) == 1 else ' either of the following', ', '.join(basename_searches) ) partial = None found = {} with salt.utils.files.fopen(hash_fn, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line.strip()) hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])' hash_match = re.search(hash_re, line) matched = None if hash_match: matched_hsum = hash_match.group(1) if matched_hsum is not None: matched_type = HASHES_REVMAP.get(len(matched_hsum)) if matched_type is None: # There was a match, but it's not of the correct length # to match one of the supported hash types. matched = None else: matched = {'hsum': matched_hsum, 'hash_type': matched_type} if matched is None: log.debug( 'file.extract_hash: In line \'%s\', no %shash found', line, '' if not hash_type else hash_type + ' ' ) continue if partial is None: partial = matched def _add_to_matches(found, line, match_type, value, matched): log.debug( 'file.extract_hash: Line \'%s\' matches %s \'%s\'', line, match_type, value ) found.setdefault(match_type, []).append(matched) hash_matched = False if source_hash_name: if line.endswith(source_hash_name): # Checking the character before where the basename # should start for either whitespace or a path # separator. We can't just rsplit on spaces/whitespace, # because the filename may contain spaces. try: if line[source_hash_name_idx] in string.whitespace: _add_to_matches(found, line, 'source_hash_name', source_hash_name, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(source_hash_name) + r'\s+', line): _add_to_matches(found, line, 'source_hash_name', source_hash_name, matched) hash_matched = True if file_name: if line.endswith(file_name_basename): # Checking the character before where the basename # should start for either whitespace or a path # separator. We can't just rsplit on spaces/whitespace, # because the filename may contain spaces. try: if line[file_name_idx] in filename_separators: _add_to_matches(found, line, 'file_name', file_name, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(file_name) + r'\s+', line): _add_to_matches(found, line, 'file_name', file_name, matched) hash_matched = True if source: if line.endswith(source_basename): # Same as above, we can't just do an rsplit here. try: if line[source_idx] in filename_separators: _add_to_matches(found, line, 'source', source, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(source) + r'\s+', line): _add_to_matches(found, line, 'source', source, matched) hash_matched = True if not hash_matched: log.debug( 'file.extract_hash: Line \'%s\' contains %s hash ' '\'%s\', but line did not meet the search criteria', line, matched['hash_type'], matched['hsum'] ) for found_type, found_str in (('source_hash_name', source_hash_name), ('file_name', file_name), ('source', source)): if found_type in found: if len(found[found_type]) > 1: log.debug( 'file.extract_hash: Multiple %s matches for %s: %s', found_type, found_str, ', '.join( ['{0} ({1})'.format(x['hsum'], x['hash_type']) for x in found[found_type]] ) ) ret = found[found_type][0] log.debug( 'file.extract_hash: Returning %s hash \'%s\' as a match of %s', ret['hash_type'], ret['hsum'], found_str ) return ret if partial: log.debug( 'file.extract_hash: Returning the partially identified %s hash ' '\'%s\'', partial['hash_type'], partial['hsum'] ) return partial log.debug('file.extract_hash: No matches, returning None') return None
python
def extract_hash(hash_fn, hash_type='sha256', file_name='', source='', source_hash_name=None): ''' .. versionchanged:: 2016.3.5 Prior to this version, only the ``file_name`` argument was considered for filename matches in the hash file. This would be problematic for cases in which the user was relying on a remote checksum file that they do not control, and they wished to use a different name for that file on the minion from the filename on the remote server (and in the checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the remote file was at ``https://mydomain.tld/different_name.tar.gz``. The :py:func:`file.managed <salt.states.file.managed>` state now also passes this function the source URI as well as the ``source_hash_name`` (if specified). In cases where ``source_hash_name`` is specified, it takes precedence over both the ``file_name`` and ``source``. When it is not specified, ``file_name`` takes precedence over ``source``. This allows for better capability for matching hashes. .. versionchanged:: 2016.11.0 File name and source URI matches are no longer disregarded when ``source_hash_name`` is specified. They will be used as fallback matches if there is no match to the ``source_hash_name`` value. This routine is called from the :mod:`file.managed <salt.states.file.managed>` state to pull a hash from a remote file. Regular expressions are used line by line on the ``source_hash`` file, to find a potential candidate of the indicated hash type. This avoids many problems of arbitrary file layout rules. It specifically permits pulling hash codes from debian ``*.dsc`` files. If no exact match of a hash and filename are found, then the first hash found (if any) will be returned. If no hashes at all are found, then ``None`` will be returned. For example: .. code-block:: yaml openerp_7.0-latest-1.tar.gz: file.managed: - name: /tmp/openerp_7.0-20121227-075624-1_all.deb - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc CLI Example: .. code-block:: bash salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo ''' hash_len = HASHES.get(hash_type) if hash_len is None: if hash_type: log.warning( 'file.extract_hash: Unsupported hash_type \'%s\', falling ' 'back to matching any supported hash_type', hash_type ) hash_type = '' hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP)) else: hash_len_expr = six.text_type(hash_len) filename_separators = string.whitespace + r'\/' if source_hash_name: if not isinstance(source_hash_name, six.string_types): source_hash_name = six.text_type(source_hash_name) source_hash_name_idx = (len(source_hash_name) + 1) * -1 log.debug( 'file.extract_hash: Extracting %s hash for file matching ' 'source_hash_name \'%s\'', 'any supported' if not hash_type else hash_type, source_hash_name ) if file_name: if not isinstance(file_name, six.string_types): file_name = six.text_type(file_name) file_name_basename = os.path.basename(file_name) file_name_idx = (len(file_name_basename) + 1) * -1 if source: if not isinstance(source, six.string_types): source = six.text_type(source) urlparsed_source = _urlparse(source) source_basename = os.path.basename( urlparsed_source.path or urlparsed_source.netloc ) source_idx = (len(source_basename) + 1) * -1 basename_searches = [x for x in (file_name, source) if x] if basename_searches: log.debug( 'file.extract_hash: %s %s hash for file matching%s: %s', 'If no source_hash_name match found, will extract' if source_hash_name else 'Extracting', 'any supported' if not hash_type else hash_type, '' if len(basename_searches) == 1 else ' either of the following', ', '.join(basename_searches) ) partial = None found = {} with salt.utils.files.fopen(hash_fn, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line.strip()) hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])' hash_match = re.search(hash_re, line) matched = None if hash_match: matched_hsum = hash_match.group(1) if matched_hsum is not None: matched_type = HASHES_REVMAP.get(len(matched_hsum)) if matched_type is None: # There was a match, but it's not of the correct length # to match one of the supported hash types. matched = None else: matched = {'hsum': matched_hsum, 'hash_type': matched_type} if matched is None: log.debug( 'file.extract_hash: In line \'%s\', no %shash found', line, '' if not hash_type else hash_type + ' ' ) continue if partial is None: partial = matched def _add_to_matches(found, line, match_type, value, matched): log.debug( 'file.extract_hash: Line \'%s\' matches %s \'%s\'', line, match_type, value ) found.setdefault(match_type, []).append(matched) hash_matched = False if source_hash_name: if line.endswith(source_hash_name): # Checking the character before where the basename # should start for either whitespace or a path # separator. We can't just rsplit on spaces/whitespace, # because the filename may contain spaces. try: if line[source_hash_name_idx] in string.whitespace: _add_to_matches(found, line, 'source_hash_name', source_hash_name, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(source_hash_name) + r'\s+', line): _add_to_matches(found, line, 'source_hash_name', source_hash_name, matched) hash_matched = True if file_name: if line.endswith(file_name_basename): # Checking the character before where the basename # should start for either whitespace or a path # separator. We can't just rsplit on spaces/whitespace, # because the filename may contain spaces. try: if line[file_name_idx] in filename_separators: _add_to_matches(found, line, 'file_name', file_name, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(file_name) + r'\s+', line): _add_to_matches(found, line, 'file_name', file_name, matched) hash_matched = True if source: if line.endswith(source_basename): # Same as above, we can't just do an rsplit here. try: if line[source_idx] in filename_separators: _add_to_matches(found, line, 'source', source, matched) hash_matched = True except IndexError: pass elif re.match(re.escape(source) + r'\s+', line): _add_to_matches(found, line, 'source', source, matched) hash_matched = True if not hash_matched: log.debug( 'file.extract_hash: Line \'%s\' contains %s hash ' '\'%s\', but line did not meet the search criteria', line, matched['hash_type'], matched['hsum'] ) for found_type, found_str in (('source_hash_name', source_hash_name), ('file_name', file_name), ('source', source)): if found_type in found: if len(found[found_type]) > 1: log.debug( 'file.extract_hash: Multiple %s matches for %s: %s', found_type, found_str, ', '.join( ['{0} ({1})'.format(x['hsum'], x['hash_type']) for x in found[found_type]] ) ) ret = found[found_type][0] log.debug( 'file.extract_hash: Returning %s hash \'%s\' as a match of %s', ret['hash_type'], ret['hsum'], found_str ) return ret if partial: log.debug( 'file.extract_hash: Returning the partially identified %s hash ' '\'%s\'', partial['hash_type'], partial['hsum'] ) return partial log.debug('file.extract_hash: No matches, returning None') return None
['def', 'extract_hash', '(', 'hash_fn', ',', 'hash_type', '=', "'sha256'", ',', 'file_name', '=', "''", ',', 'source', '=', "''", ',', 'source_hash_name', '=', 'None', ')', ':', 'hash_len', '=', 'HASHES', '.', 'get', '(', 'hash_type', ')', 'if', 'hash_len', 'is', 'None', ':', 'if', 'hash_type', ':', 'log', '.', 'warning', '(', "'file.extract_hash: Unsupported hash_type \\'%s\\', falling '", "'back to matching any supported hash_type'", ',', 'hash_type', ')', 'hash_type', '=', "''", 'hash_len_expr', '=', "'{0},{1}'", '.', 'format', '(', 'min', '(', 'HASHES_REVMAP', ')', ',', 'max', '(', 'HASHES_REVMAP', ')', ')', 'else', ':', 'hash_len_expr', '=', 'six', '.', 'text_type', '(', 'hash_len', ')', 'filename_separators', '=', 'string', '.', 'whitespace', '+', "r'\\/'", 'if', 'source_hash_name', ':', 'if', 'not', 'isinstance', '(', 'source_hash_name', ',', 'six', '.', 'string_types', ')', ':', 'source_hash_name', '=', 'six', '.', 'text_type', '(', 'source_hash_name', ')', 'source_hash_name_idx', '=', '(', 'len', '(', 'source_hash_name', ')', '+', '1', ')', '*', '-', '1', 'log', '.', 'debug', '(', "'file.extract_hash: Extracting %s hash for file matching '", "'source_hash_name \\'%s\\''", ',', "'any supported'", 'if', 'not', 'hash_type', 'else', 'hash_type', ',', 'source_hash_name', ')', 'if', 'file_name', ':', 'if', 'not', 'isinstance', '(', 'file_name', ',', 'six', '.', 'string_types', ')', ':', 'file_name', '=', 'six', '.', 'text_type', '(', 'file_name', ')', 'file_name_basename', '=', 'os', '.', 'path', '.', 'basename', '(', 'file_name', ')', 'file_name_idx', '=', '(', 'len', '(', 'file_name_basename', ')', '+', '1', ')', '*', '-', '1', 'if', 'source', ':', 'if', 'not', 'isinstance', '(', 'source', ',', 'six', '.', 'string_types', ')', ':', 'source', '=', 'six', '.', 'text_type', '(', 'source', ')', 'urlparsed_source', '=', '_urlparse', '(', 'source', ')', 'source_basename', '=', 'os', '.', 'path', '.', 'basename', '(', 'urlparsed_source', '.', 'path', 'or', 'urlparsed_source', '.', 'netloc', ')', 'source_idx', '=', '(', 'len', '(', 'source_basename', ')', '+', '1', ')', '*', '-', '1', 'basename_searches', '=', '[', 'x', 'for', 'x', 'in', '(', 'file_name', ',', 'source', ')', 'if', 'x', ']', 'if', 'basename_searches', ':', 'log', '.', 'debug', '(', "'file.extract_hash: %s %s hash for file matching%s: %s'", ',', "'If no source_hash_name match found, will extract'", 'if', 'source_hash_name', 'else', "'Extracting'", ',', "'any supported'", 'if', 'not', 'hash_type', 'else', 'hash_type', ',', "''", 'if', 'len', '(', 'basename_searches', ')', '==', '1', 'else', "' either of the following'", ',', "', '", '.', 'join', '(', 'basename_searches', ')', ')', 'partial', '=', 'None', 'found', '=', '{', '}', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'hash_fn', ',', "'r'", ')', 'as', 'fp_', ':', 'for', 'line', 'in', 'fp_', ':', 'line', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'line', '.', 'strip', '(', ')', ')', 'hash_re', '=', "r'(?i)(?<![a-z0-9])([a-f0-9]{'", '+', 'hash_len_expr', '+', "'})(?![a-z0-9])'", 'hash_match', '=', 're', '.', 'search', '(', 'hash_re', ',', 'line', ')', 'matched', '=', 'None', 'if', 'hash_match', ':', 'matched_hsum', '=', 'hash_match', '.', 'group', '(', '1', ')', 'if', 'matched_hsum', 'is', 'not', 'None', ':', 'matched_type', '=', 'HASHES_REVMAP', '.', 'get', '(', 'len', '(', 'matched_hsum', ')', ')', 'if', 'matched_type', 'is', 'None', ':', "# There was a match, but it's not of the correct length", '# to match one of the supported hash types.', 'matched', '=', 'None', 'else', ':', 'matched', '=', '{', "'hsum'", ':', 'matched_hsum', ',', "'hash_type'", ':', 'matched_type', '}', 'if', 'matched', 'is', 'None', ':', 'log', '.', 'debug', '(', "'file.extract_hash: In line \\'%s\\', no %shash found'", ',', 'line', ',', "''", 'if', 'not', 'hash_type', 'else', 'hash_type', '+', "' '", ')', 'continue', 'if', 'partial', 'is', 'None', ':', 'partial', '=', 'matched', 'def', '_add_to_matches', '(', 'found', ',', 'line', ',', 'match_type', ',', 'value', ',', 'matched', ')', ':', 'log', '.', 'debug', '(', "'file.extract_hash: Line \\'%s\\' matches %s \\'%s\\''", ',', 'line', ',', 'match_type', ',', 'value', ')', 'found', '.', 'setdefault', '(', 'match_type', ',', '[', ']', ')', '.', 'append', '(', 'matched', ')', 'hash_matched', '=', 'False', 'if', 'source_hash_name', ':', 'if', 'line', '.', 'endswith', '(', 'source_hash_name', ')', ':', '# Checking the character before where the basename', '# should start for either whitespace or a path', "# separator. We can't just rsplit on spaces/whitespace,", '# because the filename may contain spaces.', 'try', ':', 'if', 'line', '[', 'source_hash_name_idx', ']', 'in', 'string', '.', 'whitespace', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'source_hash_name'", ',', 'source_hash_name', ',', 'matched', ')', 'hash_matched', '=', 'True', 'except', 'IndexError', ':', 'pass', 'elif', 're', '.', 'match', '(', 're', '.', 'escape', '(', 'source_hash_name', ')', '+', "r'\\s+'", ',', 'line', ')', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'source_hash_name'", ',', 'source_hash_name', ',', 'matched', ')', 'hash_matched', '=', 'True', 'if', 'file_name', ':', 'if', 'line', '.', 'endswith', '(', 'file_name_basename', ')', ':', '# Checking the character before where the basename', '# should start for either whitespace or a path', "# separator. We can't just rsplit on spaces/whitespace,", '# because the filename may contain spaces.', 'try', ':', 'if', 'line', '[', 'file_name_idx', ']', 'in', 'filename_separators', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'file_name'", ',', 'file_name', ',', 'matched', ')', 'hash_matched', '=', 'True', 'except', 'IndexError', ':', 'pass', 'elif', 're', '.', 'match', '(', 're', '.', 'escape', '(', 'file_name', ')', '+', "r'\\s+'", ',', 'line', ')', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'file_name'", ',', 'file_name', ',', 'matched', ')', 'hash_matched', '=', 'True', 'if', 'source', ':', 'if', 'line', '.', 'endswith', '(', 'source_basename', ')', ':', "# Same as above, we can't just do an rsplit here.", 'try', ':', 'if', 'line', '[', 'source_idx', ']', 'in', 'filename_separators', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'source'", ',', 'source', ',', 'matched', ')', 'hash_matched', '=', 'True', 'except', 'IndexError', ':', 'pass', 'elif', 're', '.', 'match', '(', 're', '.', 'escape', '(', 'source', ')', '+', "r'\\s+'", ',', 'line', ')', ':', '_add_to_matches', '(', 'found', ',', 'line', ',', "'source'", ',', 'source', ',', 'matched', ')', 'hash_matched', '=', 'True', 'if', 'not', 'hash_matched', ':', 'log', '.', 'debug', '(', "'file.extract_hash: Line \\'%s\\' contains %s hash '", "'\\'%s\\', but line did not meet the search criteria'", ',', 'line', ',', 'matched', '[', "'hash_type'", ']', ',', 'matched', '[', "'hsum'", ']', ')', 'for', 'found_type', ',', 'found_str', 'in', '(', '(', "'source_hash_name'", ',', 'source_hash_name', ')', ',', '(', "'file_name'", ',', 'file_name', ')', ',', '(', "'source'", ',', 'source', ')', ')', ':', 'if', 'found_type', 'in', 'found', ':', 'if', 'len', '(', 'found', '[', 'found_type', ']', ')', '>', '1', ':', 'log', '.', 'debug', '(', "'file.extract_hash: Multiple %s matches for %s: %s'", ',', 'found_type', ',', 'found_str', ',', "', '", '.', 'join', '(', '[', "'{0} ({1})'", '.', 'format', '(', 'x', '[', "'hsum'", ']', ',', 'x', '[', "'hash_type'", ']', ')', 'for', 'x', 'in', 'found', '[', 'found_type', ']', ']', ')', ')', 'ret', '=', 'found', '[', 'found_type', ']', '[', '0', ']', 'log', '.', 'debug', '(', "'file.extract_hash: Returning %s hash \\'%s\\' as a match of %s'", ',', 'ret', '[', "'hash_type'", ']', ',', 'ret', '[', "'hsum'", ']', ',', 'found_str', ')', 'return', 'ret', 'if', 'partial', ':', 'log', '.', 'debug', '(', "'file.extract_hash: Returning the partially identified %s hash '", "'\\'%s\\''", ',', 'partial', '[', "'hash_type'", ']', ',', 'partial', '[', "'hsum'", ']', ')', 'return', 'partial', 'log', '.', 'debug', '(', "'file.extract_hash: No matches, returning None'", ')', 'return', 'None']
.. versionchanged:: 2016.3.5 Prior to this version, only the ``file_name`` argument was considered for filename matches in the hash file. This would be problematic for cases in which the user was relying on a remote checksum file that they do not control, and they wished to use a different name for that file on the minion from the filename on the remote server (and in the checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the remote file was at ``https://mydomain.tld/different_name.tar.gz``. The :py:func:`file.managed <salt.states.file.managed>` state now also passes this function the source URI as well as the ``source_hash_name`` (if specified). In cases where ``source_hash_name`` is specified, it takes precedence over both the ``file_name`` and ``source``. When it is not specified, ``file_name`` takes precedence over ``source``. This allows for better capability for matching hashes. .. versionchanged:: 2016.11.0 File name and source URI matches are no longer disregarded when ``source_hash_name`` is specified. They will be used as fallback matches if there is no match to the ``source_hash_name`` value. This routine is called from the :mod:`file.managed <salt.states.file.managed>` state to pull a hash from a remote file. Regular expressions are used line by line on the ``source_hash`` file, to find a potential candidate of the indicated hash type. This avoids many problems of arbitrary file layout rules. It specifically permits pulling hash codes from debian ``*.dsc`` files. If no exact match of a hash and filename are found, then the first hash found (if any) will be returned. If no hashes at all are found, then ``None`` will be returned. For example: .. code-block:: yaml openerp_7.0-latest-1.tar.gz: file.managed: - name: /tmp/openerp_7.0-20121227-075624-1_all.deb - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc CLI Example: .. code-block:: bash salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
['..', 'versionchanged', '::', '2016', '.', '3', '.', '5', 'Prior', 'to', 'this', 'version', 'only', 'the', 'file_name', 'argument', 'was', 'considered', 'for', 'filename', 'matches', 'in', 'the', 'hash', 'file', '.', 'This', 'would', 'be', 'problematic', 'for', 'cases', 'in', 'which', 'the', 'user', 'was', 'relying', 'on', 'a', 'remote', 'checksum', 'file', 'that', 'they', 'do', 'not', 'control', 'and', 'they', 'wished', 'to', 'use', 'a', 'different', 'name', 'for', 'that', 'file', 'on', 'the', 'minion', 'from', 'the', 'filename', 'on', 'the', 'remote', 'server', '(', 'and', 'in', 'the', 'checksum', 'file', ')', '.', 'For', 'example', 'managing', '/', 'tmp', '/', 'myfile', '.', 'tar', '.', 'gz', 'when', 'the', 'remote', 'file', 'was', 'at', 'https', ':', '//', 'mydomain', '.', 'tld', '/', 'different_name', '.', 'tar', '.', 'gz', '.', 'The', ':', 'py', ':', 'func', ':', 'file', '.', 'managed', '<salt', '.', 'states', '.', 'file', '.', 'managed', '>', 'state', 'now', 'also', 'passes', 'this', 'function', 'the', 'source', 'URI', 'as', 'well', 'as', 'the', 'source_hash_name', '(', 'if', 'specified', ')', '.', 'In', 'cases', 'where', 'source_hash_name', 'is', 'specified', 'it', 'takes', 'precedence', 'over', 'both', 'the', 'file_name', 'and', 'source', '.', 'When', 'it', 'is', 'not', 'specified', 'file_name', 'takes', 'precedence', 'over', 'source', '.', 'This', 'allows', 'for', 'better', 'capability', 'for', 'matching', 'hashes', '.', '..', 'versionchanged', '::', '2016', '.', '11', '.', '0', 'File', 'name', 'and', 'source', 'URI', 'matches', 'are', 'no', 'longer', 'disregarded', 'when', 'source_hash_name', 'is', 'specified', '.', 'They', 'will', 'be', 'used', 'as', 'fallback', 'matches', 'if', 'there', 'is', 'no', 'match', 'to', 'the', 'source_hash_name', 'value', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L4336-L4563
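extract_hash expects a Salt minion environment, so the snippet below is not Salt itself; it is a self-contained sketch of the core matching idea (scan the checksum file line by line for a hex digest of the right length, prefer a line ending in the target filename, fall back to the first digest seen), using made-up digests:

import re

HASH_LEN = {'md5': 32, 'sha256': 64, 'sha512': 128}

def extract_hash_sketch(lines, hash_type, file_name):
    pattern = re.compile(r'(?i)(?<![a-f0-9])([a-f0-9]{%d})(?![a-f0-9])'
                         % HASH_LEN[hash_type])
    partial = None
    for line in lines:
        match = pattern.search(line)
        if not match:
            continue
        if partial is None:
            partial = match.group(1)   # first digest found: fallback result
        if line.rstrip().endswith(file_name):
            return match.group(1)      # filename match takes precedence
    return partial

sums = ['%s  different_name.tar.gz' % ('a' * 64),
        '%s  myfile.tar.gz' % ('b' * 64)]
print(extract_hash_sketch(sums, 'sha256', 'myfile.tar.gz'))  # prints the 64 b's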
5,699
klahnakoski/pyLibrary
jx_python/namespace/rename.py
Rename.convert
def convert(self, expr): """ EXPAND INSTANCES OF name TO value """ if expr is True or expr == None or expr is False: return expr elif is_number(expr): return expr elif expr == ".": return "." elif is_variable_name(expr): return coalesce(self.dimensions[expr], expr) elif is_text(expr): Log.error("{{name|quote}} is not a valid variable name", name=expr) elif isinstance(expr, Date): return expr elif is_op(expr, QueryOp): return self._convert_query(expr) elif is_data(expr): if expr["from"]: return self._convert_query(expr) elif len(expr) >= 2: #ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION return wrap({name: self.convert(value) for name, value in expr.leaves()}) else: # ASSUME SINGLE-CLAUSE EXPRESSION k, v = expr.items()[0] return converter_map.get(k, self._convert_bop)(self, k, v) elif is_many(expr): return wrap([self.convert(value) for value in expr]) else: return expr
python
def convert(self, expr): """ EXPAND INSTANCES OF name TO value """ if expr is True or expr == None or expr is False: return expr elif is_number(expr): return expr elif expr == ".": return "." elif is_variable_name(expr): return coalesce(self.dimensions[expr], expr) elif is_text(expr): Log.error("{{name|quote}} is not a valid variable name", name=expr) elif isinstance(expr, Date): return expr elif is_op(expr, QueryOp): return self._convert_query(expr) elif is_data(expr): if expr["from"]: return self._convert_query(expr) elif len(expr) >= 2: #ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION return wrap({name: self.convert(value) for name, value in expr.leaves()}) else: # ASSUME SINGLE-CLAUSE EXPRESSION k, v = expr.items()[0] return converter_map.get(k, self._convert_bop)(self, k, v) elif is_many(expr): return wrap([self.convert(value) for value in expr]) else: return expr
['def', 'convert', '(', 'self', ',', 'expr', ')', ':', 'if', 'expr', 'is', 'True', 'or', 'expr', '==', 'None', 'or', 'expr', 'is', 'False', ':', 'return', 'expr', 'elif', 'is_number', '(', 'expr', ')', ':', 'return', 'expr', 'elif', 'expr', '==', '"."', ':', 'return', '"."', 'elif', 'is_variable_name', '(', 'expr', ')', ':', 'return', 'coalesce', '(', 'self', '.', 'dimensions', '[', 'expr', ']', ',', 'expr', ')', 'elif', 'is_text', '(', 'expr', ')', ':', 'Log', '.', 'error', '(', '"{{name|quote}} is not a valid variable name"', ',', 'name', '=', 'expr', ')', 'elif', 'isinstance', '(', 'expr', ',', 'Date', ')', ':', 'return', 'expr', 'elif', 'is_op', '(', 'expr', ',', 'QueryOp', ')', ':', 'return', 'self', '.', '_convert_query', '(', 'expr', ')', 'elif', 'is_data', '(', 'expr', ')', ':', 'if', 'expr', '[', '"from"', ']', ':', 'return', 'self', '.', '_convert_query', '(', 'expr', ')', 'elif', 'len', '(', 'expr', ')', '>=', '2', ':', '#ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION', 'return', 'wrap', '(', '{', 'name', ':', 'self', '.', 'convert', '(', 'value', ')', 'for', 'name', ',', 'value', 'in', 'expr', '.', 'leaves', '(', ')', '}', ')', 'else', ':', '# ASSUME SINGLE-CLAUSE EXPRESSION', 'k', ',', 'v', '=', 'expr', '.', 'items', '(', ')', '[', '0', ']', 'return', 'converter_map', '.', 'get', '(', 'k', ',', 'self', '.', '_convert_bop', ')', '(', 'self', ',', 'k', ',', 'v', ')', 'elif', 'is_many', '(', 'expr', ')', ':', 'return', 'wrap', '(', '[', 'self', '.', 'convert', '(', 'value', ')', 'for', 'value', 'in', 'expr', ']', ')', 'else', ':', 'return', 'expr']
EXPAND INSTANCES OF name TO value
['EXPAND', 'INSTANCES', 'OF', 'name', 'TO', 'value']
train
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/namespace/rename.py#L39-L70
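Rename.convert rewrites dimension names inside JSON-style query expressions; the sketch below is not pyLibrary itself, just a self-contained version of the same recursive walk over a hypothetical dimension mapping:

RENAMES = {'user': 'payload.author.id'}  # hypothetical dimension -> variable

def convert(expr):
    if isinstance(expr, str):
        return RENAMES.get(expr, expr)   # variable name: expand if known
    if isinstance(expr, dict):
        return {op: convert(val) for op, val in expr.items()}
    if isinstance(expr, list):
        return [convert(val) for val in expr]
    return expr                          # numbers, bools, None pass through

print(convert({'eq': ['user', 'other.field']}))
# -> {'eq': ['payload.author.id', 'other.field']}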