column                       type            range
Unnamed: 0                   int64           0 .. 10k
repository_name              stringlengths   7 .. 54
func_path_in_repository      stringlengths   5 .. 223
func_name                    stringlengths   1 .. 134
whole_func_string            stringlengths   100 .. 30.3k
language                     stringclasses   1 value
func_code_string             stringlengths   100 .. 30.3k
func_code_tokens             stringlengths   138 .. 33.2k
func_documentation_string    stringlengths   1 .. 15k
func_documentation_tokens    stringlengths   5 .. 5.14k
split_name                   stringclasses   1 value
func_code_url                stringlengths   91 .. 315
3,700
saltstack/salt
salt/modules/boto_elbv2.py
create_target_group
def create_target_group(name, protocol, port, vpc_id, region=None, key=None,
                        keyid=None, profile=None, health_check_protocol='HTTP',
                        health_check_port='traffic-port', health_check_path='/',
                        health_check_interval_seconds=30,
                        health_check_timeout_seconds=5,
                        healthy_threshold_count=5,
                        unhealthy_threshold_count=2):
    '''
    Create target group if not present.

    name
        (string) - The name of the target group.
    protocol
        (string) - The protocol to use for routing traffic to the targets
    port
        (int) - The port on which the targets receive traffic. This port is used unless
        you specify a port override when registering the traffic.
    vpc_id
        (string) - The identifier of the virtual private cloud (VPC).
    health_check_protocol
        (string) - The protocol the load balancer uses when performing health check on
        targets. The default is the HTTP protocol.
    health_check_port
        (string) - The port the load balancer uses when performing health checks on
        targets. The default is 'traffic-port', which indicates the port on which each
        target receives traffic from the load balancer.
    health_check_path
        (string) - The ping path that is the destination on the targets for health
        checks. The default is /.
    health_check_interval_seconds
        (integer) - The approximate amount of time, in seconds, between health checks
        of an individual target. The default is 30 seconds.
    health_check_timeout_seconds
        (integer) - The amount of time, in seconds, during which no response from a
        target means a failed health check. The default is 5 seconds.
    healthy_threshold_count
        (integer) - The number of consecutive health checks successes required before
        considering an unhealthy target healthy. The default is 5.
    unhealthy_threshold_count
        (integer) - The number of consecutive health check failures required before
        considering a target unhealthy. The default is 2.

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if target_group_exists(name, region, key, keyid, profile):
        return True

    try:
        alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
                                       VpcId=vpc_id,
                                       HealthCheckProtocol=health_check_protocol,
                                       HealthCheckPort=health_check_port,
                                       HealthCheckPath=health_check_path,
                                       HealthCheckIntervalSeconds=health_check_interval_seconds,
                                       HealthCheckTimeoutSeconds=health_check_timeout_seconds,
                                       HealthyThresholdCount=healthy_threshold_count,
                                       UnhealthyThresholdCount=unhealthy_threshold_count)
        if alb:
            log.info('Created ALB %s: %s', name, alb['TargetGroups'][0]['TargetGroupArn'])
            return True
        else:
            log.error('Failed to create ALB %s', name)
            return False
    except ClientError as error:
        log.error(
            'Failed to create ALB %s: %s: %s',
            name, error.response['Error']['Code'],
            error.response['Error']['Message'],
            exc_info_on_loglevel=logging.DEBUG
        )
python
['def', 'create_target_group', '(', 'name', ',', 'protocol', ',', 'port', ',', 'vpc_id', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ',', 'health_check_protocol', '=', "'HTTP'", ',', 'health_check_port', '=', "'traffic-port'", ',', 'health_check_path', '=', "'/'", ',', 'health_check_interval_seconds', '=', '30', ',', 'health_check_timeout_seconds', '=', '5', ',', 'healthy_threshold_count', '=', '5', ',', 'unhealthy_threshold_count', '=', '2', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'if', 'target_group_exists', '(', 'name', ',', 'region', ',', 'key', ',', 'keyid', ',', 'profile', ')', ':', 'return', 'True', 'try', ':', 'alb', '=', 'conn', '.', 'create_target_group', '(', 'Name', '=', 'name', ',', 'Protocol', '=', 'protocol', ',', 'Port', '=', 'port', ',', 'VpcId', '=', 'vpc_id', ',', 'HealthCheckProtocol', '=', 'health_check_protocol', ',', 'HealthCheckPort', '=', 'health_check_port', ',', 'HealthCheckPath', '=', 'health_check_path', ',', 'HealthCheckIntervalSeconds', '=', 'health_check_interval_seconds', ',', 'HealthCheckTimeoutSeconds', '=', 'health_check_timeout_seconds', ',', 'HealthyThresholdCount', '=', 'healthy_threshold_count', ',', 'UnhealthyThresholdCount', '=', 'unhealthy_threshold_count', ')', 'if', 'alb', ':', 'log', '.', 'info', '(', "'Created ALB %s: %s'", ',', 'name', ',', 'alb', '[', "'TargetGroups'", ']', '[', '0', ']', '[', "'TargetGroupArn'", ']', ')', 'return', 'True', 'else', ':', 'log', '.', 'error', '(', "'Failed to create ALB %s'", ',', 'name', ')', 'return', 'False', 'except', 'ClientError', 'as', 'error', ':', 'log', '.', 'error', '(', "'Failed to create ALB %s: %s: %s'", ',', 'name', ',', 'error', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', ',', 'error', '.', 'response', '[', "'Error'", ']', '[', "'Message'", ']', ',', 'exc_info_on_loglevel', '=', 'logging', '.', 'DEBUG', ')']
Create target group if not present. name (string) - The name of the target group. protocol (string) - The protocol to use for routing traffic to the targets port (int) - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the traffic. vpc_id (string) - The identifier of the virtual private cloud (VPC). health_check_protocol (string) - The protocol the load balancer uses when performing health check on targets. The default is the HTTP protocol. health_check_port (string) - The port the load balancer uses when performing health checks on targets. The default is 'traffic-port', which indicates the port on which each target receives traffic from the load balancer. health_check_path (string) - The ping path that is the destination on the targets for health checks. The default is /. health_check_interval_seconds (integer) - The approximate amount of time, in seconds, between health checks of an individual target. The default is 30 seconds. health_check_timeout_seconds (integer) - The amount of time, in seconds, during which no response from a target means a failed health check. The default is 5 seconds. healthy_threshold_count (integer) - The number of consecutive health checks successes required before considering an unhealthy target healthy. The default is 5. unhealthy_threshold_count (integer) - The number of consecutive health check failures required before considering a target unhealthy. The default is 2. returns (bool) - True on success, False on failure. CLI example: .. code-block:: bash salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
['Create', 'target', 'group', 'if', 'not', 'present', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elbv2.py#L79-L163
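The Salt function above is a thin wrapper over a boto3 ELBv2 client call. Below is a minimal, hedged sketch of the equivalent direct call; it assumes boto3 is installed and AWS credentials and a region are already configured in the environment, and the helper name is invented for illustration.

import boto3
from botocore.exceptions import ClientError

def create_target_group_arn(name, vpc_id, protocol='HTTP', port=80):
    """Create a target group and return its ARN, mirroring the wrapper above."""
    elbv2 = boto3.client('elbv2')
    try:
        resp = elbv2.create_target_group(Name=name, Protocol=protocol,
                                         Port=port, VpcId=vpc_id)
        return resp['TargetGroups'][0]['TargetGroupArn']
    except ClientError as err:
        # same error fields the Salt module logs
        print('create_target_group failed:', err.response['Error']['Code'])
        return None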
3,701
ihmeuw/vivarium
src/vivarium/examples/disease_model/mortality.py
Mortality.base_mortality_rate
def base_mortality_rate(self, index: pd.Index) -> pd.Series:
    """Computes the base mortality rate for every individual.

    Parameters
    ----------
    index :
        A representation of the simulants to compute the base mortality rate for.

    Returns
    -------
    The base mortality rate for all simulants in the index.
    """
    return pd.Series(self.config.mortality_rate, index=index)
python
['def', 'base_mortality_rate', '(', 'self', ',', 'index', ':', 'pd', '.', 'Index', ')', '->', 'pd', '.', 'Series', ':', 'return', 'pd', '.', 'Series', '(', 'self', '.', 'config', '.', 'mortality_rate', ',', 'index', '=', 'index', ')']
Computes the base mortality rate for every individual. Parameters ---------- index : A representation of the simulants to compute the base mortality rate for. Returns ------- The base mortality rate for all simulants in the index.
['Computes', 'the', 'base', 'mortality', 'rate', 'for', 'every', 'individual', '.']
train
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/examples/disease_model/mortality.py#L45-L58
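The method simply broadcasts one configured scalar rate across an index. A self-contained pandas illustration of that broadcast (the index and rate value here are made up):

import pandas as pd

simulants = pd.Index(range(5), name="simulant_id")
mortality_rate = 0.01  # stand-in for self.config.mortality_rate

# one scalar broadcast over an index, exactly as the method does
base_rate = pd.Series(mortality_rate, index=simulants)
print(base_rate)   # five rows, each 0.01, indexed by simulant_id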
3,702
pyblish/pyblish-qml
pyblish_qml/util.py
schedule
def schedule(func, time, channel="default"):
    """Run `func` at a later `time` in a dedicated `channel`

    Given an arbitrary function, call this function after a given
    timeout. It will ensure that only one "job" is running within
    the given channel at any one time and cancel any currently
    running job if a new job is submitted before the timeout.

    """
    try:
        _jobs[channel].stop()
    except (AttributeError, KeyError):
        pass

    timer = QtCore.QTimer()
    timer.setSingleShot(True)
    timer.timeout.connect(func)
    timer.start(time)

    _jobs[channel] = timer
python
['def', 'schedule', '(', 'func', ',', 'time', ',', 'channel', '=', '"default"', ')', ':', 'try', ':', '_jobs', '[', 'channel', ']', '.', 'stop', '(', ')', 'except', '(', 'AttributeError', ',', 'KeyError', ')', ':', 'pass', 'timer', '=', 'QtCore', '.', 'QTimer', '(', ')', 'timer', '.', 'setSingleShot', '(', 'True', ')', 'timer', '.', 'timeout', '.', 'connect', '(', 'func', ')', 'timer', '.', 'start', '(', 'time', ')', '_jobs', '[', 'channel', ']', '=', 'timer']
Run `func` at a later `time` in a dedicated `channel` Given an arbitrary function, call this function after a given timeout. It will ensure that only one "job" is running within the given channel at any one time and cancel any currently running job if a new job is submitted before the timeout.
['Run', 'func', 'at', 'a', 'later', 'time', 'in', 'a', 'dedicated', 'channel']
train
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/util.py#L176-L196
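The docstring describes per-channel debouncing: a new submission cancels the pending one. The QTimer version needs a running Qt event loop, so here is a hedged pure-Python analog of the same idea built on threading.Timer; it is not the pyblish implementation, just the pattern.

import threading

_jobs = {}

def schedule(func, delay_s, channel="default"):
    """Debounce: only the most recent job per channel survives the delay."""
    pending = _jobs.get(channel)
    if pending is not None:
        pending.cancel()                 # cancel the currently waiting job
    timer = threading.Timer(delay_s, func)
    timer.start()
    _jobs[channel] = timer

schedule(lambda: print("first"), 0.5)    # cancelled by the next call
schedule(lambda: print("second"), 0.5)   # only this one fires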
3,703
ska-sa/kittens
Kittens/pixmaps.py
QPixmapWrapper.icon
def icon(self):
    """Get QIcon from wrapper"""
    if self._icon is None:
        self._icon = QIcon(self.pm())
    return self._icon
python
['def', 'icon', '(', 'self', ')', ':', 'if', 'self', '.', '_icon', 'is', 'None', ':', 'self', '.', '_icon', '=', 'QIcon', '(', 'self', '.', 'pm', '(', ')', ')', 'return', 'self', '.', '_icon']
Get QIcon from wrapper
['Get', 'QIcon', 'from', 'wrapper']
train
https://github.com/ska-sa/kittens/blob/92058e065ddffa5d00a44749145a6f917e0f31dc/Kittens/pixmaps.py#L100-L104
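This is the usual lazy-caching accessor: build the expensive object on first use, reuse it afterwards. A small framework-free sketch of the same pattern (the class and names below are invented for illustration, not the Kittens API):

class PixmapWrapper:
    def __init__(self, path):
        self._path = path
        self._icon = None

    def _load(self):
        print("expensive load of", self._path)   # stands in for QIcon(self.pm())
        return object()

    def icon(self):
        if self._icon is None:          # first call pays the cost
            self._icon = self._load()
        return self._icon               # later calls reuse the cached object

w = PixmapWrapper("open.png")
assert w.icon() is w.icon()             # loaded exactly once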
3,704
JnyJny/Geometry
Geometry/line.py
Line.radiansBetween
def radiansBetween(self, other):
    '''
    :param: other - Line subclass
    :return: float

    Returns the angle measured between two lines in radians
    with a range of [0, 2 * math.pi].
    '''
    # a dot b = |a||b| * cos(theta)
    # a dot b / |a||b| = cos(theta)
    # cos-1(a dot b / |a||b|) = theta

    # translate each line so that it passes through the origin and
    # produce a new point whose distance (magnitude) from the
    # origin is 1.
    #
    a = Point.unit(self.A, self.B)
    b = Point.unit(other.A, other.B)

    # in a perfect world, after unit: |A| = |B| = 1
    # which is a noop when dividing the dot product of A,B
    # but sometimes the lengths are different.
    #
    # let's just assume things are perfect and the lengths equal 1.

    return math.acos(a.dot(b))
python
['def', 'radiansBetween', '(', 'self', ',', 'other', ')', ':', '# a dot b = |a||b| * cos(theta)', '# a dot b / |a||b| = cos(theta)', '# cos-1(a dot b / |a||b|) = theta', '# translate each line so that it passes through the origin and', '# produce a new point whose distance (magnitude) from the', '# origin is 1.', '#', 'a', '=', 'Point', '.', 'unit', '(', 'self', '.', 'A', ',', 'self', '.', 'B', ')', 'b', '=', 'Point', '.', 'unit', '(', 'other', '.', 'A', ',', 'other', '.', 'B', ')', '# in a perfect world, after unit: |A| = |B| = 1', '# which is a noop when dividing the dot product of A,B', '# but sometimes the lengths are different.', '#', "# let's just assume things are perfect and the lengths equal 1.", 'return', 'math', '.', 'acos', '(', 'a', '.', 'dot', '(', 'b', ')', ')']
:param: other - Line subclass :return: float Returns the angle measured between two lines in radians with a range of [0, 2 * math.pi].
[':', 'param', ':', 'other', '-', 'Line', 'subclass', ':', 'return', ':', 'float']
train
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/line.py#L403-L430
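The comments admit that floating-point length errors can push the dot product slightly outside [-1, 1], which makes math.acos raise ValueError. Here is a standalone sketch of the same computation on plain 2D tuples with the usual clamp added; the clamp is my addition, not part of the original code.

import math

def unit(p, q):
    """Unit direction vector from point p to point q."""
    dx, dy = q[0] - p[0], q[1] - p[1]
    length = math.hypot(dx, dy)
    return (dx / length, dy / length)

def radians_between(line1, line2):
    a = unit(*line1)
    b = unit(*line2)
    dot = a[0] * b[0] + a[1] * b[1]
    # clamp to protect acos from tiny floating-point overshoot
    return math.acos(max(-1.0, min(1.0, dot)))

print(radians_between(((0, 0), (1, 0)), ((0, 0), (0, 1))))  # ~pi/2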
3,705
log2timeline/plaso
plaso/formatters/imessage.py
IMessageFormatter.GetMessages
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
      formatter_mediator (FormatterMediator): mediates the interactions between
          formatters and other components, such as storage and Windows EventLog
          resources.
      event (EventObject): event.

    Returns:
      tuple(str, str): formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the formatter.
    """
    if self.DATA_TYPE != event.data_type:
      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
          event.data_type))

    event_values = event.CopyToDict()

    read_receipt = event_values.get('read_receipt', None)
    if read_receipt is not None:
      event_values['read_receipt'] = (
          self._READ_RECEIPT.get(read_receipt, 'UNKNOWN'))

    message_type = event_values.get('message_type', None)
    if message_type is not None:
      event_values['message_type'] = (
          self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))

    return self._ConditionalFormatMessages(event_values)
python
['def', 'GetMessages', '(', 'self', ',', 'formatter_mediator', ',', 'event', ')', ':', 'if', 'self', '.', 'DATA_TYPE', '!=', 'event', '.', 'data_type', ':', 'raise', 'errors', '.', 'WrongFormatter', '(', "'Unsupported data type: {0:s}.'", '.', 'format', '(', 'event', '.', 'data_type', ')', ')', 'event_values', '=', 'event', '.', 'CopyToDict', '(', ')', 'read_receipt', '=', 'event_values', '.', 'get', '(', "'read_receipt'", ',', 'None', ')', 'if', 'read_receipt', 'is', 'not', 'None', ':', 'event_values', '[', "'read_receipt'", ']', '=', '(', 'self', '.', '_READ_RECEIPT', '.', 'get', '(', 'read_receipt', ',', "'UNKNOWN'", ')', ')', 'message_type', '=', 'event_values', '.', 'get', '(', "'message_type'", ',', 'None', ')', 'if', 'message_type', 'is', 'not', 'None', ':', 'event_values', '[', "'message_type'", ']', '=', '(', 'self', '.', '_MESSAGE_TYPE', '.', 'get', '(', 'message_type', ',', "'UNKNOWN'", ')', ')', 'return', 'self', '.', '_ConditionalFormatMessages', '(', 'event_values', ')']
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
['Determines', 'the', 'formatted', 'message', 'strings', 'for', 'an', 'event', 'object', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/imessage.py#L40-L71
3,706
briney/abutils
abutils/core/pair.py
Pair.fasta
def fasta(self, key='vdj_nt', append_chain=True):
    '''
    Returns the sequence pair as a fasta string. If the Pair object contains
    both heavy and light chain sequences, both will be returned as a single string.

    By default, the fasta string contains the 'vdj_nt' sequence for each chain.
    To change, use the <key> option to select an alternate sequence.

    By default, the chain (heavy or light) will be appended to the sequence name:

    >MySequence_heavy

    To just use the pair name (which will result in duplicate sequence names for
    Pair objects with both heavy and light chains), set <append_chain> to False.
    '''
    fastas = []
    for s, chain in [(self.heavy, 'heavy'), (self.light, 'light')]:
        if s is not None:
            c = '_{}'.format(chain) if append_chain else ''
            fastas.append('>{}{}\n{}'.format(s['seq_id'], c, s[key]))
    return '\n'.join(fastas)
python
['def', 'fasta', '(', 'self', ',', 'key', '=', "'vdj_nt'", ',', 'append_chain', '=', 'True', ')', ':', 'fastas', '=', '[', ']', 'for', 's', ',', 'chain', 'in', '[', '(', 'self', '.', 'heavy', ',', "'heavy'", ')', ',', '(', 'self', '.', 'light', ',', "'light'", ')', ']', ':', 'if', 's', 'is', 'not', 'None', ':', 'c', '=', "'_{}'", '.', 'format', '(', 'chain', ')', 'if', 'append_chain', 'else', "''", 'fastas', '.', 'append', '(', "'>{}{}\\n{}'", '.', 'format', '(', 's', '[', "'seq_id'", ']', ',', 'c', ',', 's', '[', 'key', ']', ')', ')', 'return', "'\\n'", '.', 'join', '(', 'fastas', ')']
Returns the sequence pair as a fasta string. If the Pair object contains both heavy and light chain sequences, both will be returned as a single string. By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change, use the <key> option to select an alternate sequence. By default, the chain (heavy or light) will be appended to the sequence name: >MySequence_heavy To just use the pair name (which will result in duplicate sequence names for Pair objects with both heavy and light chains), set <append_chain> to False.
['Returns', 'the', 'sequence', 'pair', 'as', 'a', 'fasta', 'string', '.', 'If', 'the', 'Pair', 'object', 'contains', 'both', 'heavy', 'and', 'light', 'chain', 'sequences', 'both', 'will', 'be', 'returned', 'as', 'a', 'single', 'string', '.']
train
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/core/pair.py#L305-L325
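A standalone sketch of the same FASTA assembly on plain dictionaries, assuming records shaped like the heavy/light entries the method reads ('seq_id' plus a 'vdj_nt' field); the function name and sample data are invented for illustration.

def pair_fasta(heavy, light, key='vdj_nt', append_chain=True):
    fastas = []
    for s, chain in [(heavy, 'heavy'), (light, 'light')]:
        if s is not None:
            c = '_{}'.format(chain) if append_chain else ''
            fastas.append('>{}{}\n{}'.format(s['seq_id'], c, s[key]))
    return '\n'.join(fastas)

heavy = {'seq_id': 'MySequence', 'vdj_nt': 'ATGCAA'}
print(pair_fasta(heavy, None))
# >MySequence_heavy
# ATGCAA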
3,707
zimeon/iiif
iiif/info.py
IIIFInfo.level
def level(self, value):
    """Build profile URI from level.

    Level should be an integer 0,1,2
    """
    self.compliance = self.compliance_prefix + \
        ("%d" % value) + self.compliance_suffix
python
['def', 'level', '(', 'self', ',', 'value', ')', ':', 'self', '.', 'compliance', '=', 'self', '.', 'compliance_prefix', '+', '(', '"%d"', '%', 'value', ')', '+', 'self', '.', 'compliance_suffix']
Build profile URI from level. Level should be an integer 0,1,2
['Build', 'profile', 'URI', 'from', 'level', '.']
train
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L313-L319
3,708
Bogdanp/anom-py
anom/model.py
Property.prepare_to_store
def prepare_to_store(self, entity, value):
    """Prepare `value` for storage.  Called by the Model for each
    Property, value pair it contains before handing the data off
    to an adapter.

    Parameters:
      entity(Model): The entity to which the value belongs.
      value: The value being stored.

    Raises:
      RuntimeError: If this property is required but no value was
        assigned to it.

    Returns:
      The value that should be persisted.
    """
    if value is None and not self.optional:
        raise RuntimeError(f"Property {self.name_on_model} requires a value.")
    return value
python
['def', 'prepare_to_store', '(', 'self', ',', 'entity', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', 'and', 'not', 'self', '.', 'optional', ':', 'raise', 'RuntimeError', '(', 'f"Property {self.name_on_model} requires a value."', ')', 'return', 'value']
Prepare `value` for storage. Called by the Model for each Property, value pair it contains before handing the data off to an adapter. Parameters: entity(Model): The entity to which the value belongs. value: The value being stored. Raises: RuntimeError: If this property is required but no value was assigned to it. Returns: The value that should be persisted.
['Prepare', 'value', 'for', 'storage', '.', 'Called', 'by', 'the', 'Model', 'for', 'each', 'Property', 'value', 'pair', 'it', 'contains', 'before', 'handing', 'the', 'data', 'off', 'to', 'an', 'adapter', '.']
train
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L268-L286
3,709
Telefonica/toolium
toolium/behave/env_utils.py
Logger.error
def error(self, exc):
    """
    log an error message:
    :param exc: exception message
    """
    msg = 'trying to execute a step in the environment: \n' \
          ' - Exception: %s' % exc
    if self.logger is not None:
        self.logger.error(msg)
    self.by_console(' ERROR - %s' % msg)
python
['def', 'error', '(', 'self', ',', 'exc', ')', ':', 'msg', '=', "'trying to execute a step in the environment: \\n'", "' - Exception: %s'", '%', 'exc', 'if', 'self', '.', 'logger', 'is', 'not', 'None', ':', 'self', '.', 'logger', '.', 'error', '(', 'msg', ')', 'self', '.', 'by_console', '(', "' ERROR - %s'", '%', 'msg', ')']
log an error message: :param exc: exception message
['log', 'an', 'error', 'message', ':', ':', 'param', 'exc', ':', 'exception', 'message']
train
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/behave/env_utils.py#L58-L67
3,710
rameshg87/pyremotevbox
pyremotevbox/ZSI/twisted/reverse.py
CallbackHandler.processRequest
def processRequest(cls, ps, **kw):
    """invokes callback that should return a (request,response) tuple.
    representing the SOAP request and response respectively.

    ps -- ParsedSoap instance representing HTTP Body.
    request -- twisted.web.server.Request
    """
    resource = kw['resource']
    request = kw['request']
    method = getattr(resource, 'soap_%s' %
                     _get_element_nsuri_name(ps.body_root)[-1])

    try:
        req,rsp = method(ps, request=request)
    except Exception, ex:
        raise

    return rsp
python
['def', 'processRequest', '(', 'cls', ',', 'ps', ',', '*', '*', 'kw', ')', ':', 'resource', '=', 'kw', '[', "'resource'", ']', 'request', '=', 'kw', '[', "'request'", ']', 'method', '=', 'getattr', '(', 'resource', ',', "'soap_%s'", '%', '_get_element_nsuri_name', '(', 'ps', '.', 'body_root', ')', '[', '-', '1', ']', ')', 'try', ':', 'req', ',', 'rsp', '=', 'method', '(', 'ps', ',', 'request', '=', 'request', ')', 'except', 'Exception', ',', 'ex', ':', 'raise', 'return', 'rsp']
invokes callback that should return a (request,response) tuple. representing the SOAP request and response respectively. ps -- ParsedSoap instance representing HTTP Body. request -- twisted.web.server.Request
['invokes', 'callback', 'that', 'should', 'return', 'a', '(', 'request', 'response', ')', 'tuple', '.', 'representing', 'the', 'SOAP', 'request', 'and', 'response', 'respectively', '.', 'ps', '--', 'ParsedSoap', 'instance', 'representing', 'HTTP', 'Body', '.', 'request', '--', 'twisted', '.', 'web', '.', 'server', '.', 'Request']
train
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/twisted/reverse.py#L37-L53
3,711
kragniz/python-etcd3
etcd3/client.py
Etcd3Client.transaction
def transaction(self, compare, success=None, failure=None):
    """
    Perform a transaction.

    Example usage:

    .. code-block:: python

        etcd.transaction(
            compare=[
                etcd.transactions.value('/doot/testing') == 'doot',
                etcd.transactions.version('/doot/testing') > 0,
            ],
            success=[
                etcd.transactions.put('/doot/testing', 'success'),
            ],
            failure=[
                etcd.transactions.put('/doot/testing', 'failure'),
            ]
        )

    :param compare: A list of comparisons to make
    :param success: A list of operations to perform if all the comparisons
                    are true
    :param failure: A list of operations to perform if any of the
                    comparisons are false
    :return: A tuple of (operation status, responses)
    """
    compare = [c.build_message() for c in compare]

    success_ops = self._ops_to_requests(success)
    failure_ops = self._ops_to_requests(failure)

    transaction_request = etcdrpc.TxnRequest(compare=compare,
                                             success=success_ops,
                                             failure=failure_ops)
    txn_response = self.kvstub.Txn(
        transaction_request,
        self.timeout,
        credentials=self.call_credentials,
        metadata=self.metadata
    )

    responses = []
    for response in txn_response.responses:
        response_type = response.WhichOneof('response')
        if response_type in ['response_put', 'response_delete_range',
                             'response_txn']:
            responses.append(response)

        elif response_type == 'response_range':
            range_kvs = []
            for kv in response.response_range.kvs:
                range_kvs.append((kv.value,
                                  KVMetadata(kv, txn_response.header)))

            responses.append(range_kvs)

    return txn_response.succeeded, responses
python
['def', 'transaction', '(', 'self', ',', 'compare', ',', 'success', '=', 'None', ',', 'failure', '=', 'None', ')', ':', 'compare', '=', '[', 'c', '.', 'build_message', '(', ')', 'for', 'c', 'in', 'compare', ']', 'success_ops', '=', 'self', '.', '_ops_to_requests', '(', 'success', ')', 'failure_ops', '=', 'self', '.', '_ops_to_requests', '(', 'failure', ')', 'transaction_request', '=', 'etcdrpc', '.', 'TxnRequest', '(', 'compare', '=', 'compare', ',', 'success', '=', 'success_ops', ',', 'failure', '=', 'failure_ops', ')', 'txn_response', '=', 'self', '.', 'kvstub', '.', 'Txn', '(', 'transaction_request', ',', 'self', '.', 'timeout', ',', 'credentials', '=', 'self', '.', 'call_credentials', ',', 'metadata', '=', 'self', '.', 'metadata', ')', 'responses', '=', '[', ']', 'for', 'response', 'in', 'txn_response', '.', 'responses', ':', 'response_type', '=', 'response', '.', 'WhichOneof', '(', "'response'", ')', 'if', 'response_type', 'in', '[', "'response_put'", ',', "'response_delete_range'", ',', "'response_txn'", ']', ':', 'responses', '.', 'append', '(', 'response', ')', 'elif', 'response_type', '==', "'response_range'", ':', 'range_kvs', '=', '[', ']', 'for', 'kv', 'in', 'response', '.', 'response_range', '.', 'kvs', ':', 'range_kvs', '.', 'append', '(', '(', 'kv', '.', 'value', ',', 'KVMetadata', '(', 'kv', ',', 'txn_response', '.', 'header', ')', ')', ')', 'responses', '.', 'append', '(', 'range_kvs', ')', 'return', 'txn_response', '.', 'succeeded', ',', 'responses']
Perform a transaction. Example usage: .. code-block:: python etcd.transaction( compare=[ etcd.transactions.value('/doot/testing') == 'doot', etcd.transactions.version('/doot/testing') > 0, ], success=[ etcd.transactions.put('/doot/testing', 'success'), ], failure=[ etcd.transactions.put('/doot/testing', 'failure'), ] ) :param compare: A list of comparisons to make :param success: A list of operations to perform if all the comparisons are true :param failure: A list of operations to perform if any of the comparisons are false :return: A tuple of (operation status, responses)
['Perform', 'a', 'transaction', '.']
train
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L715-L773
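Grounded in the docstring above, a hedged usage sketch showing how the (succeeded, responses) return value unpacks; it assumes python-etcd3 is installed and an etcd server is reachable on the default localhost endpoint.

import etcd3

etcd = etcd3.client()   # assumes etcd at localhost:2379
etcd.put('/doot/testing', 'doot')

succeeded, responses = etcd.transaction(
    compare=[etcd.transactions.value('/doot/testing') == 'doot'],
    success=[etcd.transactions.put('/doot/testing', 'success')],
    failure=[etcd.transactions.put('/doot/testing', 'failure')],
)
print(succeeded)   # True when every comparison held, so the success ops ran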
3,712
google/grr
grr/core/grr_response_core/lib/rdfvalue.py
RegisterLateBindingCallback
def RegisterLateBindingCallback(target_name, callback, **kwargs):
  """Registers a callback to be invoked when the RDFValue named is declared."""
  _LATE_BINDING_STORE.setdefault(target_name, []).append((callback, kwargs))
python
['def', 'RegisterLateBindingCallback', '(', 'target_name', ',', 'callback', ',', '*', '*', 'kwargs', ')', ':', '_LATE_BINDING_STORE', '.', 'setdefault', '(', 'target_name', ',', '[', ']', ')', '.', 'append', '(', '(', 'callback', ',', 'kwargs', ')', ')']
Registers a callback to be invoked when the RDFValue named is declared.
['Registers', 'a', 'callback', 'to', 'be', 'invoked', 'when', 'the', 'RDFValue', 'named', 'is', 'declared', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalue.py#L55-L57
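The one-liner is a registry of callbacks keyed by a name that does not exist yet; whoever later declares that name drains the list. A self-contained sketch of that late-binding pattern (the declare() trigger and example names are invented for illustration, not GRR's API):

_LATE_BINDING_STORE = {}

def register_late_binding_callback(target_name, callback, **kwargs):
    _LATE_BINDING_STORE.setdefault(target_name, []).append((callback, kwargs))

def declare(target_name):
    """Called when the named type finally appears; fires queued callbacks."""
    for callback, kwargs in _LATE_BINDING_STORE.pop(target_name, []):
        callback(target_name, **kwargs)

register_late_binding_callback(
    "RDFDatetime", lambda name, **kw: print("bound", name, kw), field="timestamp")
declare("RDFDatetime")   # -> bound RDFDatetime {'field': 'timestamp'}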
3,713
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
HorizonFrame.on_timer
def on_timer(self, event):
    '''Main Loop.'''
    state = self.state
    self.loopStartTime = time.time()
    if state.close_event.wait(0.001):
        self.timer.Stop()
        self.Destroy()
        return

    # Check for resizing
    self.checkReszie()
    if self.resized:
        self.on_idle(0)

    # Get attitude information
    while state.child_pipe_recv.poll():
        objList = state.child_pipe_recv.recv()
        for obj in objList:
            self.calcFontScaling()
            if isinstance(obj,Attitude):
                self.oldRoll = self.roll
                self.pitch = obj.pitch*180/math.pi
                self.roll = obj.roll*180/math.pi
                self.yaw = obj.yaw*180/math.pi

                # Update Roll, Pitch, Yaw Text Text
                self.updateRPYText()

                # Recalculate Horizon Polygons
                self.calcHorizonPoints()

                # Update Pitch Markers
                self.adjustPitchmarkers()

            elif isinstance(obj,VFR_HUD):
                self.heading = obj.heading
                self.airspeed = obj.airspeed
                self.climbRate = obj.climbRate

                # Update Airpseed, Altitude, Climb Rate Locations
                self.updateAARText()

                # Update Heading North Pointer
                self.adjustHeadingPointer()
                self.adjustNorthPointer()

            elif isinstance(obj,Global_Position_INT):
                self.relAlt = obj.relAlt
                self.relAltTime = obj.curTime

                # Update Airpseed, Altitude, Climb Rate Locations
                self.updateAARText()

                # Update Altitude History
                self.updateAltHistory()

            elif isinstance(obj,BatteryInfo):
                self.voltage = obj.voltage
                self.current = obj.current
                self.batRemain = obj.batRemain

                # Update Battery Bar
                self.updateBatteryBar()

            elif isinstance(obj,FlightState):
                self.mode = obj.mode
                self.armed = obj.armState

                # Update Mode and Arm State Text
                self.updateStateText()

            elif isinstance(obj,WaypointInfo):
                self.currentWP = obj.current
                self.finalWP = obj.final
                self.wpDist = obj.currentDist
                self.nextWPTime = obj.nextWPTime
                if obj.wpBearing < 0.0:
                    self.wpBearing = obj.wpBearing + 360
                else:
                    self.wpBearing = obj.wpBearing

                # Update waypoint text
                self.updateWPText()

                # Adjust Waypoint Pointer
                self.adjustWPPointer()

            elif isinstance(obj, FPS):
                # Update fps target
                self.fps = obj.fps

    # Quit Drawing if too early
    if (time.time() > self.nextTime):
        # Update Matplotlib Plot
        self.canvas.draw()
        self.canvas.Refresh()

        self.Refresh()
        self.Update()

        # Calculate next frame time
        if (self.fps > 0):
            fpsTime = 1/self.fps
            self.nextTime = fpsTime + self.loopStartTime
        else:
            self.nextTime = time.time()
python
['def', 'on_timer', '(', 'self', ',', 'event', ')', ':', 'state', '=', 'self', '.', 'state', 'self', '.', 'loopStartTime', '=', 'time', '.', 'time', '(', ')', 'if', 'state', '.', 'close_event', '.', 'wait', '(', '0.001', ')', ':', 'self', '.', 'timer', '.', 'Stop', '(', ')', 'self', '.', 'Destroy', '(', ')', 'return', '# Check for resizing', 'self', '.', 'checkReszie', '(', ')', 'if', 'self', '.', 'resized', ':', 'self', '.', 'on_idle', '(', '0', ')', '# Get attitude information', 'while', 'state', '.', 'child_pipe_recv', '.', 'poll', '(', ')', ':', 'objList', '=', 'state', '.', 'child_pipe_recv', '.', 'recv', '(', ')', 'for', 'obj', 'in', 'objList', ':', 'self', '.', 'calcFontScaling', '(', ')', 'if', 'isinstance', '(', 'obj', ',', 'Attitude', ')', ':', 'self', '.', 'oldRoll', '=', 'self', '.', 'roll', 'self', '.', 'pitch', '=', 'obj', '.', 'pitch', '*', '180', '/', 'math', '.', 'pi', 'self', '.', 'roll', '=', 'obj', '.', 'roll', '*', '180', '/', 'math', '.', 'pi', 'self', '.', 'yaw', '=', 'obj', '.', 'yaw', '*', '180', '/', 'math', '.', 'pi', '# Update Roll, Pitch, Yaw Text Text', 'self', '.', 'updateRPYText', '(', ')', '# Recalculate Horizon Polygons', 'self', '.', 'calcHorizonPoints', '(', ')', '# Update Pitch Markers', 'self', '.', 'adjustPitchmarkers', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'VFR_HUD', ')', ':', 'self', '.', 'heading', '=', 'obj', '.', 'heading', 'self', '.', 'airspeed', '=', 'obj', '.', 'airspeed', 'self', '.', 'climbRate', '=', 'obj', '.', 'climbRate', '# Update Airpseed, Altitude, Climb Rate Locations', 'self', '.', 'updateAARText', '(', ')', '# Update Heading North Pointer', 'self', '.', 'adjustHeadingPointer', '(', ')', 'self', '.', 'adjustNorthPointer', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'Global_Position_INT', ')', ':', 'self', '.', 'relAlt', '=', 'obj', '.', 'relAlt', 'self', '.', 'relAltTime', '=', 'obj', '.', 'curTime', '# Update Airpseed, Altitude, Climb Rate Locations', 'self', '.', 'updateAARText', '(', ')', '# Update Altitude History', 'self', '.', 'updateAltHistory', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'BatteryInfo', ')', ':', 'self', '.', 'voltage', '=', 'obj', '.', 'voltage', 'self', '.', 'current', '=', 'obj', '.', 'current', 'self', '.', 'batRemain', '=', 'obj', '.', 'batRemain', '# Update Battery Bar', 'self', '.', 'updateBatteryBar', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'FlightState', ')', ':', 'self', '.', 'mode', '=', 'obj', '.', 'mode', 'self', '.', 'armed', '=', 'obj', '.', 'armState', '# Update Mode and Arm State Text', 'self', '.', 'updateStateText', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'WaypointInfo', ')', ':', 'self', '.', 'currentWP', '=', 'obj', '.', 'current', 'self', '.', 'finalWP', '=', 'obj', '.', 'final', 'self', '.', 'wpDist', '=', 'obj', '.', 'currentDist', 'self', '.', 'nextWPTime', '=', 'obj', '.', 'nextWPTime', 'if', 'obj', '.', 'wpBearing', '<', '0.0', ':', 'self', '.', 'wpBearing', '=', 'obj', '.', 'wpBearing', '+', '360', 'else', ':', 'self', '.', 'wpBearing', '=', 'obj', '.', 'wpBearing', '# Update waypoint text', 'self', '.', 'updateWPText', '(', ')', '# Adjust Waypoint Pointer', 'self', '.', 'adjustWPPointer', '(', ')', 'elif', 'isinstance', '(', 'obj', ',', 'FPS', ')', ':', '# Update fps target', 'self', '.', 'fps', '=', 'obj', '.', 'fps', '# Quit Drawing if too early', 'if', '(', 'time', '.', 'time', '(', ')', '>', 'self', '.', 'nextTime', ')', ':', '# Update Matplotlib Plot', 'self', '.', 'canvas', '.', 'draw', '(', ')', 'self', '.', 'canvas', '.', 'Refresh', 
'(', ')', 'self', '.', 'Refresh', '(', ')', 'self', '.', 'Update', '(', ')', '# Calculate next frame time', 'if', '(', 'self', '.', 'fps', '>', '0', ')', ':', 'fpsTime', '=', '1', '/', 'self', '.', 'fps', 'self', '.', 'nextTime', '=', 'fpsTime', '+', 'self', '.', 'loopStartTime', 'else', ':', 'self', '.', 'nextTime', '=', 'time', '.', 'time', '(', ')']
Main Loop.
['Main', 'Loop', '.']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L569-L675
3,714
Robpol86/libnl
libnl/genl/genl.py
genl_send_simple
def genl_send_simple(sk, family, cmd, version, flags):
    """Send a Generic Netlink message consisting only of a header.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L84

    This function is a shortcut for sending a Generic Netlink message without any message payload.
    The message will only consist of the Netlink and Generic Netlink headers. The header is
    constructed based on the specified parameters and passed on to nl_send_simple() to send it on
    the specified socket.

    Positional arguments:
    sk -- Generic Netlink socket (nl_sock class instance).
    family -- numeric family identifier (integer).
    cmd -- numeric command identifier (integer).
    version -- interface version (integer).
    flags -- additional Netlink message flags (integer).

    Returns:
    0 on success or a negative error code.
    """
    hdr = genlmsghdr(cmd=cmd, version=version)
    return int(nl_send_simple(sk, family, flags, hdr, hdr.SIZEOF))
python
['def', 'genl_send_simple', '(', 'sk', ',', 'family', ',', 'cmd', ',', 'version', ',', 'flags', ')', ':', 'hdr', '=', 'genlmsghdr', '(', 'cmd', '=', 'cmd', ',', 'version', '=', 'version', ')', 'return', 'int', '(', 'nl_send_simple', '(', 'sk', ',', 'family', ',', 'flags', ',', 'hdr', ',', 'hdr', '.', 'SIZEOF', ')', ')']
Send a Generic Netlink message consisting only of a header. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L84 This function is a shortcut for sending a Generic Netlink message without any message payload. The message will only consist of the Netlink and Generic Netlink headers. The header is constructed based on the specified parameters and passed on to nl_send_simple() to send it on the specified socket. Positional arguments: sk -- Generic Netlink socket (nl_sock class instance). family -- numeric family identifier (integer). cmd -- numeric command identifier (integer). version -- interface version (integer). flags -- additional Netlink message flags (integer). Returns: 0 on success or a negative error code.
['Send', 'a', 'Generic', 'Netlink', 'message', 'consisting', 'only', 'of', 'a', 'header', '.']
train
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/genl.py#L44-L64
3,715
sdispater/orator
orator/migrations/migrator.py
Migrator._resolve
def _resolve(self, path, migration_file):
    """
    Resolve a migration instance from a file.

    :param migration_file: The migration file
    :type migration_file: str

    :rtype: orator.migrations.migration.Migration
    """
    name = "_".join(migration_file.split("_")[4:])
    migration_file = os.path.join(path, "%s.py" % migration_file)

    # Loading parent module
    parent = os.path.join(path, "__init__.py")
    if not os.path.exists(parent):
        with open(parent, "w"):
            pass

    load_module("migrations", parent)

    # Loading module
    mod = load_module("migrations.%s" % name, migration_file)

    klass = getattr(mod, inflection.camelize(name))

    instance = klass()
    instance.set_connection(self.get_repository().get_connection())

    return instance
python
['def', '_resolve', '(', 'self', ',', 'path', ',', 'migration_file', ')', ':', 'name', '=', '"_"', '.', 'join', '(', 'migration_file', '.', 'split', '(', '"_"', ')', '[', '4', ':', ']', ')', 'migration_file', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', '"%s.py"', '%', 'migration_file', ')', '# Loading parent module', 'parent', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', '"__init__.py"', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'parent', ')', ':', 'with', 'open', '(', 'parent', ',', '"w"', ')', ':', 'pass', 'load_module', '(', '"migrations"', ',', 'parent', ')', '# Loading module', 'mod', '=', 'load_module', '(', '"migrations.%s"', '%', 'name', ',', 'migration_file', ')', 'klass', '=', 'getattr', '(', 'mod', ',', 'inflection', '.', 'camelize', '(', 'name', ')', ')', 'instance', '=', 'klass', '(', ')', 'instance', '.', 'set_connection', '(', 'self', '.', 'get_repository', '(', ')', '.', 'get_connection', '(', ')', ')', 'return', 'instance']
Resolve a migration instance from a file. :param migration_file: The migration file :type migration_file: str :rtype: orator.migrations.migration.Migration
['Resolve', 'a', 'migration', 'instance', 'from', 'a', 'file', '.']
train
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/migrations/migrator.py#L244-L272
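The interesting steps are the name parsing and the inflection.camelize lookup: the first four underscore-separated chunks (a timestamp such as 2019_05_01_120000) are dropped, and the remainder is camelized into the migration class name. A small sketch of just that part, assuming the inflection package is installed; the example filename is invented.

import inflection

migration_file = "2019_05_01_120000_create_users_table"

# drop the timestamp prefix, keep the descriptive part
name = "_".join(migration_file.split("_")[4:])
print(name)                          # create_users_table
print(inflection.camelize(name))     # CreateUsersTable -> class looked up in the module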
3,716
yyuu/botornado
boto/ec2/connection.py
EC2Connection.reset_instance_attribute
def reset_instance_attribute(self, instance_id, attribute):
    """
    Resets an attribute of an instance to its default value.

    :type instance_id: string
    :param instance_id: ID of the instance

    :type attribute: string
    :param attribute: The attribute to reset. Valid values are:
                      kernel|ramdisk

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    params = {'InstanceId' : instance_id,
              'Attribute' : attribute}
    return self.get_status('ResetInstanceAttribute', params, verb='POST')
python
['def', 'reset_instance_attribute', '(', 'self', ',', 'instance_id', ',', 'attribute', ')', ':', 'params', '=', '{', "'InstanceId'", ':', 'instance_id', ',', "'Attribute'", ':', 'attribute', '}', 'return', 'self', '.', 'get_status', '(', "'ResetInstanceAttribute'", ',', 'params', ',', 'verb', '=', "'POST'", ')']
Resets an attribute of an instance to its default value. :type instance_id: string :param instance_id: ID of the instance :type attribute: string :param attribute: The attribute to reset. Valid values are: kernel|ramdisk :rtype: bool :return: Whether the operation succeeded or not
['Resets', 'an', 'attribute', 'of', 'an', 'instance', 'to', 'its', 'default', 'value', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L824-L840
3,717
mushkevych/scheduler
synergy/scheduler/state_machine_freerun.py
StateMachineFreerun._log_message
def _log_message(self, level, freerun_entry, msg):
    """ method performs logging into log file and the freerun_entry """
    self.logger.log(level, msg)

    assert isinstance(freerun_entry, FreerunProcessEntry)
    event_log = freerun_entry.event_log
    if len(event_log) > MAX_NUMBER_OF_EVENTS:
        del event_log[-1]
    event_log.insert(0, msg)
    self.freerun_process_dao.update(freerun_entry)
python
['def', '_log_message', '(', 'self', ',', 'level', ',', 'freerun_entry', ',', 'msg', ')', ':', 'self', '.', 'logger', '.', 'log', '(', 'level', ',', 'msg', ')', 'assert', 'isinstance', '(', 'freerun_entry', ',', 'FreerunProcessEntry', ')', 'event_log', '=', 'freerun_entry', '.', 'event_log', 'if', 'len', '(', 'event_log', ')', '>', 'MAX_NUMBER_OF_EVENTS', ':', 'del', 'event_log', '[', '-', '1', ']', 'event_log', '.', 'insert', '(', '0', ',', 'msg', ')', 'self', '.', 'freerun_process_dao', '.', 'update', '(', 'freerun_entry', ')']
method performs logging into log file and the freerun_entry
['method', 'performs', 'logging', 'into', 'log', 'file', 'and', 'the', 'freerun_entry']
train
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/state_machine_freerun.py#L31-L40
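The entry keeps a bounded, newest-first event log. A standalone sketch of that bookkeeping with a plain list; the cap value and function name are chosen here for illustration only.

MAX_NUMBER_OF_EVENTS = 3   # arbitrary cap for the illustration

def log_event(event_log, msg):
    """Insert the newest message at the front, dropping the oldest past the cap."""
    if len(event_log) > MAX_NUMBER_OF_EVENTS:
        del event_log[-1]
    event_log.insert(0, msg)

log = []
for i in range(6):
    log_event(log, 'event %d' % i)
print(log)   # newest first, length capped near MAX_NUMBER_OF_EVENTS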
3,718
bitprophet/ssh
ssh/channel.py
Channel.sendall_stderr
def sendall_stderr(self, s):
    """
    Send data to the channel's "stderr" stream, without allowing partial
    results.  Unlike L{send_stderr}, this method continues to send data
    from the given string until all data has been sent or an error occurs.
    Nothing is returned.

    @param s: data to send to the client as "stderr" output.
    @type s: str

    @raise socket.timeout: if sending stalled for longer than the timeout
        set by L{settimeout}.
    @raise socket.error: if an error occured before the entire string was
        sent.

    @since: 1.1
    """
    while s:
        if self.closed:
            raise socket.error('Socket is closed')
        sent = self.send_stderr(s)
        s = s[sent:]
    return None
python
['def', 'sendall_stderr', '(', 'self', ',', 's', ')', ':', 'while', 's', ':', 'if', 'self', '.', 'closed', ':', 'raise', 'socket', '.', 'error', '(', "'Socket is closed'", ')', 'sent', '=', 'self', '.', 'send_stderr', '(', 's', ')', 's', '=', 's', '[', 'sent', ':', ']', 'return', 'None']
Send data to the channel's "stderr" stream, without allowing partial results. Unlike L{send_stderr}, this method continues to send data from the given string until all data has been sent or an error occurs. Nothing is returned. @param s: data to send to the client as "stderr" output. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occured before the entire string was sent. @since: 1.1
['Send', 'data', 'to', 'the', 'channel', 's', 'stderr', 'stream', 'without', 'allowing', 'partial', 'results', '.', 'Unlike', 'L', '{', 'send_stderr', '}', 'this', 'method', 'continues', 'to', 'send', 'data', 'from', 'the', 'given', 'string', 'until', 'all', 'data', 'has', 'been', 'sent', 'or', 'an', 'error', 'occurs', '.', 'Nothing', 'is', 'returned', '.']
train
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L793-L815
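The function documented above keeps calling `send_stderr` until the whole buffer has been written. A minimal, self-contained sketch of the same send-until-done loop applied to a plain socket (the host and request bytes are placeholders, and `send_until_done` is a hand-rolled stand-in, not the library's method):

```python
import socket

def send_until_done(sock, data):
    """Keep calling send() until every byte is written or an error occurs."""
    while data:
        if sock.fileno() == -1:                 # mirror the 'channel closed' check
            raise socket.error('Socket is closed')
        sent = sock.send(data)                  # send() may write only part of the buffer
        data = data[sent:]                      # retry with whatever is left

with socket.create_connection(('example.com', 80), timeout=10) as sock:
    send_until_done(sock, b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
```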
3,719
pandas-dev/pandas
pandas/core/groupby/base.py
GroupByMixin._dispatch
def _dispatch(name, *args, **kwargs): """ Dispatch to apply. """ def outer(self, *args, **kwargs): def f(x): x = self._shallow_copy(x, groupby=self._groupby) return getattr(x, name)(*args, **kwargs) return self._groupby.apply(f) outer.__name__ = name return outer
python
def _dispatch(name, *args, **kwargs): """ Dispatch to apply. """ def outer(self, *args, **kwargs): def f(x): x = self._shallow_copy(x, groupby=self._groupby) return getattr(x, name)(*args, **kwargs) return self._groupby.apply(f) outer.__name__ = name return outer
['def', '_dispatch', '(', 'name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'def', 'outer', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'def', 'f', '(', 'x', ')', ':', 'x', '=', 'self', '.', '_shallow_copy', '(', 'x', ',', 'groupby', '=', 'self', '.', '_groupby', ')', 'return', 'getattr', '(', 'x', ',', 'name', ')', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_groupby', '.', 'apply', '(', 'f', ')', 'outer', '.', '__name__', '=', 'name', 'return', 'outer']
Dispatch to apply.
['Dispatch', 'to', 'apply', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/base.py#L20-L31
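A standalone sketch of the dispatch-by-name pattern the record above uses: a factory builds a method that forwards the call to a wrapped object and keeps the original method name. The classes below are illustrative, not pandas internals:

```python
def _dispatch(name):
    """Build a method that forwards `name(...)` to the wrapped object."""
    def outer(self, *args, **kwargs):
        return getattr(self._wrapped, name)(*args, **kwargs)
    outer.__name__ = name
    return outer

class Wrapper:
    def __init__(self, wrapped):
        self._wrapped = wrapped
    upper = _dispatch('upper')
    split = _dispatch('split')

w = Wrapper("hello world")
print(w.upper())   # 'HELLO WORLD'
print(w.split())   # ['hello', 'world']
```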
3,720
cisco-sas/kitty
kitty/data/report.py
Report.get_status
def get_status(self): ''' Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error') ''' status = self.get('status') if status == Report.PASSED: for sr_name in self._sub_reports: sr = self._sub_reports[sr_name] sr_status = sr.get_status() reason = sr.get('reason') if sr_status == Report.ERROR: self.error(reason) break if sr_status == Report.FAILED: self.failed(reason) break status = self.get('status') return status
python
def get_status(self): ''' Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error') ''' status = self.get('status') if status == Report.PASSED: for sr_name in self._sub_reports: sr = self._sub_reports[sr_name] sr_status = sr.get_status() reason = sr.get('reason') if sr_status == Report.ERROR: self.error(reason) break if sr_status == Report.FAILED: self.failed(reason) break status = self.get('status') return status
['def', 'get_status', '(', 'self', ')', ':', 'status', '=', 'self', '.', 'get', '(', "'status'", ')', 'if', 'status', '==', 'Report', '.', 'PASSED', ':', 'for', 'sr_name', 'in', 'self', '.', '_sub_reports', ':', 'sr', '=', 'self', '.', '_sub_reports', '[', 'sr_name', ']', 'sr_status', '=', 'sr', '.', 'get_status', '(', ')', 'reason', '=', 'sr', '.', 'get', '(', "'reason'", ')', 'if', 'sr_status', '==', 'Report', '.', 'ERROR', ':', 'self', '.', 'error', '(', 'reason', ')', 'break', 'if', 'sr_status', '==', 'Report', '.', 'FAILED', ':', 'self', '.', 'failed', '(', 'reason', ')', 'break', 'status', '=', 'self', '.', 'get', '(', "'status'", ')', 'return', 'status']
Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error')
['Get', 'the', 'status', 'of', 'the', 'report', 'and', 'its', 'sub', '-', 'reports', '.']
train
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/report.py#L219-L239
3,721
PyGithub/PyGithub
github/Repository.py
Repository.has_in_collaborators
def has_in_collaborators(self, collaborator): """ :calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_ :param collaborator: string or :class:`github.NamedUser.NamedUser` :rtype: bool """ assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator if isinstance(collaborator, github.NamedUser.NamedUser): collaborator = collaborator._identity status, headers, data = self._requester.requestJson( "GET", self.url + "/collaborators/" + collaborator ) return status == 204
python
def has_in_collaborators(self, collaborator): """ :calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_ :param collaborator: string or :class:`github.NamedUser.NamedUser` :rtype: bool """ assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator if isinstance(collaborator, github.NamedUser.NamedUser): collaborator = collaborator._identity status, headers, data = self._requester.requestJson( "GET", self.url + "/collaborators/" + collaborator ) return status == 204
['def', 'has_in_collaborators', '(', 'self', ',', 'collaborator', ')', ':', 'assert', 'isinstance', '(', 'collaborator', ',', 'github', '.', 'NamedUser', '.', 'NamedUser', ')', 'or', 'isinstance', '(', 'collaborator', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'collaborator', 'if', 'isinstance', '(', 'collaborator', ',', 'github', '.', 'NamedUser', '.', 'NamedUser', ')', ':', 'collaborator', '=', 'collaborator', '.', '_identity', 'status', ',', 'headers', ',', 'data', '=', 'self', '.', '_requester', '.', 'requestJson', '(', '"GET"', ',', 'self', '.', 'url', '+', '"/collaborators/"', '+', 'collaborator', ')', 'return', 'status', '==', '204']
:calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_ :param collaborator: string or :class:`github.NamedUser.NamedUser` :rtype: bool
[':', 'calls', ':', 'GET', '/', 'repos', '/', ':', 'owner', '/', ':', 'repo', '/', 'collaborators', '/', ':', 'user', '<http', ':', '//', 'developer', '.', 'github', '.', 'com', '/', 'v3', '/', 'repos', '/', 'collaborators', '>', '_', ':', 'param', 'collaborator', ':', 'string', 'or', ':', 'class', ':', 'github', '.', 'NamedUser', '.', 'NamedUser', ':', 'rtype', ':', 'bool']
train
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L2564-L2579
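A hedged usage sketch of the method documented above; the access token and repository name are placeholders, and network access is assumed:

```python
from github import Github

gh = Github("ghp_your_token_here")              # placeholder token
repo = gh.get_repo("octocat/Hello-World")       # placeholder repository

# Accepts a login string or a NamedUser; True corresponds to an HTTP 204 response.
if repo.has_in_collaborators("octocat"):
    print("octocat is a collaborator")
```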
3,722
jealous/stockstats
stockstats.py
StockDataFrame._get_kdjd
def _get_kdjd(cls, df, n_days): """ Get the D of KDJ D = 2/3 × (prev. D) +1/3 × (curr. K) 2/3 and 1/3 are the smooth parameters. :param df: data :param n_days: calculation range :return: None """ k_column = 'kdjk_{}'.format(n_days) d_column = 'kdjd_{}'.format(n_days) df[d_column] = list(cls._calc_kd(df.get(k_column)))
python
def _get_kdjd(cls, df, n_days): """ Get the D of KDJ D = 2/3 × (prev. D) +1/3 × (curr. K) 2/3 and 1/3 are the smooth parameters. :param df: data :param n_days: calculation range :return: None """ k_column = 'kdjk_{}'.format(n_days) d_column = 'kdjd_{}'.format(n_days) df[d_column] = list(cls._calc_kd(df.get(k_column)))
['def', '_get_kdjd', '(', 'cls', ',', 'df', ',', 'n_days', ')', ':', 'k_column', '=', "'kdjk_{}'", '.', 'format', '(', 'n_days', ')', 'd_column', '=', "'kdjd_{}'", '.', 'format', '(', 'n_days', ')', 'df', '[', 'd_column', ']', '=', 'list', '(', 'cls', '.', '_calc_kd', '(', 'df', '.', 'get', '(', 'k_column', ')', ')', ')']
Get the D of KDJ D = 2/3 × (prev. D) +1/3 × (curr. K) 2/3 and 1/3 are the smooth parameters. :param df: data :param n_days: calculation range :return: None
['Get', 'the', 'D', 'of', 'KDJ', 'D', '=', '2', '/', '3', '×', '(', 'prev', '.', 'D', ')', '+', '1', '/', '3', '×', '(', 'curr', '.', 'K', ')', '2', '/', '3', 'and', '1', '/', '3', 'are', 'the', 'smooth', 'parameters', '.', ':', 'param', 'df', ':', 'data', ':', 'param', 'n_days', ':', 'calculation', 'range', ':', 'return', ':', 'None']
train
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L636-L647
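A self-contained sketch of the smoothing rule stated in the docstring above, D = 2/3 × (previous D) + 1/3 × (current K); seeding the recursion with the first K value is an assumption made for this sketch:

```python
def calc_kd(k_values):
    """Smooth a K series into a D series: D = 2/3 * prev_D + 1/3 * curr_K."""
    d_values = []
    prev = None
    for k in k_values:
        prev = k if prev is None else (2.0 / 3.0) * prev + (1.0 / 3.0) * k
        d_values.append(prev)
    return d_values

print(calc_kd([50.0, 60.0, 30.0]))   # [50.0, 53.33..., 45.55...]
```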
3,723
numenta/nupic.core
bindings/py/src/nupic/bindings/tools/cyclical_serialization_perf.py
main
def main(): """Measure capnp serialization performance of a network containing a simple python region that in-turn contains a Random instance. """ engine.Network.registerPyRegion(__name__, SerializationTestPyRegion.__name__) try: _runTest() finally: engine.Network.unregisterPyRegion(SerializationTestPyRegion.__name__)
python
def main(): """Measure capnp serialization performance of a network containing a simple python region that in-turn contains a Random instance. """ engine.Network.registerPyRegion(__name__, SerializationTestPyRegion.__name__) try: _runTest() finally: engine.Network.unregisterPyRegion(SerializationTestPyRegion.__name__)
['def', 'main', '(', ')', ':', 'engine', '.', 'Network', '.', 'registerPyRegion', '(', '__name__', ',', 'SerializationTestPyRegion', '.', '__name__', ')', 'try', ':', '_runTest', '(', ')', 'finally', ':', 'engine', '.', 'Network', '.', 'unregisterPyRegion', '(', 'SerializationTestPyRegion', '.', '__name__', ')']
Measure capnp serialization performance of a network containing a simple python region that in-turn contains a Random instance.
['Measure', 'capnp', 'serialization', 'performance', 'of', 'a', 'network', 'containing', 'a', 'simple', 'python', 'region', 'that', 'in', '-', 'turn', 'contains', 'a', 'Random', 'instance', '.']
train
https://github.com/numenta/nupic.core/blob/333290c32403ce11e7117f826a6348c3a8e6c125/bindings/py/src/nupic/bindings/tools/cyclical_serialization_perf.py#L113-L123
3,724
OpenTreeOfLife/peyotl
peyotl/phylesystem/phylesystem_shard.py
PhylesystemShard._advance_new_study_id
def _advance_new_study_id(self): """ ASSUMES the caller holds the _doc_counter_lock ! Returns the current numeric part of the next study ID, advances the counter to the next value, and stores that value in the file in case the server is restarted. """ c = self._next_study_id self._next_study_id = 1 + c content = u'{"next_study_id": %d}\n' % self._next_study_id # The content is JSON, but we hand-rolled the string above # so that we can use it as a commit_msg self._write_master_branch_resource(content, self._id_minting_file, commit_msg=content, is_json=False) return c
python
def _advance_new_study_id(self): """ ASSUMES the caller holds the _doc_counter_lock ! Returns the current numeric part of the next study ID, advances the counter to the next value, and stores that value in the file in case the server is restarted. """ c = self._next_study_id self._next_study_id = 1 + c content = u'{"next_study_id": %d}\n' % self._next_study_id # The content is JSON, but we hand-rolled the string above # so that we can use it as a commit_msg self._write_master_branch_resource(content, self._id_minting_file, commit_msg=content, is_json=False) return c
['def', '_advance_new_study_id', '(', 'self', ')', ':', 'c', '=', 'self', '.', '_next_study_id', 'self', '.', '_next_study_id', '=', '1', '+', 'c', 'content', '=', 'u\'{"next_study_id": %d}\\n\'', '%', 'self', '.', '_next_study_id', '# The content is JSON, but we hand-rolled the string above', '# so that we can use it as a commit_msg', 'self', '.', '_write_master_branch_resource', '(', 'content', ',', 'self', '.', '_id_minting_file', ',', 'commit_msg', '=', 'content', ',', 'is_json', '=', 'False', ')', 'return', 'c']
ASSUMES the caller holds the _doc_counter_lock ! Returns the current numeric part of the next study ID, advances the counter to the next value, and stores that value in the file in case the server is restarted.
['ASSUMES', 'the', 'caller', 'holds', 'the', '_doc_counter_lock', '!', 'Returns', 'the', 'current', 'numeric', 'part', 'of', 'the', 'next', 'study', 'ID', 'advances', 'the', 'counter', 'to', 'the', 'next', 'value', 'and', 'stores', 'that', 'value', 'in', 'the', 'file', 'in', 'case', 'the', 'server', 'is', 'restarted', '.']
train
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_shard.py#L281-L296
3,725
svenkreiss/pysparkling
pysparkling/streaming/dstream.py
DStream.countByValue
def countByValue(self): """Apply countByValue to every RDD.abs :rtype: DStream .. warning:: Implemented as a local operation. Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([[1, 1, 5, 5, 5, 2]]) ... .countByValue() ... .foreachRDD(lambda rdd: print(sorted(rdd.collect()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.15) [(1, 2), (2, 1), (5, 3)] """ return self.transform( lambda rdd: self._context._context.parallelize( rdd.countByValue().items()))
python
def countByValue(self): """Apply countByValue to every RDD.abs :rtype: DStream .. warning:: Implemented as a local operation. Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([[1, 1, 5, 5, 5, 2]]) ... .countByValue() ... .foreachRDD(lambda rdd: print(sorted(rdd.collect()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.15) [(1, 2), (2, 1), (5, 3)] """ return self.transform( lambda rdd: self._context._context.parallelize( rdd.countByValue().items()))
['def', 'countByValue', '(', 'self', ')', ':', 'return', 'self', '.', 'transform', '(', 'lambda', 'rdd', ':', 'self', '.', '_context', '.', '_context', '.', 'parallelize', '(', 'rdd', '.', 'countByValue', '(', ')', '.', 'items', '(', ')', ')', ')']
Apply countByValue to every RDD.abs :rtype: DStream .. warning:: Implemented as a local operation. Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([[1, 1, 5, 5, 5, 2]]) ... .countByValue() ... .foreachRDD(lambda rdd: print(sorted(rdd.collect()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.15) [(1, 2), (2, 1), (5, 3)]
['Apply', 'countByValue', 'to', 'every', 'RDD', '.', 'abs']
train
https://github.com/svenkreiss/pysparkling/blob/596d0ef2793100f7115efe228ff9bfc17beaa08d/pysparkling/streaming/dstream.py#L94-L120
3,726
dnanexus/dx-toolkit
src/python/dxpy/api.py
container_rename_folder
def container_rename_folder(object_id, input_params={}, always_retry=False, **kwargs): """ Invokes the /container-xxxx/renameFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder """ return DXHTTPRequest('/%s/renameFolder' % object_id, input_params, always_retry=always_retry, **kwargs)
python
def container_rename_folder(object_id, input_params={}, always_retry=False, **kwargs): """ Invokes the /container-xxxx/renameFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder """ return DXHTTPRequest('/%s/renameFolder' % object_id, input_params, always_retry=always_retry, **kwargs)
['def', 'container_rename_folder', '(', 'object_id', ',', 'input_params', '=', '{', '}', ',', 'always_retry', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'return', 'DXHTTPRequest', '(', "'/%s/renameFolder'", '%', 'object_id', ',', 'input_params', ',', 'always_retry', '=', 'always_retry', ',', '*', '*', 'kwargs', ')']
Invokes the /container-xxxx/renameFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder
['Invokes', 'the', '/', 'container', '-', 'xxxx', '/', 'renameFolder', 'API', 'method', '.']
train
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L395-L401
3,727
openstack/python-scciclient
scciclient/irmc/viom/client.py
VIOMConfiguration.set_fc_volume
def set_fc_volume(self, port_id, target_wwn, target_lun=0, boot_prio=1, initiator_wwnn=None, initiator_wwpn=None): """Set FibreChannel volume information to configuration. :param port_id: Physical port ID. :param target_wwn: WWN of target. :param target_lun: LUN number of target. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. :param initiator_wwnn: Virtual WWNN for initiator if necessary. :param initiator_wwpn: Virtual WWPN for initiator if necessary. """ port_handler = _parse_physical_port_id(port_id) fc_target = elcm.FCTarget(target_wwn, target_lun) fc_boot = elcm.FCBoot(boot_prio=boot_prio, boot_enable=True) fc_boot.add_target(fc_target) port = self._find_port(port_handler) if port: port_handler.set_fc_port(port, fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) else: port = port_handler.create_fc_port(fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) self._add_port(port_handler, port)
python
def set_fc_volume(self, port_id, target_wwn, target_lun=0, boot_prio=1, initiator_wwnn=None, initiator_wwpn=None): """Set FibreChannel volume information to configuration. :param port_id: Physical port ID. :param target_wwn: WWN of target. :param target_lun: LUN number of target. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. :param initiator_wwnn: Virtual WWNN for initiator if necessary. :param initiator_wwpn: Virtual WWPN for initiator if necessary. """ port_handler = _parse_physical_port_id(port_id) fc_target = elcm.FCTarget(target_wwn, target_lun) fc_boot = elcm.FCBoot(boot_prio=boot_prio, boot_enable=True) fc_boot.add_target(fc_target) port = self._find_port(port_handler) if port: port_handler.set_fc_port(port, fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) else: port = port_handler.create_fc_port(fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) self._add_port(port_handler, port)
['def', 'set_fc_volume', '(', 'self', ',', 'port_id', ',', 'target_wwn', ',', 'target_lun', '=', '0', ',', 'boot_prio', '=', '1', ',', 'initiator_wwnn', '=', 'None', ',', 'initiator_wwpn', '=', 'None', ')', ':', 'port_handler', '=', '_parse_physical_port_id', '(', 'port_id', ')', 'fc_target', '=', 'elcm', '.', 'FCTarget', '(', 'target_wwn', ',', 'target_lun', ')', 'fc_boot', '=', 'elcm', '.', 'FCBoot', '(', 'boot_prio', '=', 'boot_prio', ',', 'boot_enable', '=', 'True', ')', 'fc_boot', '.', 'add_target', '(', 'fc_target', ')', 'port', '=', 'self', '.', '_find_port', '(', 'port_handler', ')', 'if', 'port', ':', 'port_handler', '.', 'set_fc_port', '(', 'port', ',', 'fc_boot', ',', 'wwnn', '=', 'initiator_wwnn', ',', 'wwpn', '=', 'initiator_wwpn', ')', 'else', ':', 'port', '=', 'port_handler', '.', 'create_fc_port', '(', 'fc_boot', ',', 'wwnn', '=', 'initiator_wwnn', ',', 'wwpn', '=', 'initiator_wwpn', ')', 'self', '.', '_add_port', '(', 'port_handler', ',', 'port', ')']
Set FibreChannel volume information to configuration. :param port_id: Physical port ID. :param target_wwn: WWN of target. :param target_lun: LUN number of target. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. :param initiator_wwnn: Virtual WWNN for initiator if necessary. :param initiator_wwpn: Virtual WWPN for initiator if necessary.
['Set', 'FibreChannel', 'volume', 'information', 'to', 'configuration', '.']
train
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/viom/client.py#L375-L401
3,728
Shizmob/pydle
pydle/features/rfc1459/client.py
RFC1459Support.on_raw_301
async def on_raw_301(self, message): """ User is away. """ target, nickname, message = message.params info = { 'away': True, 'away_message': message } if nickname in self.users: self._sync_user(nickname, info) if nickname in self._pending['whois']: self._whois_info[nickname].update(info)
python
async def on_raw_301(self, message): """ User is away. """ target, nickname, message = message.params info = { 'away': True, 'away_message': message } if nickname in self.users: self._sync_user(nickname, info) if nickname in self._pending['whois']: self._whois_info[nickname].update(info)
['async', 'def', 'on_raw_301', '(', 'self', ',', 'message', ')', ':', 'target', ',', 'nickname', ',', 'message', '=', 'message', '.', 'params', 'info', '=', '{', "'away'", ':', 'True', ',', "'away_message'", ':', 'message', '}', 'if', 'nickname', 'in', 'self', '.', 'users', ':', 'self', '.', '_sync_user', '(', 'nickname', ',', 'info', ')', 'if', 'nickname', 'in', 'self', '.', '_pending', '[', "'whois'", ']', ':', 'self', '.', '_whois_info', '[', 'nickname', ']', '.', 'update', '(', 'info', ')']
User is away.
['User', 'is', 'away', '.']
train
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L776-L787
3,729
pantsbuild/pants
src/python/pants/backend/jvm/subsystems/jvm.py
JVM.get_jvm_options
def get_jvm_options(self): """Return the options to run this JVM with. These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on. Thus named because get_options() already exists (and returns this object's Pants options). """ ret = [] for opt in self.get_options().options: ret.extend(safe_shlex_split(opt)) if (self.get_options().debug or self.get_options().is_flagged('debug_port') or self.get_options().is_flagged('debug_args')): debug_port = self.get_options().debug_port ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args) return ret
python
def get_jvm_options(self): """Return the options to run this JVM with. These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on. Thus named because get_options() already exists (and returns this object's Pants options). """ ret = [] for opt in self.get_options().options: ret.extend(safe_shlex_split(opt)) if (self.get_options().debug or self.get_options().is_flagged('debug_port') or self.get_options().is_flagged('debug_args')): debug_port = self.get_options().debug_port ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args) return ret
['def', 'get_jvm_options', '(', 'self', ')', ':', 'ret', '=', '[', ']', 'for', 'opt', 'in', 'self', '.', 'get_options', '(', ')', '.', 'options', ':', 'ret', '.', 'extend', '(', 'safe_shlex_split', '(', 'opt', ')', ')', 'if', '(', 'self', '.', 'get_options', '(', ')', '.', 'debug', 'or', 'self', '.', 'get_options', '(', ')', '.', 'is_flagged', '(', "'debug_port'", ')', 'or', 'self', '.', 'get_options', '(', ')', '.', 'is_flagged', '(', "'debug_args'", ')', ')', ':', 'debug_port', '=', 'self', '.', 'get_options', '(', ')', '.', 'debug_port', 'ret', '.', 'extend', '(', 'arg', '.', 'format', '(', 'debug_port', '=', 'debug_port', ')', 'for', 'arg', 'in', 'self', '.', 'get_options', '(', ')', '.', 'debug_args', ')', 'return', 'ret']
Return the options to run this JVM with. These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on. Thus named because get_options() already exists (and returns this object's Pants options).
['Return', 'the', 'options', 'to', 'run', 'this', 'JVM', 'with', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/subsystems/jvm.py#L50-L66
3,730
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADate.py
QA_util_stamp2datetime
def QA_util_stamp2datetime(timestamp): """ datestamp转datetime pandas转出来的timestamp是13位整数 要/1000 It’s common for this to be restricted to years from 1970 through 2038. 从1970年开始的纳秒到当前的计数 转变成 float 类型时间 类似 time.time() 返回的类型 :param timestamp: long类型 :return: 类型float """ try: return datetime.datetime.fromtimestamp(timestamp) except Exception as e: # it won't work ?? try: return datetime.datetime.fromtimestamp(timestamp / 1000) except: try: return datetime.datetime.fromtimestamp(timestamp / 1000000) except: return datetime.datetime.fromtimestamp(timestamp / 1000000000)
python
def QA_util_stamp2datetime(timestamp): """ datestamp转datetime pandas转出来的timestamp是13位整数 要/1000 It’s common for this to be restricted to years from 1970 through 2038. 从1970年开始的纳秒到当前的计数 转变成 float 类型时间 类似 time.time() 返回的类型 :param timestamp: long类型 :return: 类型float """ try: return datetime.datetime.fromtimestamp(timestamp) except Exception as e: # it won't work ?? try: return datetime.datetime.fromtimestamp(timestamp / 1000) except: try: return datetime.datetime.fromtimestamp(timestamp / 1000000) except: return datetime.datetime.fromtimestamp(timestamp / 1000000000)
['def', 'QA_util_stamp2datetime', '(', 'timestamp', ')', ':', 'try', ':', 'return', 'datetime', '.', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', ')', 'except', 'Exception', 'as', 'e', ':', "# it won't work ??", 'try', ':', 'return', 'datetime', '.', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', '/', '1000', ')', 'except', ':', 'try', ':', 'return', 'datetime', '.', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', '/', '1000000', ')', 'except', ':', 'return', 'datetime', '.', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', '/', '1000000000', ')']
datestamp转datetime pandas转出来的timestamp是13位整数 要/1000 It’s common for this to be restricted to years from 1970 through 2038. 从1970年开始的纳秒到当前的计数 转变成 float 类型时间 类似 time.time() 返回的类型 :param timestamp: long类型 :return: 类型float
['datestamp转datetime', 'pandas转出来的timestamp是13位整数', '要', '/', '1000', 'It’s', 'common', 'for', 'this', 'to', 'be', 'restricted', 'to', 'years', 'from', '1970', 'through', '2038', '.', '从1970年开始的纳秒到当前的计数', '转变成', 'float', '类型时间', '类似', 'time', '.', 'time', '()', '返回的类型', ':', 'param', 'timestamp', ':', 'long类型', ':', 'return', ':', '类型float']
train
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADate.py#L173-L192
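A standalone sketch of the fallback idea in the record above: interpret the stamp as seconds first, then retry with millisecond, microsecond and nanosecond scales when `fromtimestamp` rejects the value. Catching this particular exception set is an assumption; the original simply catches everything:

```python
import datetime

def stamp2datetime(timestamp):
    for divisor in (1, 10**3, 10**6, 10**9):        # s, ms, us, ns epochs
        try:
            return datetime.datetime.fromtimestamp(timestamp / divisor)
        except (OverflowError, OSError, ValueError):
            continue
    raise ValueError("timestamp out of range: %r" % (timestamp,))

print(stamp2datetime(1546300800))        # 10-digit stamp, seconds
print(stamp2datetime(1546300800000))     # 13-digit stamp, milliseconds
```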
3,731
santoshphilip/eppy
eppy/idfreader.py
idfreader1
def idfreader1(fname, iddfile, theidf, conv=True, commdct=None, block=None): """read idf file and return bunches""" versiontuple = iddversiontuple(iddfile) # import pdb; pdb.set_trace() block, data, commdct, idd_index = readidf.readdatacommdct1( fname, iddfile=iddfile, commdct=commdct, block=block) if conv: convertallfields(data, commdct, block) # fill gaps in idd ddtt, dtls = data.dt, data.dtls if versiontuple < (8,): skiplist = ["TABLE:MULTIVARIABLELOOKUP"] else: skiplist = None nofirstfields = iddgaps.missingkeys_standard( commdct, dtls, skiplist=skiplist) iddgaps.missingkeys_nonstandard(block, commdct, dtls, nofirstfields) # bunchdt = makebunches(data, commdct) bunchdt = makebunches_alter(data, commdct, theidf) return bunchdt, block, data, commdct, idd_index, versiontuple
python
def idfreader1(fname, iddfile, theidf, conv=True, commdct=None, block=None): """read idf file and return bunches""" versiontuple = iddversiontuple(iddfile) # import pdb; pdb.set_trace() block, data, commdct, idd_index = readidf.readdatacommdct1( fname, iddfile=iddfile, commdct=commdct, block=block) if conv: convertallfields(data, commdct, block) # fill gaps in idd ddtt, dtls = data.dt, data.dtls if versiontuple < (8,): skiplist = ["TABLE:MULTIVARIABLELOOKUP"] else: skiplist = None nofirstfields = iddgaps.missingkeys_standard( commdct, dtls, skiplist=skiplist) iddgaps.missingkeys_nonstandard(block, commdct, dtls, nofirstfields) # bunchdt = makebunches(data, commdct) bunchdt = makebunches_alter(data, commdct, theidf) return bunchdt, block, data, commdct, idd_index, versiontuple
['def', 'idfreader1', '(', 'fname', ',', 'iddfile', ',', 'theidf', ',', 'conv', '=', 'True', ',', 'commdct', '=', 'None', ',', 'block', '=', 'None', ')', ':', 'versiontuple', '=', 'iddversiontuple', '(', 'iddfile', ')', '# import pdb; pdb.set_trace()', 'block', ',', 'data', ',', 'commdct', ',', 'idd_index', '=', 'readidf', '.', 'readdatacommdct1', '(', 'fname', ',', 'iddfile', '=', 'iddfile', ',', 'commdct', '=', 'commdct', ',', 'block', '=', 'block', ')', 'if', 'conv', ':', 'convertallfields', '(', 'data', ',', 'commdct', ',', 'block', ')', '# fill gaps in idd', 'ddtt', ',', 'dtls', '=', 'data', '.', 'dt', ',', 'data', '.', 'dtls', 'if', 'versiontuple', '<', '(', '8', ',', ')', ':', 'skiplist', '=', '[', '"TABLE:MULTIVARIABLELOOKUP"', ']', 'else', ':', 'skiplist', '=', 'None', 'nofirstfields', '=', 'iddgaps', '.', 'missingkeys_standard', '(', 'commdct', ',', 'dtls', ',', 'skiplist', '=', 'skiplist', ')', 'iddgaps', '.', 'missingkeys_nonstandard', '(', 'block', ',', 'commdct', ',', 'dtls', ',', 'nofirstfields', ')', '# bunchdt = makebunches(data, commdct)', 'bunchdt', '=', 'makebunches_alter', '(', 'data', ',', 'commdct', ',', 'theidf', ')', 'return', 'bunchdt', ',', 'block', ',', 'data', ',', 'commdct', ',', 'idd_index', ',', 'versiontuple']
read idf file and return bunches
['read', 'idf', 'file', 'and', 'return', 'bunches']
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idfreader.py#L252-L275
3,732
DavidMStraub/pylha
pylha/parse.py
load
def load(stream): """Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.""" if isinstance(stream, str): string = stream else: string = stream.read() tokens = tokenize(string) return parse(tokens)
python
def load(stream): """Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.""" if isinstance(stream, str): string = stream else: string = stream.read() tokens = tokenize(string) return parse(tokens)
['def', 'load', '(', 'stream', ')', ':', 'if', 'isinstance', '(', 'stream', ',', 'str', ')', ':', 'string', '=', 'stream', 'else', ':', 'string', '=', 'stream', '.', 'read', '(', ')', 'tokens', '=', 'tokenize', '(', 'string', ')', 'return', 'parse', '(', 'tokens', ')']
Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.
['Parse', 'the', 'LHA', 'document', 'and', 'produce', 'the', 'corresponding', 'Python', 'object', '.', 'Accepts', 'a', 'string', 'or', 'a', 'file', '-', 'like', 'object', '.']
train
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L80-L88
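A hedged usage sketch of `pylha.load`, which per the docstring above accepts either a string or a file-like object. The block content is a made-up SLHA-style snippet and the exact shape of the returned Python structure is not asserted here:

```python
import io
import pylha

text = """BLOCK MASS
    25  1.25e+02
"""

doc_from_string = pylha.load(text)              # parse directly from a str
doc_from_file = pylha.load(io.StringIO(text))   # or from anything with .read()
print(doc_from_string)
```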
3,733
clinicedc/edc-notification
edc_notification/site_notifications.py
SiteNotifications.update_notification_list
def update_notification_list(self, apps=None, schema_editor=None, verbose=False): """Updates the notification model to ensure all registered notifications classes are listed. Typically called from a post_migrate signal. Also, in tests you can register a notification and the Notification class (not model) will automatically call this method if the named notification does not exist. See notification.notify() """ Notification = (apps or django_apps).get_model("edc_notification.notification") # flag all notifications as disabled and re-enable as required Notification.objects.all().update(enabled=False) if site_notifications.loaded: if verbose: sys.stdout.write( style.MIGRATE_HEADING("Populating Notification model:\n") ) self.delete_unregistered_notifications(apps=apps) for name, notification_cls in site_notifications.registry.items(): if verbose: sys.stdout.write( f" * Adding '{name}': '{notification_cls().display_name}'\n" ) try: obj = Notification.objects.get(name=name) except ObjectDoesNotExist: Notification.objects.create( name=name, display_name=notification_cls().display_name, enabled=True, ) else: obj.display_name = notification_cls().display_name obj.enabled = True obj.save()
python
def update_notification_list(self, apps=None, schema_editor=None, verbose=False): """Updates the notification model to ensure all registered notifications classes are listed. Typically called from a post_migrate signal. Also, in tests you can register a notification and the Notification class (not model) will automatically call this method if the named notification does not exist. See notification.notify() """ Notification = (apps or django_apps).get_model("edc_notification.notification") # flag all notifications as disabled and re-enable as required Notification.objects.all().update(enabled=False) if site_notifications.loaded: if verbose: sys.stdout.write( style.MIGRATE_HEADING("Populating Notification model:\n") ) self.delete_unregistered_notifications(apps=apps) for name, notification_cls in site_notifications.registry.items(): if verbose: sys.stdout.write( f" * Adding '{name}': '{notification_cls().display_name}'\n" ) try: obj = Notification.objects.get(name=name) except ObjectDoesNotExist: Notification.objects.create( name=name, display_name=notification_cls().display_name, enabled=True, ) else: obj.display_name = notification_cls().display_name obj.enabled = True obj.save()
['def', 'update_notification_list', '(', 'self', ',', 'apps', '=', 'None', ',', 'schema_editor', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', 'Notification', '=', '(', 'apps', 'or', 'django_apps', ')', '.', 'get_model', '(', '"edc_notification.notification"', ')', '# flag all notifications as disabled and re-enable as required', 'Notification', '.', 'objects', '.', 'all', '(', ')', '.', 'update', '(', 'enabled', '=', 'False', ')', 'if', 'site_notifications', '.', 'loaded', ':', 'if', 'verbose', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'style', '.', 'MIGRATE_HEADING', '(', '"Populating Notification model:\\n"', ')', ')', 'self', '.', 'delete_unregistered_notifications', '(', 'apps', '=', 'apps', ')', 'for', 'name', ',', 'notification_cls', 'in', 'site_notifications', '.', 'registry', '.', 'items', '(', ')', ':', 'if', 'verbose', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'f" * Adding \'{name}\': \'{notification_cls().display_name}\'\\n"', ')', 'try', ':', 'obj', '=', 'Notification', '.', 'objects', '.', 'get', '(', 'name', '=', 'name', ')', 'except', 'ObjectDoesNotExist', ':', 'Notification', '.', 'objects', '.', 'create', '(', 'name', '=', 'name', ',', 'display_name', '=', 'notification_cls', '(', ')', '.', 'display_name', ',', 'enabled', '=', 'True', ',', ')', 'else', ':', 'obj', '.', 'display_name', '=', 'notification_cls', '(', ')', '.', 'display_name', 'obj', '.', 'enabled', '=', 'True', 'obj', '.', 'save', '(', ')']
Updates the notification model to ensure all registered notifications classes are listed. Typically called from a post_migrate signal. Also, in tests you can register a notification and the Notification class (not model) will automatically call this method if the named notification does not exist. See notification.notify()
['Updates', 'the', 'notification', 'model', 'to', 'ensure', 'all', 'registered', 'notifications', 'classes', 'are', 'listed', '.']
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/site_notifications.py#L100-L136
3,734
QInfer/python-qinfer
src/qinfer/derived_models.py
BinomialModel.domain
def domain(self, expparams): """ Returns a list of ``Domain``s, one for each input expparam. :param numpy.ndarray expparams: Array of experimental parameters. This array must be of dtype agreeing with the ``expparams_dtype`` property, or, in the case where ``n_outcomes_constant`` is ``True``, ``None`` should be a valid input. :rtype: list of ``Domain`` """ return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
python
def domain(self, expparams): """ Returns a list of ``Domain``s, one for each input expparam. :param numpy.ndarray expparams: Array of experimental parameters. This array must be of dtype agreeing with the ``expparams_dtype`` property, or, in the case where ``n_outcomes_constant`` is ``True``, ``None`` should be a valid input. :rtype: list of ``Domain`` """ return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
['def', 'domain', '(', 'self', ',', 'expparams', ')', ':', 'return', '[', 'IntegerDomain', '(', 'min', '=', '0', ',', 'max', '=', 'n_o', '-', '1', ')', 'for', 'n_o', 'in', 'self', '.', 'n_outcomes', '(', 'expparams', ')', ']']
Returns a list of ``Domain``s, one for each input expparam. :param numpy.ndarray expparams: Array of experimental parameters. This array must be of dtype agreeing with the ``expparams_dtype`` property, or, in the case where ``n_outcomes_constant`` is ``True``, ``None`` should be a valid input. :rtype: list of ``Domain``
['Returns', 'a', 'list', 'of', 'Domain', 's', 'one', 'for', 'each', 'input', 'expparam', '.']
train
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/derived_models.py#L287-L298
3,735
estnltk/estnltk
estnltk/disambiguator.py
Disambiguator.__find_hidden_analyses
def __find_hidden_analyses(self, docs): """ Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei tule arvestada lemmade järelühestamisel: *) kesksõnade nud, dud, tud mitmesused; *) muutumatute sõnade sõnaliigi mitmesus; *) oleviku 'olema' mitmesus ('nad on' vs 'ta on'); *) asesõnade ainsuse-mitmuse mitmesus; *) arv- ja asesõnade vaheline mitmesus; Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega, iga võti kujul (doc_index, word_index); """ hidden = dict() nudTudLopud = re.compile('^.*[ntd]ud$') for d in range(len(docs)): for w in range(len(docs[d][WORDS])): word = docs[d][WORDS][w] if ANALYSIS in word and len(word[ANALYSIS]) > 1: # # 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus: # kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, // nudTud = [ nudTudLopud.match(a[ROOT]) != None or \ nudTudLopud.match(a[ENDING]) != None \ for a in word[ANALYSIS] ] if nudTud.count( True ) > 1: hidden[(d, w)] = 1 # # 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära: # Nt kui+0 //_D_ // kui+0 //_J_ // # nagu+0 //_D_ // nagu+0 //_J_ // lemmas = set([ a[ROOT] for a in word[ANALYSIS] ]) forms = set([ a[FORM] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '': hidden[(d, w)] = 1 # # 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused: # Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks; endings = set([ a[ENDING] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \ and (list(endings))[0] == '0': hidden[(d, w)] = 1 # # 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus: # Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n // # kes+0 //_P_ sg n // kes+0 //_P_ pl n // postags = set([ a[POSTAG] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \ len(endings) == 1: hidden[(d, w)] = 1 # # 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus: # Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, // # üks+l //_N_ sg ad, // üks+l //_P_ sg ad, // if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \ 'N' in postags) and len(endings) == 1: hidden[(d, w)] = 1 return hidden
python
def __find_hidden_analyses(self, docs): """ Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei tule arvestada lemmade järelühestamisel: *) kesksõnade nud, dud, tud mitmesused; *) muutumatute sõnade sõnaliigi mitmesus; *) oleviku 'olema' mitmesus ('nad on' vs 'ta on'); *) asesõnade ainsuse-mitmuse mitmesus; *) arv- ja asesõnade vaheline mitmesus; Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega, iga võti kujul (doc_index, word_index); """ hidden = dict() nudTudLopud = re.compile('^.*[ntd]ud$') for d in range(len(docs)): for w in range(len(docs[d][WORDS])): word = docs[d][WORDS][w] if ANALYSIS in word and len(word[ANALYSIS]) > 1: # # 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus: # kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, // nudTud = [ nudTudLopud.match(a[ROOT]) != None or \ nudTudLopud.match(a[ENDING]) != None \ for a in word[ANALYSIS] ] if nudTud.count( True ) > 1: hidden[(d, w)] = 1 # # 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära: # Nt kui+0 //_D_ // kui+0 //_J_ // # nagu+0 //_D_ // nagu+0 //_J_ // lemmas = set([ a[ROOT] for a in word[ANALYSIS] ]) forms = set([ a[FORM] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '': hidden[(d, w)] = 1 # # 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused: # Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks; endings = set([ a[ENDING] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \ and (list(endings))[0] == '0': hidden[(d, w)] = 1 # # 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus: # Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n // # kes+0 //_P_ sg n // kes+0 //_P_ pl n // postags = set([ a[POSTAG] for a in word[ANALYSIS] ]) if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \ len(endings) == 1: hidden[(d, w)] = 1 # # 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus: # Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, // # üks+l //_N_ sg ad, // üks+l //_P_ sg ad, // if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \ 'N' in postags) and len(endings) == 1: hidden[(d, w)] = 1 return hidden
['def', '__find_hidden_analyses', '(', 'self', ',', 'docs', ')', ':', 'hidden', '=', 'dict', '(', ')', 'nudTudLopud', '=', 're', '.', 'compile', '(', "'^.*[ntd]ud$'", ')', 'for', 'd', 'in', 'range', '(', 'len', '(', 'docs', ')', ')', ':', 'for', 'w', 'in', 'range', '(', 'len', '(', 'docs', '[', 'd', ']', '[', 'WORDS', ']', ')', ')', ':', 'word', '=', 'docs', '[', 'd', ']', '[', 'WORDS', ']', '[', 'w', ']', 'if', 'ANALYSIS', 'in', 'word', 'and', 'len', '(', 'word', '[', 'ANALYSIS', ']', ')', '>', '1', ':', '#', '# 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus:', '# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //', 'nudTud', '=', '[', 'nudTudLopud', '.', 'match', '(', 'a', '[', 'ROOT', ']', ')', '!=', 'None', 'or', 'nudTudLopud', '.', 'match', '(', 'a', '[', 'ENDING', ']', ')', '!=', 'None', 'for', 'a', 'in', 'word', '[', 'ANALYSIS', ']', ']', 'if', 'nudTud', '.', 'count', '(', 'True', ')', '>', '1', ':', 'hidden', '[', '(', 'd', ',', 'w', ')', ']', '=', '1', '#', '# 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära:', '# Nt kui+0 //_D_ // kui+0 //_J_ //', '# nagu+0 //_D_ // nagu+0 //_J_ //', 'lemmas', '=', 'set', '(', '[', 'a', '[', 'ROOT', ']', 'for', 'a', 'in', 'word', '[', 'ANALYSIS', ']', ']', ')', 'forms', '=', 'set', '(', '[', 'a', '[', 'FORM', ']', 'for', 'a', 'in', 'word', '[', 'ANALYSIS', ']', ']', ')', 'if', 'len', '(', 'lemmas', ')', '==', '1', 'and', 'len', '(', 'forms', ')', '==', '1', 'and', '(', 'list', '(', 'forms', ')', ')', '[', '0', ']', '==', "''", ':', 'hidden', '[', '(', 'd', ',', 'w', ')', ']', '=', '1', '#', "# 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused:", "# Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks;", 'endings', '=', 'set', '(', '[', 'a', '[', 'ENDING', ']', 'for', 'a', 'in', 'word', '[', 'ANALYSIS', ']', ']', ')', 'if', 'len', '(', 'lemmas', ')', '==', '1', 'and', '(', 'list', '(', 'lemmas', ')', ')', '[', '0', ']', '==', "'ole'", 'and', 'len', '(', 'endings', ')', '==', '1', 'and', '(', 'list', '(', 'endings', ')', ')', '[', '0', ']', '==', "'0'", ':', 'hidden', '[', '(', 'd', ',', 'w', ')', ']', '=', '1', '#', '# 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus:', '# Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n //', '# kes+0 //_P_ sg n // kes+0 //_P_ pl n //', 'postags', '=', 'set', '(', '[', 'a', '[', 'POSTAG', ']', 'for', 'a', 'in', 'word', '[', 'ANALYSIS', ']', ']', ')', 'if', 'len', '(', 'lemmas', ')', '==', '1', 'and', 'len', '(', 'postags', ')', '==', '1', 'and', "'P'", 'in', 'postags', 'and', 'len', '(', 'endings', ')', '==', '1', ':', 'hidden', '[', '(', 'd', ',', 'w', ')', ']', '=', '1', '#', '# 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus:', '# Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, //', '# üks+l //_N_ sg ad, // üks+l //_P_ sg ad, //', 'if', 'len', '(', 'lemmas', ')', '==', '1', 'and', "'P'", 'in', 'postags', 'and', '(', "'O'", 'in', 'postags', 'or', "'N'", 'in', 'postags', ')', 'and', 'len', '(', 'endings', ')', '==', '1', ':', 'hidden', '[', '(', 'd', ',', 'w', ')', ']', '=', '1', 'return', 'hidden']
Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei tule arvestada lemmade järelühestamisel: *) kesksõnade nud, dud, tud mitmesused; *) muutumatute sõnade sõnaliigi mitmesus; *) oleviku 'olema' mitmesus ('nad on' vs 'ta on'); *) asesõnade ainsuse-mitmuse mitmesus; *) arv- ja asesõnade vaheline mitmesus; Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega, iga võti kujul (doc_index, word_index);
['Jätab', 'meelde', 'millised', 'analüüsid', 'on', 'nn', 'peidetud', 'ehk', 'siis', 'mida', 'ei', 'tule', 'arvestada', 'lemmade', 'järelühestamisel', ':', '*', ')', 'kesksõnade', 'nud', 'dud', 'tud', 'mitmesused', ';', '*', ')', 'muutumatute', 'sõnade', 'sõnaliigi', 'mitmesus', ';', '*', ')', 'oleviku', 'olema', 'mitmesus', '(', 'nad', 'on', 'vs', 'ta', 'on', ')', ';', '*', ')', 'asesõnade', 'ainsuse', '-', 'mitmuse', 'mitmesus', ';', '*', ')', 'arv', '-', 'ja', 'asesõnade', 'vaheline', 'mitmesus', ';', 'Tagastab', 'sõnastiku', 'peidetud', 'analüüse', 'sisaldanud', 'sõnade', 'asukohtadega', 'iga', 'võti', 'kujul', '(', 'doc_index', 'word_index', ')', ';']
train
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L491-L545
3,736
graphql-python/graphql-core-next
graphql/subscription/subscribe.py
create_source_event_stream
async def create_source_event_stream( schema: GraphQLSchema, document: DocumentNode, root_value: Any = None, context_value: Any = None, variable_values: Dict[str, Any] = None, operation_name: str = None, field_resolver: GraphQLFieldResolver = None, ) -> Union[AsyncIterable[Any], ExecutionResult]: """Create source even stream Implements the "CreateSourceEventStream" algorithm described in the GraphQL specification, resolving the subscription source event stream. Returns a coroutine that yields an AsyncIterable. If the client provided invalid arguments, the source stream could not be created, or the resolver did not return an AsyncIterable, this function will throw an error, which should be caught and handled by the caller. A Source Event Stream represents a sequence of events, each of which triggers a GraphQL execution for that event. This may be useful when hosting the stateful subscription service in a different process or machine than the stateless GraphQL execution engine, or otherwise separating these two steps. For more on this, see the "Supporting Subscriptions at Scale" information in the GraphQL spec. """ # If arguments are missing or incorrectly typed, this is an internal developer # mistake which should throw an early error. assert_valid_execution_arguments(schema, document, variable_values) # If a valid context cannot be created due to incorrect arguments, this will throw # an error. context = ExecutionContext.build( schema, document, root_value, context_value, variable_values, operation_name, field_resolver, ) # Return early errors if execution context failed. if isinstance(context, list): return ExecutionResult(data=None, errors=context) type_ = get_operation_root_type(schema, context.operation) fields = context.collect_fields(type_, context.operation.selection_set, {}, set()) response_names = list(fields) response_name = response_names[0] field_nodes = fields[response_name] field_node = field_nodes[0] field_name = field_node.name.value field_def = get_field_def(schema, type_, field_name) if not field_def: raise GraphQLError( f"The subscription field '{field_name}' is not defined.", field_nodes ) # Call the `subscribe()` resolver or the default resolver to produce an # AsyncIterable yielding raw payloads. resolve_fn = field_def.subscribe or context.field_resolver resolve_fn = cast(GraphQLFieldResolver, resolve_fn) # help mypy path = add_path(None, response_name) info = context.build_resolve_info(field_def, field_nodes, type_, path) # `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm # from GraphQL specification. It differs from `resolve_field_value` due to # providing a different `resolve_fn`. result = context.resolve_field_value_or_error( field_def, field_nodes, resolve_fn, root_value, info ) event_stream = await cast(Awaitable, result) if isawaitable(result) else result # If `event_stream` is an Error, rethrow a located error. if isinstance(event_stream, Exception): raise located_error(event_stream, field_nodes, response_path_as_list(path)) # Assert field returned an event stream, otherwise yield an error. if isinstance(event_stream, AsyncIterable): return cast(AsyncIterable, event_stream) raise TypeError( f"Subscription field must return AsyncIterable. Received: {event_stream!r}" )
python
async def create_source_event_stream( schema: GraphQLSchema, document: DocumentNode, root_value: Any = None, context_value: Any = None, variable_values: Dict[str, Any] = None, operation_name: str = None, field_resolver: GraphQLFieldResolver = None, ) -> Union[AsyncIterable[Any], ExecutionResult]: """Create source even stream Implements the "CreateSourceEventStream" algorithm described in the GraphQL specification, resolving the subscription source event stream. Returns a coroutine that yields an AsyncIterable. If the client provided invalid arguments, the source stream could not be created, or the resolver did not return an AsyncIterable, this function will throw an error, which should be caught and handled by the caller. A Source Event Stream represents a sequence of events, each of which triggers a GraphQL execution for that event. This may be useful when hosting the stateful subscription service in a different process or machine than the stateless GraphQL execution engine, or otherwise separating these two steps. For more on this, see the "Supporting Subscriptions at Scale" information in the GraphQL spec. """ # If arguments are missing or incorrectly typed, this is an internal developer # mistake which should throw an early error. assert_valid_execution_arguments(schema, document, variable_values) # If a valid context cannot be created due to incorrect arguments, this will throw # an error. context = ExecutionContext.build( schema, document, root_value, context_value, variable_values, operation_name, field_resolver, ) # Return early errors if execution context failed. if isinstance(context, list): return ExecutionResult(data=None, errors=context) type_ = get_operation_root_type(schema, context.operation) fields = context.collect_fields(type_, context.operation.selection_set, {}, set()) response_names = list(fields) response_name = response_names[0] field_nodes = fields[response_name] field_node = field_nodes[0] field_name = field_node.name.value field_def = get_field_def(schema, type_, field_name) if not field_def: raise GraphQLError( f"The subscription field '{field_name}' is not defined.", field_nodes ) # Call the `subscribe()` resolver or the default resolver to produce an # AsyncIterable yielding raw payloads. resolve_fn = field_def.subscribe or context.field_resolver resolve_fn = cast(GraphQLFieldResolver, resolve_fn) # help mypy path = add_path(None, response_name) info = context.build_resolve_info(field_def, field_nodes, type_, path) # `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm # from GraphQL specification. It differs from `resolve_field_value` due to # providing a different `resolve_fn`. result = context.resolve_field_value_or_error( field_def, field_nodes, resolve_fn, root_value, info ) event_stream = await cast(Awaitable, result) if isawaitable(result) else result # If `event_stream` is an Error, rethrow a located error. if isinstance(event_stream, Exception): raise located_error(event_stream, field_nodes, response_path_as_list(path)) # Assert field returned an event stream, otherwise yield an error. if isinstance(event_stream, AsyncIterable): return cast(AsyncIterable, event_stream) raise TypeError( f"Subscription field must return AsyncIterable. Received: {event_stream!r}" )
['async', 'def', 'create_source_event_stream', '(', 'schema', ':', 'GraphQLSchema', ',', 'document', ':', 'DocumentNode', ',', 'root_value', ':', 'Any', '=', 'None', ',', 'context_value', ':', 'Any', '=', 'None', ',', 'variable_values', ':', 'Dict', '[', 'str', ',', 'Any', ']', '=', 'None', ',', 'operation_name', ':', 'str', '=', 'None', ',', 'field_resolver', ':', 'GraphQLFieldResolver', '=', 'None', ',', ')', '->', 'Union', '[', 'AsyncIterable', '[', 'Any', ']', ',', 'ExecutionResult', ']', ':', '# If arguments are missing or incorrectly typed, this is an internal developer', '# mistake which should throw an early error.', 'assert_valid_execution_arguments', '(', 'schema', ',', 'document', ',', 'variable_values', ')', '# If a valid context cannot be created due to incorrect arguments, this will throw', '# an error.', 'context', '=', 'ExecutionContext', '.', 'build', '(', 'schema', ',', 'document', ',', 'root_value', ',', 'context_value', ',', 'variable_values', ',', 'operation_name', ',', 'field_resolver', ',', ')', '# Return early errors if execution context failed.', 'if', 'isinstance', '(', 'context', ',', 'list', ')', ':', 'return', 'ExecutionResult', '(', 'data', '=', 'None', ',', 'errors', '=', 'context', ')', 'type_', '=', 'get_operation_root_type', '(', 'schema', ',', 'context', '.', 'operation', ')', 'fields', '=', 'context', '.', 'collect_fields', '(', 'type_', ',', 'context', '.', 'operation', '.', 'selection_set', ',', '{', '}', ',', 'set', '(', ')', ')', 'response_names', '=', 'list', '(', 'fields', ')', 'response_name', '=', 'response_names', '[', '0', ']', 'field_nodes', '=', 'fields', '[', 'response_name', ']', 'field_node', '=', 'field_nodes', '[', '0', ']', 'field_name', '=', 'field_node', '.', 'name', '.', 'value', 'field_def', '=', 'get_field_def', '(', 'schema', ',', 'type_', ',', 'field_name', ')', 'if', 'not', 'field_def', ':', 'raise', 'GraphQLError', '(', 'f"The subscription field \'{field_name}\' is not defined."', ',', 'field_nodes', ')', '# Call the `subscribe()` resolver or the default resolver to produce an', '# AsyncIterable yielding raw payloads.', 'resolve_fn', '=', 'field_def', '.', 'subscribe', 'or', 'context', '.', 'field_resolver', 'resolve_fn', '=', 'cast', '(', 'GraphQLFieldResolver', ',', 'resolve_fn', ')', '# help mypy', 'path', '=', 'add_path', '(', 'None', ',', 'response_name', ')', 'info', '=', 'context', '.', 'build_resolve_info', '(', 'field_def', ',', 'field_nodes', ',', 'type_', ',', 'path', ')', '# `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm', '# from GraphQL specification. It differs from `resolve_field_value` due to', '# providing a different `resolve_fn`.', 'result', '=', 'context', '.', 'resolve_field_value_or_error', '(', 'field_def', ',', 'field_nodes', ',', 'resolve_fn', ',', 'root_value', ',', 'info', ')', 'event_stream', '=', 'await', 'cast', '(', 'Awaitable', ',', 'result', ')', 'if', 'isawaitable', '(', 'result', ')', 'else', 'result', '# If `event_stream` is an Error, rethrow a located error.', 'if', 'isinstance', '(', 'event_stream', ',', 'Exception', ')', ':', 'raise', 'located_error', '(', 'event_stream', ',', 'field_nodes', ',', 'response_path_as_list', '(', 'path', ')', ')', '# Assert field returned an event stream, otherwise yield an error.', 'if', 'isinstance', '(', 'event_stream', ',', 'AsyncIterable', ')', ':', 'return', 'cast', '(', 'AsyncIterable', ',', 'event_stream', ')', 'raise', 'TypeError', '(', 'f"Subscription field must return AsyncIterable. Received: {event_stream!r}"', ')']
Create source even stream Implements the "CreateSourceEventStream" algorithm described in the GraphQL specification, resolving the subscription source event stream. Returns a coroutine that yields an AsyncIterable. If the client provided invalid arguments, the source stream could not be created, or the resolver did not return an AsyncIterable, this function will throw an error, which should be caught and handled by the caller. A Source Event Stream represents a sequence of events, each of which triggers a GraphQL execution for that event. This may be useful when hosting the stateful subscription service in a different process or machine than the stateless GraphQL execution engine, or otherwise separating these two steps. For more on this, see the "Supporting Subscriptions at Scale" information in the GraphQL spec.
['Create', 'source', 'even', 'stream']
train
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/subscription/subscribe.py#L91-L178
3,737
CZ-NIC/yangson
yangson/schemanode.py
InternalNode._case_stmt
def _case_stmt(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle case statement.""" self._handle_child(CaseNode(), stmt, sctx)
python
def _case_stmt(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle case statement.""" self._handle_child(CaseNode(), stmt, sctx)
['def', '_case_stmt', '(', 'self', ',', 'stmt', ':', 'Statement', ',', 'sctx', ':', 'SchemaContext', ')', '->', 'None', ':', 'self', '.', '_handle_child', '(', 'CaseNode', '(', ')', ',', 'stmt', ',', 'sctx', ')']
Handle case statement.
['Handle', 'case', 'statement', '.']
train
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L623-L625
3,738
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
wrap_kwargs_to_initdict
def wrap_kwargs_to_initdict(init_kwargs_fn: InitKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_kwargs_fn(obj) if check_result: if not isinstance(result, dict): raise ValueError( "Class {} failed to provide a kwargs dict and " "provided instead: {}".format(typename, repr(result))) return kwargs_to_initdict(init_kwargs_fn(obj)) return wrapper
python
def wrap_kwargs_to_initdict(init_kwargs_fn: InitKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_kwargs_fn(obj) if check_result: if not isinstance(result, dict): raise ValueError( "Class {} failed to provide a kwargs dict and " "provided instead: {}".format(typename, repr(result))) return kwargs_to_initdict(init_kwargs_fn(obj)) return wrapper
['def', 'wrap_kwargs_to_initdict', '(', 'init_kwargs_fn', ':', 'InitKwargsFnType', ',', 'typename', ':', 'str', ',', 'check_result', ':', 'bool', '=', 'True', ')', '->', 'InstanceToInitDictFnType', ':', 'def', 'wrapper', '(', 'obj', ':', 'Instance', ')', '->', 'InitDict', ':', 'result', '=', 'init_kwargs_fn', '(', 'obj', ')', 'if', 'check_result', ':', 'if', 'not', 'isinstance', '(', 'result', ',', 'dict', ')', ':', 'raise', 'ValueError', '(', '"Class {} failed to provide a kwargs dict and "', '"provided instead: {}"', '.', 'format', '(', 'typename', ',', 'repr', '(', 'result', ')', ')', ')', 'return', 'kwargs_to_initdict', '(', 'init_kwargs_fn', '(', 'obj', ')', ')', 'return', 'wrapper']
Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``.
['Wraps', 'a', 'function', 'producing', 'a', 'KwargsDict', 'making', 'it', 'into', 'a', 'function', 'producing', 'an', 'InitDict', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L270-L287
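A minimal, self-contained sketch of the wrapping idea in the function above. The `__type__`/`kwargs` layout below stands in for the library's actual ``InitDict`` format and is purely an assumption for illustration.

```python
# Illustrative stand-in for the library's InitDict; the real kwargs_to_initdict
# produces the serializer's own format, which may differ.
def kwargs_to_initdict_sketch(typename, kwargs):
    return {"__type__": typename, "kwargs": kwargs}

def wrap_kwargs_to_initdict_sketch(init_kwargs_fn, typename, check_result=True):
    def wrapper(obj):
        result = init_kwargs_fn(obj)
        if check_result and not isinstance(result, dict):
            raise ValueError(
                "Class {} failed to provide a kwargs dict and "
                "provided instead: {}".format(typename, repr(result)))
        return kwargs_to_initdict_sketch(typename, result)
    return wrapper

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

to_initdict = wrap_kwargs_to_initdict_sketch(lambda p: {"x": p.x, "y": p.y}, "Point")
print(to_initdict(Point(1, 2)))  # {'__type__': 'Point', 'kwargs': {'x': 1, 'y': 2}}
```

One design note: the original wrapper calls ``init_kwargs_fn(obj)`` twice, once for the type check and again inside ``kwargs_to_initdict``; the sketch reuses the first result.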
3,739
google/tangent
tangent/reverse_ad.py
ReverseAD.primal_and_adjoint_for_tracing
def primal_and_adjoint_for_tracing(self, node): """Build the primal and adjoint of a traceable function. Args: node: ast.Call node of a function we wish to trace, instead of transform Returns: primal: new ast.Assign node to replace the original primal call adjoint: new ast.Assign node using the VJP generated in primal to calculate the adjoint. """ primal_template = grads.primals[tracing.Traceable] adjoint_template = grads.adjoints[tracing.Traceable] # Prep to_pack = node.args target = ast_.copy_node(self.orig_target) vjp = quoting.quote(self.namer.unique('%s_grad' % node.func.id)) tmp = create.create_temp(quoting.quote('tmp'), self.namer) assert len(node.keywords) == 0 # Full replacement of primal # TODO: do we need to set 'pri_call' on this? primal = template.replace( primal_template, namer=self.namer, result=target, fn=node.func, tmp=tmp, vjp=vjp, args=gast.Tuple(elts=to_pack, ctx=gast.Load())) # Building adjoint using the vjp generated with the primal dto_pack = gast.Tuple( elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack], ctx=gast.Store()) adjoint = template.replace( adjoint_template, namer=self.namer, result=target, vjp=vjp, dargs=dto_pack) return primal, adjoint
python
def primal_and_adjoint_for_tracing(self, node): """Build the primal and adjoint of a traceable function. Args: node: ast.Call node of a function we wish to trace, instead of transform Returns: primal: new ast.Assign node to replace the original primal call adjoint: new ast.Assign node using the VJP generated in primal to calculate the adjoint. """ primal_template = grads.primals[tracing.Traceable] adjoint_template = grads.adjoints[tracing.Traceable] # Prep to_pack = node.args target = ast_.copy_node(self.orig_target) vjp = quoting.quote(self.namer.unique('%s_grad' % node.func.id)) tmp = create.create_temp(quoting.quote('tmp'), self.namer) assert len(node.keywords) == 0 # Full replacement of primal # TODO: do we need to set 'pri_call' on this? primal = template.replace( primal_template, namer=self.namer, result=target, fn=node.func, tmp=tmp, vjp=vjp, args=gast.Tuple(elts=to_pack, ctx=gast.Load())) # Building adjoint using the vjp generated with the primal dto_pack = gast.Tuple( elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack], ctx=gast.Store()) adjoint = template.replace( adjoint_template, namer=self.namer, result=target, vjp=vjp, dargs=dto_pack) return primal, adjoint
['def', 'primal_and_adjoint_for_tracing', '(', 'self', ',', 'node', ')', ':', 'primal_template', '=', 'grads', '.', 'primals', '[', 'tracing', '.', 'Traceable', ']', 'adjoint_template', '=', 'grads', '.', 'adjoints', '[', 'tracing', '.', 'Traceable', ']', '# Prep', 'to_pack', '=', 'node', '.', 'args', 'target', '=', 'ast_', '.', 'copy_node', '(', 'self', '.', 'orig_target', ')', 'vjp', '=', 'quoting', '.', 'quote', '(', 'self', '.', 'namer', '.', 'unique', '(', "'%s_grad'", '%', 'node', '.', 'func', '.', 'id', ')', ')', 'tmp', '=', 'create', '.', 'create_temp', '(', 'quoting', '.', 'quote', '(', "'tmp'", ')', ',', 'self', '.', 'namer', ')', 'assert', 'len', '(', 'node', '.', 'keywords', ')', '==', '0', '# Full replacement of primal', "# TODO: do we need to set 'pri_call' on this?", 'primal', '=', 'template', '.', 'replace', '(', 'primal_template', ',', 'namer', '=', 'self', '.', 'namer', ',', 'result', '=', 'target', ',', 'fn', '=', 'node', '.', 'func', ',', 'tmp', '=', 'tmp', ',', 'vjp', '=', 'vjp', ',', 'args', '=', 'gast', '.', 'Tuple', '(', 'elts', '=', 'to_pack', ',', 'ctx', '=', 'gast', '.', 'Load', '(', ')', ')', ')', '# Building adjoint using the vjp generated with the primal', 'dto_pack', '=', 'gast', '.', 'Tuple', '(', 'elts', '=', '[', 'create', '.', 'create_temp_grad', '(', 'arg', ',', 'self', '.', 'namer', ')', 'for', 'arg', 'in', 'to_pack', ']', ',', 'ctx', '=', 'gast', '.', 'Store', '(', ')', ')', 'adjoint', '=', 'template', '.', 'replace', '(', 'adjoint_template', ',', 'namer', '=', 'self', '.', 'namer', ',', 'result', '=', 'target', ',', 'vjp', '=', 'vjp', ',', 'dargs', '=', 'dto_pack', ')', 'return', 'primal', ',', 'adjoint']
Build the primal and adjoint of a traceable function. Args: node: ast.Call node of a function we wish to trace, instead of transform Returns: primal: new ast.Assign node to replace the original primal call adjoint: new ast.Assign node using the VJP generated in primal to calculate the adjoint.
['Build', 'the', 'primal', 'and', 'adjoint', 'of', 'a', 'traceable', 'function', '.']
train
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/reverse_ad.py#L638-L682
3,740
genialis/resolwe
resolwe/flow/utils/purge.py
_location_purge_all
def _location_purge_all(delete=False, verbosity=0): """Purge all data locations.""" if DataLocation.objects.exists(): for location in DataLocation.objects.filter(Q(purged=False) | Q(data=None)): location_purge(location.id, delete, verbosity) else: logger.info("No data locations")
python
def _location_purge_all(delete=False, verbosity=0): """Purge all data locations.""" if DataLocation.objects.exists(): for location in DataLocation.objects.filter(Q(purged=False) | Q(data=None)): location_purge(location.id, delete, verbosity) else: logger.info("No data locations")
['def', '_location_purge_all', '(', 'delete', '=', 'False', ',', 'verbosity', '=', '0', ')', ':', 'if', 'DataLocation', '.', 'objects', '.', 'exists', '(', ')', ':', 'for', 'location', 'in', 'DataLocation', '.', 'objects', '.', 'filter', '(', 'Q', '(', 'purged', '=', 'False', ')', '|', 'Q', '(', 'data', '=', 'None', ')', ')', ':', 'location_purge', '(', 'location', '.', 'id', ',', 'delete', ',', 'verbosity', ')', 'else', ':', 'logger', '.', 'info', '(', '"No data locations"', ')']
Purge all data locations.
['Purge', 'all', 'data', 'locations', '.']
train
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/purge.py#L164-L170
3,741
inveniosoftware/invenio-access
invenio_access/models.py
ActionNeedMixin.deny
def deny(cls, action, **kwargs): """Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. """ return cls.create(action, exclude=True, **kwargs)
python
def deny(cls, action, **kwargs): """Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. """ return cls.create(action, exclude=True, **kwargs)
['def', 'deny', '(', 'cls', ',', 'action', ',', '*', '*', 'kwargs', ')', ':', 'return', 'cls', '.', 'create', '(', 'action', ',', 'exclude', '=', 'True', ',', '*', '*', 'kwargs', ')']
Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance.
['Deny', 'the', 'given', 'action', 'need', '.']
train
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/models.py#L71-L77
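A hedged usage sketch for the classmethod above, using the concrete ``ActionUsers`` subclass. The action name, user id, and session handling are assumptions drawn from the usual invenio-access patterns rather than anything stated here, and the snippet is expected to run inside a Flask application context.

```python
from flask_principal import ActionNeed
from invenio_access.models import ActionUsers
from invenio_db import db

action_records_edit = ActionNeed('records-edit')

# Persist a rule that explicitly excludes user 42 from the action
# (deny() builds the row with exclude=True; it still has to be committed).
db.session.add(ActionUsers.deny(action_records_edit, user_id=42))
db.session.commit()
```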
3,742
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
revnet_164_cifar
def revnet_164_cifar(): """Tiny hparams suitable for CIFAR/etc.""" hparams = revnet_cifar_base() hparams.bottleneck = True hparams.num_channels = [16, 32, 64] hparams.num_layers_per_block = [8, 8, 8] return hparams
python
def revnet_164_cifar(): """Tiny hparams suitable for CIFAR/etc.""" hparams = revnet_cifar_base() hparams.bottleneck = True hparams.num_channels = [16, 32, 64] hparams.num_layers_per_block = [8, 8, 8] return hparams
['def', 'revnet_164_cifar', '(', ')', ':', 'hparams', '=', 'revnet_cifar_base', '(', ')', 'hparams', '.', 'bottleneck', '=', 'True', 'hparams', '.', 'num_channels', '=', '[', '16', ',', '32', ',', '64', ']', 'hparams', '.', 'num_layers_per_block', '=', '[', '8', ',', '8', ',', '8', ']', 'return', 'hparams']
Tiny hparams suitable for CIFAR/etc.
['Tiny', 'hparams', 'suitable', 'for', 'CIFAR', '/', 'etc', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L425-L431
3,743
numenta/htmresearch
projects/sdr_paper/tm_experiments/run_tm_noise_experiment.py
run_tm_noise_experiment
def run_tm_noise_experiment(dim = 2048, cellsPerColumn=1, num_active = 40, activationThreshold=16, initialPermanence=0.8, connectedPermanence=0.50, minThreshold=16, maxNewSynapseCount=20, permanenceIncrement=0.05, permanenceDecrement=0.00, predictedSegmentDecrement=0.000, maxSegmentsPerCell=255, maxSynapsesPerSegment=255, seed=42, num_samples = 1, num_trials = 1000, sequence_length = 20, training_iters = 1, automatic_threshold = False, noise_range = range(0, 100, 5)): """ Run an experiment tracking the performance of the temporal memory given noise. The number of active cells and the dimensions of the TM are fixed. We track performance by comparing the cells predicted to be active with the cells actually active in the sequence without noise at every timestep, and averaging across timesteps. Three metrics are used, correlation (Pearson's r, by numpy.corrcoef), set similarity (Jaccard index) and cosine similarity (using scipy.spatial.distance.cosine). The Jaccard set similarity is the canonical metric used in the paper, but all three metrics tend to produce very similar results. Typically, this experiment is run to test the influence of activation threshold on noise tolerance, with multiple different thresholds tested. However, this experiment could also be used to examine the influence of factors such as sparsity and sequence length. Output is written to tm_noise_{threshold}}.txt, including sample size. We used three different activation threshold settings, 8, 12 and 16, mirroring the parameters used in the Poirazi neuron model experiment. """ if automatic_threshold: activationThreshold = min(num_active/2, maxNewSynapseCount/2) minThreshold = min(num_active/2, maxNewSynapseCount/2) for noise in noise_range: print noise for trial in range(num_trials): tm = TM(columnDimensions=(dim,), cellsPerColumn=cellsPerColumn, activationThreshold=activationThreshold, initialPermanence=initialPermanence, connectedPermanence=connectedPermanence, minThreshold=minThreshold, maxNewSynapseCount=maxNewSynapseCount, permanenceIncrement=permanenceIncrement, permanenceDecrement=permanenceDecrement, predictedSegmentDecrement=predictedSegmentDecrement, maxSegmentsPerCell=maxSegmentsPerCell, maxSynapsesPerSegment=maxSynapsesPerSegment, )#seed=seed) datapoints = [] canonical_active_cells = [] for sample in range(num_samples): data = generate_evenly_distributed_data_sparse(dim = dim, num_active = num_active, num_samples = sequence_length) datapoints.append(data) for i in range(training_iters): for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = True) tm.reset() current_active_cells = [] for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = True) current_active_cells.append(tm.getActiveCells()) canonical_active_cells.append(current_active_cells) tm.reset() # Now that the TM has been trained, check its performance on each sequence with noise added. 
correlations = [] similarities = [] csims = [] for datapoint, active_cells in zip(datapoints, canonical_active_cells): data = copy.deepcopy(datapoint) apply_noise(data, noise) predicted_cells = [] for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = False) predicted_cells.append(tm.getPredictiveCells()) similarity = [(0.+len(set(predicted) & set(active)))/len((set(predicted) | set(active))) for predicted, active in zip (predicted_cells[:-1], active_cells[1:])] dense_predicted_cells = convert_cell_lists_to_dense(2048*32, predicted_cells[:-1]) dense_active_cells = convert_cell_lists_to_dense(2048*32, active_cells[1:]) correlation = [numpy.corrcoef(numpy.asarray([predicted, active]))[0, 1] for predicted, active in zip(dense_predicted_cells, dense_active_cells)] csim = [1 - cosine(predicted, active) for predicted, active in zip(dense_predicted_cells, dense_active_cells)] correlation = numpy.nan_to_num(correlation) csim = numpy.nan_to_num(csim) correlations.append(numpy.mean(correlation)) similarities.append(numpy.mean(similarity)) csims.append(numpy.mean(csim)) correlation = numpy.mean(correlations) similarity = numpy.mean(similarities) csim = numpy.mean(csims) with open("tm_noise_{}.txt".format(activationThreshold), "a") as f: f.write(str(noise)+", " + str(correlation) + ", " + str(similarity) + ", " + str(csim) + ", " + str(num_trials) + "\n")
python
def run_tm_noise_experiment(dim = 2048, cellsPerColumn=1, num_active = 40, activationThreshold=16, initialPermanence=0.8, connectedPermanence=0.50, minThreshold=16, maxNewSynapseCount=20, permanenceIncrement=0.05, permanenceDecrement=0.00, predictedSegmentDecrement=0.000, maxSegmentsPerCell=255, maxSynapsesPerSegment=255, seed=42, num_samples = 1, num_trials = 1000, sequence_length = 20, training_iters = 1, automatic_threshold = False, noise_range = range(0, 100, 5)): """ Run an experiment tracking the performance of the temporal memory given noise. The number of active cells and the dimensions of the TM are fixed. We track performance by comparing the cells predicted to be active with the cells actually active in the sequence without noise at every timestep, and averaging across timesteps. Three metrics are used, correlation (Pearson's r, by numpy.corrcoef), set similarity (Jaccard index) and cosine similarity (using scipy.spatial.distance.cosine). The Jaccard set similarity is the canonical metric used in the paper, but all three metrics tend to produce very similar results. Typically, this experiment is run to test the influence of activation threshold on noise tolerance, with multiple different thresholds tested. However, this experiment could also be used to examine the influence of factors such as sparsity and sequence length. Output is written to tm_noise_{threshold}}.txt, including sample size. We used three different activation threshold settings, 8, 12 and 16, mirroring the parameters used in the Poirazi neuron model experiment. """ if automatic_threshold: activationThreshold = min(num_active/2, maxNewSynapseCount/2) minThreshold = min(num_active/2, maxNewSynapseCount/2) for noise in noise_range: print noise for trial in range(num_trials): tm = TM(columnDimensions=(dim,), cellsPerColumn=cellsPerColumn, activationThreshold=activationThreshold, initialPermanence=initialPermanence, connectedPermanence=connectedPermanence, minThreshold=minThreshold, maxNewSynapseCount=maxNewSynapseCount, permanenceIncrement=permanenceIncrement, permanenceDecrement=permanenceDecrement, predictedSegmentDecrement=predictedSegmentDecrement, maxSegmentsPerCell=maxSegmentsPerCell, maxSynapsesPerSegment=maxSynapsesPerSegment, )#seed=seed) datapoints = [] canonical_active_cells = [] for sample in range(num_samples): data = generate_evenly_distributed_data_sparse(dim = dim, num_active = num_active, num_samples = sequence_length) datapoints.append(data) for i in range(training_iters): for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = True) tm.reset() current_active_cells = [] for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = True) current_active_cells.append(tm.getActiveCells()) canonical_active_cells.append(current_active_cells) tm.reset() # Now that the TM has been trained, check its performance on each sequence with noise added. 
correlations = [] similarities = [] csims = [] for datapoint, active_cells in zip(datapoints, canonical_active_cells): data = copy.deepcopy(datapoint) apply_noise(data, noise) predicted_cells = [] for j in range(data.nRows()): activeColumns = set(data.rowNonZeros(j)[0]) tm.compute(activeColumns, learn = False) predicted_cells.append(tm.getPredictiveCells()) similarity = [(0.+len(set(predicted) & set(active)))/len((set(predicted) | set(active))) for predicted, active in zip (predicted_cells[:-1], active_cells[1:])] dense_predicted_cells = convert_cell_lists_to_dense(2048*32, predicted_cells[:-1]) dense_active_cells = convert_cell_lists_to_dense(2048*32, active_cells[1:]) correlation = [numpy.corrcoef(numpy.asarray([predicted, active]))[0, 1] for predicted, active in zip(dense_predicted_cells, dense_active_cells)] csim = [1 - cosine(predicted, active) for predicted, active in zip(dense_predicted_cells, dense_active_cells)] correlation = numpy.nan_to_num(correlation) csim = numpy.nan_to_num(csim) correlations.append(numpy.mean(correlation)) similarities.append(numpy.mean(similarity)) csims.append(numpy.mean(csim)) correlation = numpy.mean(correlations) similarity = numpy.mean(similarities) csim = numpy.mean(csims) with open("tm_noise_{}.txt".format(activationThreshold), "a") as f: f.write(str(noise)+", " + str(correlation) + ", " + str(similarity) + ", " + str(csim) + ", " + str(num_trials) + "\n")
['def', 'run_tm_noise_experiment', '(', 'dim', '=', '2048', ',', 'cellsPerColumn', '=', '1', ',', 'num_active', '=', '40', ',', 'activationThreshold', '=', '16', ',', 'initialPermanence', '=', '0.8', ',', 'connectedPermanence', '=', '0.50', ',', 'minThreshold', '=', '16', ',', 'maxNewSynapseCount', '=', '20', ',', 'permanenceIncrement', '=', '0.05', ',', 'permanenceDecrement', '=', '0.00', ',', 'predictedSegmentDecrement', '=', '0.000', ',', 'maxSegmentsPerCell', '=', '255', ',', 'maxSynapsesPerSegment', '=', '255', ',', 'seed', '=', '42', ',', 'num_samples', '=', '1', ',', 'num_trials', '=', '1000', ',', 'sequence_length', '=', '20', ',', 'training_iters', '=', '1', ',', 'automatic_threshold', '=', 'False', ',', 'noise_range', '=', 'range', '(', '0', ',', '100', ',', '5', ')', ')', ':', 'if', 'automatic_threshold', ':', 'activationThreshold', '=', 'min', '(', 'num_active', '/', '2', ',', 'maxNewSynapseCount', '/', '2', ')', 'minThreshold', '=', 'min', '(', 'num_active', '/', '2', ',', 'maxNewSynapseCount', '/', '2', ')', 'for', 'noise', 'in', 'noise_range', ':', 'print', 'noise', 'for', 'trial', 'in', 'range', '(', 'num_trials', ')', ':', 'tm', '=', 'TM', '(', 'columnDimensions', '=', '(', 'dim', ',', ')', ',', 'cellsPerColumn', '=', 'cellsPerColumn', ',', 'activationThreshold', '=', 'activationThreshold', ',', 'initialPermanence', '=', 'initialPermanence', ',', 'connectedPermanence', '=', 'connectedPermanence', ',', 'minThreshold', '=', 'minThreshold', ',', 'maxNewSynapseCount', '=', 'maxNewSynapseCount', ',', 'permanenceIncrement', '=', 'permanenceIncrement', ',', 'permanenceDecrement', '=', 'permanenceDecrement', ',', 'predictedSegmentDecrement', '=', 'predictedSegmentDecrement', ',', 'maxSegmentsPerCell', '=', 'maxSegmentsPerCell', ',', 'maxSynapsesPerSegment', '=', 'maxSynapsesPerSegment', ',', ')', '#seed=seed)', 'datapoints', '=', '[', ']', 'canonical_active_cells', '=', '[', ']', 'for', 'sample', 'in', 'range', '(', 'num_samples', ')', ':', 'data', '=', 'generate_evenly_distributed_data_sparse', '(', 'dim', '=', 'dim', ',', 'num_active', '=', 'num_active', ',', 'num_samples', '=', 'sequence_length', ')', 'datapoints', '.', 'append', '(', 'data', ')', 'for', 'i', 'in', 'range', '(', 'training_iters', ')', ':', 'for', 'j', 'in', 'range', '(', 'data', '.', 'nRows', '(', ')', ')', ':', 'activeColumns', '=', 'set', '(', 'data', '.', 'rowNonZeros', '(', 'j', ')', '[', '0', ']', ')', 'tm', '.', 'compute', '(', 'activeColumns', ',', 'learn', '=', 'True', ')', 'tm', '.', 'reset', '(', ')', 'current_active_cells', '=', '[', ']', 'for', 'j', 'in', 'range', '(', 'data', '.', 'nRows', '(', ')', ')', ':', 'activeColumns', '=', 'set', '(', 'data', '.', 'rowNonZeros', '(', 'j', ')', '[', '0', ']', ')', 'tm', '.', 'compute', '(', 'activeColumns', ',', 'learn', '=', 'True', ')', 'current_active_cells', '.', 'append', '(', 'tm', '.', 'getActiveCells', '(', ')', ')', 'canonical_active_cells', '.', 'append', '(', 'current_active_cells', ')', 'tm', '.', 'reset', '(', ')', '# Now that the TM has been trained, check its performance on each sequence with noise added.', 'correlations', '=', '[', ']', 'similarities', '=', '[', ']', 'csims', '=', '[', ']', 'for', 'datapoint', ',', 'active_cells', 'in', 'zip', '(', 'datapoints', ',', 'canonical_active_cells', ')', ':', 'data', '=', 'copy', '.', 'deepcopy', '(', 'datapoint', ')', 'apply_noise', '(', 'data', ',', 'noise', ')', 'predicted_cells', '=', '[', ']', 'for', 'j', 'in', 'range', '(', 'data', '.', 'nRows', '(', ')', ')', ':', 'activeColumns', '=', 'set', 
'(', 'data', '.', 'rowNonZeros', '(', 'j', ')', '[', '0', ']', ')', 'tm', '.', 'compute', '(', 'activeColumns', ',', 'learn', '=', 'False', ')', 'predicted_cells', '.', 'append', '(', 'tm', '.', 'getPredictiveCells', '(', ')', ')', 'similarity', '=', '[', '(', '0.', '+', 'len', '(', 'set', '(', 'predicted', ')', '&', 'set', '(', 'active', ')', ')', ')', '/', 'len', '(', '(', 'set', '(', 'predicted', ')', '|', 'set', '(', 'active', ')', ')', ')', 'for', 'predicted', ',', 'active', 'in', 'zip', '(', 'predicted_cells', '[', ':', '-', '1', ']', ',', 'active_cells', '[', '1', ':', ']', ')', ']', 'dense_predicted_cells', '=', 'convert_cell_lists_to_dense', '(', '2048', '*', '32', ',', 'predicted_cells', '[', ':', '-', '1', ']', ')', 'dense_active_cells', '=', 'convert_cell_lists_to_dense', '(', '2048', '*', '32', ',', 'active_cells', '[', '1', ':', ']', ')', 'correlation', '=', '[', 'numpy', '.', 'corrcoef', '(', 'numpy', '.', 'asarray', '(', '[', 'predicted', ',', 'active', ']', ')', ')', '[', '0', ',', '1', ']', 'for', 'predicted', ',', 'active', 'in', 'zip', '(', 'dense_predicted_cells', ',', 'dense_active_cells', ')', ']', 'csim', '=', '[', '1', '-', 'cosine', '(', 'predicted', ',', 'active', ')', 'for', 'predicted', ',', 'active', 'in', 'zip', '(', 'dense_predicted_cells', ',', 'dense_active_cells', ')', ']', 'correlation', '=', 'numpy', '.', 'nan_to_num', '(', 'correlation', ')', 'csim', '=', 'numpy', '.', 'nan_to_num', '(', 'csim', ')', 'correlations', '.', 'append', '(', 'numpy', '.', 'mean', '(', 'correlation', ')', ')', 'similarities', '.', 'append', '(', 'numpy', '.', 'mean', '(', 'similarity', ')', ')', 'csims', '.', 'append', '(', 'numpy', '.', 'mean', '(', 'csim', ')', ')', 'correlation', '=', 'numpy', '.', 'mean', '(', 'correlations', ')', 'similarity', '=', 'numpy', '.', 'mean', '(', 'similarities', ')', 'csim', '=', 'numpy', '.', 'mean', '(', 'csims', ')', 'with', 'open', '(', '"tm_noise_{}.txt"', '.', 'format', '(', 'activationThreshold', ')', ',', '"a"', ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'str', '(', 'noise', ')', '+', '", "', '+', 'str', '(', 'correlation', ')', '+', '", "', '+', 'str', '(', 'similarity', ')', '+', '", "', '+', 'str', '(', 'csim', ')', '+', '", "', '+', 'str', '(', 'num_trials', ')', '+', '"\\n"', ')']
Run an experiment tracking the performance of the temporal memory given noise. The number of active cells and the dimensions of the TM are fixed. We track performance by comparing the cells predicted to be active with the cells actually active in the sequence without noise at every timestep, and averaging across timesteps. Three metrics are used: correlation (Pearson's r, by numpy.corrcoef), set similarity (Jaccard index), and cosine similarity (using scipy.spatial.distance.cosine). The Jaccard set similarity is the canonical metric used in the paper, but all three metrics tend to produce very similar results. Typically, this experiment is run to test the influence of activation threshold on noise tolerance, with multiple different thresholds tested. However, this experiment could also be used to examine the influence of factors such as sparsity and sequence length. Output is written to tm_noise_{threshold}.txt, including sample size. We used three different activation threshold settings, 8, 12, and 16, mirroring the parameters used in the Poirazi neuron model experiment.
['Run', 'an', 'experiment', 'tracking', 'the', 'performance', 'of', 'the', 'temporal', 'memory', 'given', 'noise', '.', 'The', 'number', 'of', 'active', 'cells', 'and', 'the', 'dimensions', 'of', 'the', 'TM', 'are', 'fixed', '.', 'We', 'track', 'performance', 'by', 'comparing', 'the', 'cells', 'predicted', 'to', 'be', 'active', 'with', 'the', 'cells', 'actually', 'active', 'in', 'the', 'sequence', 'without', 'noise', 'at', 'every', 'timestep', 'and', 'averaging', 'across', 'timesteps', '.', 'Three', 'metrics', 'are', 'used', 'correlation', '(', 'Pearson', 's', 'r', 'by', 'numpy', '.', 'corrcoef', ')', 'set', 'similarity', '(', 'Jaccard', 'index', ')', 'and', 'cosine', 'similarity', '(', 'using', 'scipy', '.', 'spatial', '.', 'distance', '.', 'cosine', ')', '.', 'The', 'Jaccard', 'set', 'similarity', 'is', 'the', 'canonical', 'metric', 'used', 'in', 'the', 'paper', 'but', 'all', 'three', 'metrics', 'tend', 'to', 'produce', 'very', 'similar', 'results', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/tm_experiments/run_tm_noise_experiment.py#L44-L158
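A small worked example of the three comparison metrics named in the docstring, on toy cell sets; the numbers are illustrative and are not experiment output.

```python
import numpy
from scipy.spatial.distance import cosine

predicted = {1, 2, 3, 5}
active = {2, 3, 4, 5}

# Jaccard set similarity, as in the list comprehension in the code: |A & B| / |A | B|
jaccard = (0. + len(predicted & active)) / len(predicted | active)  # 3 / 5 = 0.6

# Dense binary vectors over a toy cell space of 8 cells
dim = 8
dense_predicted = numpy.zeros(dim)
dense_predicted[list(predicted)] = 1
dense_active = numpy.zeros(dim)
dense_active[list(active)] = 1

pearson = numpy.corrcoef(numpy.asarray([dense_predicted, dense_active]))[0, 1]
cosine_similarity = 1 - cosine(dense_predicted, dense_active)

print(jaccard, pearson, cosine_similarity)
```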
3,744
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/python_breakpoint.py
PythonBreakpoint._SetCompleted
def _SetCompleted(self): """Atomically marks the breakpoint as completed. Returns: True if the breakpoint wasn't marked already completed or False if the breakpoint was already completed. """ with self._lock: if self._completed: return False self._completed = True return True
python
def _SetCompleted(self): """Atomically marks the breakpoint as completed. Returns: True if the breakpoint wasn't marked already completed or False if the breakpoint was already completed. """ with self._lock: if self._completed: return False self._completed = True return True
['def', '_SetCompleted', '(', 'self', ')', ':', 'with', 'self', '.', '_lock', ':', 'if', 'self', '.', '_completed', ':', 'return', 'False', 'self', '.', '_completed', '=', 'True', 'return', 'True']
Atomically marks the breakpoint as completed. Returns: True if the breakpoint wasn't marked already completed or False if the breakpoint was already completed.
['Atomically', 'marks', 'the', 'breakpoint', 'as', 'completed', '.']
train
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/python_breakpoint.py#L359-L370
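The method above is a "mark once, atomically" guard. A generic standalone sketch of the same pattern with a ``threading.Lock``; the class name is illustrative and is not part of cloud-debug-python.

```python
import threading

class OneShotFlag(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._completed = False

    def set_completed(self):
        """Return True only for the first caller; False for every later one."""
        with self._lock:
            if self._completed:
                return False
            self._completed = True
            return True

flag = OneShotFlag()
assert flag.set_completed() is True   # first caller wins
assert flag.set_completed() is False  # every subsequent call is a no-op
```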
3,745
markchil/gptools
gptools/kernel/core.py
Kernel.set_hyperparams
def set_hyperparams(self, new_params): """Sets the free hyperparameters to the new parameter values in new_params. Parameters ---------- new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),) New parameter values, ordered as dictated by the docstring for the class. """ new_params = scipy.asarray(new_params, dtype=float) if len(new_params) == len(self.free_params): if self.enforce_bounds: for idx, new_param, bound in zip(range(0, len(new_params)), new_params, self.free_param_bounds): if bound[0] is not None and new_param < bound[0]: new_params[idx] = bound[0] elif bound[1] is not None and new_param > bound[1]: new_params[idx] = bound[1] self.params[~self.fixed_params] = new_params else: raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
python
def set_hyperparams(self, new_params): """Sets the free hyperparameters to the new parameter values in new_params. Parameters ---------- new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),) New parameter values, ordered as dictated by the docstring for the class. """ new_params = scipy.asarray(new_params, dtype=float) if len(new_params) == len(self.free_params): if self.enforce_bounds: for idx, new_param, bound in zip(range(0, len(new_params)), new_params, self.free_param_bounds): if bound[0] is not None and new_param < bound[0]: new_params[idx] = bound[0] elif bound[1] is not None and new_param > bound[1]: new_params[idx] = bound[1] self.params[~self.fixed_params] = new_params else: raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
['def', 'set_hyperparams', '(', 'self', ',', 'new_params', ')', ':', 'new_params', '=', 'scipy', '.', 'asarray', '(', 'new_params', ',', 'dtype', '=', 'float', ')', 'if', 'len', '(', 'new_params', ')', '==', 'len', '(', 'self', '.', 'free_params', ')', ':', 'if', 'self', '.', 'enforce_bounds', ':', 'for', 'idx', ',', 'new_param', ',', 'bound', 'in', 'zip', '(', 'range', '(', '0', ',', 'len', '(', 'new_params', ')', ')', ',', 'new_params', ',', 'self', '.', 'free_param_bounds', ')', ':', 'if', 'bound', '[', '0', ']', 'is', 'not', 'None', 'and', 'new_param', '<', 'bound', '[', '0', ']', ':', 'new_params', '[', 'idx', ']', '=', 'bound', '[', '0', ']', 'elif', 'bound', '[', '1', ']', 'is', 'not', 'None', 'and', 'new_param', '>', 'bound', '[', '1', ']', ':', 'new_params', '[', 'idx', ']', '=', 'bound', '[', '1', ']', 'self', '.', 'params', '[', '~', 'self', '.', 'fixed_params', ']', '=', 'new_params', 'else', ':', 'raise', 'ValueError', '(', '"Length of new_params must be %s!"', '%', '(', 'len', '(', 'self', '.', 'free_params', ')', ',', ')', ')']
Sets the free hyperparameters to the new parameter values in new_params. Parameters ---------- new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),) New parameter values, ordered as dictated by the docstring for the class.
['Sets', 'the', 'free', 'hyperparameters', 'to', 'the', 'new', 'parameter', 'values', 'in', 'new_params', '.', 'Parameters', '----------', 'new_params', ':', ':', 'py', ':', 'class', ':', 'Array', 'or', 'other', 'Array', '-', 'like', '(', 'len', '(', ':', 'py', ':', 'attr', ':', 'self', '.', 'free_params', ')', ')', 'New', 'parameter', 'values', 'ordered', 'as', 'dictated', 'by', 'the', 'docstring', 'for', 'the', 'class', '.']
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/core.py#L236-L256
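The bound enforcement above is element-wise clamping of each free hyperparameter to its (lower, upper) interval, with ``None`` meaning "unbounded on that side". A tiny standalone illustration of the same effect, with toy parameter values and bounds not taken from the class:

```python
import numpy as np  # the class itself uses scipy.asarray; plain numpy suffices here

new_params = np.asarray([0.5, 10.0, -3.0], dtype=float)
free_param_bounds = [(0.0, 1.0), (None, 5.0), (-1.0, None)]

clamped = new_params.copy()
for idx, (value, (lower, upper)) in enumerate(zip(new_params, free_param_bounds)):
    if lower is not None and value < lower:
        clamped[idx] = lower
    elif upper is not None and value > upper:
        clamped[idx] = upper

print(clamped)  # [ 0.5  5.  -1. ]
```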
3,746
pandas-dev/pandas
pandas/core/algorithms.py
unique
def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
python
def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
['def', 'unique', '(', 'values', ')', ':', 'values', '=', '_ensure_arraylike', '(', 'values', ')', 'if', 'is_extension_array_dtype', '(', 'values', ')', ':', "# Dispatch to extension dtype's unique.", 'return', 'values', '.', 'unique', '(', ')', 'original', '=', 'values', 'htable', ',', '_', ',', 'values', ',', 'dtype', ',', 'ndtype', '=', '_get_hashtable_algo', '(', 'values', ')', 'table', '=', 'htable', '(', 'len', '(', 'values', ')', ')', 'uniques', '=', 'table', '.', 'unique', '(', 'values', ')', 'uniques', '=', '_reconstruct_data', '(', 'uniques', ',', 'dtype', ',', 'original', ')', 'return', 'uniques']
Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
['Hash', 'table', '-', 'based', 'unique', '.', 'Uniques', 'are', 'returned', 'in', 'order', 'of', 'appearance', '.', 'This', 'does', 'NOT', 'sort', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L276-L367
3,747
leosartaj/sub
sub/main.py
download
def download(name, options): """ download a file or all files in a directory """ dire = os.path.dirname(name) # returns the directory name fName = os.path.basename(name) # returns the filename fNameOnly, fExt = os.path.splitext(fName) dwn = 0 if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose): dwn += 1 elif dirExists(name): for filename in os.listdir(name): if options.recursive: dwn += download(os.path.join(name, filename), options) else: if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose): dwn += 1 return dwn
python
def download(name, options): """ download a file or all files in a directory """ dire = os.path.dirname(name) # returns the directory name fName = os.path.basename(name) # returns the filename fNameOnly, fExt = os.path.splitext(fName) dwn = 0 if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose): dwn += 1 elif dirExists(name): for filename in os.listdir(name): if options.recursive: dwn += download(os.path.join(name, filename), options) else: if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose): dwn += 1 return dwn
['def', 'download', '(', 'name', ',', 'options', ')', ':', 'dire', '=', 'os', '.', 'path', '.', 'dirname', '(', 'name', ')', '# returns the directory name', 'fName', '=', 'os', '.', 'path', '.', 'basename', '(', 'name', ')', '# returns the filename', 'fNameOnly', ',', 'fExt', '=', 'os', '.', 'path', '.', 'splitext', '(', 'fName', ')', 'dwn', '=', '0', 'if', 'fileExists', '(', 'fName', ',', 'dire', ')', 'and', 'not', 'fileExists', '(', '(', 'fNameOnly', '+', "'.srt'", ')', ',', 'dire', ')', ':', '# skip if already downloaded', 'if', 'file_downloaded', '(', 'download_file', '(', 'fName', ',', 'options', '.', 'timeout', ',', 'dire', ')', ',', 'fName', ',', 'options', '.', 'verbose', ')', ':', 'dwn', '+=', '1', 'elif', 'dirExists', '(', 'name', ')', ':', 'for', 'filename', 'in', 'os', '.', 'listdir', '(', 'name', ')', ':', 'if', 'options', '.', 'recursive', ':', 'dwn', '+=', 'download', '(', 'os', '.', 'path', '.', 'join', '(', 'name', ',', 'filename', ')', ',', 'options', ')', 'else', ':', 'if', 'file_downloaded', '(', 'download_file', '(', 'filename', ',', 'options', '.', 'timeout', ',', 'name', ')', ',', 'filename', ',', 'options', '.', 'verbose', ')', ':', 'dwn', '+=', '1', 'return', 'dwn']
download a file or all files in a directory
['download', 'a', 'file', 'or', 'all', 'files', 'in', 'a', 'directory']
train
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L95-L115
3,748
pytroll/pyspectral
rsr_convert_scripts/viirs_rsr.py
main
def main(): """Main""" import sys import h5py handler = logging.StreamHandler(sys.stderr) formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT, datefmt=_DEFAULT_TIME_FORMAT) handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) LOG.setLevel(logging.DEBUG) LOG.addHandler(handler) platform_name = "NOAA-20" # platform_name = "Suomi-NPP" viirs = ViirsRSR('M1', platform_name) filename = os.path.join(viirs.output_dir, "rsr_viirs_{0}.h5".format(platform_name)) with h5py.File(filename, "w") as h5f: h5f.attrs['description'] = 'Relative Spectral Responses for VIIRS' h5f.attrs['platform_name'] = platform_name h5f.attrs['sensor'] = 'viirs' h5f.attrs['band_names'] = VIIRS_BAND_NAMES for chname in VIIRS_BAND_NAMES: viirs = ViirsRSR(chname, platform_name) grp = h5f.create_group(chname) grp.attrs['number_of_detectors'] = len(viirs.rsr.keys()) # Loop over each detector to check if the sampling wavelengths are # identical: det_names = viirs.rsr.keys() wvl = viirs.rsr[det_names[0]]['wavelength'] wvl, idx = np.unique(wvl, return_index=True) wvl_is_constant = True for det in det_names[1:]: det_wvl = np.unique(viirs.rsr[det]['wavelength']) if not np.alltrue(wvl == det_wvl): LOG.warning( "Wavelngth arrays are not the same among detectors!") wvl_is_constant = False if wvl_is_constant: arr = wvl dset = grp.create_dataset('wavelength', arr.shape, dtype='f') dset.attrs['unit'] = 'm' dset.attrs['scale'] = 1e-06 dset[...] = arr # Loop over each detector: for det in viirs.rsr: det_grp = grp.create_group(det) wvl = viirs.rsr[det]['wavelength'][ ~np.isnan(viirs.rsr[det]['wavelength'])] rsp = viirs.rsr[det]['response'][ ~np.isnan(viirs.rsr[det]['wavelength'])] wvl, idx = np.unique(wvl, return_index=True) rsp = np.take(rsp, idx) LOG.debug("wvl.shape: %s", str(wvl.shape)) det_grp.attrs[ 'central_wavelength'] = get_central_wave(wvl, rsp) if not wvl_is_constant: arr = wvl dset = det_grp.create_dataset( 'wavelength', arr.shape, dtype='f') dset.attrs['unit'] = 'm' dset.attrs['scale'] = 1e-06 dset[...] = arr dset = det_grp.create_dataset('response', rsp.shape, dtype='f') dset[...] = rsp
python
def main(): """Main""" import sys import h5py handler = logging.StreamHandler(sys.stderr) formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT, datefmt=_DEFAULT_TIME_FORMAT) handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) LOG.setLevel(logging.DEBUG) LOG.addHandler(handler) platform_name = "NOAA-20" # platform_name = "Suomi-NPP" viirs = ViirsRSR('M1', platform_name) filename = os.path.join(viirs.output_dir, "rsr_viirs_{0}.h5".format(platform_name)) with h5py.File(filename, "w") as h5f: h5f.attrs['description'] = 'Relative Spectral Responses for VIIRS' h5f.attrs['platform_name'] = platform_name h5f.attrs['sensor'] = 'viirs' h5f.attrs['band_names'] = VIIRS_BAND_NAMES for chname in VIIRS_BAND_NAMES: viirs = ViirsRSR(chname, platform_name) grp = h5f.create_group(chname) grp.attrs['number_of_detectors'] = len(viirs.rsr.keys()) # Loop over each detector to check if the sampling wavelengths are # identical: det_names = viirs.rsr.keys() wvl = viirs.rsr[det_names[0]]['wavelength'] wvl, idx = np.unique(wvl, return_index=True) wvl_is_constant = True for det in det_names[1:]: det_wvl = np.unique(viirs.rsr[det]['wavelength']) if not np.alltrue(wvl == det_wvl): LOG.warning( "Wavelngth arrays are not the same among detectors!") wvl_is_constant = False if wvl_is_constant: arr = wvl dset = grp.create_dataset('wavelength', arr.shape, dtype='f') dset.attrs['unit'] = 'm' dset.attrs['scale'] = 1e-06 dset[...] = arr # Loop over each detector: for det in viirs.rsr: det_grp = grp.create_group(det) wvl = viirs.rsr[det]['wavelength'][ ~np.isnan(viirs.rsr[det]['wavelength'])] rsp = viirs.rsr[det]['response'][ ~np.isnan(viirs.rsr[det]['wavelength'])] wvl, idx = np.unique(wvl, return_index=True) rsp = np.take(rsp, idx) LOG.debug("wvl.shape: %s", str(wvl.shape)) det_grp.attrs[ 'central_wavelength'] = get_central_wave(wvl, rsp) if not wvl_is_constant: arr = wvl dset = det_grp.create_dataset( 'wavelength', arr.shape, dtype='f') dset.attrs['unit'] = 'm' dset.attrs['scale'] = 1e-06 dset[...] = arr dset = det_grp.create_dataset('response', rsp.shape, dtype='f') dset[...] = rsp
['def', 'main', '(', ')', ':', 'import', 'sys', 'import', 'h5py', 'handler', '=', 'logging', '.', 'StreamHandler', '(', 'sys', '.', 'stderr', ')', 'formatter', '=', 'logging', '.', 'Formatter', '(', 'fmt', '=', '_DEFAULT_LOG_FORMAT', ',', 'datefmt', '=', '_DEFAULT_TIME_FORMAT', ')', 'handler', '.', 'setFormatter', '(', 'formatter', ')', 'handler', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'LOG', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'LOG', '.', 'addHandler', '(', 'handler', ')', 'platform_name', '=', '"NOAA-20"', '# platform_name = "Suomi-NPP"', 'viirs', '=', 'ViirsRSR', '(', "'M1'", ',', 'platform_name', ')', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'viirs', '.', 'output_dir', ',', '"rsr_viirs_{0}.h5"', '.', 'format', '(', 'platform_name', ')', ')', 'with', 'h5py', '.', 'File', '(', 'filename', ',', '"w"', ')', 'as', 'h5f', ':', 'h5f', '.', 'attrs', '[', "'description'", ']', '=', "'Relative Spectral Responses for VIIRS'", 'h5f', '.', 'attrs', '[', "'platform_name'", ']', '=', 'platform_name', 'h5f', '.', 'attrs', '[', "'sensor'", ']', '=', "'viirs'", 'h5f', '.', 'attrs', '[', "'band_names'", ']', '=', 'VIIRS_BAND_NAMES', 'for', 'chname', 'in', 'VIIRS_BAND_NAMES', ':', 'viirs', '=', 'ViirsRSR', '(', 'chname', ',', 'platform_name', ')', 'grp', '=', 'h5f', '.', 'create_group', '(', 'chname', ')', 'grp', '.', 'attrs', '[', "'number_of_detectors'", ']', '=', 'len', '(', 'viirs', '.', 'rsr', '.', 'keys', '(', ')', ')', '# Loop over each detector to check if the sampling wavelengths are', '# identical:', 'det_names', '=', 'viirs', '.', 'rsr', '.', 'keys', '(', ')', 'wvl', '=', 'viirs', '.', 'rsr', '[', 'det_names', '[', '0', ']', ']', '[', "'wavelength'", ']', 'wvl', ',', 'idx', '=', 'np', '.', 'unique', '(', 'wvl', ',', 'return_index', '=', 'True', ')', 'wvl_is_constant', '=', 'True', 'for', 'det', 'in', 'det_names', '[', '1', ':', ']', ':', 'det_wvl', '=', 'np', '.', 'unique', '(', 'viirs', '.', 'rsr', '[', 'det', ']', '[', "'wavelength'", ']', ')', 'if', 'not', 'np', '.', 'alltrue', '(', 'wvl', '==', 'det_wvl', ')', ':', 'LOG', '.', 'warning', '(', '"Wavelngth arrays are not the same among detectors!"', ')', 'wvl_is_constant', '=', 'False', 'if', 'wvl_is_constant', ':', 'arr', '=', 'wvl', 'dset', '=', 'grp', '.', 'create_dataset', '(', "'wavelength'", ',', 'arr', '.', 'shape', ',', 'dtype', '=', "'f'", ')', 'dset', '.', 'attrs', '[', "'unit'", ']', '=', "'m'", 'dset', '.', 'attrs', '[', "'scale'", ']', '=', '1e-06', 'dset', '[', '...', ']', '=', 'arr', '# Loop over each detector:', 'for', 'det', 'in', 'viirs', '.', 'rsr', ':', 'det_grp', '=', 'grp', '.', 'create_group', '(', 'det', ')', 'wvl', '=', 'viirs', '.', 'rsr', '[', 'det', ']', '[', "'wavelength'", ']', '[', '~', 'np', '.', 'isnan', '(', 'viirs', '.', 'rsr', '[', 'det', ']', '[', "'wavelength'", ']', ')', ']', 'rsp', '=', 'viirs', '.', 'rsr', '[', 'det', ']', '[', "'response'", ']', '[', '~', 'np', '.', 'isnan', '(', 'viirs', '.', 'rsr', '[', 'det', ']', '[', "'wavelength'", ']', ')', ']', 'wvl', ',', 'idx', '=', 'np', '.', 'unique', '(', 'wvl', ',', 'return_index', '=', 'True', ')', 'rsp', '=', 'np', '.', 'take', '(', 'rsp', ',', 'idx', ')', 'LOG', '.', 'debug', '(', '"wvl.shape: %s"', ',', 'str', '(', 'wvl', '.', 'shape', ')', ')', 'det_grp', '.', 'attrs', '[', "'central_wavelength'", ']', '=', 'get_central_wave', '(', 'wvl', ',', 'rsp', ')', 'if', 'not', 'wvl_is_constant', ':', 'arr', '=', 'wvl', 'dset', '=', 'det_grp', '.', 'create_dataset', '(', "'wavelength'", ',', 'arr', '.', 'shape', ',', 
'dtype', '=', "'f'", ')', 'dset', '.', 'attrs', '[', "'unit'", ']', '=', "'m'", 'dset', '.', 'attrs', '[', "'scale'", ']', '=', '1e-06', 'dset', '[', '...', ']', '=', 'arr', 'dset', '=', 'det_grp', '.', 'create_dataset', '(', "'response'", ',', 'rsp', '.', 'shape', ',', 'dtype', '=', "'f'", ')', 'dset', '[', '...', ']', '=', 'rsp']
Main
['Main']
train
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/rsr_convert_scripts/viirs_rsr.py#L190-L262
3,749
paolodragone/pymzn
pymzn/dzn/marsh.py
dict2dzn
def dict2dzn( objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None ): """Serializes the objects in input and produces a list of strings encoding them into dzn format. Optionally, the produced dzn is written on a file. Supported types of objects include: ``str``, ``int``, ``float``, ``set``, ``list`` or ``dict``. List and dict are serialized into dzn (multi-dimensional) arrays. The key-set of a dict is used as index-set of dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``. Parameters ---------- objs : dict A dictionary containing the objects to serialize, the keys are the names of the variables. declare : bool Whether to include the declaration of the variable in the statements or just the assignment. Default is ``False``. assign : bool Whether to include assignment of the value in the statements or just the declaration. declare_enums : bool Whether to declare the enums found as types of the objects to serialize. Default is ``True``. wrap : bool Whether to wrap the serialized values. fout : str Path to the output file, if None no output file is written. Returns ------- list List of strings containing the dzn-encoded objects. """ log = logging.getLogger(__name__) vals = [] enums = set() for key, val in objs.items(): if _is_enum(val) and declare_enums: enum_type = type(val) enum_name = enum_type.__name__ if enum_name not in enums: enum_stmt = stmt2enum( enum_type, declare=declare, assign=assign, wrap=wrap ) vals.append(enum_stmt) enums.add(enum_name) stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap) vals.append(stmt) if fout: log.debug('Writing file: {}'.format(fout)) with open(fout, 'w') as f: for val in vals: f.write('{}\n\n'.format(val)) return vals
python
def dict2dzn( objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None ): """Serializes the objects in input and produces a list of strings encoding them into dzn format. Optionally, the produced dzn is written on a file. Supported types of objects include: ``str``, ``int``, ``float``, ``set``, ``list`` or ``dict``. List and dict are serialized into dzn (multi-dimensional) arrays. The key-set of a dict is used as index-set of dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``. Parameters ---------- objs : dict A dictionary containing the objects to serialize, the keys are the names of the variables. declare : bool Whether to include the declaration of the variable in the statements or just the assignment. Default is ``False``. assign : bool Whether to include assignment of the value in the statements or just the declaration. declare_enums : bool Whether to declare the enums found as types of the objects to serialize. Default is ``True``. wrap : bool Whether to wrap the serialized values. fout : str Path to the output file, if None no output file is written. Returns ------- list List of strings containing the dzn-encoded objects. """ log = logging.getLogger(__name__) vals = [] enums = set() for key, val in objs.items(): if _is_enum(val) and declare_enums: enum_type = type(val) enum_name = enum_type.__name__ if enum_name not in enums: enum_stmt = stmt2enum( enum_type, declare=declare, assign=assign, wrap=wrap ) vals.append(enum_stmt) enums.add(enum_name) stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap) vals.append(stmt) if fout: log.debug('Writing file: {}'.format(fout)) with open(fout, 'w') as f: for val in vals: f.write('{}\n\n'.format(val)) return vals
['def', 'dict2dzn', '(', 'objs', ',', 'declare', '=', 'False', ',', 'assign', '=', 'True', ',', 'declare_enums', '=', 'True', ',', 'wrap', '=', 'True', ',', 'fout', '=', 'None', ')', ':', 'log', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'vals', '=', '[', ']', 'enums', '=', 'set', '(', ')', 'for', 'key', ',', 'val', 'in', 'objs', '.', 'items', '(', ')', ':', 'if', '_is_enum', '(', 'val', ')', 'and', 'declare_enums', ':', 'enum_type', '=', 'type', '(', 'val', ')', 'enum_name', '=', 'enum_type', '.', '__name__', 'if', 'enum_name', 'not', 'in', 'enums', ':', 'enum_stmt', '=', 'stmt2enum', '(', 'enum_type', ',', 'declare', '=', 'declare', ',', 'assign', '=', 'assign', ',', 'wrap', '=', 'wrap', ')', 'vals', '.', 'append', '(', 'enum_stmt', ')', 'enums', '.', 'add', '(', 'enum_name', ')', 'stmt', '=', 'stmt2dzn', '(', 'key', ',', 'val', ',', 'declare', '=', 'declare', ',', 'assign', '=', 'assign', ',', 'wrap', '=', 'wrap', ')', 'vals', '.', 'append', '(', 'stmt', ')', 'if', 'fout', ':', 'log', '.', 'debug', '(', "'Writing file: {}'", '.', 'format', '(', 'fout', ')', ')', 'with', 'open', '(', 'fout', ',', "'w'", ')', 'as', 'f', ':', 'for', 'val', 'in', 'vals', ':', 'f', '.', 'write', '(', "'{}\\n\\n'", '.', 'format', '(', 'val', ')', ')', 'return', 'vals']
Serializes the objects in input and produces a list of strings encoding them into dzn format. Optionally, the produced dzn is written on a file. Supported types of objects include: ``str``, ``int``, ``float``, ``set``, ``list`` or ``dict``. List and dict are serialized into dzn (multi-dimensional) arrays. The key-set of a dict is used as index-set of dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``. Parameters ---------- objs : dict A dictionary containing the objects to serialize, the keys are the names of the variables. declare : bool Whether to include the declaration of the variable in the statements or just the assignment. Default is ``False``. assign : bool Whether to include assignment of the value in the statements or just the declaration. declare_enums : bool Whether to declare the enums found as types of the objects to serialize. Default is ``True``. wrap : bool Whether to wrap the serialized values. fout : str Path to the output file, if None no output file is written. Returns ------- list List of strings containing the dzn-encoded objects.
['Serializes', 'the', 'objects', 'in', 'input', 'and', 'produces', 'a', 'list', 'of', 'strings', 'encoding', 'them', 'into', 'dzn', 'format', '.', 'Optionally', 'the', 'produced', 'dzn', 'is', 'written', 'on', 'a', 'file', '.']
train
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L334-L391
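A hedged usage sketch of the function documented above; the exact dzn formatting (array1d wrapping, spacing, set notation) can vary between pymzn versions.

```python
from pymzn import dict2dzn  # in some releases: from pymzn.dzn import dict2dzn

data = {'n': 3, 'capacity': [10, 20, 30], 'chosen': {1, 3}}
for stmt in dict2dzn(data):
    print(stmt)
# Expected output along the lines of:
#   n = 3;
#   capacity = array1d(1..3, [10, 20, 30]);
#   chosen = {1, 3};
```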
3,750
ryanpetrello/cleaver
cleaver/util.py
random_variant
def random_variant(variants, weights): """ A generator that, given a list of variants and a corresponding list of weights, returns one random weighted selection. """ total = 0 accumulator = [] for w in weights: total += w accumulator.append(total) r = randint(0, total - 1) yield variants[bisect(accumulator, r)]
python
def random_variant(variants, weights): """ A generator that, given a list of variants and a corresponding list of weights, returns one random weighted selection. """ total = 0 accumulator = [] for w in weights: total += w accumulator.append(total) r = randint(0, total - 1) yield variants[bisect(accumulator, r)]
['def', 'random_variant', '(', 'variants', ',', 'weights', ')', ':', 'total', '=', '0', 'accumulator', '=', '[', ']', 'for', 'w', 'in', 'weights', ':', 'total', '+=', 'w', 'accumulator', '.', 'append', '(', 'total', ')', 'r', '=', 'randint', '(', '0', ',', 'total', '-', '1', ')', 'yield', 'variants', '[', 'bisect', '(', 'accumulator', ',', 'r', ')', ']']
A generator that, given a list of variants and a corresponding list of weights, returns one random weighted selection.
['A', 'generator', 'that', 'given', 'a', 'list', 'of', 'variants', 'and', 'a', 'corresponding', 'list', 'of', 'weights', 'returns', 'one', 'random', 'weighted', 'selection', '.']
train
https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/util.py#L7-L19
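Usage note (added): random_variant draws one weighted choice by building a cumulative-weight list and bisecting into it with a uniform random integer. A self-contained sketch of the same idea (a re-creation for illustration, not the cleaver code itself):

# Standalone re-creation of the cumulative-weight selection shown above.
from bisect import bisect
from random import randint

def weighted_pick(variants, weights):
    total = 0
    cumulative = []
    for w in weights:
        total += w
        cumulative.append(total)        # e.g. [1, 4] for weights [1, 3]
    r = randint(0, total - 1)           # uniform draw over 0 .. total-1
    return variants[bisect(cumulative, r)]

print(weighted_pick(['A', 'B'], [1, 3]))   # 'B' comes back roughly 3 times as often as 'A'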
3,751
DataDog/integrations-core
kubelet/datadog_checks/kubelet/prometheus.py
CadvisorPrometheusScraperMixin._get_entity_id_if_container_metric
def _get_entity_id_if_container_metric(self, labels): """ Checks the labels indicate a container metric, then extract the container id from them. :param labels :return str or None """ if CadvisorPrometheusScraperMixin._is_container_metric(labels): pod = self._get_pod_by_metric_label(labels) if is_static_pending_pod(pod): # If the pod is static, ContainerStatus is unavailable. # Return the pod UID so that we can collect metrics from it later on. return self._get_pod_uid(labels) return self._get_container_id(labels)
python
def _get_entity_id_if_container_metric(self, labels): """ Checks the labels indicate a container metric, then extract the container id from them. :param labels :return str or None """ if CadvisorPrometheusScraperMixin._is_container_metric(labels): pod = self._get_pod_by_metric_label(labels) if is_static_pending_pod(pod): # If the pod is static, ContainerStatus is unavailable. # Return the pod UID so that we can collect metrics from it later on. return self._get_pod_uid(labels) return self._get_container_id(labels)
['def', '_get_entity_id_if_container_metric', '(', 'self', ',', 'labels', ')', ':', 'if', 'CadvisorPrometheusScraperMixin', '.', '_is_container_metric', '(', 'labels', ')', ':', 'pod', '=', 'self', '.', '_get_pod_by_metric_label', '(', 'labels', ')', 'if', 'is_static_pending_pod', '(', 'pod', ')', ':', '# If the pod is static, ContainerStatus is unavailable.', '# Return the pod UID so that we can collect metrics from it later on.', 'return', 'self', '.', '_get_pod_uid', '(', 'labels', ')', 'return', 'self', '.', '_get_container_id', '(', 'labels', ')']
Checks the labels indicate a container metric, then extract the container id from them. :param labels :return str or None
['Checks', 'the', 'labels', 'indicate', 'a', 'container', 'metric', 'then', 'extract', 'the', 'container', 'id', 'from', 'them', '.']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubelet/datadog_checks/kubelet/prometheus.py#L165-L179
3,752
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py
parse_host
def parse_host(entity, default_port=DEFAULT_PORT): """Validates a host string Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. :Parameters: - `entity`: A host or host:port string where host could be a hostname or IP address. - `default_port`: The port number to use when one wasn't specified in entity. """ host = entity port = default_port if entity[0] == '[': host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): return entity, default_port elif entity.find(':') != -1: if entity.count(':') > 1: raise ValueError("Reserved characters such as ':' must be " "escaped according RFC 2396. An IPv6 " "address literal must be enclosed in '[' " "and ']' according to RFC 2732.") host, port = host.split(':', 1) if isinstance(port, string_type): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the ismaster response. return host.lower(), port
python
def parse_host(entity, default_port=DEFAULT_PORT): """Validates a host string Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. :Parameters: - `entity`: A host or host:port string where host could be a hostname or IP address. - `default_port`: The port number to use when one wasn't specified in entity. """ host = entity port = default_port if entity[0] == '[': host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): return entity, default_port elif entity.find(':') != -1: if entity.count(':') > 1: raise ValueError("Reserved characters such as ':' must be " "escaped according RFC 2396. An IPv6 " "address literal must be enclosed in '[' " "and ']' according to RFC 2732.") host, port = host.split(':', 1) if isinstance(port, string_type): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the ismaster response. return host.lower(), port
['def', 'parse_host', '(', 'entity', ',', 'default_port', '=', 'DEFAULT_PORT', ')', ':', 'host', '=', 'entity', 'port', '=', 'default_port', 'if', 'entity', '[', '0', ']', '==', "'['", ':', 'host', ',', 'port', '=', 'parse_ipv6_literal_host', '(', 'entity', ',', 'default_port', ')', 'elif', 'entity', '.', 'endswith', '(', '".sock"', ')', ':', 'return', 'entity', ',', 'default_port', 'elif', 'entity', '.', 'find', '(', "':'", ')', '!=', '-', '1', ':', 'if', 'entity', '.', 'count', '(', "':'", ')', '>', '1', ':', 'raise', 'ValueError', '(', '"Reserved characters such as \':\' must be "', '"escaped according RFC 2396. An IPv6 "', '"address literal must be enclosed in \'[\' "', '"and \']\' according to RFC 2732."', ')', 'host', ',', 'port', '=', 'host', '.', 'split', '(', "':'", ',', '1', ')', 'if', 'isinstance', '(', 'port', ',', 'string_type', ')', ':', 'if', 'not', 'port', '.', 'isdigit', '(', ')', 'or', 'int', '(', 'port', ')', '>', '65535', 'or', 'int', '(', 'port', ')', '<=', '0', ':', 'raise', 'ValueError', '(', '"Port must be an integer between 0 and 65535: %s"', '%', '(', 'port', ',', ')', ')', 'port', '=', 'int', '(', 'port', ')', '# Normalize hostname to lowercase, since DNS is case-insensitive:', '# http://tools.ietf.org/html/rfc4343', '# This prevents useless rediscovery if "foo.com" is in the seed list but', '# "FOO.com" is in the ismaster response.', 'return', 'host', '.', 'lower', '(', ')', ',', 'port']
Validates a host string Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. :Parameters: - `entity`: A host or host:port string where host could be a hostname or IP address. - `default_port`: The port number to use when one wasn't specified in entity.
['Validates', 'a', 'host', 'string']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py#L119-L154
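Usage note (added): parse_host validates a host[:port] string, applies the default port, unwraps IPv6 literals and lower-cases the hostname. A usage sketch, assuming a pymongo install that exposes pymongo.uri_parser.parse_host with the behaviour documented above (the 27017 default port is an assumption taken from MongoDB's convention):

# Illustrative only; the function ships inside pymongo (vendored here by the check).
from pymongo.uri_parser import parse_host

print(parse_host('Example.com:27018'))   # ('example.com', 27018) - hostname lower-cased
print(parse_host('example.com'))         # ('example.com', 27017) - default port applied
print(parse_host('[::1]:27018'))         # ('::1', 27018) - IPv6 literal unwrapped
try:
    parse_host('one:two:three')          # more than one ':' outside brackets
except ValueError as exc:
    print(exc)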
3,753
tensorflow/tensor2tensor
tensor2tensor/envs/env_problem.py
EnvProblem._verify_same_spaces
def _verify_same_spaces(self): """Verifies that all the envs have the same observation and action space.""" # Pre-conditions: self._envs is initialized. if self._envs is None: raise ValueError("Environments not initialized.") if not isinstance(self._envs, list): tf.logging.warning("Not checking observation and action space " "compatibility across envs, since there is just one.") return # NOTE: We compare string representations of observation_space and # action_space because compositional classes like space.Tuple don't return # true on object comparison. if not all( str(env.observation_space) == str(self.observation_space) for env in self._envs): err_str = ("All environments should have the same observation space, but " "don't.") tf.logging.error(err_str) # Log all observation spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has observation space [%s]", i, env.observation_space) raise ValueError(err_str) if not all( str(env.action_space) == str(self.action_space) for env in self._envs): err_str = "All environments should have the same action space, but don't." tf.logging.error(err_str) # Log all action spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has action space [%s]", i, env.action_space) raise ValueError(err_str)
python
def _verify_same_spaces(self): """Verifies that all the envs have the same observation and action space.""" # Pre-conditions: self._envs is initialized. if self._envs is None: raise ValueError("Environments not initialized.") if not isinstance(self._envs, list): tf.logging.warning("Not checking observation and action space " "compatibility across envs, since there is just one.") return # NOTE: We compare string representations of observation_space and # action_space because compositional classes like space.Tuple don't return # true on object comparison. if not all( str(env.observation_space) == str(self.observation_space) for env in self._envs): err_str = ("All environments should have the same observation space, but " "don't.") tf.logging.error(err_str) # Log all observation spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has observation space [%s]", i, env.observation_space) raise ValueError(err_str) if not all( str(env.action_space) == str(self.action_space) for env in self._envs): err_str = "All environments should have the same action space, but don't." tf.logging.error(err_str) # Log all action spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has action space [%s]", i, env.action_space) raise ValueError(err_str)
['def', '_verify_same_spaces', '(', 'self', ')', ':', '# Pre-conditions: self._envs is initialized.', 'if', 'self', '.', '_envs', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Environments not initialized."', ')', 'if', 'not', 'isinstance', '(', 'self', '.', '_envs', ',', 'list', ')', ':', 'tf', '.', 'logging', '.', 'warning', '(', '"Not checking observation and action space "', '"compatibility across envs, since there is just one."', ')', 'return', '# NOTE: We compare string representations of observation_space and', "# action_space because compositional classes like space.Tuple don't return", '# true on object comparison.', 'if', 'not', 'all', '(', 'str', '(', 'env', '.', 'observation_space', ')', '==', 'str', '(', 'self', '.', 'observation_space', ')', 'for', 'env', 'in', 'self', '.', '_envs', ')', ':', 'err_str', '=', '(', '"All environments should have the same observation space, but "', '"don\'t."', ')', 'tf', '.', 'logging', '.', 'error', '(', 'err_str', ')', '# Log all observation spaces.', 'for', 'i', ',', 'env', 'in', 'enumerate', '(', 'self', '.', '_envs', ')', ':', 'tf', '.', 'logging', '.', 'error', '(', '"Env[%d] has observation space [%s]"', ',', 'i', ',', 'env', '.', 'observation_space', ')', 'raise', 'ValueError', '(', 'err_str', ')', 'if', 'not', 'all', '(', 'str', '(', 'env', '.', 'action_space', ')', '==', 'str', '(', 'self', '.', 'action_space', ')', 'for', 'env', 'in', 'self', '.', '_envs', ')', ':', 'err_str', '=', '"All environments should have the same action space, but don\'t."', 'tf', '.', 'logging', '.', 'error', '(', 'err_str', ')', '# Log all action spaces.', 'for', 'i', ',', 'env', 'in', 'enumerate', '(', 'self', '.', '_envs', ')', ':', 'tf', '.', 'logging', '.', 'error', '(', '"Env[%d] has action space [%s]"', ',', 'i', ',', 'env', '.', 'action_space', ')', 'raise', 'ValueError', '(', 'err_str', ')']
Verifies that all the envs have the same observation and action space.
['Verifies', 'that', 'all', 'the', 'envs', 'have', 'the', 'same', 'observation', 'and', 'action', 'space', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L199-L235
3,754
tariqdaouda/pyGeno
pyGeno/tools/BinarySequence.py
BinarySequence.decode
def decode(self, binSequence): """decodes a binary sequence to return a string""" try: binSeq = iter(binSequence[0]) except TypeError, te: binSeq = binSequence ret = '' for b in binSeq : ch = '' for c in self.charToBin : if b & self.forma[self.charToBin[c]] > 0 : ch += c +'/' if ch == '' : raise KeyError('Key %d unkowom, bad format' % b) ret += ch[:-1] return ret
python
def decode(self, binSequence): """decodes a binary sequence to return a string""" try: binSeq = iter(binSequence[0]) except TypeError, te: binSeq = binSequence ret = '' for b in binSeq : ch = '' for c in self.charToBin : if b & self.forma[self.charToBin[c]] > 0 : ch += c +'/' if ch == '' : raise KeyError('Key %d unkowom, bad format' % b) ret += ch[:-1] return ret
['def', 'decode', '(', 'self', ',', 'binSequence', ')', ':', 'try', ':', 'binSeq', '=', 'iter', '(', 'binSequence', '[', '0', ']', ')', 'except', 'TypeError', ',', 'te', ':', 'binSeq', '=', 'binSequence', 'ret', '=', "''", 'for', 'b', 'in', 'binSeq', ':', 'ch', '=', "''", 'for', 'c', 'in', 'self', '.', 'charToBin', ':', 'if', 'b', '&', 'self', '.', 'forma', '[', 'self', '.', 'charToBin', '[', 'c', ']', ']', '>', '0', ':', 'ch', '+=', 'c', '+', "'/'", 'if', 'ch', '==', "''", ':', 'raise', 'KeyError', '(', "'Key %d unkowom, bad format'", '%', 'b', ')', 'ret', '+=', 'ch', '[', ':', '-', '1', ']', 'return', 'ret']
decodes a binary sequence to return a string
['decodes', 'a', 'binary', 'sequence', 'to', 'return', 'a', 'string']
train
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/BinarySequence.py#L298-L315
3,755
uber/doubles
doubles/allowance.py
Allowance.satisfy_custom_matcher
def satisfy_custom_matcher(self, args, kwargs): """Return a boolean indicating if the args satisfy the stub :return: Whether or not the stub accepts the provided arguments. :rtype: bool """ if not self._custom_matcher: return False try: return self._custom_matcher(*args, **kwargs) except Exception: return False
python
def satisfy_custom_matcher(self, args, kwargs): """Return a boolean indicating if the args satisfy the stub :return: Whether or not the stub accepts the provided arguments. :rtype: bool """ if not self._custom_matcher: return False try: return self._custom_matcher(*args, **kwargs) except Exception: return False
['def', 'satisfy_custom_matcher', '(', 'self', ',', 'args', ',', 'kwargs', ')', ':', 'if', 'not', 'self', '.', '_custom_matcher', ':', 'return', 'False', 'try', ':', 'return', 'self', '.', '_custom_matcher', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', ':', 'return', 'False']
Return a boolean indicating if the args satisfy the stub :return: Whether or not the stub accepts the provided arguments. :rtype: bool
['Return', 'a', 'boolean', 'indicating', 'if', 'the', 'args', 'satisfy', 'the', 'stub']
train
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/allowance.py#L235-L246
3,756
idlesign/django-sitegate
sitegate/flows_base.py
FlowsBase.get_arg_or_attr
def get_arg_or_attr(self, name, default=None): """Returns flow argument, as provided with sitegate decorators or attribute set as a flow class attribute or default.""" if name in self.flow_args: return self.flow_args[name] try: return getattr(self, name) except AttributeError: return default
python
def get_arg_or_attr(self, name, default=None): """Returns flow argument, as provided with sitegate decorators or attribute set as a flow class attribute or default.""" if name in self.flow_args: return self.flow_args[name] try: return getattr(self, name) except AttributeError: return default
['def', 'get_arg_or_attr', '(', 'self', ',', 'name', ',', 'default', '=', 'None', ')', ':', 'if', 'name', 'in', 'self', '.', 'flow_args', ':', 'return', 'self', '.', 'flow_args', '[', 'name', ']', 'try', ':', 'return', 'getattr', '(', 'self', ',', 'name', ')', 'except', 'AttributeError', ':', 'return', 'default']
Returns flow argument, as provided with sitegate decorators or attribute set as a flow class attribute or default.
['Returns', 'flow', 'argument', 'as', 'provided', 'with', 'sitegate', 'decorators', 'or', 'attribute', 'set', 'as', 'a', 'flow', 'class', 'attribute', 'or', 'default', '.']
train
https://github.com/idlesign/django-sitegate/blob/0e58de91605071833d75a7c21f2d0de2f2e3c896/sitegate/flows_base.py#L90-L98
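Usage note (added): get_arg_or_attr resolves a name with a three-step precedence - explicit flow argument, then class or instance attribute, then the supplied default. A generic standalone sketch of that lookup order (not tied to sitegate; the names are illustrative):

# Standalone illustration of the arg -> attribute -> default precedence.
class Flow:
    widget_attrs = {'class': 'form-control'}      # class-level fallback

    def __init__(self, **flow_args):
        self.flow_args = flow_args

    def get_arg_or_attr(self, name, default=None):
        if name in self.flow_args:
            return self.flow_args[name]
        try:
            return getattr(self, name)
        except AttributeError:
            return default

print(Flow(widget_attrs={'class': 'custom'}).get_arg_or_attr('widget_attrs'))  # argument wins
print(Flow().get_arg_or_attr('widget_attrs'))                                  # attribute fallback
print(Flow().get_arg_or_attr('template', 'default.html'))                      # default fallback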
3,757
nicolargo/glances
glances/plugins/glances_network.py
Plugin.update_views
def update_views(self): """Update stats views.""" # Call the father's method super(Plugin, self).update_views() # Add specifics informations # Alert for i in self.stats: ifrealname = i['interface_name'].split(':')[0] # Convert rate in bps ( to be able to compare to interface speed) bps_rx = int(i['rx'] // i['time_since_update'] * 8) bps_tx = int(i['tx'] // i['time_since_update'] * 8) # Decorate the bitrate with the configuration file thresolds alert_rx = self.get_alert(bps_rx, header=ifrealname + '_rx') alert_tx = self.get_alert(bps_tx, header=ifrealname + '_tx') # If nothing is define in the configuration file... # ... then use the interface speed (not available on all systems) if alert_rx == 'DEFAULT' and 'speed' in i and i['speed'] != 0: alert_rx = self.get_alert(current=bps_rx, maximum=i['speed'], header='rx') if alert_tx == 'DEFAULT' and 'speed' in i and i['speed'] != 0: alert_tx = self.get_alert(current=bps_tx, maximum=i['speed'], header='tx') # then decorates self.views[i[self.get_key()]]['rx']['decoration'] = alert_rx self.views[i[self.get_key()]]['tx']['decoration'] = alert_tx
python
def update_views(self): """Update stats views.""" # Call the father's method super(Plugin, self).update_views() # Add specifics informations # Alert for i in self.stats: ifrealname = i['interface_name'].split(':')[0] # Convert rate in bps ( to be able to compare to interface speed) bps_rx = int(i['rx'] // i['time_since_update'] * 8) bps_tx = int(i['tx'] // i['time_since_update'] * 8) # Decorate the bitrate with the configuration file thresolds alert_rx = self.get_alert(bps_rx, header=ifrealname + '_rx') alert_tx = self.get_alert(bps_tx, header=ifrealname + '_tx') # If nothing is define in the configuration file... # ... then use the interface speed (not available on all systems) if alert_rx == 'DEFAULT' and 'speed' in i and i['speed'] != 0: alert_rx = self.get_alert(current=bps_rx, maximum=i['speed'], header='rx') if alert_tx == 'DEFAULT' and 'speed' in i and i['speed'] != 0: alert_tx = self.get_alert(current=bps_tx, maximum=i['speed'], header='tx') # then decorates self.views[i[self.get_key()]]['rx']['decoration'] = alert_rx self.views[i[self.get_key()]]['tx']['decoration'] = alert_tx
['def', 'update_views', '(', 'self', ')', ':', "# Call the father's method", 'super', '(', 'Plugin', ',', 'self', ')', '.', 'update_views', '(', ')', '# Add specifics informations', '# Alert', 'for', 'i', 'in', 'self', '.', 'stats', ':', 'ifrealname', '=', 'i', '[', "'interface_name'", ']', '.', 'split', '(', "':'", ')', '[', '0', ']', '# Convert rate in bps ( to be able to compare to interface speed)', 'bps_rx', '=', 'int', '(', 'i', '[', "'rx'", ']', '//', 'i', '[', "'time_since_update'", ']', '*', '8', ')', 'bps_tx', '=', 'int', '(', 'i', '[', "'tx'", ']', '//', 'i', '[', "'time_since_update'", ']', '*', '8', ')', '# Decorate the bitrate with the configuration file thresolds', 'alert_rx', '=', 'self', '.', 'get_alert', '(', 'bps_rx', ',', 'header', '=', 'ifrealname', '+', "'_rx'", ')', 'alert_tx', '=', 'self', '.', 'get_alert', '(', 'bps_tx', ',', 'header', '=', 'ifrealname', '+', "'_tx'", ')', '# If nothing is define in the configuration file...', '# ... then use the interface speed (not available on all systems)', 'if', 'alert_rx', '==', "'DEFAULT'", 'and', "'speed'", 'in', 'i', 'and', 'i', '[', "'speed'", ']', '!=', '0', ':', 'alert_rx', '=', 'self', '.', 'get_alert', '(', 'current', '=', 'bps_rx', ',', 'maximum', '=', 'i', '[', "'speed'", ']', ',', 'header', '=', "'rx'", ')', 'if', 'alert_tx', '==', "'DEFAULT'", 'and', "'speed'", 'in', 'i', 'and', 'i', '[', "'speed'", ']', '!=', '0', ':', 'alert_tx', '=', 'self', '.', 'get_alert', '(', 'current', '=', 'bps_tx', ',', 'maximum', '=', 'i', '[', "'speed'", ']', ',', 'header', '=', "'tx'", ')', '# then decorates', 'self', '.', 'views', '[', 'i', '[', 'self', '.', 'get_key', '(', ')', ']', ']', '[', "'rx'", ']', '[', "'decoration'", ']', '=', 'alert_rx', 'self', '.', 'views', '[', 'i', '[', 'self', '.', 'get_key', '(', ')', ']', ']', '[', "'tx'", ']', '[', "'decoration'", ']', '=', 'alert_tx']
Update stats views.
['Update', 'stats', 'views', '.']
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_network.py#L219-L246
3,758
bjodah/pycodeexport
pycodeexport/codeexport.py
Generic_Code.mod
def mod(self): """ Cached compiled binary of the Generic_Code class. To clear cache invoke :meth:`clear_mod_cache`. """ if self._mod is None: self._mod = self.compile_and_import_binary() return self._mod
python
def mod(self): """ Cached compiled binary of the Generic_Code class. To clear cache invoke :meth:`clear_mod_cache`. """ if self._mod is None: self._mod = self.compile_and_import_binary() return self._mod
['def', 'mod', '(', 'self', ')', ':', 'if', 'self', '.', '_mod', 'is', 'None', ':', 'self', '.', '_mod', '=', 'self', '.', 'compile_and_import_binary', '(', ')', 'return', 'self', '.', '_mod']
Cached compiled binary of the Generic_Code class. To clear cache invoke :meth:`clear_mod_cache`.
['Cached', 'compiled', 'binary', 'of', 'the', 'Generic_Code', 'class', '.']
train
https://github.com/bjodah/pycodeexport/blob/7d1d733745ea4e54fdcee8f16fea313794a4c11b/pycodeexport/codeexport.py#L300-L307
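Usage note (added): mod is a lazily evaluated, cached property - the binary is compiled and imported only on first access, and the result is reused until the cache is cleared. A generic standalone sketch of the pattern (the _compile stand-in is hypothetical):

# Standalone sketch of the compile-once, cache-until-cleared property pattern.
class Code:
    def __init__(self, source):
        self.source = source
        self._mod = None

    @property
    def mod(self):
        if self._mod is None:          # first access triggers the expensive step
            self._mod = self._compile()
        return self._mod

    def clear_mod_cache(self):
        self._mod = None               # force a rebuild on the next access

    def _compile(self):
        print('compiling...')          # stands in for compile_and_import_binary()
        return object()

c = Code('...')
first = c.mod       # prints 'compiling...'
second = c.mod      # served from the cache, no recompilation
assert first is second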
3,759
equinor/segyio
python/segyio/segy.py
SegyFile.xline
def xline(self): """ Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1 """ if self.unstructured: raise ValueError(self._unstructured_errmsg) if self._xline is not None: return self._xline self._xline = Line(self, self.xlines, self._xline_length, self._xline_stride, self.offsets, 'crossline', ) return self._xline
python
def xline(self): """ Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1 """ if self.unstructured: raise ValueError(self._unstructured_errmsg) if self._xline is not None: return self._xline self._xline = Line(self, self.xlines, self._xline_length, self._xline_stride, self.offsets, 'crossline', ) return self._xline
['def', 'xline', '(', 'self', ')', ':', 'if', 'self', '.', 'unstructured', ':', 'raise', 'ValueError', '(', 'self', '.', '_unstructured_errmsg', ')', 'if', 'self', '.', '_xline', 'is', 'not', 'None', ':', 'return', 'self', '.', '_xline', 'self', '.', '_xline', '=', 'Line', '(', 'self', ',', 'self', '.', 'xlines', ',', 'self', '.', '_xline_length', ',', 'self', '.', '_xline_stride', ',', 'self', '.', 'offsets', ',', "'crossline'", ',', ')', 'return', 'self', '.', '_xline']
Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1
['Interact', 'with', 'segy', 'in', 'crossline', 'mode']
train
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L573-L603
3,760
vtkiorg/vtki
vtki/plotting.py
BasePlotter.update_coordinates
def update_coordinates(self, points, mesh=None, render=True): """ Updates the points of the an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Forces an update to the render window. Default True. """ if mesh is None: mesh = self.mesh mesh.points = points if render: self._render()
python
def update_coordinates(self, points, mesh=None, render=True): """ Updates the points of the an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Forces an update to the render window. Default True. """ if mesh is None: mesh = self.mesh mesh.points = points if render: self._render()
['def', 'update_coordinates', '(', 'self', ',', 'points', ',', 'mesh', '=', 'None', ',', 'render', '=', 'True', ')', ':', 'if', 'mesh', 'is', 'None', ':', 'mesh', '=', 'self', '.', 'mesh', 'mesh', '.', 'points', '=', 'points', 'if', 'render', ':', 'self', '.', '_render', '(', ')']
Updates the points of the an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Forces an update to the render window. Default True.
['Updates', 'the', 'points', 'of', 'the', 'an', 'object', 'in', 'the', 'plotter', '.']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L1869-L1892
3,761
OSSOS/MOP
src/ossos/core/ossos/downloads/cutouts/source.py
SourceCutout.reset_coord
def reset_coord(self): """ Reset the source location based on the init_skycoord values @return: """ (x, y, idx) = self.world2pix(self.init_skycoord.ra, self.init_skycoord.dec, usepv=True) self.update_pixel_location((x, y), idx)
python
def reset_coord(self): """ Reset the source location based on the init_skycoord values @return: """ (x, y, idx) = self.world2pix(self.init_skycoord.ra, self.init_skycoord.dec, usepv=True) self.update_pixel_location((x, y), idx)
['def', 'reset_coord', '(', 'self', ')', ':', '(', 'x', ',', 'y', ',', 'idx', ')', '=', 'self', '.', 'world2pix', '(', 'self', '.', 'init_skycoord', '.', 'ra', ',', 'self', '.', 'init_skycoord', '.', 'dec', ',', 'usepv', '=', 'True', ')', 'self', '.', 'update_pixel_location', '(', '(', 'x', ',', 'y', ')', ',', 'idx', ')']
Reset the source location based on the init_skycoord values @return:
['Reset', 'the', 'source', 'location', 'based', 'on', 'the', 'init_skycoord', 'values']
train
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/downloads/cutouts/source.py#L63-L71
3,762
svasilev94/GraphLibrary
graphlibrary/graph.py
Graph.remove_vertex
def remove_vertex(self, vertex): """ Remove vertex from G """ try: self.vertices.pop(vertex) except KeyError: raise GraphInsertError("Vertex %s doesn't exist." % (vertex,)) if vertex in self.nodes: self.nodes.pop(vertex) for element in self.vertices: if vertex in self.vertices[element]: self.vertices[element].remove(vertex) edges = [] # List for edges that include vertex for element in self.edges: if vertex in element: edges.append(element) for element in edges: del self.edges[element]
python
def remove_vertex(self, vertex): """ Remove vertex from G """ try: self.vertices.pop(vertex) except KeyError: raise GraphInsertError("Vertex %s doesn't exist." % (vertex,)) if vertex in self.nodes: self.nodes.pop(vertex) for element in self.vertices: if vertex in self.vertices[element]: self.vertices[element].remove(vertex) edges = [] # List for edges that include vertex for element in self.edges: if vertex in element: edges.append(element) for element in edges: del self.edges[element]
['def', 'remove_vertex', '(', 'self', ',', 'vertex', ')', ':', 'try', ':', 'self', '.', 'vertices', '.', 'pop', '(', 'vertex', ')', 'except', 'KeyError', ':', 'raise', 'GraphInsertError', '(', '"Vertex %s doesn\'t exist."', '%', '(', 'vertex', ',', ')', ')', 'if', 'vertex', 'in', 'self', '.', 'nodes', ':', 'self', '.', 'nodes', '.', 'pop', '(', 'vertex', ')', 'for', 'element', 'in', 'self', '.', 'vertices', ':', 'if', 'vertex', 'in', 'self', '.', 'vertices', '[', 'element', ']', ':', 'self', '.', 'vertices', '[', 'element', ']', '.', 'remove', '(', 'vertex', ')', 'edges', '=', '[', ']', '# List for edges that include vertex\r', 'for', 'element', 'in', 'self', '.', 'edges', ':', 'if', 'vertex', 'in', 'element', ':', 'edges', '.', 'append', '(', 'element', ')', 'for', 'element', 'in', 'edges', ':', 'del', 'self', '.', 'edges', '[', 'element', ']']
Remove vertex from G
['Remove', 'vertex', 'from', 'G']
train
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L72-L90
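Usage note (added): remove_vertex has to scrub the vertex from three places - the adjacency mapping, every other vertex's neighbour list, and every edge keyed on it. A standalone sketch of the same clean-up steps on plain dicts (toy data, not the GraphLibrary API):

# Standalone sketch of the three clean-up steps with plain containers.
vertices = {'A': ['B', 'C'], 'B': ['A'], 'C': ['A']}
edges = {('A', 'B'): 1, ('B', 'A'): 1, ('A', 'C'): 2, ('C', 'A'): 2}

def remove_vertex(vertex):
    if vertex not in vertices:
        raise KeyError("Vertex %s doesn't exist." % vertex)
    vertices.pop(vertex)                               # 1. drop the vertex itself
    for neighbours in vertices.values():
        if vertex in neighbours:
            neighbours.remove(vertex)                  # 2. drop it from neighbour lists
    for edge in [e for e in edges if vertex in e]:
        del edges[edge]                                # 3. drop every incident edge

remove_vertex('C')
print(vertices)   # {'A': ['B'], 'B': ['A']}
print(edges)      # {('A', 'B'): 1, ('B', 'A'): 1}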
3,763
blockstack/blockstack-core
blockstack/lib/config.py
load_configuration
def load_configuration(working_dir): """ Load the system configuration and set global variables Return the configuration of the node on success. Return None on failure """ import nameset.virtualchain_hooks as virtualchain_hooks # acquire configuration, and store it globally opts = configure(working_dir) blockstack_opts = opts.get('blockstack', None) blockstack_api_opts = opts.get('blockstack-api', None) bitcoin_opts = opts['bitcoind'] # config file version check config_server_version = blockstack_opts.get('server_version', None) if (config_server_version is None or versions_need_upgrade(config_server_version, VERSION)): print >> sys.stderr, "Obsolete or unrecognizable config file ({}): '{}' != '{}'".format(virtualchain.get_config_filename(virtualchain_hooks, working_dir), config_server_version, VERSION) print >> sys.stderr, 'Please see the release notes for version {} for instructions to upgrade (in the release-notes/ folder).'.format(VERSION) return None # store options set_bitcoin_opts( bitcoin_opts ) set_blockstack_opts( blockstack_opts ) set_blockstack_api_opts( blockstack_api_opts ) return { 'bitcoind': bitcoin_opts, 'blockstack': blockstack_opts, 'blockstack-api': blockstack_api_opts }
python
def load_configuration(working_dir): """ Load the system configuration and set global variables Return the configuration of the node on success. Return None on failure """ import nameset.virtualchain_hooks as virtualchain_hooks # acquire configuration, and store it globally opts = configure(working_dir) blockstack_opts = opts.get('blockstack', None) blockstack_api_opts = opts.get('blockstack-api', None) bitcoin_opts = opts['bitcoind'] # config file version check config_server_version = blockstack_opts.get('server_version', None) if (config_server_version is None or versions_need_upgrade(config_server_version, VERSION)): print >> sys.stderr, "Obsolete or unrecognizable config file ({}): '{}' != '{}'".format(virtualchain.get_config_filename(virtualchain_hooks, working_dir), config_server_version, VERSION) print >> sys.stderr, 'Please see the release notes for version {} for instructions to upgrade (in the release-notes/ folder).'.format(VERSION) return None # store options set_bitcoin_opts( bitcoin_opts ) set_blockstack_opts( blockstack_opts ) set_blockstack_api_opts( blockstack_api_opts ) return { 'bitcoind': bitcoin_opts, 'blockstack': blockstack_opts, 'blockstack-api': blockstack_api_opts }
['def', 'load_configuration', '(', 'working_dir', ')', ':', 'import', 'nameset', '.', 'virtualchain_hooks', 'as', 'virtualchain_hooks', '# acquire configuration, and store it globally', 'opts', '=', 'configure', '(', 'working_dir', ')', 'blockstack_opts', '=', 'opts', '.', 'get', '(', "'blockstack'", ',', 'None', ')', 'blockstack_api_opts', '=', 'opts', '.', 'get', '(', "'blockstack-api'", ',', 'None', ')', 'bitcoin_opts', '=', 'opts', '[', "'bitcoind'", ']', '# config file version check', 'config_server_version', '=', 'blockstack_opts', '.', 'get', '(', "'server_version'", ',', 'None', ')', 'if', '(', 'config_server_version', 'is', 'None', 'or', 'versions_need_upgrade', '(', 'config_server_version', ',', 'VERSION', ')', ')', ':', 'print', '>>', 'sys', '.', 'stderr', ',', '"Obsolete or unrecognizable config file ({}): \'{}\' != \'{}\'"', '.', 'format', '(', 'virtualchain', '.', 'get_config_filename', '(', 'virtualchain_hooks', ',', 'working_dir', ')', ',', 'config_server_version', ',', 'VERSION', ')', 'print', '>>', 'sys', '.', 'stderr', ',', "'Please see the release notes for version {} for instructions to upgrade (in the release-notes/ folder).'", '.', 'format', '(', 'VERSION', ')', 'return', 'None', '# store options', 'set_bitcoin_opts', '(', 'bitcoin_opts', ')', 'set_blockstack_opts', '(', 'blockstack_opts', ')', 'set_blockstack_api_opts', '(', 'blockstack_api_opts', ')', 'return', '{', "'bitcoind'", ':', 'bitcoin_opts', ',', "'blockstack'", ':', 'blockstack_opts', ',', "'blockstack-api'", ':', 'blockstack_api_opts', '}']
Load the system configuration and set global variables Return the configuration of the node on success. Return None on failure
['Load', 'the', 'system', 'configuration', 'and', 'set', 'global', 'variables', 'Return', 'the', 'configuration', 'of', 'the', 'node', 'on', 'success', '.', 'Return', 'None', 'on', 'failure']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/config.py#L2075-L2106
3,764
google/tangent
tangent/reverse_ad.py
joint
def joint(node): """Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint. """ node, _, _ = _fix(node) body = node.body[0].body[:-1] + node.body[1].body func = gast.Module(body=[gast.FunctionDef( name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)]) # Clean up anno.clearanno(func) return func
python
def joint(node): """Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint. """ node, _, _ = _fix(node) body = node.body[0].body[:-1] + node.body[1].body func = gast.Module(body=[gast.FunctionDef( name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)]) # Clean up anno.clearanno(func) return func
['def', 'joint', '(', 'node', ')', ':', 'node', ',', '_', ',', '_', '=', '_fix', '(', 'node', ')', 'body', '=', 'node', '.', 'body', '[', '0', ']', '.', 'body', '[', ':', '-', '1', ']', '+', 'node', '.', 'body', '[', '1', ']', '.', 'body', 'func', '=', 'gast', '.', 'Module', '(', 'body', '=', '[', 'gast', '.', 'FunctionDef', '(', 'name', '=', 'node', '.', 'body', '[', '0', ']', '.', 'name', ',', 'args', '=', 'node', '.', 'body', '[', '1', ']', '.', 'args', ',', 'body', '=', 'body', ',', 'decorator_list', '=', '[', ']', ',', 'returns', '=', 'None', ')', ']', ')', '# Clean up', 'anno', '.', 'clearanno', '(', 'func', ')', 'return', 'func']
Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint.
['Merge', 'the', 'bodies', 'of', 'primal', 'and', 'adjoint', 'into', 'a', 'single', 'function', '.']
train
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/reverse_ad.py#L945-L963
3,765
kashifrazzaqui/json-streamer
jsonstreamer/tape.py
Tape.read
def read(self, size:int=None): """ :param size: number of characters to read from the buffer :return: string that has been read from the buffer """ if size: result = self._buffer[0:size] self._buffer = self._buffer[size:] return result else: result = self._buffer self._buffer = '' return result
python
def read(self, size:int=None): """ :param size: number of characters to read from the buffer :return: string that has been read from the buffer """ if size: result = self._buffer[0:size] self._buffer = self._buffer[size:] return result else: result = self._buffer self._buffer = '' return result
['def', 'read', '(', 'self', ',', 'size', ':', 'int', '=', 'None', ')', ':', 'if', 'size', ':', 'result', '=', 'self', '.', '_buffer', '[', '0', ':', 'size', ']', 'self', '.', '_buffer', '=', 'self', '.', '_buffer', '[', 'size', ':', ']', 'return', 'result', 'else', ':', 'result', '=', 'self', '.', '_buffer', 'self', '.', '_buffer', '=', "''", 'return', 'result']
:param size: number of characters to read from the buffer :return: string that has been read from the buffer
[':', 'param', 'size', ':', 'number', 'of', 'characters', 'to', 'read', 'from', 'the', 'buffer', ':', 'return', ':', 'string', 'that', 'has', 'been', 'read', 'from', 'the', 'buffer']
train
https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/tape.py#L13-L25
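Usage note (added): Tape.read consumes from the front of an internal string buffer - with a size it returns and drops that many characters, without one it drains everything. A standalone sketch of the same slicing behaviour (a minimal re-creation, not the jsonstreamer class):

# Standalone sketch of the consume-from-the-front buffer behaviour.
class Tape:
    def __init__(self, data=''):
        self._buffer = data

    def read(self, size=None):
        if size:
            result, self._buffer = self._buffer[:size], self._buffer[size:]
        else:
            result, self._buffer = self._buffer, ''
        return result

t = Tape('{"a": 1}')
print(t.read(4))   # '{"a"' - first four characters, removed from the buffer
print(t.read())    # ': 1}' - the rest; the buffer is now empty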
3,766
audreyr/cookiecutter
cookiecutter/utils.py
force_delete
def force_delete(func, path, exc_info): """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597 """ os.chmod(path, stat.S_IWRITE) func(path)
python
def force_delete(func, path, exc_info): """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597 """ os.chmod(path, stat.S_IWRITE) func(path)
['def', 'force_delete', '(', 'func', ',', 'path', ',', 'exc_info', ')', ':', 'os', '.', 'chmod', '(', 'path', ',', 'stat', '.', 'S_IWRITE', ')', 'func', '(', 'path', ')']
Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597
['Error', 'handler', 'for', 'shutil', '.', 'rmtree', '()', 'equivalent', 'to', 'rm', '-', 'rf', '.']
train
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L19-L26
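Usage note (added): force_delete is meant to be handed to shutil.rmtree as its onerror handler, so a read-only file that blocked deletion is chmod'ed writable and the failed call retried - the rm -rf behaviour the docstring describes. A runnable sketch of that usage with a hypothetical path:

# The path below is hypothetical; this mirrors the usage line in the docstring above.
import os
import shutil
import stat

def force_delete(func, path, exc_info):
    os.chmod(path, stat.S_IWRITE)   # make the offending file writable
    func(path)                      # retry the os.remove/os.rmdir that failed

shutil.rmtree('build/stale-template', onerror=force_delete)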
3,767
hydpy-dev/hydpy
hydpy/core/masktools.py
IndexMask.relevantindices
def relevantindices(self) -> List[int]: """A |list| of all currently relevant indices, calculated as an intercection of the (constant) class attribute `RELEVANT_VALUES` and the (variable) property |IndexMask.refindices|.""" return [idx for idx in numpy.unique(self.refindices.values) if idx in self.RELEVANT_VALUES]
python
def relevantindices(self) -> List[int]: """A |list| of all currently relevant indices, calculated as an intercection of the (constant) class attribute `RELEVANT_VALUES` and the (variable) property |IndexMask.refindices|.""" return [idx for idx in numpy.unique(self.refindices.values) if idx in self.RELEVANT_VALUES]
['def', 'relevantindices', '(', 'self', ')', '->', 'List', '[', 'int', ']', ':', 'return', '[', 'idx', 'for', 'idx', 'in', 'numpy', '.', 'unique', '(', 'self', '.', 'refindices', '.', 'values', ')', 'if', 'idx', 'in', 'self', '.', 'RELEVANT_VALUES', ']']
A |list| of all currently relevant indices, calculated as an intercection of the (constant) class attribute `RELEVANT_VALUES` and the (variable) property |IndexMask.refindices|.
['A', '|list|', 'of', 'all', 'currently', 'relevant', 'indices', 'calculated', 'as', 'an', 'intercection', 'of', 'the', '(', 'constant', ')', 'class', 'attribute', 'RELEVANT_VALUES', 'and', 'the', '(', 'variable', ')', 'property', '|IndexMask', '.', 'refindices|', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/masktools.py#L220-L225
3,768
amanusk/s-tui
s_tui/s_tui.py
GraphView.update_displayed_information
def update_displayed_information(self): """ Update all the graphs that are being displayed """ for source in self.controller.sources: source_name = source.get_source_name() if (any(self.graphs_menu.active_sensors[source_name]) or any(self.summary_menu.active_sensors[source_name])): source.update() for graph in self.visible_graphs.values(): graph.update() # update graph summery for summary in self.visible_summaries.values(): summary.update() # Only update clock if not is stress mode if self.controller.stress_conroller.get_current_mode() != 'Monitor': self.clock_view.set_text(seconds_to_text( (timeit.default_timer() - self.controller.stress_start_time)))
python
def update_displayed_information(self): """ Update all the graphs that are being displayed """ for source in self.controller.sources: source_name = source.get_source_name() if (any(self.graphs_menu.active_sensors[source_name]) or any(self.summary_menu.active_sensors[source_name])): source.update() for graph in self.visible_graphs.values(): graph.update() # update graph summery for summary in self.visible_summaries.values(): summary.update() # Only update clock if not is stress mode if self.controller.stress_conroller.get_current_mode() != 'Monitor': self.clock_view.set_text(seconds_to_text( (timeit.default_timer() - self.controller.stress_start_time)))
['def', 'update_displayed_information', '(', 'self', ')', ':', 'for', 'source', 'in', 'self', '.', 'controller', '.', 'sources', ':', 'source_name', '=', 'source', '.', 'get_source_name', '(', ')', 'if', '(', 'any', '(', 'self', '.', 'graphs_menu', '.', 'active_sensors', '[', 'source_name', ']', ')', 'or', 'any', '(', 'self', '.', 'summary_menu', '.', 'active_sensors', '[', 'source_name', ']', ')', ')', ':', 'source', '.', 'update', '(', ')', 'for', 'graph', 'in', 'self', '.', 'visible_graphs', '.', 'values', '(', ')', ':', 'graph', '.', 'update', '(', ')', '# update graph summery', 'for', 'summary', 'in', 'self', '.', 'visible_summaries', '.', 'values', '(', ')', ':', 'summary', '.', 'update', '(', ')', '# Only update clock if not is stress mode', 'if', 'self', '.', 'controller', '.', 'stress_conroller', '.', 'get_current_mode', '(', ')', '!=', "'Monitor'", ':', 'self', '.', 'clock_view', '.', 'set_text', '(', 'seconds_to_text', '(', '(', 'timeit', '.', 'default_timer', '(', ')', '-', 'self', '.', 'controller', '.', 'stress_start_time', ')', ')', ')']
Update all the graphs that are being displayed
['Update', 'all', 'the', 'graphs', 'that', 'are', 'being', 'displayed']
train
https://github.com/amanusk/s-tui/blob/5e89d15081e716024db28ec03b1e3a7710330951/s_tui/s_tui.py#L235-L254
3,769
ethereum/py-evm
eth/vm/stack.py
Stack.swap
def swap(self, position: int) -> None: """ Perform a SWAP operation on the stack. """ idx = -1 * position - 1 try: self.values[-1], self.values[idx] = self.values[idx], self.values[-1] except IndexError: raise InsufficientStack("Insufficient stack items for SWAP{0}".format(position))
python
def swap(self, position: int) -> None: """ Perform a SWAP operation on the stack. """ idx = -1 * position - 1 try: self.values[-1], self.values[idx] = self.values[idx], self.values[-1] except IndexError: raise InsufficientStack("Insufficient stack items for SWAP{0}".format(position))
['def', 'swap', '(', 'self', ',', 'position', ':', 'int', ')', '->', 'None', ':', 'idx', '=', '-', '1', '*', 'position', '-', '1', 'try', ':', 'self', '.', 'values', '[', '-', '1', ']', ',', 'self', '.', 'values', '[', 'idx', ']', '=', 'self', '.', 'values', '[', 'idx', ']', ',', 'self', '.', 'values', '[', '-', '1', ']', 'except', 'IndexError', ':', 'raise', 'InsufficientStack', '(', '"Insufficient stack items for SWAP{0}"', '.', 'format', '(', 'position', ')', ')']
Perform a SWAP operation on the stack.
['Perform', 'a', 'SWAP', 'operation', 'on', 'the', 'stack', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/stack.py#L89-L97
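Usage note (added): swap exchanges the top of the stack with the item `position` slots below it (SWAP1 touches the element just under the top). A standalone sketch of the same index arithmetic on a plain list, where the end of the list is the top of the stack:

# Standalone sketch: list end = stack top; SWAPn swaps the top with the n-th item below it.
def swap(values, position):
    idx = -1 * position - 1
    try:
        values[-1], values[idx] = values[idx], values[-1]
    except IndexError:
        raise Exception('Insufficient stack items for SWAP%d' % position)

stack = [10, 20, 30, 40]   # 40 is the top
swap(stack, 1)             # SWAP1: top <-> item directly beneath it
print(stack)               # [10, 20, 40, 30]
swap(stack, 3)             # SWAP3: top <-> item three slots below
print(stack)               # [30, 20, 40, 10]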
3,770
GetBlimp/django-rest-framework-jwt
rest_framework_jwt/authentication.py
BaseJSONWebTokenAuthentication.authenticate_credentials
def authenticate_credentials(self, payload): """ Returns an active user that matches the payload's user id and email. """ User = get_user_model() username = jwt_get_username_from_payload(payload) if not username: msg = _('Invalid payload.') raise exceptions.AuthenticationFailed(msg) try: user = User.objects.get_by_natural_key(username) except User.DoesNotExist: msg = _('Invalid signature.') raise exceptions.AuthenticationFailed(msg) if not user.is_active: msg = _('User account is disabled.') raise exceptions.AuthenticationFailed(msg) return user
python
def authenticate_credentials(self, payload): """ Returns an active user that matches the payload's user id and email. """ User = get_user_model() username = jwt_get_username_from_payload(payload) if not username: msg = _('Invalid payload.') raise exceptions.AuthenticationFailed(msg) try: user = User.objects.get_by_natural_key(username) except User.DoesNotExist: msg = _('Invalid signature.') raise exceptions.AuthenticationFailed(msg) if not user.is_active: msg = _('User account is disabled.') raise exceptions.AuthenticationFailed(msg) return user
['def', 'authenticate_credentials', '(', 'self', ',', 'payload', ')', ':', 'User', '=', 'get_user_model', '(', ')', 'username', '=', 'jwt_get_username_from_payload', '(', 'payload', ')', 'if', 'not', 'username', ':', 'msg', '=', '_', '(', "'Invalid payload.'", ')', 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', 'msg', ')', 'try', ':', 'user', '=', 'User', '.', 'objects', '.', 'get_by_natural_key', '(', 'username', ')', 'except', 'User', '.', 'DoesNotExist', ':', 'msg', '=', '_', '(', "'Invalid signature.'", ')', 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', 'msg', ')', 'if', 'not', 'user', '.', 'is_active', ':', 'msg', '=', '_', '(', "'User account is disabled.'", ')', 'raise', 'exceptions', '.', 'AuthenticationFailed', '(', 'msg', ')', 'return', 'user']
Returns an active user that matches the payload's user id and email.
['Returns', 'an', 'active', 'user', 'that', 'matches', 'the', 'payload', 's', 'user', 'id', 'and', 'email', '.']
train
https://github.com/GetBlimp/django-rest-framework-jwt/blob/0a0bd402ec21fd6b9a5f715d114411836fbb2923/rest_framework_jwt/authentication.py#L47-L68
3,771
edibledinos/pwnypack
pwnypack/codec.py
deurlform_app
def deurlform_app(parser, cmd, args): # pragma: no cover """ decode a query string into its key value pairs. """ parser.add_argument('value', help='the query string to decode') args = parser.parse_args(args) return ' '.join('%s=%s' % (key, value) for key, values in deurlform(args.value).items() for value in values)
python
def deurlform_app(parser, cmd, args): # pragma: no cover """ decode a query string into its key value pairs. """ parser.add_argument('value', help='the query string to decode') args = parser.parse_args(args) return ' '.join('%s=%s' % (key, value) for key, values in deurlform(args.value).items() for value in values)
['def', 'deurlform_app', '(', 'parser', ',', 'cmd', ',', 'args', ')', ':', '# pragma: no cover', 'parser', '.', 'add_argument', '(', "'value'", ',', 'help', '=', "'the query string to decode'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', ')', 'return', "' '", '.', 'join', '(', "'%s=%s'", '%', '(', 'key', ',', 'value', ')', 'for', 'key', ',', 'values', 'in', 'deurlform', '(', 'args', '.', 'value', ')', '.', 'items', '(', ')', 'for', 'value', 'in', 'values', ')']
decode a query string into its key value pairs.
['decode', 'a', 'query', 'string', 'into', 'its', 'key', 'value', 'pairs', '.']
train
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/codec.py#L533-L540
3,772
inonit/drf-haystack
drf_haystack/query.py
FilterQueryBuilder.build_query
def build_query(self, **filters): """ Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields that have been "registered" in `view.fields`. Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any querystring parameters that are not registered in `view.fields` will be ignored. :param dict[str, list[str]] filters: is an expanded QueryDict or a mapping of keys to a list of parameters. """ applicable_filters = [] applicable_exclusions = [] for param, value in filters.items(): excluding_term = False param_parts = param.split("__") base_param = param_parts[0] # only test against field without lookup negation_keyword = constants.DRF_HAYSTACK_NEGATION_KEYWORD if len(param_parts) > 1 and param_parts[1] == negation_keyword: excluding_term = True param = param.replace("__%s" % negation_keyword, "") # haystack wouldn't understand our negation if self.view.serializer_class: if hasattr(self.view.serializer_class.Meta, 'field_aliases'): old_base = base_param base_param = self.view.serializer_class.Meta.field_aliases.get(base_param, base_param) param = param.replace(old_base, base_param) # need to replace the alias fields = getattr(self.view.serializer_class.Meta, 'fields', []) exclude = getattr(self.view.serializer_class.Meta, 'exclude', []) search_fields = getattr(self.view.serializer_class.Meta, 'search_fields', []) # Skip if the parameter is not listed in the serializer's `fields` # or if it's in the `exclude` list. if ((fields or search_fields) and base_param not in chain(fields, search_fields)) or base_param in exclude or not value: continue field_queries = [] if len(param_parts) > 1 and param_parts[-1] in ('in', 'range'): # `in` and `range` filters expects a list of values field_queries.append(self.view.query_object((param, list(self.tokenize(value, self.view.lookup_sep))))) else: for token in self.tokenize(value, self.view.lookup_sep): field_queries.append(self.view.query_object((param, token))) field_queries = [fq for fq in field_queries if fq] if len(field_queries) > 0: term = six.moves.reduce(operator.or_, field_queries) if excluding_term: applicable_exclusions.append(term) else: applicable_filters.append(term) applicable_filters = six.moves.reduce( self.default_operator, filter(lambda x: x, applicable_filters)) if applicable_filters else [] applicable_exclusions = six.moves.reduce( self.default_operator, filter(lambda x: x, applicable_exclusions)) if applicable_exclusions else [] return applicable_filters, applicable_exclusions
python
def build_query(self, **filters): """ Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields that have been "registered" in `view.fields`. Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any querystring parameters that are not registered in `view.fields` will be ignored. :param dict[str, list[str]] filters: is an expanded QueryDict or a mapping of keys to a list of parameters. """ applicable_filters = [] applicable_exclusions = [] for param, value in filters.items(): excluding_term = False param_parts = param.split("__") base_param = param_parts[0] # only test against field without lookup negation_keyword = constants.DRF_HAYSTACK_NEGATION_KEYWORD if len(param_parts) > 1 and param_parts[1] == negation_keyword: excluding_term = True param = param.replace("__%s" % negation_keyword, "") # haystack wouldn't understand our negation if self.view.serializer_class: if hasattr(self.view.serializer_class.Meta, 'field_aliases'): old_base = base_param base_param = self.view.serializer_class.Meta.field_aliases.get(base_param, base_param) param = param.replace(old_base, base_param) # need to replace the alias fields = getattr(self.view.serializer_class.Meta, 'fields', []) exclude = getattr(self.view.serializer_class.Meta, 'exclude', []) search_fields = getattr(self.view.serializer_class.Meta, 'search_fields', []) # Skip if the parameter is not listed in the serializer's `fields` # or if it's in the `exclude` list. if ((fields or search_fields) and base_param not in chain(fields, search_fields)) or base_param in exclude or not value: continue field_queries = [] if len(param_parts) > 1 and param_parts[-1] in ('in', 'range'): # `in` and `range` filters expects a list of values field_queries.append(self.view.query_object((param, list(self.tokenize(value, self.view.lookup_sep))))) else: for token in self.tokenize(value, self.view.lookup_sep): field_queries.append(self.view.query_object((param, token))) field_queries = [fq for fq in field_queries if fq] if len(field_queries) > 0: term = six.moves.reduce(operator.or_, field_queries) if excluding_term: applicable_exclusions.append(term) else: applicable_filters.append(term) applicable_filters = six.moves.reduce( self.default_operator, filter(lambda x: x, applicable_filters)) if applicable_filters else [] applicable_exclusions = six.moves.reduce( self.default_operator, filter(lambda x: x, applicable_exclusions)) if applicable_exclusions else [] return applicable_filters, applicable_exclusions
['def', 'build_query', '(', 'self', ',', '*', '*', 'filters', ')', ':', 'applicable_filters', '=', '[', ']', 'applicable_exclusions', '=', '[', ']', 'for', 'param', ',', 'value', 'in', 'filters', '.', 'items', '(', ')', ':', 'excluding_term', '=', 'False', 'param_parts', '=', 'param', '.', 'split', '(', '"__"', ')', 'base_param', '=', 'param_parts', '[', '0', ']', '# only test against field without lookup', 'negation_keyword', '=', 'constants', '.', 'DRF_HAYSTACK_NEGATION_KEYWORD', 'if', 'len', '(', 'param_parts', ')', '>', '1', 'and', 'param_parts', '[', '1', ']', '==', 'negation_keyword', ':', 'excluding_term', '=', 'True', 'param', '=', 'param', '.', 'replace', '(', '"__%s"', '%', 'negation_keyword', ',', '""', ')', "# haystack wouldn't understand our negation", 'if', 'self', '.', 'view', '.', 'serializer_class', ':', 'if', 'hasattr', '(', 'self', '.', 'view', '.', 'serializer_class', '.', 'Meta', ',', "'field_aliases'", ')', ':', 'old_base', '=', 'base_param', 'base_param', '=', 'self', '.', 'view', '.', 'serializer_class', '.', 'Meta', '.', 'field_aliases', '.', 'get', '(', 'base_param', ',', 'base_param', ')', 'param', '=', 'param', '.', 'replace', '(', 'old_base', ',', 'base_param', ')', '# need to replace the alias', 'fields', '=', 'getattr', '(', 'self', '.', 'view', '.', 'serializer_class', '.', 'Meta', ',', "'fields'", ',', '[', ']', ')', 'exclude', '=', 'getattr', '(', 'self', '.', 'view', '.', 'serializer_class', '.', 'Meta', ',', "'exclude'", ',', '[', ']', ')', 'search_fields', '=', 'getattr', '(', 'self', '.', 'view', '.', 'serializer_class', '.', 'Meta', ',', "'search_fields'", ',', '[', ']', ')', "# Skip if the parameter is not listed in the serializer's `fields`", "# or if it's in the `exclude` list.", 'if', '(', '(', 'fields', 'or', 'search_fields', ')', 'and', 'base_param', 'not', 'in', 'chain', '(', 'fields', ',', 'search_fields', ')', ')', 'or', 'base_param', 'in', 'exclude', 'or', 'not', 'value', ':', 'continue', 'field_queries', '=', '[', ']', 'if', 'len', '(', 'param_parts', ')', '>', '1', 'and', 'param_parts', '[', '-', '1', ']', 'in', '(', "'in'", ',', "'range'", ')', ':', '# `in` and `range` filters expects a list of values', 'field_queries', '.', 'append', '(', 'self', '.', 'view', '.', 'query_object', '(', '(', 'param', ',', 'list', '(', 'self', '.', 'tokenize', '(', 'value', ',', 'self', '.', 'view', '.', 'lookup_sep', ')', ')', ')', ')', ')', 'else', ':', 'for', 'token', 'in', 'self', '.', 'tokenize', '(', 'value', ',', 'self', '.', 'view', '.', 'lookup_sep', ')', ':', 'field_queries', '.', 'append', '(', 'self', '.', 'view', '.', 'query_object', '(', '(', 'param', ',', 'token', ')', ')', ')', 'field_queries', '=', '[', 'fq', 'for', 'fq', 'in', 'field_queries', 'if', 'fq', ']', 'if', 'len', '(', 'field_queries', ')', '>', '0', ':', 'term', '=', 'six', '.', 'moves', '.', 'reduce', '(', 'operator', '.', 'or_', ',', 'field_queries', ')', 'if', 'excluding_term', ':', 'applicable_exclusions', '.', 'append', '(', 'term', ')', 'else', ':', 'applicable_filters', '.', 'append', '(', 'term', ')', 'applicable_filters', '=', 'six', '.', 'moves', '.', 'reduce', '(', 'self', '.', 'default_operator', ',', 'filter', '(', 'lambda', 'x', ':', 'x', ',', 'applicable_filters', ')', ')', 'if', 'applicable_filters', 'else', '[', ']', 'applicable_exclusions', '=', 'six', '.', 'moves', '.', 'reduce', '(', 'self', '.', 'default_operator', ',', 'filter', '(', 'lambda', 'x', ':', 'x', ',', 'applicable_exclusions', ')', ')', 'if', 'applicable_exclusions', 'else', '[', ']', 'return', 'applicable_filters', ',', 'applicable_exclusions']
Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields that have been "registered" in `view.fields`. Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any querystring parameters that are not registered in `view.fields` will be ignored. :param dict[str, list[str]] filters: is an expanded QueryDict or a mapping of keys to a list of parameters.
['Creates', 'a', 'single', 'SQ', 'filter', 'from', 'querystring', 'parameters', 'that', 'correspond', 'to', 'the', 'SearchIndex', 'fields', 'that', 'have', 'been', 'registered', 'in', 'view', '.', 'fields', '.']
train
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L89-L151
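The build_query record above ORs the tokens of a single querystring parameter and ANDs the resulting terms across parameters. A minimal standalone sketch of that reduce pattern, using plain Python sets of matching document ids in place of Haystack SQ objects; the toy index and the tokenize/combine helpers are illustrative assumptions, not part of drf-haystack.

.. code-block:: python

    import operator
    from functools import reduce

    def tokenize(value, sep=","):
        # Split one raw querystring value into individual search tokens.
        return [token.strip() for token in value.split(sep) if token.strip()]

    def combine(groups):
        # OR the terms belonging to one parameter, AND across parameters.
        # Sets stand in for SQ objects: OR is union, AND is intersection.
        per_param = [reduce(operator.or_, group) for group in groups if group]
        return reduce(operator.and_, per_param) if per_param else set()

    # Toy "index": document id -> set of (field, token) pairs it matches.
    index = {
        1: {("title", "django"), ("author", "alice")},
        2: {("title", "django"), ("author", "bob")},
        3: {("title", "flask"), ("author", "alice")},
    }

    def matches(field, token):
        return {doc for doc, terms in index.items() if (field, token) in terms}

    # ?title=django,flask&author=alice  ->  (django OR flask) AND alice
    groups = [
        [matches("title", t) for t in tokenize("django,flask")],
        [matches("author", t) for t in tokenize("alice")],
    ]
    print(combine(groups))  # {1, 3}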
3,773
wummel/linkchecker
linkcheck/checker/telneturl.py
TelnetUrl.local_check
def local_check (self): """ Warn about empty host names. Else call super.local_check(). """ if not self.host: self.set_result(_("Host is empty"), valid=False) return super(TelnetUrl, self).local_check()
python
def local_check (self): """ Warn about empty host names. Else call super.local_check(). """ if not self.host: self.set_result(_("Host is empty"), valid=False) return super(TelnetUrl, self).local_check()
['def', 'local_check', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'host', ':', 'self', '.', 'set_result', '(', '_', '(', '"Host is empty"', ')', ',', 'valid', '=', 'False', ')', 'return', 'super', '(', 'TelnetUrl', ',', 'self', ')', '.', 'local_check', '(', ')']
Warn about empty host names. Else call super.local_check().
['Warn', 'about', 'empty', 'host', 'names', '.', 'Else', 'call', 'super', '.', 'local_check', '()', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/telneturl.py#L48-L55
3,774
zyga/json-schema-validator
json_schema_validator/schema.py
Schema.disallow
def disallow(self): """ Description of disallowed objects. Disallow must be a type name, a nested schema or a list of those. Type name must be one of ``string``, ``number``, ``integer``, ``boolean``, ``object``, ``array``, ``null`` or ``any``. """ value = self._schema.get("disallow", None) if value is None: return if not isinstance(value, (basestring, dict, list)): raise SchemaError( "disallow value {0!r} is not a simple type name, nested " "schema nor a list of those".format(value)) if isinstance(value, list): disallow_list = value else: disallow_list = [value] seen = set() for js_disallow in disallow_list: if isinstance(js_disallow, dict): # no nested validation here pass else: if js_disallow in seen: raise SchemaError( "disallow value {0!r} contains duplicate element" " {1!r}".format(value, js_disallow)) else: seen.add(js_disallow) if js_disallow not in ( "string", "number", "integer", "boolean", "object", "array", "null", "any"): raise SchemaError( "disallow value {0!r} is not a simple type" " name".format(js_disallow)) return disallow_list
python
def disallow(self): """ Description of disallowed objects. Disallow must be a type name, a nested schema or a list of those. Type name must be one of ``string``, ``number``, ``integer``, ``boolean``, ``object``, ``array``, ``null`` or ``any``. """ value = self._schema.get("disallow", None) if value is None: return if not isinstance(value, (basestring, dict, list)): raise SchemaError( "disallow value {0!r} is not a simple type name, nested " "schema nor a list of those".format(value)) if isinstance(value, list): disallow_list = value else: disallow_list = [value] seen = set() for js_disallow in disallow_list: if isinstance(js_disallow, dict): # no nested validation here pass else: if js_disallow in seen: raise SchemaError( "disallow value {0!r} contains duplicate element" " {1!r}".format(value, js_disallow)) else: seen.add(js_disallow) if js_disallow not in ( "string", "number", "integer", "boolean", "object", "array", "null", "any"): raise SchemaError( "disallow value {0!r} is not a simple type" " name".format(js_disallow)) return disallow_list
['def', 'disallow', '(', 'self', ')', ':', 'value', '=', 'self', '.', '_schema', '.', 'get', '(', '"disallow"', ',', 'None', ')', 'if', 'value', 'is', 'None', ':', 'return', 'if', 'not', 'isinstance', '(', 'value', ',', '(', 'basestring', ',', 'dict', ',', 'list', ')', ')', ':', 'raise', 'SchemaError', '(', '"disallow value {0!r} is not a simple type name, nested "', '"schema nor a list of those"', '.', 'format', '(', 'value', ')', ')', 'if', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'disallow_list', '=', 'value', 'else', ':', 'disallow_list', '=', '[', 'value', ']', 'seen', '=', 'set', '(', ')', 'for', 'js_disallow', 'in', 'disallow_list', ':', 'if', 'isinstance', '(', 'js_disallow', ',', 'dict', ')', ':', '# no nested validation here', 'pass', 'else', ':', 'if', 'js_disallow', 'in', 'seen', ':', 'raise', 'SchemaError', '(', '"disallow value {0!r} contains duplicate element"', '" {1!r}"', '.', 'format', '(', 'value', ',', 'js_disallow', ')', ')', 'else', ':', 'seen', '.', 'add', '(', 'js_disallow', ')', 'if', 'js_disallow', 'not', 'in', '(', '"string"', ',', '"number"', ',', '"integer"', ',', '"boolean"', ',', '"object"', ',', '"array"', ',', '"null"', ',', '"any"', ')', ':', 'raise', 'SchemaError', '(', '"disallow value {0!r} is not a simple type"', '" name"', '.', 'format', '(', 'js_disallow', ')', ')', 'return', 'disallow_list']
Description of disallowed objects. Disallow must be a type name, a nested schema or a list of those. Type name must be one of ``string``, ``number``, ``integer``, ``boolean``, ``object``, ``array``, ``null`` or ``any``.
['Description', 'of', 'disallowed', 'objects', '.']
train
https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L390-L427
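The disallow property in the record above normalizes the value to a list, skips nested schemas, and rejects duplicates and unknown simple type names. A rough standalone sketch of those same checks, with ValueError standing in for the library's SchemaError and Python 3 str assumed instead of basestring.

.. code-block:: python

    SIMPLE_TYPES = {"string", "number", "integer", "boolean",
                    "object", "array", "null", "any"}

    def check_disallow(value):
        # Accept a single type name, a nested schema (dict), or a list of those.
        if not isinstance(value, (str, dict, list)):
            raise ValueError("disallow must be a type name, schema or list")
        disallow_list = value if isinstance(value, list) else [value]
        seen = set()
        for item in disallow_list:
            if isinstance(item, dict):      # nested schema: not validated here
                continue
            if item in seen:
                raise ValueError("duplicate disallow entry: %r" % item)
            seen.add(item)
            if item not in SIMPLE_TYPES:
                raise ValueError("unknown simple type name: %r" % item)
        return disallow_list

    print(check_disallow(["string", {"type": "object"}]))  # passes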
3,775
resync/resync
resync/explorer.py
Explorer.explore_show_head
def explore_show_head(self, uri, check_headers=None): """Do HEAD on uri and show infomation. Will also check headers against any values specified in check_headers. """ print("HEAD %s" % (uri)) if (re.match(r'^\w+:', uri)): # Looks like a URI response = requests.head(uri) else: # Mock up response if we have a local file response = self.head_on_file(uri) print(" status: %s" % (response.status_code)) if (response.status_code == '200'): # print some of the headers for header in ['content-length', 'last-modified', 'lastmod', 'content-type', 'etag']: if header in response.headers: check_str = '' if (check_headers is not None and header in check_headers): if (response.headers[header] == check_headers[header]): check_str = ' MATCHES EXPECTED VALUE' else: check_str = ' EXPECTED %s' % ( check_headers[header]) print( " %s: %s%s" % (header, response.headers[header], check_str))
python
def explore_show_head(self, uri, check_headers=None): """Do HEAD on uri and show infomation. Will also check headers against any values specified in check_headers. """ print("HEAD %s" % (uri)) if (re.match(r'^\w+:', uri)): # Looks like a URI response = requests.head(uri) else: # Mock up response if we have a local file response = self.head_on_file(uri) print(" status: %s" % (response.status_code)) if (response.status_code == '200'): # print some of the headers for header in ['content-length', 'last-modified', 'lastmod', 'content-type', 'etag']: if header in response.headers: check_str = '' if (check_headers is not None and header in check_headers): if (response.headers[header] == check_headers[header]): check_str = ' MATCHES EXPECTED VALUE' else: check_str = ' EXPECTED %s' % ( check_headers[header]) print( " %s: %s%s" % (header, response.headers[header], check_str))
['def', 'explore_show_head', '(', 'self', ',', 'uri', ',', 'check_headers', '=', 'None', ')', ':', 'print', '(', '"HEAD %s"', '%', '(', 'uri', ')', ')', 'if', '(', 're', '.', 'match', '(', "r'^\\w+:'", ',', 'uri', ')', ')', ':', '# Looks like a URI', 'response', '=', 'requests', '.', 'head', '(', 'uri', ')', 'else', ':', '# Mock up response if we have a local file', 'response', '=', 'self', '.', 'head_on_file', '(', 'uri', ')', 'print', '(', '" status: %s"', '%', '(', 'response', '.', 'status_code', ')', ')', 'if', '(', 'response', '.', 'status_code', '==', "'200'", ')', ':', '# print some of the headers', 'for', 'header', 'in', '[', "'content-length'", ',', "'last-modified'", ',', "'lastmod'", ',', "'content-type'", ',', "'etag'", ']', ':', 'if', 'header', 'in', 'response', '.', 'headers', ':', 'check_str', '=', "''", 'if', '(', 'check_headers', 'is', 'not', 'None', 'and', 'header', 'in', 'check_headers', ')', ':', 'if', '(', 'response', '.', 'headers', '[', 'header', ']', '==', 'check_headers', '[', 'header', ']', ')', ':', 'check_str', '=', "' MATCHES EXPECTED VALUE'", 'else', ':', 'check_str', '=', "' EXPECTED %s'", '%', '(', 'check_headers', '[', 'header', ']', ')', 'print', '(', '" %s: %s%s"', '%', '(', 'header', ',', 'response', '.', 'headers', '[', 'header', ']', ',', 'check_str', ')', ')']
Do HEAD on uri and show infomation. Will also check headers against any values specified in check_headers.
['Do', 'HEAD', 'on', 'uri', 'and', 'show', 'infomation', '.']
train
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/explorer.py#L271-L300
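For the HEAD helper above, note that with the requests library response.status_code is an integer, so the header-printing branch only fires when the comparison is against 200 rather than the string '200'. A hedged sketch of the same header report; the commented-out URI is a placeholder, and the helper below is an illustration rather than a drop-in for the resync Explorer method.

.. code-block:: python

    import requests

    def show_head(uri, check_headers=None):
        # HEAD request; status_code is an int in requests, so compare to 200.
        response = requests.head(uri)
        print("  status: %s" % response.status_code)
        if response.status_code == 200:
            for header in ("content-length", "last-modified", "content-type", "etag"):
                if header in response.headers:
                    note = ""
                    if check_headers and header in check_headers:
                        expected = check_headers[header]
                        note = (" MATCHES EXPECTED VALUE"
                                if response.headers[header] == expected
                                else " EXPECTED %s" % expected)
                    print("  %s: %s%s" % (header, response.headers[header], note))

    # show_head("https://example.org/resourcelist.xml")  # placeholder URI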
3,776
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.send_to_address
def send_to_address(self, asset_id, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_TO_ADDRESS, params=params, id=id, endpoint=endpoint)
python
def send_to_address(self, asset_id, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_TO_ADDRESS, params=params, id=id, endpoint=endpoint)
['def', 'send_to_address', '(', 'self', ',', 'asset_id', ',', 'to_addr', ',', 'value', ',', 'fee', '=', 'None', ',', 'change_addr', '=', 'None', ',', 'id', '=', 'None', ',', 'endpoint', '=', 'None', ')', ':', 'params', '=', '[', 'asset_id', ',', 'to_addr', ',', 'value', ']', 'if', 'fee', ':', 'params', '.', 'append', '(', 'fee', ')', 'if', 'fee', 'and', 'change_addr', ':', 'params', '.', 'append', '(', 'change_addr', ')', 'elif', 'not', 'fee', 'and', 'change_addr', ':', 'params', '.', 'append', '(', '0', ')', 'params', '.', 'append', '(', 'change_addr', ')', 'return', 'self', '.', '_call_endpoint', '(', 'SEND_TO_ADDRESS', ',', 'params', '=', 'params', ',', 'id', '=', 'id', ',', 'endpoint', '=', 'endpoint', ')']
Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
['Args', ':', 'asset_id', ':', '(', 'str', ')', 'asset', 'identifier', '(', 'for', 'NEO', ':', 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', 'for', 'GAS', ':', '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7', ')', 'to_addr', ':', '(', 'str', ')', 'destination', 'address', 'value', ':', '(', 'int', '/', 'decimal', ')', 'transfer', 'amount', 'fee', ':', '(', 'decimal', 'optional', ')', 'Paying', 'the', 'handling', 'fee', 'helps', 'elevate', 'the', 'priority', 'of', 'the', 'network', 'to', 'process', 'the', 'transfer', '.', 'It', 'defaults', 'to', '0', 'and', 'can', 'be', 'set', 'to', 'a', 'minimum', 'of', '0', '.', '00000001', '.', 'The', 'low', 'priority', 'threshold', 'is', '0', '.', '001', '.', 'change_addr', ':', '(', 'str', 'optional', ')', 'Change', 'address', 'default', 'is', 'the', 'first', 'standard', 'address', 'in', 'the', 'wallet', '.', 'id', ':', '(', 'int', 'optional', ')', 'id', 'to', 'use', 'for', 'response', 'tracking', 'endpoint', ':', '(', 'RPCEndpoint', 'optional', ')', 'endpoint', 'to', 'specify', 'to', 'use', 'Returns', ':', 'json', 'object', 'of', 'the', 'result', 'or', 'the', 'error', 'encountered', 'in', 'the', 'RPC', 'call']
train
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L381-L402
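The fee/change_addr handling in send_to_address builds a positional JSON-RPC params list in which a change address supplied without a fee forces an explicit 0 into the fee slot. A tiny standalone sketch of just that list-building rule, with no RPC call attached.

.. code-block:: python

    def build_params(asset_id, to_addr, value, fee=None, change_addr=None):
        # Params are positional: a change address without a fee needs an
        # explicit 0 in the fee position so the change address lands last.
        params = [asset_id, to_addr, value]
        if fee:
            params.append(fee)
        if change_addr:
            if not fee:
                params.append(0)
            params.append(change_addr)
        return params

    print(build_params("602c...2de7", "AddrA", 5))
    # ['602c...2de7', 'AddrA', 5]
    print(build_params("602c...2de7", "AddrA", 5, change_addr="AddrB"))
    # ['602c...2de7', 'AddrA', 5, 0, 'AddrB']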
3,777
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
decode_iter
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 end = len(data) - 1 while position < end: obj_size = _UNPACK_INT(data[position:position + 4])[0] elements = data[position:position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options)
python
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 end = len(data) - 1 while position < end: obj_size = _UNPACK_INT(data[position:position + 4])[0] elements = data[position:position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options)
['def', 'decode_iter', '(', 'data', ',', 'codec_options', '=', 'DEFAULT_CODEC_OPTIONS', ')', ':', 'if', 'not', 'isinstance', '(', 'codec_options', ',', 'CodecOptions', ')', ':', 'raise', '_CODEC_OPTIONS_TYPE_ERROR', 'position', '=', '0', 'end', '=', 'len', '(', 'data', ')', '-', '1', 'while', 'position', '<', 'end', ':', 'obj_size', '=', '_UNPACK_INT', '(', 'data', '[', 'position', ':', 'position', '+', '4', ']', ')', '[', '0', ']', 'elements', '=', 'data', '[', 'position', ':', 'position', '+', 'obj_size', ']', 'position', '+=', 'obj_size', 'yield', '_bson_to_dict', '(', 'elements', ',', 'codec_options', ')']
Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8
['Decode', 'BSON', 'data', 'to', 'multiple', 'documents', 'as', 'a', 'generator', '.']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L863-L893
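decode_iter walks concatenated BSON documents by reading a little-endian int32 size prefix and slicing out each document. A self-contained sketch of that length-prefixed iteration using only the struct module, with the BSON decoding step replaced by returning the raw slice.

.. code-block:: python

    import struct

    def iter_length_prefixed(data):
        # Each chunk starts with a little-endian int32 giving its total size
        # (prefix included), mirroring the BSON document layout.
        position, end = 0, len(data) - 1
        while position < end:
            (size,) = struct.unpack("<i", data[position:position + 4])
            yield data[position:position + size]
            position += size

    # Build two chunks: 4-byte size prefix + payload.
    chunk = lambda payload: struct.pack("<i", 4 + len(payload)) + payload
    data = chunk(b"hello") + chunk(b"world!")
    print([c[4:] for c in iter_length_prefixed(data)])  # [b'hello', b'world!']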
3,778
tanghaibao/jcvi
jcvi/compara/synteny.py
fromaligns
def fromaligns(args): """ %prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file. """ p = OptionParser(fromaligns.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) alignsfile, = args fp = must_open(alignsfile) fw = must_open(opts.outfile, "w") for row in fp: if row.startswith("## Alignment"): print("###", file=fw) continue if row[0] == '#' or not row.strip(): continue atoms = row.split(':')[-1].split() print("\t".join(atoms[:2]), file=fw) fw.close()
python
def fromaligns(args): """ %prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file. """ p = OptionParser(fromaligns.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) alignsfile, = args fp = must_open(alignsfile) fw = must_open(opts.outfile, "w") for row in fp: if row.startswith("## Alignment"): print("###", file=fw) continue if row[0] == '#' or not row.strip(): continue atoms = row.split(':')[-1].split() print("\t".join(atoms[:2]), file=fw) fw.close()
['def', 'fromaligns', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'fromaligns', '.', '__doc__', ')', 'p', '.', 'set_outfile', '(', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'alignsfile', ',', '=', 'args', 'fp', '=', 'must_open', '(', 'alignsfile', ')', 'fw', '=', 'must_open', '(', 'opts', '.', 'outfile', ',', '"w"', ')', 'for', 'row', 'in', 'fp', ':', 'if', 'row', '.', 'startswith', '(', '"## Alignment"', ')', ':', 'print', '(', '"###"', ',', 'file', '=', 'fw', ')', 'continue', 'if', 'row', '[', '0', ']', '==', "'#'", 'or', 'not', 'row', '.', 'strip', '(', ')', ':', 'continue', 'atoms', '=', 'row', '.', 'split', '(', "':'", ')', '[', '-', '1', ']', '.', 'split', '(', ')', 'print', '(', '"\\t"', '.', 'join', '(', 'atoms', '[', ':', '2', ']', ')', ',', 'file', '=', 'fw', ')', 'fw', '.', 'close', '(', ')']
%prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file.
['%prog', 'fromaligns', 'out', '.', 'aligns']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L546-L570
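fromaligns turns each "## Alignment" header into a "###" block separator and keeps the first two fields after the colon of every data row. A small standalone sketch on made-up input lines; the sample rows only approximate old MCscan .aligns output.

.. code-block:: python

    def convert_aligns(lines):
        # Old MCscan .aligns text -> anchors-style rows.
        out = []
        for row in lines:
            if row.startswith("## Alignment"):
                out.append("###")
                continue
            if not row.strip() or row[0] == "#":
                continue
            atoms = row.split(":")[-1].split()
            out.append("\t".join(atoms[:2]))
        return out

    sample = [
        "## Alignment 0: score=1200 e_value=0 N=2",
        "  0-  0:\tgeneA\tgeneX\t1e-50",
        "  0-  1:\tgeneB\tgeneY\t2e-40",
    ]
    print("\n".join(convert_aligns(sample)))
    # ###
    # geneA   geneX
    # geneB   geneY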
3,779
gabstopper/smc-python
smc/core/node.py
Node.loopback_interface
def loopback_interface(self): """ Loopback interfaces for this node. This will return empty if the engine is not a layer 3 firewall type:: >>> engine = Engine('dingo') >>> for node in engine.nodes: ... for loopback in node.loopback_interface: ... loopback ... LoopbackInterface(address=172.20.1.1, nodeid=1, rank=1) LoopbackInterface(address=172.31.1.1, nodeid=1, rank=2) LoopbackInterface(address=2.2.2.2, nodeid=1, rank=3) :rtype: list(LoopbackInterface) """ for lb in self.data.get('loopback_node_dedicated_interface', []): yield LoopbackInterface(lb, self._engine)
python
def loopback_interface(self): """ Loopback interfaces for this node. This will return empty if the engine is not a layer 3 firewall type:: >>> engine = Engine('dingo') >>> for node in engine.nodes: ... for loopback in node.loopback_interface: ... loopback ... LoopbackInterface(address=172.20.1.1, nodeid=1, rank=1) LoopbackInterface(address=172.31.1.1, nodeid=1, rank=2) LoopbackInterface(address=2.2.2.2, nodeid=1, rank=3) :rtype: list(LoopbackInterface) """ for lb in self.data.get('loopback_node_dedicated_interface', []): yield LoopbackInterface(lb, self._engine)
['def', 'loopback_interface', '(', 'self', ')', ':', 'for', 'lb', 'in', 'self', '.', 'data', '.', 'get', '(', "'loopback_node_dedicated_interface'", ',', '[', ']', ')', ':', 'yield', 'LoopbackInterface', '(', 'lb', ',', 'self', '.', '_engine', ')']
Loopback interfaces for this node. This will return empty if the engine is not a layer 3 firewall type:: >>> engine = Engine('dingo') >>> for node in engine.nodes: ... for loopback in node.loopback_interface: ... loopback ... LoopbackInterface(address=172.20.1.1, nodeid=1, rank=1) LoopbackInterface(address=172.31.1.1, nodeid=1, rank=2) LoopbackInterface(address=2.2.2.2, nodeid=1, rank=3) :rtype: list(LoopbackInterface)
['Loopback', 'interfaces', 'for', 'this', 'node', '.', 'This', 'will', 'return', 'empty', 'if', 'the', 'engine', 'is', 'not', 'a', 'layer', '3', 'firewall', 'type', '::', '>>>', 'engine', '=', 'Engine', '(', 'dingo', ')', '>>>', 'for', 'node', 'in', 'engine', '.', 'nodes', ':', '...', 'for', 'loopback', 'in', 'node', '.', 'loopback_interface', ':', '...', 'loopback', '...', 'LoopbackInterface', '(', 'address', '=', '172', '.', '20', '.', '1', '.', '1', 'nodeid', '=', '1', 'rank', '=', '1', ')', 'LoopbackInterface', '(', 'address', '=', '172', '.', '31', '.', '1', '.', '1', 'nodeid', '=', '1', 'rank', '=', '2', ')', 'LoopbackInterface', '(', 'address', '=', '2', '.', '2', '.', '2', '.', '2', 'nodeid', '=', '1', 'rank', '=', '3', ')', ':', 'rtype', ':', 'list', '(', 'LoopbackInterface', ')']
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/node.py#L101-L118
3,780
radjkarl/imgProcessor
imgProcessor/filters/filterVerticalLines.py
filterVerticalLines
def filterVerticalLines(arr, min_line_length=4): """ Remove vertical lines in boolean array if linelength >=min_line_length """ gy = arr.shape[0] gx = arr.shape[1] mn = min_line_length-1 for i in range(gy): for j in range(gx): if arr[i,j]: for d in range(min_line_length): if not arr[i+d,j]: break if d == mn: d = 0 while True: if not arr[i+d,j]: break arr[i+d,j] = 0 d +=1
python
def filterVerticalLines(arr, min_line_length=4): """ Remove vertical lines in boolean array if linelength >=min_line_length """ gy = arr.shape[0] gx = arr.shape[1] mn = min_line_length-1 for i in range(gy): for j in range(gx): if arr[i,j]: for d in range(min_line_length): if not arr[i+d,j]: break if d == mn: d = 0 while True: if not arr[i+d,j]: break arr[i+d,j] = 0 d +=1
['def', 'filterVerticalLines', '(', 'arr', ',', 'min_line_length', '=', '4', ')', ':', 'gy', '=', 'arr', '.', 'shape', '[', '0', ']', 'gx', '=', 'arr', '.', 'shape', '[', '1', ']', 'mn', '=', 'min_line_length', '-', '1', 'for', 'i', 'in', 'range', '(', 'gy', ')', ':', 'for', 'j', 'in', 'range', '(', 'gx', ')', ':', 'if', 'arr', '[', 'i', ',', 'j', ']', ':', 'for', 'd', 'in', 'range', '(', 'min_line_length', ')', ':', 'if', 'not', 'arr', '[', 'i', '+', 'd', ',', 'j', ']', ':', 'break', 'if', 'd', '==', 'mn', ':', 'd', '=', '0', 'while', 'True', ':', 'if', 'not', 'arr', '[', 'i', '+', 'd', ',', 'j', ']', ':', 'break', 'arr', '[', 'i', '+', 'd', ',', 'j', ']', '=', '0', 'd', '+=', '1']
Remove vertical lines in boolean array if linelength >=min_line_length
['Remove', 'vertical', 'lines', 'in', 'boolean', 'array', 'if', 'linelength', '>', '=', 'min_line_length']
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/filterVerticalLines.py#L4-L23
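filterVerticalLines clears vertical runs of set pixels once they reach min_line_length. A pure-Python sketch of the same idea on a list-of-lists mask, written to stop at the bottom edge explicitly; the original indexes i+d without an explicit bound check, so this is an interpretation of the intent rather than a drop-in replacement.

.. code-block:: python

    def clear_vertical_runs(mask, min_line_length=4):
        # mask: list of rows of 0/1; zero out any vertical run of 1s whose
        # length is at least min_line_length.
        rows, cols = len(mask), len(mask[0])
        for j in range(cols):
            i = 0
            while i < rows:
                if mask[i][j]:
                    run = i
                    while run < rows and mask[run][j]:
                        run += 1
                    if run - i >= min_line_length:
                        for r in range(i, run):
                            mask[r][j] = 0
                    i = run
                else:
                    i += 1
        return mask

    grid = [[0, 1], [0, 1], [0, 1], [0, 1], [0, 0]]
    print(clear_vertical_runs(grid, min_line_length=4))  # column 1 cleared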
3,781
janpipek/physt
physt/histogram_nd.py
HistogramND.numpy_bins
def numpy_bins(self) -> List[np.ndarray]: """Numpy-like bins (if available).""" return [binning.numpy_bins for binning in self._binnings]
python
def numpy_bins(self) -> List[np.ndarray]: """Numpy-like bins (if available).""" return [binning.numpy_bins for binning in self._binnings]
['def', 'numpy_bins', '(', 'self', ')', '->', 'List', '[', 'np', '.', 'ndarray', ']', ':', 'return', '[', 'binning', '.', 'numpy_bins', 'for', 'binning', 'in', 'self', '.', '_binnings', ']']
Numpy-like bins (if available).
['Numpy', '-', 'like', 'bins', '(', 'if', 'available', ')', '.']
train
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_nd.py#L67-L69
3,782
manns/pyspread
pyspread/src/lib/vlc.py
libvlc_audio_output_list_release
def libvlc_audio_output_list_release(p_list): '''Frees the list of available audio output modules. @param p_list: list with audio outputs for release. ''' f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \ _Cfunction('libvlc_audio_output_list_release', ((1,),), None, None, ctypes.POINTER(AudioOutput)) return f(p_list)
python
def libvlc_audio_output_list_release(p_list): '''Frees the list of available audio output modules. @param p_list: list with audio outputs for release. ''' f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \ _Cfunction('libvlc_audio_output_list_release', ((1,),), None, None, ctypes.POINTER(AudioOutput)) return f(p_list)
['def', 'libvlc_audio_output_list_release', '(', 'p_list', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_audio_output_list_release'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_audio_output_list_release'", ',', '(', '(', '1', ',', ')', ',', ')', ',', 'None', ',', 'None', ',', 'ctypes', '.', 'POINTER', '(', 'AudioOutput', ')', ')', 'return', 'f', '(', 'p_list', ')']
Frees the list of available audio output modules. @param p_list: list with audio outputs for release.
['Frees', 'the', 'list', 'of', 'available', 'audio', 'output', 'modules', '.']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L6052-L6059
3,783
pymc-devs/pymc
pymc/distributions.py
rexponweib
def rexponweib(alpha, k, loc=0, scale=1, size=None): """ Random exponentiated Weibull variates. """ q = np.random.uniform(size=size) r = flib.exponweib_ppf(q, alpha, k) return loc + r * scale
python
def rexponweib(alpha, k, loc=0, scale=1, size=None): """ Random exponentiated Weibull variates. """ q = np.random.uniform(size=size) r = flib.exponweib_ppf(q, alpha, k) return loc + r * scale
['def', 'rexponweib', '(', 'alpha', ',', 'k', ',', 'loc', '=', '0', ',', 'scale', '=', '1', ',', 'size', '=', 'None', ')', ':', 'q', '=', 'np', '.', 'random', '.', 'uniform', '(', 'size', '=', 'size', ')', 'r', '=', 'flib', '.', 'exponweib_ppf', '(', 'q', ',', 'alpha', ',', 'k', ')', 'return', 'loc', '+', 'r', '*', 'scale']
Random exponentiated Weibull variates.
['Random', 'exponentiated', 'Weibull', 'variates', '.']
train
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1225-L1232
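rexponweib draws uniform variates and maps them through the exponentiated Weibull percent-point function. flib.exponweib_ppf is a compiled PyMC helper; assuming the standard parameterisation F(x) = (1 - exp(-x**k))**alpha, the inverse CDF has the closed form used in this NumPy sketch (the correspondence of alpha and k to the flib arguments is an assumption).

.. code-block:: python

    import numpy as np

    def exponweib_ppf(q, alpha, k):
        # Inverse CDF of the standard exponentiated Weibull:
        # F(x) = (1 - exp(-x**k)) ** alpha
        q = np.asarray(q, dtype=float)
        return (-np.log(1.0 - q ** (1.0 / alpha))) ** (1.0 / k)

    def rexponweib(alpha, k, loc=0, scale=1, size=None):
        # Inverse-CDF sampling: uniform draws pushed through the ppf.
        q = np.random.uniform(size=size)
        return loc + exponweib_ppf(q, alpha, k) * scale

    np.random.seed(0)
    print(rexponweib(2.5, 1.7, loc=1.0, scale=2.0, size=3))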
3,784
Dallinger/Dallinger
dallinger/heroku/clock.py
run_check
def run_check(participants, config, reference_time): """For each participant, if they've been active for longer than the experiment duration + 2 minutes, we take action. """ recruiters_with_late_participants = defaultdict(list) for p in participants: timeline = ParticipationTime(p, reference_time, config) if timeline.is_overdue: print( "Error: participant {} with status {} has been playing for too " "long - their recruiter will be notified.".format(p.id, p.status) ) recruiters_with_late_participants[p.recruiter_id].append(p) for recruiter_id, participants in recruiters_with_late_participants.items(): recruiter = recruiters.by_name(recruiter_id) recruiter.notify_duration_exceeded(participants, reference_time)
python
def run_check(participants, config, reference_time): """For each participant, if they've been active for longer than the experiment duration + 2 minutes, we take action. """ recruiters_with_late_participants = defaultdict(list) for p in participants: timeline = ParticipationTime(p, reference_time, config) if timeline.is_overdue: print( "Error: participant {} with status {} has been playing for too " "long - their recruiter will be notified.".format(p.id, p.status) ) recruiters_with_late_participants[p.recruiter_id].append(p) for recruiter_id, participants in recruiters_with_late_participants.items(): recruiter = recruiters.by_name(recruiter_id) recruiter.notify_duration_exceeded(participants, reference_time)
['def', 'run_check', '(', 'participants', ',', 'config', ',', 'reference_time', ')', ':', 'recruiters_with_late_participants', '=', 'defaultdict', '(', 'list', ')', 'for', 'p', 'in', 'participants', ':', 'timeline', '=', 'ParticipationTime', '(', 'p', ',', 'reference_time', ',', 'config', ')', 'if', 'timeline', '.', 'is_overdue', ':', 'print', '(', '"Error: participant {} with status {} has been playing for too "', '"long - their recruiter will be notified."', '.', 'format', '(', 'p', '.', 'id', ',', 'p', '.', 'status', ')', ')', 'recruiters_with_late_participants', '[', 'p', '.', 'recruiter_id', ']', '.', 'append', '(', 'p', ')', 'for', 'recruiter_id', ',', 'participants', 'in', 'recruiters_with_late_participants', '.', 'items', '(', ')', ':', 'recruiter', '=', 'recruiters', '.', 'by_name', '(', 'recruiter_id', ')', 'recruiter', '.', 'notify_duration_exceeded', '(', 'participants', ',', 'reference_time', ')']
For each participant, if they've been active for longer than the experiment duration + 2 minutes, we take action.
['For', 'each', 'participant', 'if', 'they', 've', 'been', 'active', 'for', 'longer', 'than', 'the', 'experiment', 'duration', '+', '2', 'minutes', 'we', 'take', 'action', '.']
train
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/heroku/clock.py#L19-L35
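run_check groups overdue participants by recruiter before notifying each recruiter once. A standalone sketch of that defaultdict grouping step, with a simple elapsed-time check standing in for ParticipationTime; the tuple layout used for participants is an assumption made for the example.

.. code-block:: python

    from collections import defaultdict
    from datetime import datetime, timedelta

    def group_overdue(participants, reference_time, allowed_minutes):
        # participant: (id, recruiter_id, started_at)
        late_by_recruiter = defaultdict(list)
        for pid, recruiter_id, started_at in participants:
            if reference_time - started_at > timedelta(minutes=allowed_minutes):
                late_by_recruiter[recruiter_id].append(pid)
        return late_by_recruiter

    now = datetime(2024, 1, 1, 12, 0)
    people = [
        (1, "mturk", now - timedelta(minutes=90)),
        (2, "mturk", now - timedelta(minutes=10)),
        (3, "cli", now - timedelta(minutes=200)),
    ]
    print(dict(group_overdue(people, now, allowed_minutes=62)))
    # {'mturk': [1], 'cli': [3]}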
3,785
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
BigtableClient.mutate_rows
def mutate_rows( self, table_name, entries, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "mutate_rows" not in self._inner_api_calls: self._inner_api_calls[ "mutate_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.mutate_rows, default_retry=self._method_configs["MutateRows"].retry, default_timeout=self._method_configs["MutateRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.MutateRowsRequest( table_name=table_name, entries=entries, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["mutate_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
python
def mutate_rows( self, table_name, entries, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "mutate_rows" not in self._inner_api_calls: self._inner_api_calls[ "mutate_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.mutate_rows, default_retry=self._method_configs["MutateRows"].retry, default_timeout=self._method_configs["MutateRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.MutateRowsRequest( table_name=table_name, entries=entries, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["mutate_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
['def', 'mutate_rows', '(', 'self', ',', 'table_name', ',', 'entries', ',', 'app_profile_id', '=', 'None', ',', 'retry', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'timeout', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'metadata', '=', 'None', ',', ')', ':', '# Wrap the transport method to add retry and timeout logic.', 'if', '"mutate_rows"', 'not', 'in', 'self', '.', '_inner_api_calls', ':', 'self', '.', '_inner_api_calls', '[', '"mutate_rows"', ']', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'wrap_method', '(', 'self', '.', 'transport', '.', 'mutate_rows', ',', 'default_retry', '=', 'self', '.', '_method_configs', '[', '"MutateRows"', ']', '.', 'retry', ',', 'default_timeout', '=', 'self', '.', '_method_configs', '[', '"MutateRows"', ']', '.', 'timeout', ',', 'client_info', '=', 'self', '.', '_client_info', ',', ')', 'request', '=', 'bigtable_pb2', '.', 'MutateRowsRequest', '(', 'table_name', '=', 'table_name', ',', 'entries', '=', 'entries', ',', 'app_profile_id', '=', 'app_profile_id', ')', 'if', 'metadata', 'is', 'None', ':', 'metadata', '=', '[', ']', 'metadata', '=', 'list', '(', 'metadata', ')', 'try', ':', 'routing_header', '=', '[', '(', '"table_name"', ',', 'table_name', ')', ']', 'except', 'AttributeError', ':', 'pass', 'else', ':', 'routing_metadata', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'routing_header', '.', 'to_grpc_metadata', '(', 'routing_header', ')', 'metadata', '.', 'append', '(', 'routing_metadata', ')', 'return', 'self', '.', '_inner_api_calls', '[', '"mutate_rows"', ']', '(', 'request', ',', 'retry', '=', 'retry', ',', 'timeout', '=', 'timeout', ',', 'metadata', '=', 'metadata', ')']
Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
['Mutates', 'multiple', 'rows', 'in', 'a', 'batch', '.', 'Each', 'individual', 'row', 'is', 'mutated', 'atomically', 'as', 'in', 'MutateRow', 'but', 'the', 'entire', 'batch', 'is', 'not', 'executed', 'atomically', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L452-L540
3,786
limpyd/redis-limpyd
limpyd/indexes.py
BaseRangeIndex.get_filtered_keys
def get_filtered_keys(self, suffix, *args, **kwargs): """Returns the index key for the given args "value" (`args`) Parameters ---------- kwargs: dict use_lua: bool Default to ``True``, if scripting is supported. If ``True``, the process of reading from the sorted-set, extracting the primary keys, excluding some values if needed, and putting the primary keys in a set or zset, is done in lua at the redis level. Else, data is fetched, manipulated here, then returned to redis. For the other parameters, see ``BaseIndex.get_filtered_keys`` """ accepted_key_types = kwargs.get('accepted_key_types', None) if accepted_key_types\ and 'set' not in accepted_key_types and 'zset' not in accepted_key_types: raise ImplementationError( '%s can only return keys of type "set" or "zset"' % self.__class__.__name__ ) key_type = 'set' if not accepted_key_types or 'set' in accepted_key_types else 'zset' tmp_key = unique_key(self.connection) args = list(args) # special "in" case: we get n keys and make an unionstore with them then return this key if suffix == 'in': values = set(args.pop()) if not values: return [] # no keys in_keys = [ self.get_filtered_keys('eq', *(args+[value]), **kwargs)[0][0] for value in values ] if key_type == 'set': self.connection.sunionstore(tmp_key, *in_keys) else: self.connection.zunionstore(tmp_key, *in_keys) # we can delete the temporary keys for in_key in in_keys: self.connection.delete(in_key) return [(tmp_key, key_type, True)] use_lua = self.model.database.support_scripting() and kwargs.get('use_lua', True) key = self.get_storage_key(*args) value = self.normalize_value(args[-1], transform=False) real_suffix = self.remove_prefix(suffix) if use_lua: start, end, exclude = self.get_boundaries(real_suffix, value) self.call_script(key, tmp_key, key_type, start, end, exclude) else: pks = self.get_pks_for_filter(key, real_suffix, value) if pks: if key_type == 'set': self.connection.sadd(tmp_key, *pks) else: self.connection.zadd(tmp_key, **{pk: idx for idx, pk in enumerate(pks)}) return [(tmp_key, key_type, True)]
python
def get_filtered_keys(self, suffix, *args, **kwargs): """Returns the index key for the given args "value" (`args`) Parameters ---------- kwargs: dict use_lua: bool Default to ``True``, if scripting is supported. If ``True``, the process of reading from the sorted-set, extracting the primary keys, excluding some values if needed, and putting the primary keys in a set or zset, is done in lua at the redis level. Else, data is fetched, manipulated here, then returned to redis. For the other parameters, see ``BaseIndex.get_filtered_keys`` """ accepted_key_types = kwargs.get('accepted_key_types', None) if accepted_key_types\ and 'set' not in accepted_key_types and 'zset' not in accepted_key_types: raise ImplementationError( '%s can only return keys of type "set" or "zset"' % self.__class__.__name__ ) key_type = 'set' if not accepted_key_types or 'set' in accepted_key_types else 'zset' tmp_key = unique_key(self.connection) args = list(args) # special "in" case: we get n keys and make an unionstore with them then return this key if suffix == 'in': values = set(args.pop()) if not values: return [] # no keys in_keys = [ self.get_filtered_keys('eq', *(args+[value]), **kwargs)[0][0] for value in values ] if key_type == 'set': self.connection.sunionstore(tmp_key, *in_keys) else: self.connection.zunionstore(tmp_key, *in_keys) # we can delete the temporary keys for in_key in in_keys: self.connection.delete(in_key) return [(tmp_key, key_type, True)] use_lua = self.model.database.support_scripting() and kwargs.get('use_lua', True) key = self.get_storage_key(*args) value = self.normalize_value(args[-1], transform=False) real_suffix = self.remove_prefix(suffix) if use_lua: start, end, exclude = self.get_boundaries(real_suffix, value) self.call_script(key, tmp_key, key_type, start, end, exclude) else: pks = self.get_pks_for_filter(key, real_suffix, value) if pks: if key_type == 'set': self.connection.sadd(tmp_key, *pks) else: self.connection.zadd(tmp_key, **{pk: idx for idx, pk in enumerate(pks)}) return [(tmp_key, key_type, True)]
['def', 'get_filtered_keys', '(', 'self', ',', 'suffix', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'accepted_key_types', '=', 'kwargs', '.', 'get', '(', "'accepted_key_types'", ',', 'None', ')', 'if', 'accepted_key_types', 'and', "'set'", 'not', 'in', 'accepted_key_types', 'and', "'zset'", 'not', 'in', 'accepted_key_types', ':', 'raise', 'ImplementationError', '(', '\'%s can only return keys of type "set" or "zset"\'', '%', 'self', '.', '__class__', '.', '__name__', ')', 'key_type', '=', "'set'", 'if', 'not', 'accepted_key_types', 'or', "'set'", 'in', 'accepted_key_types', 'else', "'zset'", 'tmp_key', '=', 'unique_key', '(', 'self', '.', 'connection', ')', 'args', '=', 'list', '(', 'args', ')', '# special "in" case: we get n keys and make an unionstore with them then return this key', 'if', 'suffix', '==', "'in'", ':', 'values', '=', 'set', '(', 'args', '.', 'pop', '(', ')', ')', 'if', 'not', 'values', ':', 'return', '[', ']', '# no keys', 'in_keys', '=', '[', 'self', '.', 'get_filtered_keys', '(', "'eq'", ',', '*', '(', 'args', '+', '[', 'value', ']', ')', ',', '*', '*', 'kwargs', ')', '[', '0', ']', '[', '0', ']', 'for', 'value', 'in', 'values', ']', 'if', 'key_type', '==', "'set'", ':', 'self', '.', 'connection', '.', 'sunionstore', '(', 'tmp_key', ',', '*', 'in_keys', ')', 'else', ':', 'self', '.', 'connection', '.', 'zunionstore', '(', 'tmp_key', ',', '*', 'in_keys', ')', '# we can delete the temporary keys', 'for', 'in_key', 'in', 'in_keys', ':', 'self', '.', 'connection', '.', 'delete', '(', 'in_key', ')', 'return', '[', '(', 'tmp_key', ',', 'key_type', ',', 'True', ')', ']', 'use_lua', '=', 'self', '.', 'model', '.', 'database', '.', 'support_scripting', '(', ')', 'and', 'kwargs', '.', 'get', '(', "'use_lua'", ',', 'True', ')', 'key', '=', 'self', '.', 'get_storage_key', '(', '*', 'args', ')', 'value', '=', 'self', '.', 'normalize_value', '(', 'args', '[', '-', '1', ']', ',', 'transform', '=', 'False', ')', 'real_suffix', '=', 'self', '.', 'remove_prefix', '(', 'suffix', ')', 'if', 'use_lua', ':', 'start', ',', 'end', ',', 'exclude', '=', 'self', '.', 'get_boundaries', '(', 'real_suffix', ',', 'value', ')', 'self', '.', 'call_script', '(', 'key', ',', 'tmp_key', ',', 'key_type', ',', 'start', ',', 'end', ',', 'exclude', ')', 'else', ':', 'pks', '=', 'self', '.', 'get_pks_for_filter', '(', 'key', ',', 'real_suffix', ',', 'value', ')', 'if', 'pks', ':', 'if', 'key_type', '==', "'set'", ':', 'self', '.', 'connection', '.', 'sadd', '(', 'tmp_key', ',', '*', 'pks', ')', 'else', ':', 'self', '.', 'connection', '.', 'zadd', '(', 'tmp_key', ',', '*', '*', '{', 'pk', ':', 'idx', 'for', 'idx', ',', 'pk', 'in', 'enumerate', '(', 'pks', ')', '}', ')', 'return', '[', '(', 'tmp_key', ',', 'key_type', ',', 'True', ')', ']']
Returns the index key for the given args "value" (`args`) Parameters ---------- kwargs: dict use_lua: bool Default to ``True``, if scripting is supported. If ``True``, the process of reading from the sorted-set, extracting the primary keys, excluding some values if needed, and putting the primary keys in a set or zset, is done in lua at the redis level. Else, data is fetched, manipulated here, then returned to redis. For the other parameters, see ``BaseIndex.get_filtered_keys``
['Returns', 'the', 'index', 'key', 'for', 'the', 'given', 'args', 'value', '(', 'args', ')']
train
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/indexes.py#L915-L986
3,787
inasafe/inasafe
safe/gui/tools/wizard/step_fc55_agglayer_from_canvas.py
StepFcAggLayerFromCanvas.selected_canvas_agglayer
def selected_canvas_agglayer(self): """Obtain the canvas aggregation layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer """ if self.lstCanvasAggLayers.selectedItems(): item = self.lstCanvasAggLayers.currentItem() else: return None try: layer_id = item.data(QtCore.Qt.UserRole) except (AttributeError, NameError): layer_id = None layer = QgsProject.instance().mapLayer(layer_id) return layer
python
def selected_canvas_agglayer(self): """Obtain the canvas aggregation layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer """ if self.lstCanvasAggLayers.selectedItems(): item = self.lstCanvasAggLayers.currentItem() else: return None try: layer_id = item.data(QtCore.Qt.UserRole) except (AttributeError, NameError): layer_id = None layer = QgsProject.instance().mapLayer(layer_id) return layer
['def', 'selected_canvas_agglayer', '(', 'self', ')', ':', 'if', 'self', '.', 'lstCanvasAggLayers', '.', 'selectedItems', '(', ')', ':', 'item', '=', 'self', '.', 'lstCanvasAggLayers', '.', 'currentItem', '(', ')', 'else', ':', 'return', 'None', 'try', ':', 'layer_id', '=', 'item', '.', 'data', '(', 'QtCore', '.', 'Qt', '.', 'UserRole', ')', 'except', '(', 'AttributeError', ',', 'NameError', ')', ':', 'layer_id', '=', 'None', 'layer', '=', 'QgsProject', '.', 'instance', '(', ')', '.', 'mapLayer', '(', 'layer_id', ')', 'return', 'layer']
Obtain the canvas aggregation layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer
['Obtain', 'the', 'canvas', 'aggregation', 'layer', 'selected', 'by', 'user', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc55_agglayer_from_canvas.py#L76-L92
3,788
cloudera/cm_api
python/src/cm_api/endpoints/types.py
json_to_config
def json_to_config(dic, full = False): """ Converts a JSON-decoded config dictionary to a python dictionary. When materializing the full view, the values in the dictionary will be instances of ApiConfig, instead of strings. @param dic: JSON-decoded config dictionary. @param full: Whether to materialize the full view of the config data. @return: Python dictionary with config data. """ config = { } for entry in dic['items']: k = entry['name'] if full: config[k] = ApiConfig.from_json_dict(entry, None) else: config[k] = entry.get('value') return config
python
def json_to_config(dic, full = False): """ Converts a JSON-decoded config dictionary to a python dictionary. When materializing the full view, the values in the dictionary will be instances of ApiConfig, instead of strings. @param dic: JSON-decoded config dictionary. @param full: Whether to materialize the full view of the config data. @return: Python dictionary with config data. """ config = { } for entry in dic['items']: k = entry['name'] if full: config[k] = ApiConfig.from_json_dict(entry, None) else: config[k] = entry.get('value') return config
['def', 'json_to_config', '(', 'dic', ',', 'full', '=', 'False', ')', ':', 'config', '=', '{', '}', 'for', 'entry', 'in', 'dic', '[', "'items'", ']', ':', 'k', '=', 'entry', '[', "'name'", ']', 'if', 'full', ':', 'config', '[', 'k', ']', '=', 'ApiConfig', '.', 'from_json_dict', '(', 'entry', ',', 'None', ')', 'else', ':', 'config', '[', 'k', ']', '=', 'entry', '.', 'get', '(', "'value'", ')', 'return', 'config']
Converts a JSON-decoded config dictionary to a python dictionary. When materializing the full view, the values in the dictionary will be instances of ApiConfig, instead of strings. @param dic: JSON-decoded config dictionary. @param full: Whether to materialize the full view of the config data. @return: Python dictionary with config data.
['Converts', 'a', 'JSON', '-', 'decoded', 'config', 'dictionary', 'to', 'a', 'python', 'dictionary', '.']
train
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/types.py#L1310-L1328
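json_to_config flattens the Cloudera Manager API's {"items": [...]} shape into a plain name-to-value dict. A sketch of the summary-view branch on a made-up payload; the full view needs the ApiConfig class, which is not assumed here.

.. code-block:: python

    def json_to_config(dic):
        # Summary view only: name -> value, with missing values becoming None.
        return {entry["name"]: entry.get("value") for entry in dic["items"]}

    payload = {
        "items": [
            {"name": "namenode_java_heapsize", "value": "1073741824"},
            {"name": "dfs_replication"},        # unset -> None
        ]
    }
    print(json_to_config(payload))
    # {'namenode_java_heapsize': '1073741824', 'dfs_replication': None}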
3,789
kolypto/py-asynctools
asynctools/threading/Parallel.py
Parallel._thread
def _thread(self): """ Thread entry point: does the job once, stored results, and dies. """ # Get args, kwargs = self._jobs.get() # Stop thread when (None, None) comes in if args is None and kwargs is None: return None # Wrappers should exit as well # Work try: self._results.append(self._worker(*args, **kwargs)) return True except Exception as e: self._errors.append(e) return False finally: self._jobs.task_done() with self._jobfinished: self._jobfinished.notify()
python
def _thread(self): """ Thread entry point: does the job once, stored results, and dies. """ # Get args, kwargs = self._jobs.get() # Stop thread when (None, None) comes in if args is None and kwargs is None: return None # Wrappers should exit as well # Work try: self._results.append(self._worker(*args, **kwargs)) return True except Exception as e: self._errors.append(e) return False finally: self._jobs.task_done() with self._jobfinished: self._jobfinished.notify()
['def', '_thread', '(', 'self', ')', ':', '# Get', 'args', ',', 'kwargs', '=', 'self', '.', '_jobs', '.', 'get', '(', ')', '# Stop thread when (None, None) comes in', 'if', 'args', 'is', 'None', 'and', 'kwargs', 'is', 'None', ':', 'return', 'None', '# Wrappers should exit as well', '# Work', 'try', ':', 'self', '.', '_results', '.', 'append', '(', 'self', '.', '_worker', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ')', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', 'self', '.', '_errors', '.', 'append', '(', 'e', ')', 'return', 'False', 'finally', ':', 'self', '.', '_jobs', '.', 'task_done', '(', ')', 'with', 'self', '.', '_jobfinished', ':', 'self', '.', '_jobfinished', '.', 'notify', '(', ')']
Thread entry point: does the job once, stored results, and dies.
['Thread', 'entry', 'point', ':', 'does', 'the', 'job', 'once', 'stored', 'results', 'and', 'dies', '.']
train
https://github.com/kolypto/py-asynctools/blob/04ff42d13b54d200d8cc88b3639937b63278e57c/asynctools/threading/Parallel.py#L36-L55
3,790
PyCQA/astroid
astroid/brain/brain_six.py
_indent
def _indent(text, prefix, predicate=None): """Adds 'prefix' to the beginning of selected lines in 'text'. If 'predicate' is provided, 'prefix' will only be added to the lines where 'predicate(line)' is True. If 'predicate' is not provided, it will default to adding 'prefix' to all non-empty lines that do not consist solely of whitespace characters. """ if predicate is None: predicate = lambda line: line.strip() def prefixed_lines(): for line in text.splitlines(True): yield prefix + line if predicate(line) else line return "".join(prefixed_lines())
python
def _indent(text, prefix, predicate=None): """Adds 'prefix' to the beginning of selected lines in 'text'. If 'predicate' is provided, 'prefix' will only be added to the lines where 'predicate(line)' is True. If 'predicate' is not provided, it will default to adding 'prefix' to all non-empty lines that do not consist solely of whitespace characters. """ if predicate is None: predicate = lambda line: line.strip() def prefixed_lines(): for line in text.splitlines(True): yield prefix + line if predicate(line) else line return "".join(prefixed_lines())
['def', '_indent', '(', 'text', ',', 'prefix', ',', 'predicate', '=', 'None', ')', ':', 'if', 'predicate', 'is', 'None', ':', 'predicate', '=', 'lambda', 'line', ':', 'line', '.', 'strip', '(', ')', 'def', 'prefixed_lines', '(', ')', ':', 'for', 'line', 'in', 'text', '.', 'splitlines', '(', 'True', ')', ':', 'yield', 'prefix', '+', 'line', 'if', 'predicate', '(', 'line', ')', 'else', 'line', 'return', '""', '.', 'join', '(', 'prefixed_lines', '(', ')', ')']
Adds 'prefix' to the beginning of selected lines in 'text'. If 'predicate' is provided, 'prefix' will only be added to the lines where 'predicate(line)' is True. If 'predicate' is not provided, it will default to adding 'prefix' to all non-empty lines that do not consist solely of whitespace characters.
['Adds', 'prefix', 'to', 'the', 'beginning', 'of', 'selected', 'lines', 'in', 'text', '.']
train
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_six.py#L26-L41
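This helper mirrors textwrap.indent from the Python 3.3+ standard library, which takes the same (text, prefix, predicate) arguments and applies the same default predicate. A quick check that the backport and the stdlib function agree on a small input.

.. code-block:: python

    import textwrap

    def _indent(text, prefix, predicate=None):
        # Prefix only the lines the predicate accepts (non-blank by default).
        if predicate is None:
            predicate = lambda line: line.strip()
        return "".join(prefix + line if predicate(line) else line
                       for line in text.splitlines(True))

    text = "first\n\n  second\n"
    assert _indent(text, "> ") == textwrap.indent(text, "> ")
    print(textwrap.indent(text, "> "))
    # > first
    #
    # >   second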
3,791
treycucco/bidon
bidon/util/convert.py
to_compressed_string
def to_compressed_string(val, max_length=0): """Converts val to a compressed string. A compressed string is one with no leading or trailing spaces. If val is None, or is blank (all spaces) None is returned. If max_length > 0 and the stripped val is greater than max_length, val[:max_length] is returned. """ if val is None or len(val) == 0: return None rval = " ".join(val.split()) if len(rval) == 0: return None if max_length == 0: return rval else: return rval[:max_length]
python
def to_compressed_string(val, max_length=0): """Converts val to a compressed string. A compressed string is one with no leading or trailing spaces. If val is None, or is blank (all spaces) None is returned. If max_length > 0 and the stripped val is greater than max_length, val[:max_length] is returned. """ if val is None or len(val) == 0: return None rval = " ".join(val.split()) if len(rval) == 0: return None if max_length == 0: return rval else: return rval[:max_length]
['def', 'to_compressed_string', '(', 'val', ',', 'max_length', '=', '0', ')', ':', 'if', 'val', 'is', 'None', 'or', 'len', '(', 'val', ')', '==', '0', ':', 'return', 'None', 'rval', '=', '" "', '.', 'join', '(', 'val', '.', 'split', '(', ')', ')', 'if', 'len', '(', 'rval', ')', '==', '0', ':', 'return', 'None', 'if', 'max_length', '==', '0', ':', 'return', 'rval', 'else', ':', 'return', 'rval', '[', ':', 'max_length', ']']
Converts val to a compressed string. A compressed string is one with no leading or trailing spaces. If val is None, or is blank (all spaces) None is returned. If max_length > 0 and the stripped val is greater than max_length, val[:max_length] is returned.
['Converts', 'val', 'to', 'a', 'compressed', 'string', '.']
train
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/convert.py#L55-L72
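Worth noting: " ".join(val.split()) also collapses internal runs of whitespace to single spaces, which the docstring does not spell out. A condensed restatement with a few illustrative calls (inputs invented):

def to_compressed_string(val, max_length=0):
    if val is None or len(val) == 0:
        return None
    rval = " ".join(val.split())      # strips ends and collapses internal whitespace runs
    if len(rval) == 0:
        return None
    return rval if max_length == 0 else rval[:max_length]

print(to_compressed_string("  hello   world  "))      # 'hello world'
print(to_compressed_string("   "))                    # None
print(to_compressed_string("abcdef", max_length=3))   # 'abc'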
3,792
MacHu-GWU/constant2-project
constant2/pkg/inspect_mate/getter.py
get_all_attributes
def get_all_attributes(klass_or_instance): """Get all attribute members (attribute, property style method). """ pairs = list() for attr, value in inspect.getmembers( klass_or_instance, lambda x: not inspect.isroutine(x)): if not (attr.startswith("__") or attr.endswith("__")): pairs.append((attr, value)) return pairs
python
def get_all_attributes(klass_or_instance): """Get all attribute members (attribute, property style method). """ pairs = list() for attr, value in inspect.getmembers( klass_or_instance, lambda x: not inspect.isroutine(x)): if not (attr.startswith("__") or attr.endswith("__")): pairs.append((attr, value)) return pairs
['def', 'get_all_attributes', '(', 'klass_or_instance', ')', ':', 'pairs', '=', 'list', '(', ')', 'for', 'attr', ',', 'value', 'in', 'inspect', '.', 'getmembers', '(', 'klass_or_instance', ',', 'lambda', 'x', ':', 'not', 'inspect', '.', 'isroutine', '(', 'x', ')', ')', ':', 'if', 'not', '(', 'attr', '.', 'startswith', '(', '"__"', ')', 'or', 'attr', '.', 'endswith', '(', '"__"', ')', ')', ':', 'pairs', '.', 'append', '(', '(', 'attr', ',', 'value', ')', ')', 'return', 'pairs']
Get all attribute members (attribute, property style method).
['Get', 'all', 'attribute', 'members', '(', 'attribute', 'property', 'style', 'method', ')', '.']
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/pkg/inspect_mate/getter.py#L71-L79
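A condensed restatement of the same filtering, exercised against a toy class; the class and attribute names below are invented for the example.

import inspect

def get_all_attributes(obj):
    # Same filtering as above: keep non-routine members, drop dunder names.
    return [(attr, value)
            for attr, value in inspect.getmembers(obj, lambda x: not inspect.isroutine(x))
            if not (attr.startswith("__") or attr.endswith("__"))]

class _Config:
    retries = 3
    def __init__(self):
        self.timeout = 30
    @property
    def label(self):
        return "cfg"
    def connect(self):
        pass

print(get_all_attributes(_Config()))
# [('label', 'cfg'), ('retries', 3), ('timeout', 30)]  -- connect() is a routine, so it is dropped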
3,793
yougov/mongo-connector
mongo_connector/namespace_config.py
NamespaceConfig.unmap_namespace
def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None
python
def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None
['def', 'unmap_namespace', '(', 'self', ',', 'plain_target_ns', ')', ':', '# Return the same namespace if there are no included namespaces.', 'if', 'not', 'self', '.', '_regex_map', 'and', 'not', 'self', '.', '_plain', ':', 'return', 'plain_target_ns', 'src_name_set', '=', 'self', '.', '_reverse_plain', '.', 'get', '(', 'plain_target_ns', ')', 'if', 'src_name_set', ':', '# Return the first (and only) item in the set', 'for', 'src_name', 'in', 'src_name_set', ':', 'return', 'src_name', '# The target namespace could also exist in the wildcard namespaces', 'for', '_', ',', 'namespace', 'in', 'self', '.', '_regex_map', ':', 'original_name', '=', 'match_replace_regex', '(', 'namespace_to_regex', '(', 'namespace', '.', 'dest_name', ')', ',', 'plain_target_ns', ',', 'namespace', '.', 'source_name', ',', ')', 'if', 'original_name', ':', 'return', 'original_name', 'return', 'None']
Given a plain target namespace, return the corresponding source namespace.
['Given', 'a', 'plain', 'target', 'namespace', 'return', 'the', 'corresponding', 'source', 'namespace', '.']
train
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L261-L283
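A rough standalone illustration of the wildcard reverse lookup used in the fallback loop, assuming a rule that maps source "*.coll" to target "archive_*.coll". The helpers match_replace_regex and namespace_to_regex are internal to mongo-connector, so this sketch re-implements just enough of them; it is not the library's actual code.

import re

def namespace_to_regex(ns):
    # Turn a wildcard namespace such as "archive_*.coll" into an anchored regex.
    return re.compile("^" + re.escape(ns).replace(r"\*", "(.*)") + "$")

def match_replace_regex(regex, value, template):
    # If value matches, substitute the captured wildcard back into the template.
    m = regex.match(value)
    return template.replace("*", m.group(1)) if m else None

print(match_replace_regex(namespace_to_regex("archive_*.coll"),
                          "archive_users.coll", "*.coll"))   # -> 'users.coll'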
3,794
jealous/stockstats
stockstats.py
StockDataFrame._get_mdm
def _get_mdm(cls, df, windows): """ -DM, negative directional moving accumulation If window is not 1, return the SMA of -DM. :param df: data :param windows: range :return: """ window = cls.get_only_one_positive_int(windows) column_name = 'mdm_{}'.format(window) um, dm = df['um'], df['dm'] df['mdm'] = np.where(dm > um, dm, 0) if window > 1: mdm = df['mdm_{}_ema'.format(window)] else: mdm = df['mdm'] df[column_name] = mdm
python
def _get_mdm(cls, df, windows): """ -DM, negative directional moving accumulation If window is not 1, return the SMA of -DM. :param df: data :param windows: range :return: """ window = cls.get_only_one_positive_int(windows) column_name = 'mdm_{}'.format(window) um, dm = df['um'], df['dm'] df['mdm'] = np.where(dm > um, dm, 0) if window > 1: mdm = df['mdm_{}_ema'.format(window)] else: mdm = df['mdm'] df[column_name] = mdm
['def', '_get_mdm', '(', 'cls', ',', 'df', ',', 'windows', ')', ':', 'window', '=', 'cls', '.', 'get_only_one_positive_int', '(', 'windows', ')', 'column_name', '=', "'mdm_{}'", '.', 'format', '(', 'window', ')', 'um', ',', 'dm', '=', 'df', '[', "'um'", ']', ',', 'df', '[', "'dm'", ']', 'df', '[', "'mdm'", ']', '=', 'np', '.', 'where', '(', 'dm', '>', 'um', ',', 'dm', ',', '0', ')', 'if', 'window', '>', '1', ':', 'mdm', '=', 'df', '[', "'mdm_{}_ema'", '.', 'format', '(', 'window', ')', ']', 'else', ':', 'mdm', '=', 'df', '[', "'mdm'", ']', 'df', '[', 'column_name', ']', '=', 'mdm']
-DM, negative directional moving accumulation If window is not 1, return the SMA of -DM. :param df: data :param windows: range :return:
['-', 'DM', 'negative', 'directional', 'moving', 'accumulation', 'If', 'window', 'is', 'not', '1', 'return', 'the', 'SMA', 'of', '-', 'DM', '.', ':', 'param', 'df', ':', 'data', ':', 'param', 'windows', ':', 'range', ':', 'return', ':']
train
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L515-L531
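Note that the docstring above says "SMA of -DM" while the code reads an '_ema' column, so the smoothing is exponential in practice. The selection step on its own can be illustrated with invented up-move/down-move values:

import numpy as np
import pandas as pd

# 'um' and 'dm' are the up-move and down-move series stockstats derives
# from highs and lows; the numbers here are made up for the example.
df = pd.DataFrame({'um': [0.5, 0.0, 1.2, 0.0],
                   'dm': [0.0, 0.7, 0.3, 0.9]})
# -DM keeps the down-move only where it exceeds the up-move, else 0.
df['mdm'] = np.where(df['dm'] > df['um'], df['dm'], 0)
print(df['mdm'].tolist())   # [0.0, 0.7, 0.0, 0.9]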
3,795
epfl-lts2/pygsp
pygsp/reduction.py
interpolate
def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs): r"""Interpolate a graph signal. Parameters ---------- G : Graph f_subsampled : ndarray A graph signal on the graph G. keep_inds : ndarray List of indices on which the signal is sampled. order : int Degree of the Chebyshev approximation (default = 100). reg_eps : float The regularized graph Laplacian is $\bar{L}=L+\epsilon I$. A smaller epsilon may lead to better regularization, but will also require a higher order Chebyshev approximation. Returns ------- f_interpolated : ndarray Interpolated graph signal on the full vertex set of G. References ---------- See :cite:`pesenson2009variational` """ L_reg = G.L + reg_eps * sparse.eye(G.N) K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds)) green_kernel = getattr(G.mr, 'green_kernel', filters.Filter(G, lambda x: 1. / (reg_eps + x))) alpha = K_reg.dot(f_subsampled) try: Nv = np.shape(f_subsampled)[1] f_interpolated = np.zeros((G.N, Nv)) except IndexError: f_interpolated = np.zeros((G.N)) f_interpolated[keep_inds] = alpha return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
python
def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs): r"""Interpolate a graph signal. Parameters ---------- G : Graph f_subsampled : ndarray A graph signal on the graph G. keep_inds : ndarray List of indices on which the signal is sampled. order : int Degree of the Chebyshev approximation (default = 100). reg_eps : float The regularized graph Laplacian is $\bar{L}=L+\epsilon I$. A smaller epsilon may lead to better regularization, but will also require a higher order Chebyshev approximation. Returns ------- f_interpolated : ndarray Interpolated graph signal on the full vertex set of G. References ---------- See :cite:`pesenson2009variational` """ L_reg = G.L + reg_eps * sparse.eye(G.N) K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds)) green_kernel = getattr(G.mr, 'green_kernel', filters.Filter(G, lambda x: 1. / (reg_eps + x))) alpha = K_reg.dot(f_subsampled) try: Nv = np.shape(f_subsampled)[1] f_interpolated = np.zeros((G.N, Nv)) except IndexError: f_interpolated = np.zeros((G.N)) f_interpolated[keep_inds] = alpha return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
['def', 'interpolate', '(', 'G', ',', 'f_subsampled', ',', 'keep_inds', ',', 'order', '=', '100', ',', 'reg_eps', '=', '0.005', ',', '*', '*', 'kwargs', ')', ':', 'L_reg', '=', 'G', '.', 'L', '+', 'reg_eps', '*', 'sparse', '.', 'eye', '(', 'G', '.', 'N', ')', 'K_reg', '=', 'getattr', '(', 'G', '.', 'mr', ',', "'K_reg'", ',', 'kron_reduction', '(', 'L_reg', ',', 'keep_inds', ')', ')', 'green_kernel', '=', 'getattr', '(', 'G', '.', 'mr', ',', "'green_kernel'", ',', 'filters', '.', 'Filter', '(', 'G', ',', 'lambda', 'x', ':', '1.', '/', '(', 'reg_eps', '+', 'x', ')', ')', ')', 'alpha', '=', 'K_reg', '.', 'dot', '(', 'f_subsampled', ')', 'try', ':', 'Nv', '=', 'np', '.', 'shape', '(', 'f_subsampled', ')', '[', '1', ']', 'f_interpolated', '=', 'np', '.', 'zeros', '(', '(', 'G', '.', 'N', ',', 'Nv', ')', ')', 'except', 'IndexError', ':', 'f_interpolated', '=', 'np', '.', 'zeros', '(', '(', 'G', '.', 'N', ')', ')', 'f_interpolated', '[', 'keep_inds', ']', '=', 'alpha', 'return', '_analysis', '(', 'green_kernel', ',', 'f_interpolated', ',', 'order', '=', 'order', ',', '*', '*', 'kwargs', ')']
r"""Interpolate a graph signal. Parameters ---------- G : Graph f_subsampled : ndarray A graph signal on the graph G. keep_inds : ndarray List of indices on which the signal is sampled. order : int Degree of the Chebyshev approximation (default = 100). reg_eps : float The regularized graph Laplacian is $\bar{L}=L+\epsilon I$. A smaller epsilon may lead to better regularization, but will also require a higher order Chebyshev approximation. Returns ------- f_interpolated : ndarray Interpolated graph signal on the full vertex set of G. References ---------- See :cite:`pesenson2009variational`
['r', 'Interpolate', 'a', 'graph', 'signal', '.']
train
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/reduction.py#L145-L187
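The core of the routine is: regularize the Laplacian, Kron-reduce it onto the kept vertices, apply the reduced operator to the subsampled signal, scatter the result back to the full vertex set, and smooth with the Green's kernel 1/(eps + x). A minimal dense NumPy version of the first steps (no Chebyshev approximation, toy path graph) might look like this; it is a sketch of the linear algebra under those assumptions, not the pygsp API.

import numpy as np

def kron_reduction_dense(L, keep):
    # Schur complement of L onto the kept vertex set.
    comp = np.setdiff1d(np.arange(L.shape[0]), keep)
    L_kk = L[np.ix_(keep, keep)]
    L_kc = L[np.ix_(keep, comp)]
    L_ck = L[np.ix_(comp, keep)]
    L_cc = L[np.ix_(comp, comp)]
    return L_kk - L_kc @ np.linalg.solve(L_cc, L_ck)

# Path graph on 4 vertices, keep vertices 0 and 2 (all values illustrative).
L = np.array([[ 1, -1,  0,  0],
              [-1,  2, -1,  0],
              [ 0, -1,  2, -1],
              [ 0,  0, -1,  1]], dtype=float)
eps = 0.005
L_reg = L + eps * np.eye(4)
keep = np.array([0, 2])
K_reg = kron_reduction_dense(L_reg, keep)
alpha = K_reg @ np.array([1.0, 0.0])   # subsampled signal on the kept vertices
f_full = np.zeros(4)
f_full[keep] = alpha                   # scatter; Green's-kernel filtering would follow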
3,796
saltstack/salt
salt/states/npm.py
cache_cleaned
def cache_cleaned(name=None, user=None, force=False): ''' Ensure that the given package is not cached. If no package is specified, this ensures the entire cache is cleared. name The name of the package to remove from the cache, or None for all packages user The user to run NPM with force Force cleaning of cache. Required for npm@5 and greater .. versionadded:: 2016.11.6 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} specific_pkg = None try: cached_pkgs = __salt__['npm.cache_list'](path=name, runas=user) except (CommandExecutionError, CommandNotFoundError) as err: ret['result'] = False ret['comment'] = 'Error looking up cached {0}: {1}'.format( name or 'packages', err) return ret if name: all_cached_pkgs = __salt__['npm.cache_list'](path=None, runas=user) # The first package is always the cache path cache_root_path = all_cached_pkgs[0] specific_pkg = '{0}/{1}/'.format(cache_root_path, name) if specific_pkg not in cached_pkgs: ret['result'] = True ret['comment'] = 'Package {0} is not in the cache'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Cached {0} set to be removed'.format(name or 'packages') return ret if __salt__['npm.cache_clean'](path=name, runas=user): ret['result'] = True ret['changes'][name or 'cache'] = 'Removed' ret['comment'] = 'Cached {0} successfully removed'.format( name or 'packages' ) else: ret['result'] = False ret['comment'] = 'Error cleaning cached {0}'.format(name or 'packages') return ret
python
def cache_cleaned(name=None, user=None, force=False): ''' Ensure that the given package is not cached. If no package is specified, this ensures the entire cache is cleared. name The name of the package to remove from the cache, or None for all packages user The user to run NPM with force Force cleaning of cache. Required for npm@5 and greater .. versionadded:: 2016.11.6 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} specific_pkg = None try: cached_pkgs = __salt__['npm.cache_list'](path=name, runas=user) except (CommandExecutionError, CommandNotFoundError) as err: ret['result'] = False ret['comment'] = 'Error looking up cached {0}: {1}'.format( name or 'packages', err) return ret if name: all_cached_pkgs = __salt__['npm.cache_list'](path=None, runas=user) # The first package is always the cache path cache_root_path = all_cached_pkgs[0] specific_pkg = '{0}/{1}/'.format(cache_root_path, name) if specific_pkg not in cached_pkgs: ret['result'] = True ret['comment'] = 'Package {0} is not in the cache'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Cached {0} set to be removed'.format(name or 'packages') return ret if __salt__['npm.cache_clean'](path=name, runas=user): ret['result'] = True ret['changes'][name or 'cache'] = 'Removed' ret['comment'] = 'Cached {0} successfully removed'.format( name or 'packages' ) else: ret['result'] = False ret['comment'] = 'Error cleaning cached {0}'.format(name or 'packages') return ret
['def', 'cache_cleaned', '(', 'name', '=', 'None', ',', 'user', '=', 'None', ',', 'force', '=', 'False', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'specific_pkg', '=', 'None', 'try', ':', 'cached_pkgs', '=', '__salt__', '[', "'npm.cache_list'", ']', '(', 'path', '=', 'name', ',', 'runas', '=', 'user', ')', 'except', '(', 'CommandExecutionError', ',', 'CommandNotFoundError', ')', 'as', 'err', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'Error looking up cached {0}: {1}'", '.', 'format', '(', 'name', 'or', "'packages'", ',', 'err', ')', 'return', 'ret', 'if', 'name', ':', 'all_cached_pkgs', '=', '__salt__', '[', "'npm.cache_list'", ']', '(', 'path', '=', 'None', ',', 'runas', '=', 'user', ')', '# The first package is always the cache path', 'cache_root_path', '=', 'all_cached_pkgs', '[', '0', ']', 'specific_pkg', '=', "'{0}/{1}/'", '.', 'format', '(', 'cache_root_path', ',', 'name', ')', 'if', 'specific_pkg', 'not', 'in', 'cached_pkgs', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'Package {0} is not in the cache'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'None', 'ret', '[', "'comment'", ']', '=', "'Cached {0} set to be removed'", '.', 'format', '(', 'name', 'or', "'packages'", ')', 'return', 'ret', 'if', '__salt__', '[', "'npm.cache_clean'", ']', '(', 'path', '=', 'name', ',', 'runas', '=', 'user', ')', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'changes'", ']', '[', 'name', 'or', "'cache'", ']', '=', "'Removed'", 'ret', '[', "'comment'", ']', '=', "'Cached {0} successfully removed'", '.', 'format', '(', 'name', 'or', "'packages'", ')', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'Error cleaning cached {0}'", '.', 'format', '(', 'name', 'or', "'packages'", ')', 'return', 'ret']
Ensure that the given package is not cached. If no package is specified, this ensures the entire cache is cleared. name The name of the package to remove from the cache, or None for all packages user The user to run NPM with force Force cleaning of cache. Required for npm@5 and greater .. versionadded:: 2016.11.6
['Ensure', 'that', 'the', 'given', 'package', 'is', 'not', 'cached', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/npm.py#L322-L378
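As a rough illustration of the return-dictionary convention the state follows, these are the shapes produced by the test-mode and applied branches; the package name is invented and the values are reconstructed from the format strings above, not captured output.

ret_test = {'name': 'coffee-script', 'result': None, 'changes': {},
            'comment': 'Cached coffee-script set to be removed'}
ret_applied = {'name': 'coffee-script', 'result': True,
               'changes': {'coffee-script': 'Removed'},
               'comment': 'Cached coffee-script successfully removed'}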
3,797
ajdavis/mongo-mockup-db
mockupdb/__init__.py
_get_c_string
def _get_c_string(data, position): """Decode a BSON 'C' string to python unicode string.""" end = data.index(b"\x00", position) return _utf_8_decode(data[position:end], None, True)[0], end + 1
python
def _get_c_string(data, position): """Decode a BSON 'C' string to python unicode string.""" end = data.index(b"\x00", position) return _utf_8_decode(data[position:end], None, True)[0], end + 1
['def', '_get_c_string', '(', 'data', ',', 'position', ')', ':', 'end', '=', 'data', '.', 'index', '(', 'b"\\x00"', ',', 'position', ')', 'return', '_utf_8_decode', '(', 'data', '[', 'position', ':', 'end', ']', ',', 'None', ',', 'True', ')', '[', '0', ']', ',', 'end', '+', '1']
Decode a BSON 'C' string to python unicode string.
['Decode', 'a', 'BSON', 'C', 'string', 'to', 'python', 'unicode', 'string', '.']
train
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L286-L289
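A self-contained version of the same decoding step, with the codecs import the snippet relies on spelled out explicitly (the module-level import in mockupdb may differ, and the sample bytes are invented):

from codecs import utf_8_decode as _utf_8_decode

def _get_c_string(data, position):
    # Scan for the NUL terminator, decode the bytes before it as UTF-8,
    # and return the decoded text plus the offset just past the terminator.
    end = data.index(b"\x00", position)
    return _utf_8_decode(data[position:end], None, True)[0], end + 1

print(_get_c_string(b"ismaster\x00rest", 0))   # -> ('ismaster', 9)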
3,798
andrea-cuttone/geoplotlib
geoplotlib/utils.py
DataAccessObject.rename
def rename(self, mapping): """ Rename fields :param mapping: a dict in the format {'oldkey1': 'newkey1', ...} """ for old_key, new_key in mapping: self.dict[new_key] = self.dict[old_key] del self.dict[old_key]
python
def rename(self, mapping): """ Rename fields :param mapping: a dict in the format {'oldkey1': 'newkey1', ...} """ for old_key, new_key in mapping: self.dict[new_key] = self.dict[old_key] del self.dict[old_key]
['def', 'rename', '(', 'self', ',', 'mapping', ')', ':', 'for', 'old_key', ',', 'new_key', 'in', 'mapping', ':', 'self', '.', 'dict', '[', 'new_key', ']', '=', 'self', '.', 'dict', '[', 'old_key', ']', 'del', 'self', '.', 'dict', '[', 'old_key', ']']
Rename fields :param mapping: a dict in the format {'oldkey1': 'newkey1', ...}
['Rename', 'fields']
train
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/utils.py#L74-L82
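One reading worth flagging: iterating a dict directly yields only its keys, so the loop above behaves as documented only if mapping is supplied as an iterable of (old, new) pairs (for example dict.items() or a list of tuples). A standalone sketch of the intended behaviour, with invented field names:

data = {'lat_old': [52.3, 52.4], 'lon_old': [4.8, 4.9]}
mapping = {'lat_old': 'lat', 'lon_old': 'lon'}
for old_key, new_key in mapping.items():   # .items() yields the (old, new) pairs
    data[new_key] = data.pop(old_key)
print(sorted(data))                        # ['lat', 'lon']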
3,799
Amsterdam/authorization_django
authorization_django/config.py
_rectify
def _rectify(settings): """ Rectify (and validate) the given settings using the functions in :data:`_settings_rectifiers`. """ for key, rectifier in _settings_rectifiers.items(): try: new_value = rectifier['func'](settings[key]) if new_value is False: raise AuthzConfigurationError( 'Error validating {}->{}: {}'.format( _settings_key, key, rectifier['errmsg'])) settings[key] = new_value except: raise AuthzConfigurationError( 'Error validating {}->{}: {}'.format( _settings_key, key, rectifier['errmsg']))
python
def _rectify(settings): """ Rectify (and validate) the given settings using the functions in :data:`_settings_rectifiers`. """ for key, rectifier in _settings_rectifiers.items(): try: new_value = rectifier['func'](settings[key]) if new_value is False: raise AuthzConfigurationError( 'Error validating {}->{}: {}'.format( _settings_key, key, rectifier['errmsg'])) settings[key] = new_value except: raise AuthzConfigurationError( 'Error validating {}->{}: {}'.format( _settings_key, key, rectifier['errmsg']))
['def', '_rectify', '(', 'settings', ')', ':', 'for', 'key', ',', 'rectifier', 'in', '_settings_rectifiers', '.', 'items', '(', ')', ':', 'try', ':', 'new_value', '=', 'rectifier', '[', "'func'", ']', '(', 'settings', '[', 'key', ']', ')', 'if', 'new_value', 'is', 'False', ':', 'raise', 'AuthzConfigurationError', '(', "'Error validating {}->{}: {}'", '.', 'format', '(', '_settings_key', ',', 'key', ',', 'rectifier', '[', "'errmsg'", ']', ')', ')', 'settings', '[', 'key', ']', '=', 'new_value', 'except', ':', 'raise', 'AuthzConfigurationError', '(', "'Error validating {}->{}: {}'", '.', 'format', '(', '_settings_key', ',', 'key', ',', 'rectifier', '[', "'errmsg'", ']', ')', ')']
Rectify (and validate) the given settings using the functions in :data:`_settings_rectifiers`.
['Rectify', '(', 'and', 'validate', ')', 'the', 'given', 'settings', 'using', 'the', 'functions', 'in', ':', 'data', ':', '_settings_rectifiers', '.']
train
https://github.com/Amsterdam/authorization_django/blob/71da52b38a7f5a16a2bde8f8ea97b3c11ccb1be1/authorization_django/config.py#L60-L75
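A compact illustration of the rectifier-table pattern the function walks over. The setting key, rectifier function, and error class below are stand-ins, not the package's real _settings_rectifiers, and this variant narrows the bare except and keeps the explicit "invalid value" raise outside the try so the two failure modes stay distinct.

class AuthzConfigurationError(Exception):
    """Stand-in for the package's configuration error."""

_settings_rectifiers = {
    # Each entry validates/normalizes one setting; returning False means "invalid".
    'MIN_SCOPE': {'func': lambda v: tuple(v) if v else False,
                  'errmsg': 'must be a non-empty iterable of scopes'},
}

def _rectify(settings):
    for key, rectifier in _settings_rectifiers.items():
        try:
            new_value = rectifier['func'](settings[key])
        except Exception as exc:
            raise AuthzConfigurationError(
                'Error validating {}: {}'.format(key, rectifier['errmsg'])) from exc
        if new_value is False:
            raise AuthzConfigurationError(
                'Error validating {}: {}'.format(key, rectifier['errmsg']))
        settings[key] = new_value

settings = {'MIN_SCOPE': ['read']}
_rectify(settings)
print(settings)   # {'MIN_SCOPE': ('read',)}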