code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def symlink_exists(self, symlink): """Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet. """ if not isinstance(symlink, basestring): raise TypeError("symlink can only be an instance of type basestring") exists = self._call("symlinkExists", in_p=[symlink]) return exists
def function[symlink_exists, parameter[self, symlink]]: constant[Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet. ] if <ast.UnaryOp object at 0x7da20e9b3b80> begin[:] <ast.Raise object at 0x7da20e9b3310> variable[exists] assign[=] call[name[self]._call, parameter[constant[symlinkExists]]] return[name[exists]]
keyword[def] identifier[symlink_exists] ( identifier[self] , identifier[symlink] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[symlink] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[exists] = identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[symlink] ]) keyword[return] identifier[exists]
def symlink_exists(self, symlink): """Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet. """ if not isinstance(symlink, basestring): raise TypeError('symlink can only be an instance of type basestring') # depends on [control=['if'], data=[]] exists = self._call('symlinkExists', in_p=[symlink]) return exists
def op( name, labels, predictions, num_thresholds=None, weights=None, display_name=None, description=None, collections=None): """Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or more commonly used for masking values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if num_thresholds is None: num_thresholds = _DEFAULT_NUM_THRESHOLDS if weights is None: weights = 1.0 dtype = predictions.dtype with tf.name_scope(name, values=[labels, predictions, weights]): tf.assert_type(labels, tf.bool) # We cast to float to ensure we have 0.0 or 1.0. f_labels = tf.cast(labels, dtype) # Ensure predictions are all in range [0.0, 1.0]. predictions = tf.minimum(1.0, tf.maximum(0.0, predictions)) # Get weighted true/false labels. true_labels = f_labels * weights false_labels = (1.0 - f_labels) * weights # Before we begin, flatten predictions. predictions = tf.reshape(predictions, [-1]) # Shape the labels so they are broadcast-able for later multiplication. true_labels = tf.reshape(true_labels, [-1, 1]) false_labels = tf.reshape(false_labels, [-1, 1]) # To compute TP/FP/TN/FN, we are measuring a binary classifier # C(t) = (predictions >= t) # at each threshold 't'. So we have # TP(t) = sum( C(t) * true_labels ) # FP(t) = sum( C(t) * false_labels ) # # But, computing C(t) requires computation for each t. To make it fast, # observe that C(t) is a cumulative integral, and so if we have # thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1} # where n = num_thresholds, and if we can compute the bucket function # B(i) = Sum( (predictions == t), t_i <= t < t{i+1} ) # then we get # C(t_i) = sum( B(j), j >= i ) # which is the reversed cumulative sum in tf.cumsum(). # # We can compute B(i) efficiently by taking advantage of the fact that # our thresholds are evenly distributed, in that # width = 1.0 / (num_thresholds - 1) # thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0] # Given a prediction value p, we can map it to its bucket by # bucket_index(p) = floor( p * (num_thresholds - 1) ) # so we can use tf.scatter_add() to update the buckets in one pass. # Compute the bucket indices for each prediction value. bucket_indices = tf.cast( tf.floor(predictions * (num_thresholds - 1)), tf.int32) # Bucket predictions. tp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels, axis=0) fp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels, axis=0) # Set up the cumulative sums to compute the actual metrics. tp = tf.cumsum(tp_buckets, reverse=True, name='tp') fp = tf.cumsum(fp_buckets, reverse=True, name='fp') # fn = sum(true_labels) - tp # = sum(tp_buckets) - tp # = tp[0] - tp # Similarly, # tn = fp[0] - fp tn = fp[0] - fp fn = tp[0] - tp precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn) return _create_tensor_summary( name, tp, fp, tn, fn, precision, recall, num_thresholds, display_name, description, collections)
def function[op, parameter[name, labels, predictions, num_thresholds, weights, display_name, description, collections]]: constant[Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or more commonly used for masking values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall. ] import module[tensorflow.compat.v1] as alias[tf] if compare[name[num_thresholds] is constant[None]] begin[:] variable[num_thresholds] assign[=] name[_DEFAULT_NUM_THRESHOLDS] if compare[name[weights] is constant[None]] begin[:] variable[weights] assign[=] constant[1.0] variable[dtype] assign[=] name[predictions].dtype with call[name[tf].name_scope, parameter[name[name]]] begin[:] call[name[tf].assert_type, parameter[name[labels], name[tf].bool]] variable[f_labels] assign[=] call[name[tf].cast, parameter[name[labels], name[dtype]]] variable[predictions] assign[=] call[name[tf].minimum, parameter[constant[1.0], call[name[tf].maximum, parameter[constant[0.0], name[predictions]]]]] variable[true_labels] assign[=] binary_operation[name[f_labels] * name[weights]] variable[false_labels] assign[=] binary_operation[binary_operation[constant[1.0] - name[f_labels]] * name[weights]] variable[predictions] assign[=] call[name[tf].reshape, parameter[name[predictions], list[[<ast.UnaryOp object at 0x7da18f723b80>]]]] variable[true_labels] assign[=] call[name[tf].reshape, parameter[name[true_labels], list[[<ast.UnaryOp object at 0x7da18f722830>, <ast.Constant object at 0x7da18f720100>]]]] variable[false_labels] assign[=] call[name[tf].reshape, parameter[name[false_labels], list[[<ast.UnaryOp object at 0x7da18f723340>, <ast.Constant object at 0x7da18f720190>]]]] variable[bucket_indices] assign[=] call[name[tf].cast, parameter[call[name[tf].floor, parameter[binary_operation[name[predictions] * binary_operation[name[num_thresholds] - constant[1]]]]], name[tf].int32]] variable[tp_buckets] assign[=] call[name[tf].reduce_sum, parameter[]] variable[fp_buckets] assign[=] call[name[tf].reduce_sum, parameter[]] variable[tp] assign[=] call[name[tf].cumsum, parameter[name[tp_buckets]]] variable[fp] assign[=] call[name[tf].cumsum, parameter[name[fp_buckets]]] variable[tn] assign[=] binary_operation[call[name[fp]][constant[0]] - name[fp]] variable[fn] assign[=] binary_operation[call[name[tp]][constant[0]] - name[tp]] variable[precision] assign[=] binary_operation[name[tp] / call[name[tf].maximum, parameter[name[_MINIMUM_COUNT], binary_operation[name[tp] + name[fp]]]]] variable[recall] assign[=] binary_operation[name[tp] / call[name[tf].maximum, parameter[name[_MINIMUM_COUNT], binary_operation[name[tp] + name[fn]]]]] return[call[name[_create_tensor_summary], parameter[name[name], name[tp], name[fp], name[tn], name[fn], name[precision], name[recall], name[num_thresholds], name[display_name], name[description], name[collections]]]]
keyword[def] identifier[op] ( identifier[name] , identifier[labels] , identifier[predictions] , identifier[num_thresholds] = keyword[None] , identifier[weights] = keyword[None] , identifier[display_name] = keyword[None] , identifier[description] = keyword[None] , identifier[collections] = keyword[None] ): literal[string] keyword[import] identifier[tensorflow] . identifier[compat] . identifier[v1] keyword[as] identifier[tf] keyword[if] identifier[num_thresholds] keyword[is] keyword[None] : identifier[num_thresholds] = identifier[_DEFAULT_NUM_THRESHOLDS] keyword[if] identifier[weights] keyword[is] keyword[None] : identifier[weights] = literal[int] identifier[dtype] = identifier[predictions] . identifier[dtype] keyword[with] identifier[tf] . identifier[name_scope] ( identifier[name] , identifier[values] =[ identifier[labels] , identifier[predictions] , identifier[weights] ]): identifier[tf] . identifier[assert_type] ( identifier[labels] , identifier[tf] . identifier[bool] ) identifier[f_labels] = identifier[tf] . identifier[cast] ( identifier[labels] , identifier[dtype] ) identifier[predictions] = identifier[tf] . identifier[minimum] ( literal[int] , identifier[tf] . identifier[maximum] ( literal[int] , identifier[predictions] )) identifier[true_labels] = identifier[f_labels] * identifier[weights] identifier[false_labels] =( literal[int] - identifier[f_labels] )* identifier[weights] identifier[predictions] = identifier[tf] . identifier[reshape] ( identifier[predictions] ,[- literal[int] ]) identifier[true_labels] = identifier[tf] . identifier[reshape] ( identifier[true_labels] ,[- literal[int] , literal[int] ]) identifier[false_labels] = identifier[tf] . identifier[reshape] ( identifier[false_labels] ,[- literal[int] , literal[int] ]) identifier[bucket_indices] = identifier[tf] . identifier[cast] ( identifier[tf] . identifier[floor] ( identifier[predictions] *( identifier[num_thresholds] - literal[int] )), identifier[tf] . identifier[int32] ) identifier[tp_buckets] = identifier[tf] . identifier[reduce_sum] ( identifier[input_tensor] = identifier[tf] . identifier[one_hot] ( identifier[bucket_indices] , identifier[depth] = identifier[num_thresholds] )* identifier[true_labels] , identifier[axis] = literal[int] ) identifier[fp_buckets] = identifier[tf] . identifier[reduce_sum] ( identifier[input_tensor] = identifier[tf] . identifier[one_hot] ( identifier[bucket_indices] , identifier[depth] = identifier[num_thresholds] )* identifier[false_labels] , identifier[axis] = literal[int] ) identifier[tp] = identifier[tf] . identifier[cumsum] ( identifier[tp_buckets] , identifier[reverse] = keyword[True] , identifier[name] = literal[string] ) identifier[fp] = identifier[tf] . identifier[cumsum] ( identifier[fp_buckets] , identifier[reverse] = keyword[True] , identifier[name] = literal[string] ) identifier[tn] = identifier[fp] [ literal[int] ]- identifier[fp] identifier[fn] = identifier[tp] [ literal[int] ]- identifier[tp] identifier[precision] = identifier[tp] / identifier[tf] . identifier[maximum] ( identifier[_MINIMUM_COUNT] , identifier[tp] + identifier[fp] ) identifier[recall] = identifier[tp] / identifier[tf] . identifier[maximum] ( identifier[_MINIMUM_COUNT] , identifier[tp] + identifier[fn] ) keyword[return] identifier[_create_tensor_summary] ( identifier[name] , identifier[tp] , identifier[fp] , identifier[tn] , identifier[fn] , identifier[precision] , identifier[recall] , identifier[num_thresholds] , identifier[display_name] , identifier[description] , identifier[collections] )
def op(name, labels, predictions, num_thresholds=None, weights=None, display_name=None, description=None, collections=None): """Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or more commonly used for masking values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if num_thresholds is None: num_thresholds = _DEFAULT_NUM_THRESHOLDS # depends on [control=['if'], data=['num_thresholds']] if weights is None: weights = 1.0 # depends on [control=['if'], data=['weights']] dtype = predictions.dtype with tf.name_scope(name, values=[labels, predictions, weights]): tf.assert_type(labels, tf.bool) # We cast to float to ensure we have 0.0 or 1.0. f_labels = tf.cast(labels, dtype) # Ensure predictions are all in range [0.0, 1.0]. predictions = tf.minimum(1.0, tf.maximum(0.0, predictions)) # Get weighted true/false labels. true_labels = f_labels * weights false_labels = (1.0 - f_labels) * weights # Before we begin, flatten predictions. predictions = tf.reshape(predictions, [-1]) # Shape the labels so they are broadcast-able for later multiplication. true_labels = tf.reshape(true_labels, [-1, 1]) false_labels = tf.reshape(false_labels, [-1, 1]) # To compute TP/FP/TN/FN, we are measuring a binary classifier # C(t) = (predictions >= t) # at each threshold 't'. So we have # TP(t) = sum( C(t) * true_labels ) # FP(t) = sum( C(t) * false_labels ) # # But, computing C(t) requires computation for each t. To make it fast, # observe that C(t) is a cumulative integral, and so if we have # thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1} # where n = num_thresholds, and if we can compute the bucket function # B(i) = Sum( (predictions == t), t_i <= t < t{i+1} ) # then we get # C(t_i) = sum( B(j), j >= i ) # which is the reversed cumulative sum in tf.cumsum(). # # We can compute B(i) efficiently by taking advantage of the fact that # our thresholds are evenly distributed, in that # width = 1.0 / (num_thresholds - 1) # thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0] # Given a prediction value p, we can map it to its bucket by # bucket_index(p) = floor( p * (num_thresholds - 1) ) # so we can use tf.scatter_add() to update the buckets in one pass. # Compute the bucket indices for each prediction value. bucket_indices = tf.cast(tf.floor(predictions * (num_thresholds - 1)), tf.int32) # Bucket predictions. tp_buckets = tf.reduce_sum(input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels, axis=0) fp_buckets = tf.reduce_sum(input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels, axis=0) # Set up the cumulative sums to compute the actual metrics. tp = tf.cumsum(tp_buckets, reverse=True, name='tp') fp = tf.cumsum(fp_buckets, reverse=True, name='fp') # fn = sum(true_labels) - tp # = sum(tp_buckets) - tp # = tp[0] - tp # Similarly, # tn = fp[0] - fp tn = fp[0] - fp fn = tp[0] - tp precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn) return _create_tensor_summary(name, tp, fp, tn, fn, precision, recall, num_thresholds, display_name, description, collections) # depends on [control=['with'], data=[]]
def add_dimension(dimension,**kwargs): """ Add the dimension defined into the object "dimension" to the DB If dimension["project_id"] is None it means that the dimension is global, otherwise is property of a project If the dimension exists emits an exception """ if numpy.isscalar(dimension): # If it is a scalar, converts to an Object dimension = {'name': dimension} new_dimension = Dimension() new_dimension.name = dimension["name"] if "description" in dimension and dimension["description"] is not None: new_dimension.description = dimension["description"] if "project_id" in dimension and dimension["project_id"] is not None: new_dimension.project_id = dimension["project_id"] # Save on DB db.DBSession.add(new_dimension) db.DBSession.flush() # Load all the record db_dimension = db.DBSession.query(Dimension).filter(Dimension.id==new_dimension.id).one() return JSONObject(db_dimension)
def function[add_dimension, parameter[dimension]]: constant[ Add the dimension defined into the object "dimension" to the DB If dimension["project_id"] is None it means that the dimension is global, otherwise is property of a project If the dimension exists emits an exception ] if call[name[numpy].isscalar, parameter[name[dimension]]] begin[:] variable[dimension] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9c60>], [<ast.Name object at 0x7da18bcca290>]] variable[new_dimension] assign[=] call[name[Dimension], parameter[]] name[new_dimension].name assign[=] call[name[dimension]][constant[name]] if <ast.BoolOp object at 0x7da18bccb7c0> begin[:] name[new_dimension].description assign[=] call[name[dimension]][constant[description]] if <ast.BoolOp object at 0x7da18bccbe80> begin[:] name[new_dimension].project_id assign[=] call[name[dimension]][constant[project_id]] call[name[db].DBSession.add, parameter[name[new_dimension]]] call[name[db].DBSession.flush, parameter[]] variable[db_dimension] assign[=] call[call[call[name[db].DBSession.query, parameter[name[Dimension]]].filter, parameter[compare[name[Dimension].id equal[==] name[new_dimension].id]]].one, parameter[]] return[call[name[JSONObject], parameter[name[db_dimension]]]]
keyword[def] identifier[add_dimension] ( identifier[dimension] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[numpy] . identifier[isscalar] ( identifier[dimension] ): identifier[dimension] ={ literal[string] : identifier[dimension] } identifier[new_dimension] = identifier[Dimension] () identifier[new_dimension] . identifier[name] = identifier[dimension] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[dimension] keyword[and] identifier[dimension] [ literal[string] ] keyword[is] keyword[not] keyword[None] : identifier[new_dimension] . identifier[description] = identifier[dimension] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[dimension] keyword[and] identifier[dimension] [ literal[string] ] keyword[is] keyword[not] keyword[None] : identifier[new_dimension] . identifier[project_id] = identifier[dimension] [ literal[string] ] identifier[db] . identifier[DBSession] . identifier[add] ( identifier[new_dimension] ) identifier[db] . identifier[DBSession] . identifier[flush] () identifier[db_dimension] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Dimension] ). identifier[filter] ( identifier[Dimension] . identifier[id] == identifier[new_dimension] . identifier[id] ). identifier[one] () keyword[return] identifier[JSONObject] ( identifier[db_dimension] )
def add_dimension(dimension, **kwargs): """ Add the dimension defined into the object "dimension" to the DB If dimension["project_id"] is None it means that the dimension is global, otherwise is property of a project If the dimension exists emits an exception """ if numpy.isscalar(dimension): # If it is a scalar, converts to an Object dimension = {'name': dimension} # depends on [control=['if'], data=[]] new_dimension = Dimension() new_dimension.name = dimension['name'] if 'description' in dimension and dimension['description'] is not None: new_dimension.description = dimension['description'] # depends on [control=['if'], data=[]] if 'project_id' in dimension and dimension['project_id'] is not None: new_dimension.project_id = dimension['project_id'] # depends on [control=['if'], data=[]] # Save on DB db.DBSession.add(new_dimension) db.DBSession.flush() # Load all the record db_dimension = db.DBSession.query(Dimension).filter(Dimension.id == new_dimension.id).one() return JSONObject(db_dimension)
def put_integration(restApiId=None, resourceId=None, httpMethod=None, type=None, integrationHttpMethod=None, uri=None, credentials=None, requestParameters=None, requestTemplates=None, passthroughBehavior=None, cacheNamespace=None, cacheKeyParameters=None, contentHandling=None): """ Represents a put integration. See also: AWS API Documentation :example: response = client.put_integration( restApiId='string', resourceId='string', httpMethod='string', type='HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', integrationHttpMethod='string', uri='string', credentials='string', requestParameters={ 'string': 'string' }, requestTemplates={ 'string': 'string' }, passthroughBehavior='string', cacheNamespace='string', cacheKeyParameters=[ 'string', ], contentHandling='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' ) :type restApiId: string :param restApiId: [REQUIRED] Specifies a put integration request's API identifier. :type resourceId: string :param resourceId: [REQUIRED] Specifies a put integration request's resource ID. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies a put integration request's HTTP method. :type type: string :param type: [REQUIRED] Specifies a put integration input's type. :type integrationHttpMethod: string :param integrationHttpMethod: Specifies a put integration HTTP method. When the integration type is HTTP or AWS, this field is required. :type uri: string :param uri: Specifies a put integration input's Uniform Resource Identifier (URI). When the integration type is HTTP or AWS, this field is required. For integration with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway:region:lambda:path/2015-03-31/functions/functionArn/invocations' format. :type credentials: string :param credentials: Specifies whether credentials are required for a put integration. :type requestParameters: dict :param requestParameters: A key-value map specifying request parameters that are passed from the method request to the back end. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the back end. The method request parameter value must match the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name must be a valid and unique method request parameter name. (string) -- (string) -- :type requestTemplates: dict :param requestTemplates: Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value. (string) -- (string) -- :type passthroughBehavior: string :param passthroughBehavior: Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the requestTemplates property on the Integration resource. There are three valid values: WHEN_NO_MATCH , WHEN_NO_TEMPLATES , and NEVER . WHEN_NO_MATCH passes the request body for unmapped content types through to the integration back end without transformation. NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response. WHEN_NO_TEMPLATES allows pass-through when the integration has NO content types mapped to templates. However if there is at least one content type defined, unmapped content types will be rejected with the same 415 response. :type cacheNamespace: string :param cacheNamespace: Specifies a put integration input's cache namespace. :type cacheKeyParameters: list :param cacheKeyParameters: Specifies a put integration input's cache key parameters. (string) -- :type contentHandling: string :param contentHandling: Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT , with the following behaviors: CONVERT_TO_BINARY : Converts a request payload from a Base64-encoded string to the corresponding binary blob. CONVERT_TO_TEXT : Converts a request payload from a binary blob to a Base64-encoded string. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. :rtype: dict :return: { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } :returns: (string) -- (string) -- """ pass
def function[put_integration, parameter[restApiId, resourceId, httpMethod, type, integrationHttpMethod, uri, credentials, requestParameters, requestTemplates, passthroughBehavior, cacheNamespace, cacheKeyParameters, contentHandling]]: constant[ Represents a put integration. See also: AWS API Documentation :example: response = client.put_integration( restApiId='string', resourceId='string', httpMethod='string', type='HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', integrationHttpMethod='string', uri='string', credentials='string', requestParameters={ 'string': 'string' }, requestTemplates={ 'string': 'string' }, passthroughBehavior='string', cacheNamespace='string', cacheKeyParameters=[ 'string', ], contentHandling='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' ) :type restApiId: string :param restApiId: [REQUIRED] Specifies a put integration request's API identifier. :type resourceId: string :param resourceId: [REQUIRED] Specifies a put integration request's resource ID. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies a put integration request's HTTP method. :type type: string :param type: [REQUIRED] Specifies a put integration input's type. :type integrationHttpMethod: string :param integrationHttpMethod: Specifies a put integration HTTP method. When the integration type is HTTP or AWS, this field is required. :type uri: string :param uri: Specifies a put integration input's Uniform Resource Identifier (URI). When the integration type is HTTP or AWS, this field is required. For integration with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway:region:lambda:path/2015-03-31/functions/functionArn/invocations' format. :type credentials: string :param credentials: Specifies whether credentials are required for a put integration. :type requestParameters: dict :param requestParameters: A key-value map specifying request parameters that are passed from the method request to the back end. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the back end. The method request parameter value must match the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name must be a valid and unique method request parameter name. (string) -- (string) -- :type requestTemplates: dict :param requestTemplates: Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value. (string) -- (string) -- :type passthroughBehavior: string :param passthroughBehavior: Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the requestTemplates property on the Integration resource. There are three valid values: WHEN_NO_MATCH , WHEN_NO_TEMPLATES , and NEVER . WHEN_NO_MATCH passes the request body for unmapped content types through to the integration back end without transformation. NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response. WHEN_NO_TEMPLATES allows pass-through when the integration has NO content types mapped to templates. However if there is at least one content type defined, unmapped content types will be rejected with the same 415 response. :type cacheNamespace: string :param cacheNamespace: Specifies a put integration input's cache namespace. :type cacheKeyParameters: list :param cacheKeyParameters: Specifies a put integration input's cache key parameters. (string) -- :type contentHandling: string :param contentHandling: Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT , with the following behaviors: CONVERT_TO_BINARY : Converts a request payload from a Base64-encoded string to the corresponding binary blob. CONVERT_TO_TEXT : Converts a request payload from a binary blob to a Base64-encoded string. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. :rtype: dict :return: { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } :returns: (string) -- (string) -- ] pass
keyword[def] identifier[put_integration] ( identifier[restApiId] = keyword[None] , identifier[resourceId] = keyword[None] , identifier[httpMethod] = keyword[None] , identifier[type] = keyword[None] , identifier[integrationHttpMethod] = keyword[None] , identifier[uri] = keyword[None] , identifier[credentials] = keyword[None] , identifier[requestParameters] = keyword[None] , identifier[requestTemplates] = keyword[None] , identifier[passthroughBehavior] = keyword[None] , identifier[cacheNamespace] = keyword[None] , identifier[cacheKeyParameters] = keyword[None] , identifier[contentHandling] = keyword[None] ): literal[string] keyword[pass]
def put_integration(restApiId=None, resourceId=None, httpMethod=None, type=None, integrationHttpMethod=None, uri=None, credentials=None, requestParameters=None, requestTemplates=None, passthroughBehavior=None, cacheNamespace=None, cacheKeyParameters=None, contentHandling=None): """ Represents a put integration. See also: AWS API Documentation :example: response = client.put_integration( restApiId='string', resourceId='string', httpMethod='string', type='HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', integrationHttpMethod='string', uri='string', credentials='string', requestParameters={ 'string': 'string' }, requestTemplates={ 'string': 'string' }, passthroughBehavior='string', cacheNamespace='string', cacheKeyParameters=[ 'string', ], contentHandling='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' ) :type restApiId: string :param restApiId: [REQUIRED] Specifies a put integration request's API identifier. :type resourceId: string :param resourceId: [REQUIRED] Specifies a put integration request's resource ID. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies a put integration request's HTTP method. :type type: string :param type: [REQUIRED] Specifies a put integration input's type. :type integrationHttpMethod: string :param integrationHttpMethod: Specifies a put integration HTTP method. When the integration type is HTTP or AWS, this field is required. :type uri: string :param uri: Specifies a put integration input's Uniform Resource Identifier (URI). When the integration type is HTTP or AWS, this field is required. For integration with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway:region:lambda:path/2015-03-31/functions/functionArn/invocations' format. :type credentials: string :param credentials: Specifies whether credentials are required for a put integration. :type requestParameters: dict :param requestParameters: A key-value map specifying request parameters that are passed from the method request to the back end. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the back end. The method request parameter value must match the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name must be a valid and unique method request parameter name. (string) -- (string) -- :type requestTemplates: dict :param requestTemplates: Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value. (string) -- (string) -- :type passthroughBehavior: string :param passthroughBehavior: Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the requestTemplates property on the Integration resource. There are three valid values: WHEN_NO_MATCH , WHEN_NO_TEMPLATES , and NEVER . WHEN_NO_MATCH passes the request body for unmapped content types through to the integration back end without transformation. NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response. WHEN_NO_TEMPLATES allows pass-through when the integration has NO content types mapped to templates. However if there is at least one content type defined, unmapped content types will be rejected with the same 415 response. :type cacheNamespace: string :param cacheNamespace: Specifies a put integration input's cache namespace. :type cacheKeyParameters: list :param cacheKeyParameters: Specifies a put integration input's cache key parameters. (string) -- :type contentHandling: string :param contentHandling: Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT , with the following behaviors: CONVERT_TO_BINARY : Converts a request payload from a Base64-encoded string to the corresponding binary blob. CONVERT_TO_TEXT : Converts a request payload from a binary blob to a Base64-encoded string. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. :rtype: dict :return: { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } :returns: (string) -- (string) -- """ pass
def check_shm(): """ Check /dev/shm right permissions :return: None """ import stat shm_path = '/dev/shm' if os.name == 'posix' and os.path.exists(shm_path): # We get the access rights, and we check them mode = stat.S_IMODE(os.lstat(shm_path)[stat.ST_MODE]) if not mode & stat.S_IWUSR or not mode & stat.S_IRUSR: logger.critical("The directory %s is not writable or readable." "Please make it read writable: %s", shm_path, shm_path) print("The directory %s is not writable or readable." "Please make it read writable: %s" % (shm_path, shm_path)) sys.exit(2)
def function[check_shm, parameter[]]: constant[ Check /dev/shm right permissions :return: None ] import module[stat] variable[shm_path] assign[=] constant[/dev/shm] if <ast.BoolOp object at 0x7da207f02650> begin[:] variable[mode] assign[=] call[name[stat].S_IMODE, parameter[call[call[name[os].lstat, parameter[name[shm_path]]]][name[stat].ST_MODE]]] if <ast.BoolOp object at 0x7da207f009a0> begin[:] call[name[logger].critical, parameter[constant[The directory %s is not writable or readable.Please make it read writable: %s], name[shm_path], name[shm_path]]] call[name[print], parameter[binary_operation[constant[The directory %s is not writable or readable.Please make it read writable: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18fe91c60>, <ast.Name object at 0x7da18fe926b0>]]]]] call[name[sys].exit, parameter[constant[2]]]
keyword[def] identifier[check_shm] (): literal[string] keyword[import] identifier[stat] identifier[shm_path] = literal[string] keyword[if] identifier[os] . identifier[name] == literal[string] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[shm_path] ): identifier[mode] = identifier[stat] . identifier[S_IMODE] ( identifier[os] . identifier[lstat] ( identifier[shm_path] )[ identifier[stat] . identifier[ST_MODE] ]) keyword[if] keyword[not] identifier[mode] & identifier[stat] . identifier[S_IWUSR] keyword[or] keyword[not] identifier[mode] & identifier[stat] . identifier[S_IRUSR] : identifier[logger] . identifier[critical] ( literal[string] literal[string] , identifier[shm_path] , identifier[shm_path] ) identifier[print] ( literal[string] literal[string] %( identifier[shm_path] , identifier[shm_path] )) identifier[sys] . identifier[exit] ( literal[int] )
def check_shm(): """ Check /dev/shm right permissions :return: None """ import stat shm_path = '/dev/shm' if os.name == 'posix' and os.path.exists(shm_path): # We get the access rights, and we check them mode = stat.S_IMODE(os.lstat(shm_path)[stat.ST_MODE]) if not mode & stat.S_IWUSR or not mode & stat.S_IRUSR: logger.critical('The directory %s is not writable or readable.Please make it read writable: %s', shm_path, shm_path) print('The directory %s is not writable or readable.Please make it read writable: %s' % (shm_path, shm_path)) sys.exit(2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def string(self) -> str: """Return str(self).""" start, end = self._span return self._lststr[0][start:end]
def function[string, parameter[self]]: constant[Return str(self).] <ast.Tuple object at 0x7da20c991990> assign[=] name[self]._span return[call[call[name[self]._lststr][constant[0]]][<ast.Slice object at 0x7da1b025d060>]]
keyword[def] identifier[string] ( identifier[self] )-> identifier[str] : literal[string] identifier[start] , identifier[end] = identifier[self] . identifier[_span] keyword[return] identifier[self] . identifier[_lststr] [ literal[int] ][ identifier[start] : identifier[end] ]
def string(self) -> str: """Return str(self).""" (start, end) = self._span return self._lststr[0][start:end]
def get_mask_selection(self, selection, out=None, fields=None): """Retrieve a selection of individual items, by providing a Boolean array of the same shape as the array against which the selection is being made, where True values indicate a selected item. Parameters ---------- selection : ndarray, bool A Boolean array of the same shape as the array against which the selection is being made. out : ndarray, optional If given, load the selected data directly into this array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. Returns ------- out : ndarray A NumPy array containing the data for the requested selection. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.array(np.arange(100).reshape(10, 10)) Retrieve items by specifying a maks:: >>> sel = np.zeros_like(z, dtype=bool) >>> sel[1, 1] = True >>> sel[4, 4] = True >>> z.get_mask_selection(sel) array([11, 44]) For convenience, the mask selection functionality is also available via the `vindex` property, e.g.:: >>> z.vindex[sel] array([11, 44]) Notes ----- Mask indexing is a form of vectorized or inner indexing, and is equivalent to coordinate indexing. Internally the mask array is converted to coordinate arrays by calling `np.nonzero`. See Also -------- get_basic_selection, set_basic_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, vindex, oindex, __getitem__, __setitem__ """ # refresh metadata if not self._cache_metadata: self._load_metadata() # check args check_fields(fields, self._dtype) # setup indexer indexer = MaskIndexer(selection, self) return self._get_selection(indexer=indexer, out=out, fields=fields)
def function[get_mask_selection, parameter[self, selection, out, fields]]: constant[Retrieve a selection of individual items, by providing a Boolean array of the same shape as the array against which the selection is being made, where True values indicate a selected item. Parameters ---------- selection : ndarray, bool A Boolean array of the same shape as the array against which the selection is being made. out : ndarray, optional If given, load the selected data directly into this array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. Returns ------- out : ndarray A NumPy array containing the data for the requested selection. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.array(np.arange(100).reshape(10, 10)) Retrieve items by specifying a maks:: >>> sel = np.zeros_like(z, dtype=bool) >>> sel[1, 1] = True >>> sel[4, 4] = True >>> z.get_mask_selection(sel) array([11, 44]) For convenience, the mask selection functionality is also available via the `vindex` property, e.g.:: >>> z.vindex[sel] array([11, 44]) Notes ----- Mask indexing is a form of vectorized or inner indexing, and is equivalent to coordinate indexing. Internally the mask array is converted to coordinate arrays by calling `np.nonzero`. See Also -------- get_basic_selection, set_basic_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, vindex, oindex, __getitem__, __setitem__ ] if <ast.UnaryOp object at 0x7da1b19ed7b0> begin[:] call[name[self]._load_metadata, parameter[]] call[name[check_fields], parameter[name[fields], name[self]._dtype]] variable[indexer] assign[=] call[name[MaskIndexer], parameter[name[selection], name[self]]] return[call[name[self]._get_selection, parameter[]]]
keyword[def] identifier[get_mask_selection] ( identifier[self] , identifier[selection] , identifier[out] = keyword[None] , identifier[fields] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_cache_metadata] : identifier[self] . identifier[_load_metadata] () identifier[check_fields] ( identifier[fields] , identifier[self] . identifier[_dtype] ) identifier[indexer] = identifier[MaskIndexer] ( identifier[selection] , identifier[self] ) keyword[return] identifier[self] . identifier[_get_selection] ( identifier[indexer] = identifier[indexer] , identifier[out] = identifier[out] , identifier[fields] = identifier[fields] )
def get_mask_selection(self, selection, out=None, fields=None): """Retrieve a selection of individual items, by providing a Boolean array of the same shape as the array against which the selection is being made, where True values indicate a selected item. Parameters ---------- selection : ndarray, bool A Boolean array of the same shape as the array against which the selection is being made. out : ndarray, optional If given, load the selected data directly into this array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. Returns ------- out : ndarray A NumPy array containing the data for the requested selection. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.array(np.arange(100).reshape(10, 10)) Retrieve items by specifying a maks:: >>> sel = np.zeros_like(z, dtype=bool) >>> sel[1, 1] = True >>> sel[4, 4] = True >>> z.get_mask_selection(sel) array([11, 44]) For convenience, the mask selection functionality is also available via the `vindex` property, e.g.:: >>> z.vindex[sel] array([11, 44]) Notes ----- Mask indexing is a form of vectorized or inner indexing, and is equivalent to coordinate indexing. Internally the mask array is converted to coordinate arrays by calling `np.nonzero`. See Also -------- get_basic_selection, set_basic_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, vindex, oindex, __getitem__, __setitem__ """ # refresh metadata if not self._cache_metadata: self._load_metadata() # depends on [control=['if'], data=[]] # check args check_fields(fields, self._dtype) # setup indexer indexer = MaskIndexer(selection, self) return self._get_selection(indexer=indexer, out=out, fields=fields)
def set_voltage(self, volt, ramp=False): """Sets the output voltage of monsoon. Args: volt: Voltage to set the output to. ramp: If true, the output voltage will be increased gradually to prevent tripping Monsoon overvoltage. """ if ramp: self.mon.RampVoltage(self.mon.start_voltage, volt) else: self.mon.SetVoltage(volt)
def function[set_voltage, parameter[self, volt, ramp]]: constant[Sets the output voltage of monsoon. Args: volt: Voltage to set the output to. ramp: If true, the output voltage will be increased gradually to prevent tripping Monsoon overvoltage. ] if name[ramp] begin[:] call[name[self].mon.RampVoltage, parameter[name[self].mon.start_voltage, name[volt]]]
keyword[def] identifier[set_voltage] ( identifier[self] , identifier[volt] , identifier[ramp] = keyword[False] ): literal[string] keyword[if] identifier[ramp] : identifier[self] . identifier[mon] . identifier[RampVoltage] ( identifier[self] . identifier[mon] . identifier[start_voltage] , identifier[volt] ) keyword[else] : identifier[self] . identifier[mon] . identifier[SetVoltage] ( identifier[volt] )
def set_voltage(self, volt, ramp=False): """Sets the output voltage of monsoon. Args: volt: Voltage to set the output to. ramp: If true, the output voltage will be increased gradually to prevent tripping Monsoon overvoltage. """ if ramp: self.mon.RampVoltage(self.mon.start_voltage, volt) # depends on [control=['if'], data=[]] else: self.mon.SetVoltage(volt)
def run(self, messages): """Returns some analytics about this autograder run.""" statistics = {} statistics['time'] = str(datetime.now()) statistics['time-utc'] = str(datetime.utcnow()) statistics['unlock'] = self.args.unlock if self.args.question: statistics['question'] = [t.name for t in self.assignment.specified_tests] statistics['requested-questions'] = self.args.question if self.args.suite: statistics['requested-suite'] = self.args.suite if self.args.case: statistics['requested-case'] = self.args.case messages['analytics'] = statistics self.log_run(messages)
def function[run, parameter[self, messages]]: constant[Returns some analytics about this autograder run.] variable[statistics] assign[=] dictionary[[], []] call[name[statistics]][constant[time]] assign[=] call[name[str], parameter[call[name[datetime].now, parameter[]]]] call[name[statistics]][constant[time-utc]] assign[=] call[name[str], parameter[call[name[datetime].utcnow, parameter[]]]] call[name[statistics]][constant[unlock]] assign[=] name[self].args.unlock if name[self].args.question begin[:] call[name[statistics]][constant[question]] assign[=] <ast.ListComp object at 0x7da1b26af880> call[name[statistics]][constant[requested-questions]] assign[=] name[self].args.question if name[self].args.suite begin[:] call[name[statistics]][constant[requested-suite]] assign[=] name[self].args.suite if name[self].args.case begin[:] call[name[statistics]][constant[requested-case]] assign[=] name[self].args.case call[name[messages]][constant[analytics]] assign[=] name[statistics] call[name[self].log_run, parameter[name[messages]]]
keyword[def] identifier[run] ( identifier[self] , identifier[messages] ): literal[string] identifier[statistics] ={} identifier[statistics] [ literal[string] ]= identifier[str] ( identifier[datetime] . identifier[now] ()) identifier[statistics] [ literal[string] ]= identifier[str] ( identifier[datetime] . identifier[utcnow] ()) identifier[statistics] [ literal[string] ]= identifier[self] . identifier[args] . identifier[unlock] keyword[if] identifier[self] . identifier[args] . identifier[question] : identifier[statistics] [ literal[string] ]=[ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[assignment] . identifier[specified_tests] ] identifier[statistics] [ literal[string] ]= identifier[self] . identifier[args] . identifier[question] keyword[if] identifier[self] . identifier[args] . identifier[suite] : identifier[statistics] [ literal[string] ]= identifier[self] . identifier[args] . identifier[suite] keyword[if] identifier[self] . identifier[args] . identifier[case] : identifier[statistics] [ literal[string] ]= identifier[self] . identifier[args] . identifier[case] identifier[messages] [ literal[string] ]= identifier[statistics] identifier[self] . identifier[log_run] ( identifier[messages] )
def run(self, messages): """Returns some analytics about this autograder run.""" statistics = {} statistics['time'] = str(datetime.now()) statistics['time-utc'] = str(datetime.utcnow()) statistics['unlock'] = self.args.unlock if self.args.question: statistics['question'] = [t.name for t in self.assignment.specified_tests] statistics['requested-questions'] = self.args.question if self.args.suite: statistics['requested-suite'] = self.args.suite # depends on [control=['if'], data=[]] if self.args.case: statistics['requested-case'] = self.args.case # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] messages['analytics'] = statistics self.log_run(messages)
def negated(input_words, include_nt=True): """ Determine if input contains negation words """ input_words = [str(w).lower() for w in input_words] neg_words = [] neg_words.extend(NEGATE) for word in neg_words: if word in input_words: return True if include_nt: for word in input_words: if "n't" in word: return True if "least" in input_words: i = input_words.index("least") if i > 0 and input_words[i - 1] != "at": return True return False
def function[negated, parameter[input_words, include_nt]]: constant[ Determine if input contains negation words ] variable[input_words] assign[=] <ast.ListComp object at 0x7da1b20555d0> variable[neg_words] assign[=] list[[]] call[name[neg_words].extend, parameter[name[NEGATE]]] for taget[name[word]] in starred[name[neg_words]] begin[:] if compare[name[word] in name[input_words]] begin[:] return[constant[True]] if name[include_nt] begin[:] for taget[name[word]] in starred[name[input_words]] begin[:] if compare[constant[n't] in name[word]] begin[:] return[constant[True]] if compare[constant[least] in name[input_words]] begin[:] variable[i] assign[=] call[name[input_words].index, parameter[constant[least]]] if <ast.BoolOp object at 0x7da1b20556f0> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[negated] ( identifier[input_words] , identifier[include_nt] = keyword[True] ): literal[string] identifier[input_words] =[ identifier[str] ( identifier[w] ). identifier[lower] () keyword[for] identifier[w] keyword[in] identifier[input_words] ] identifier[neg_words] =[] identifier[neg_words] . identifier[extend] ( identifier[NEGATE] ) keyword[for] identifier[word] keyword[in] identifier[neg_words] : keyword[if] identifier[word] keyword[in] identifier[input_words] : keyword[return] keyword[True] keyword[if] identifier[include_nt] : keyword[for] identifier[word] keyword[in] identifier[input_words] : keyword[if] literal[string] keyword[in] identifier[word] : keyword[return] keyword[True] keyword[if] literal[string] keyword[in] identifier[input_words] : identifier[i] = identifier[input_words] . identifier[index] ( literal[string] ) keyword[if] identifier[i] > literal[int] keyword[and] identifier[input_words] [ identifier[i] - literal[int] ]!= literal[string] : keyword[return] keyword[True] keyword[return] keyword[False]
def negated(input_words, include_nt=True): """ Determine if input contains negation words """ input_words = [str(w).lower() for w in input_words] neg_words = [] neg_words.extend(NEGATE) for word in neg_words: if word in input_words: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] if include_nt: for word in input_words: if "n't" in word: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] if 'least' in input_words: i = input_words.index('least') if i > 0 and input_words[i - 1] != 'at': return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['input_words']] return False
def opt_display(self, display): """ Set value for display option """ key = get_enum_key(display, DISPLAYS) if key is not None: self.conf["display"] = key self.display = DISPLAYS[key] print("Set display %r" % key) else: print("Unknown display %r" % display)
def function[opt_display, parameter[self, display]]: constant[ Set value for display option ] variable[key] assign[=] call[name[get_enum_key], parameter[name[display], name[DISPLAYS]]] if compare[name[key] is_not constant[None]] begin[:] call[name[self].conf][constant[display]] assign[=] name[key] name[self].display assign[=] call[name[DISPLAYS]][name[key]] call[name[print], parameter[binary_operation[constant[Set display %r] <ast.Mod object at 0x7da2590d6920> name[key]]]]
keyword[def] identifier[opt_display] ( identifier[self] , identifier[display] ): literal[string] identifier[key] = identifier[get_enum_key] ( identifier[display] , identifier[DISPLAYS] ) keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[conf] [ literal[string] ]= identifier[key] identifier[self] . identifier[display] = identifier[DISPLAYS] [ identifier[key] ] identifier[print] ( literal[string] % identifier[key] ) keyword[else] : identifier[print] ( literal[string] % identifier[display] )
def opt_display(self, display): """ Set value for display option """ key = get_enum_key(display, DISPLAYS) if key is not None: self.conf['display'] = key self.display = DISPLAYS[key] print('Set display %r' % key) # depends on [control=['if'], data=['key']] else: print('Unknown display %r' % display)
def cdpop(): """ Return the last directory. Returns absolute path to new working directory. """ if len(_cdhist) >= 1: old = _cdhist.pop() # Pop from history. os.chdir(old) return old else: return pwd()
def function[cdpop, parameter[]]: constant[ Return the last directory. Returns absolute path to new working directory. ] if compare[call[name[len], parameter[name[_cdhist]]] greater_or_equal[>=] constant[1]] begin[:] variable[old] assign[=] call[name[_cdhist].pop, parameter[]] call[name[os].chdir, parameter[name[old]]] return[name[old]]
keyword[def] identifier[cdpop] (): literal[string] keyword[if] identifier[len] ( identifier[_cdhist] )>= literal[int] : identifier[old] = identifier[_cdhist] . identifier[pop] () identifier[os] . identifier[chdir] ( identifier[old] ) keyword[return] identifier[old] keyword[else] : keyword[return] identifier[pwd] ()
def cdpop(): """ Return the last directory. Returns absolute path to new working directory. """ if len(_cdhist) >= 1: old = _cdhist.pop() # Pop from history. os.chdir(old) return old # depends on [control=['if'], data=[]] else: return pwd()
def security(): """View for security page.""" sessions = SessionActivity.query_by_user( user_id=current_user.get_id() ).all() master_session = None for index, session in enumerate(sessions): if SessionActivity.is_current(session.sid_s): master_session = session del sessions[index] return render_template( current_app.config['ACCOUNTS_SETTINGS_SECURITY_TEMPLATE'], formclass=RevokeForm, sessions=[master_session] + sessions, is_current=SessionActivity.is_current )
def function[security, parameter[]]: constant[View for security page.] variable[sessions] assign[=] call[call[name[SessionActivity].query_by_user, parameter[]].all, parameter[]] variable[master_session] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da1b1a1e4a0>, <ast.Name object at 0x7da1b1a1e410>]]] in starred[call[name[enumerate], parameter[name[sessions]]]] begin[:] if call[name[SessionActivity].is_current, parameter[name[session].sid_s]] begin[:] variable[master_session] assign[=] name[session] <ast.Delete object at 0x7da2041d9930> return[call[name[render_template], parameter[call[name[current_app].config][constant[ACCOUNTS_SETTINGS_SECURITY_TEMPLATE]]]]]
keyword[def] identifier[security] (): literal[string] identifier[sessions] = identifier[SessionActivity] . identifier[query_by_user] ( identifier[user_id] = identifier[current_user] . identifier[get_id] () ). identifier[all] () identifier[master_session] = keyword[None] keyword[for] identifier[index] , identifier[session] keyword[in] identifier[enumerate] ( identifier[sessions] ): keyword[if] identifier[SessionActivity] . identifier[is_current] ( identifier[session] . identifier[sid_s] ): identifier[master_session] = identifier[session] keyword[del] identifier[sessions] [ identifier[index] ] keyword[return] identifier[render_template] ( identifier[current_app] . identifier[config] [ literal[string] ], identifier[formclass] = identifier[RevokeForm] , identifier[sessions] =[ identifier[master_session] ]+ identifier[sessions] , identifier[is_current] = identifier[SessionActivity] . identifier[is_current] )
def security(): """View for security page.""" sessions = SessionActivity.query_by_user(user_id=current_user.get_id()).all() master_session = None for (index, session) in enumerate(sessions): if SessionActivity.is_current(session.sid_s): master_session = session del sessions[index] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return render_template(current_app.config['ACCOUNTS_SETTINGS_SECURITY_TEMPLATE'], formclass=RevokeForm, sessions=[master_session] + sessions, is_current=SessionActivity.is_current)
def starts_expanded(name): """Return True if directory is a parent of initial cwd.""" if name is '/': return True l = name.split(dir_sep()) if len(l) > len(_initial_cwd): return False if l != _initial_cwd[:len(l)]: return False return True
def function[starts_expanded, parameter[name]]: constant[Return True if directory is a parent of initial cwd.] if compare[name[name] is constant[/]] begin[:] return[constant[True]] variable[l] assign[=] call[name[name].split, parameter[call[name[dir_sep], parameter[]]]] if compare[call[name[len], parameter[name[l]]] greater[>] call[name[len], parameter[name[_initial_cwd]]]] begin[:] return[constant[False]] if compare[name[l] not_equal[!=] call[name[_initial_cwd]][<ast.Slice object at 0x7da20c992b00>]] begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[starts_expanded] ( identifier[name] ): literal[string] keyword[if] identifier[name] keyword[is] literal[string] : keyword[return] keyword[True] identifier[l] = identifier[name] . identifier[split] ( identifier[dir_sep] ()) keyword[if] identifier[len] ( identifier[l] )> identifier[len] ( identifier[_initial_cwd] ): keyword[return] keyword[False] keyword[if] identifier[l] != identifier[_initial_cwd] [: identifier[len] ( identifier[l] )]: keyword[return] keyword[False] keyword[return] keyword[True]
def starts_expanded(name): """Return True if directory is a parent of initial cwd.""" if name is '/': return True # depends on [control=['if'], data=[]] l = name.split(dir_sep()) if len(l) > len(_initial_cwd): return False # depends on [control=['if'], data=[]] if l != _initial_cwd[:len(l)]: return False # depends on [control=['if'], data=[]] return True
def import_datatable(self, l_datatable, schema='datatable', col_key=0): """ import a datatable (grid) by using the schema:table:column as keys. e.g. Sample input ( via cls_database.py -> test.csv) TERM,GENDER,ID,tot1,tot2 5320,M,78,18,66 1310,M,78,10,12 Loads the following: """ key = '' hdr = l_datatable.get_header() schema_root_key = schema + ':' + os.path.basename(l_datatable.name) + ':' print(hdr) row_num = 0 for row_num, row in enumerate(l_datatable.get_arr()): #print(row) for col_num, col in enumerate(row): #print('col_num, col = ', col_num, col) if col and col_num < len(hdr): key = schema_root_key + row[col_key] + ':' + hdr[col_num] self.connection.set(key, col) #self.connection.lpush(key, col) print ('loaded ', str(row_num) , ' rows')
def function[import_datatable, parameter[self, l_datatable, schema, col_key]]: constant[ import a datatable (grid) by using the schema:table:column as keys. e.g. Sample input ( via cls_database.py -> test.csv) TERM,GENDER,ID,tot1,tot2 5320,M,78,18,66 1310,M,78,10,12 Loads the following: ] variable[key] assign[=] constant[] variable[hdr] assign[=] call[name[l_datatable].get_header, parameter[]] variable[schema_root_key] assign[=] binary_operation[binary_operation[binary_operation[name[schema] + constant[:]] + call[name[os].path.basename, parameter[name[l_datatable].name]]] + constant[:]] call[name[print], parameter[name[hdr]]] variable[row_num] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da18bcc8040>, <ast.Name object at 0x7da18bccbbb0>]]] in starred[call[name[enumerate], parameter[call[name[l_datatable].get_arr, parameter[]]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bcc8310>, <ast.Name object at 0x7da18bcc8940>]]] in starred[call[name[enumerate], parameter[name[row]]]] begin[:] if <ast.BoolOp object at 0x7da18bccbb50> begin[:] variable[key] assign[=] binary_operation[binary_operation[binary_operation[name[schema_root_key] + call[name[row]][name[col_key]]] + constant[:]] + call[name[hdr]][name[col_num]]] call[name[self].connection.set, parameter[name[key], name[col]]] call[name[print], parameter[constant[loaded ], call[name[str], parameter[name[row_num]]], constant[ rows]]]
keyword[def] identifier[import_datatable] ( identifier[self] , identifier[l_datatable] , identifier[schema] = literal[string] , identifier[col_key] = literal[int] ): literal[string] identifier[key] = literal[string] identifier[hdr] = identifier[l_datatable] . identifier[get_header] () identifier[schema_root_key] = identifier[schema] + literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[l_datatable] . identifier[name] )+ literal[string] identifier[print] ( identifier[hdr] ) identifier[row_num] = literal[int] keyword[for] identifier[row_num] , identifier[row] keyword[in] identifier[enumerate] ( identifier[l_datatable] . identifier[get_arr] ()): keyword[for] identifier[col_num] , identifier[col] keyword[in] identifier[enumerate] ( identifier[row] ): keyword[if] identifier[col] keyword[and] identifier[col_num] < identifier[len] ( identifier[hdr] ): identifier[key] = identifier[schema_root_key] + identifier[row] [ identifier[col_key] ]+ literal[string] + identifier[hdr] [ identifier[col_num] ] identifier[self] . identifier[connection] . identifier[set] ( identifier[key] , identifier[col] ) identifier[print] ( literal[string] , identifier[str] ( identifier[row_num] ), literal[string] )
def import_datatable(self, l_datatable, schema='datatable', col_key=0): """ import a datatable (grid) by using the schema:table:column as keys. e.g. Sample input ( via cls_database.py -> test.csv) TERM,GENDER,ID,tot1,tot2 5320,M,78,18,66 1310,M,78,10,12 Loads the following: """ key = '' hdr = l_datatable.get_header() schema_root_key = schema + ':' + os.path.basename(l_datatable.name) + ':' print(hdr) row_num = 0 for (row_num, row) in enumerate(l_datatable.get_arr()): #print(row) for (col_num, col) in enumerate(row): #print('col_num, col = ', col_num, col) if col and col_num < len(hdr): key = schema_root_key + row[col_key] + ':' + hdr[col_num] self.connection.set(key, col) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] #self.connection.lpush(key, col) print('loaded ', str(row_num), ' rows')
def duplicateAnalysis(analysis): """ Duplicate an analysis consist on creating a new analysis with the same analysis service for the same sample. It is used in order to reduce the error procedure probability because both results must be similar. :base: the analysis object used as the creation base. """ ar = analysis.aq_parent kw = analysis.getKeyword() # Rename the analysis to make way for it's successor. # Support multiple duplicates by renaming to *-0, *-1, etc cnt = [x for x in ar.objectValues("Analysis") if x.getId().startswith(kw)] a_id = "{0}-{1}".format(kw, len(cnt)) dup = create_analysis(ar, analysis, id=a_id, Retested=True) return dup
def function[duplicateAnalysis, parameter[analysis]]: constant[ Duplicate an analysis consist on creating a new analysis with the same analysis service for the same sample. It is used in order to reduce the error procedure probability because both results must be similar. :base: the analysis object used as the creation base. ] variable[ar] assign[=] name[analysis].aq_parent variable[kw] assign[=] call[name[analysis].getKeyword, parameter[]] variable[cnt] assign[=] <ast.ListComp object at 0x7da1b23124a0> variable[a_id] assign[=] call[constant[{0}-{1}].format, parameter[name[kw], call[name[len], parameter[name[cnt]]]]] variable[dup] assign[=] call[name[create_analysis], parameter[name[ar], name[analysis]]] return[name[dup]]
keyword[def] identifier[duplicateAnalysis] ( identifier[analysis] ): literal[string] identifier[ar] = identifier[analysis] . identifier[aq_parent] identifier[kw] = identifier[analysis] . identifier[getKeyword] () identifier[cnt] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[ar] . identifier[objectValues] ( literal[string] ) keyword[if] identifier[x] . identifier[getId] (). identifier[startswith] ( identifier[kw] )] identifier[a_id] = literal[string] . identifier[format] ( identifier[kw] , identifier[len] ( identifier[cnt] )) identifier[dup] = identifier[create_analysis] ( identifier[ar] , identifier[analysis] , identifier[id] = identifier[a_id] , identifier[Retested] = keyword[True] ) keyword[return] identifier[dup]
def duplicateAnalysis(analysis): """ Duplicate an analysis consist on creating a new analysis with the same analysis service for the same sample. It is used in order to reduce the error procedure probability because both results must be similar. :base: the analysis object used as the creation base. """ ar = analysis.aq_parent kw = analysis.getKeyword() # Rename the analysis to make way for it's successor. # Support multiple duplicates by renaming to *-0, *-1, etc cnt = [x for x in ar.objectValues('Analysis') if x.getId().startswith(kw)] a_id = '{0}-{1}'.format(kw, len(cnt)) dup = create_analysis(ar, analysis, id=a_id, Retested=True) return dup
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. """ if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
def function[conv2d_fixed_padding, parameter[inputs, filters, kernel_size, strides, data_format, use_td, targeting_rate, keep_prob, is_training]]: constant[Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. ] if compare[name[strides] greater[>] constant[1]] begin[:] variable[inputs] assign[=] call[name[fixed_padding], parameter[name[inputs], name[kernel_size]]] if name[use_td] begin[:] variable[inputs_shape] assign[=] call[name[common_layers].shape_list, parameter[name[inputs]]] if compare[name[use_td] equal[==] constant[weight]] begin[:] if compare[name[data_format] equal[==] constant[channels_last]] begin[:] variable[size] assign[=] binary_operation[binary_operation[name[kernel_size] * name[kernel_size]] * call[name[inputs_shape]][<ast.UnaryOp object at 0x7da1b205a440>]] variable[targeting_count] assign[=] binary_operation[name[targeting_rate] * call[name[tf].to_float, parameter[name[size]]]] variable[targeting_fn] assign[=] name[common_layers].weight_targeting variable[y] assign[=] call[name[common_layers].td_conv, parameter[name[inputs], name[filters], name[kernel_size], name[targeting_count], name[targeting_fn], name[keep_prob], name[is_training]]] return[name[y]]
keyword[def] identifier[conv2d_fixed_padding] ( identifier[inputs] , identifier[filters] , identifier[kernel_size] , identifier[strides] , identifier[data_format] = literal[string] , identifier[use_td] = keyword[False] , identifier[targeting_rate] = keyword[None] , identifier[keep_prob] = keyword[None] , identifier[is_training] = keyword[None] ): literal[string] keyword[if] identifier[strides] > literal[int] : identifier[inputs] = identifier[fixed_padding] ( identifier[inputs] , identifier[kernel_size] , identifier[data_format] = identifier[data_format] ) keyword[if] identifier[use_td] : identifier[inputs_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[inputs] ) keyword[if] identifier[use_td] == literal[string] : keyword[if] identifier[data_format] == literal[string] : identifier[size] = identifier[kernel_size] * identifier[kernel_size] * identifier[inputs_shape] [- literal[int] ] keyword[else] : identifier[size] = identifier[kernel_size] * identifier[kernel_size] * identifier[inputs_shape] [ literal[int] ] identifier[targeting_count] = identifier[targeting_rate] * identifier[tf] . identifier[to_float] ( identifier[size] ) identifier[targeting_fn] = identifier[common_layers] . identifier[weight_targeting] keyword[elif] identifier[use_td] == literal[string] : identifier[targeting_count] = identifier[targeting_rate] * identifier[filters] identifier[targeting_fn] = identifier[common_layers] . identifier[unit_targeting] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] % identifier[use_td] ) identifier[y] = identifier[common_layers] . identifier[td_conv] ( identifier[inputs] , identifier[filters] , identifier[kernel_size] , identifier[targeting_count] , identifier[targeting_fn] , identifier[keep_prob] , identifier[is_training] , identifier[do_prune] = keyword[True] , identifier[strides] = identifier[strides] , identifier[padding] =( literal[string] keyword[if] identifier[strides] == literal[int] keyword[else] literal[string] ), identifier[data_format] = identifier[data_format] , identifier[use_bias] = keyword[False] , identifier[kernel_initializer] = identifier[tf] . identifier[variance_scaling_initializer] ()) keyword[else] : identifier[y] = identifier[layers] (). identifier[Conv2D] ( identifier[filters] = identifier[filters] , identifier[kernel_size] = identifier[kernel_size] , identifier[strides] = identifier[strides] , identifier[padding] =( literal[string] keyword[if] identifier[strides] == literal[int] keyword[else] literal[string] ), identifier[use_bias] = keyword[False] , identifier[kernel_initializer] = identifier[tf] . identifier[variance_scaling_initializer] (), identifier[data_format] = identifier[data_format] )( identifier[inputs] ) keyword[return] identifier[y]
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format='channels_first', use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. """ if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) # depends on [control=['if'], data=[]] if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == 'weight': if data_format == 'channels_last': size = kernel_size * kernel_size * inputs_shape[-1] # depends on [control=['if'], data=[]] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting # depends on [control=['if'], data=[]] elif use_td == 'unit': targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting # depends on [control=['if'], data=[]] else: raise Exception('Unrecognized targeted dropout type: %s' % use_td) y = common_layers.td_conv(inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding='SAME' if strides == 1 else 'VALID', data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) # depends on [control=['if'], data=[]] else: y = layers().Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='SAME' if strides == 1 else 'VALID', use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
def selection_range(self): """ Returns the selected lines boundaries (start line, end line) :return: tuple(int, int) """ editor = self._editor doc = editor.document() start = doc.findBlock( editor.textCursor().selectionStart()).blockNumber() end = doc.findBlock( editor.textCursor().selectionEnd()).blockNumber() text_cursor = QTextCursor(editor.textCursor()) text_cursor.setPosition(editor.textCursor().selectionEnd()) if text_cursor.columnNumber() == 0 and start != end: end -= 1 return start, end
def function[selection_range, parameter[self]]: constant[ Returns the selected lines boundaries (start line, end line) :return: tuple(int, int) ] variable[editor] assign[=] name[self]._editor variable[doc] assign[=] call[name[editor].document, parameter[]] variable[start] assign[=] call[call[name[doc].findBlock, parameter[call[call[name[editor].textCursor, parameter[]].selectionStart, parameter[]]]].blockNumber, parameter[]] variable[end] assign[=] call[call[name[doc].findBlock, parameter[call[call[name[editor].textCursor, parameter[]].selectionEnd, parameter[]]]].blockNumber, parameter[]] variable[text_cursor] assign[=] call[name[QTextCursor], parameter[call[name[editor].textCursor, parameter[]]]] call[name[text_cursor].setPosition, parameter[call[call[name[editor].textCursor, parameter[]].selectionEnd, parameter[]]]] if <ast.BoolOp object at 0x7da18ede7bb0> begin[:] <ast.AugAssign object at 0x7da18ede5c00> return[tuple[[<ast.Name object at 0x7da18ede64d0>, <ast.Name object at 0x7da18ede5960>]]]
keyword[def] identifier[selection_range] ( identifier[self] ): literal[string] identifier[editor] = identifier[self] . identifier[_editor] identifier[doc] = identifier[editor] . identifier[document] () identifier[start] = identifier[doc] . identifier[findBlock] ( identifier[editor] . identifier[textCursor] (). identifier[selectionStart] ()). identifier[blockNumber] () identifier[end] = identifier[doc] . identifier[findBlock] ( identifier[editor] . identifier[textCursor] (). identifier[selectionEnd] ()). identifier[blockNumber] () identifier[text_cursor] = identifier[QTextCursor] ( identifier[editor] . identifier[textCursor] ()) identifier[text_cursor] . identifier[setPosition] ( identifier[editor] . identifier[textCursor] (). identifier[selectionEnd] ()) keyword[if] identifier[text_cursor] . identifier[columnNumber] ()== literal[int] keyword[and] identifier[start] != identifier[end] : identifier[end] -= literal[int] keyword[return] identifier[start] , identifier[end]
def selection_range(self): """ Returns the selected lines boundaries (start line, end line) :return: tuple(int, int) """ editor = self._editor doc = editor.document() start = doc.findBlock(editor.textCursor().selectionStart()).blockNumber() end = doc.findBlock(editor.textCursor().selectionEnd()).blockNumber() text_cursor = QTextCursor(editor.textCursor()) text_cursor.setPosition(editor.textCursor().selectionEnd()) if text_cursor.columnNumber() == 0 and start != end: end -= 1 # depends on [control=['if'], data=[]] return (start, end)
def addSynonym( self, class_id, synonym, synonym_type=None): """ Add the synonym as a property of the class cid. Assume it is an exact synonym, unless otherwise specified :param g: :param cid: class id :param synonym: the literal synonym label :param synonym_type: the CURIE of the synonym type (not the URI) :return: """ if synonym_type is None: synonym_type = self.globaltt['has_exact_synonym'] if synonym is not None: self.graph.addTriple( class_id, synonym_type, synonym, object_is_literal=True)
def function[addSynonym, parameter[self, class_id, synonym, synonym_type]]: constant[ Add the synonym as a property of the class cid. Assume it is an exact synonym, unless otherwise specified :param g: :param cid: class id :param synonym: the literal synonym label :param synonym_type: the CURIE of the synonym type (not the URI) :return: ] if compare[name[synonym_type] is constant[None]] begin[:] variable[synonym_type] assign[=] call[name[self].globaltt][constant[has_exact_synonym]] if compare[name[synonym] is_not constant[None]] begin[:] call[name[self].graph.addTriple, parameter[name[class_id], name[synonym_type], name[synonym]]]
keyword[def] identifier[addSynonym] ( identifier[self] , identifier[class_id] , identifier[synonym] , identifier[synonym_type] = keyword[None] ): literal[string] keyword[if] identifier[synonym_type] keyword[is] keyword[None] : identifier[synonym_type] = identifier[self] . identifier[globaltt] [ literal[string] ] keyword[if] identifier[synonym] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[class_id] , identifier[synonym_type] , identifier[synonym] , identifier[object_is_literal] = keyword[True] )
def addSynonym(self, class_id, synonym, synonym_type=None): """ Add the synonym as a property of the class cid. Assume it is an exact synonym, unless otherwise specified :param g: :param cid: class id :param synonym: the literal synonym label :param synonym_type: the CURIE of the synonym type (not the URI) :return: """ if synonym_type is None: synonym_type = self.globaltt['has_exact_synonym'] # depends on [control=['if'], data=['synonym_type']] if synonym is not None: self.graph.addTriple(class_id, synonym_type, synonym, object_is_literal=True) # depends on [control=['if'], data=['synonym']]
def make_spindles(events, power_peaks, powers, dat_det, dat_orig, time, s_freq): """Create dict for each spindle, based on events of time points. Parameters ---------- events : ndarray (dtype='int') N x 3 matrix with start, peak, end samples, and peak frequency power_peaks : ndarray (dtype='float') peak in power spectrum for each event powers : ndarray (dtype='float') average power in power spectrum for each event dat_det : ndarray (dtype='float') vector with the data after detection-transformation (to compute peak) dat_orig : ndarray (dtype='float') vector with the raw data on which detection was performed time : ndarray (dtype='float') vector with time points s_freq : float sampling frequency Returns ------- list of dict list of all the spindles, with information about start_time, peak_time, end_time (s), peak_val (signal units), area_under_curve (signal units * s), peak_freq (Hz) """ i, events = _remove_duplicate(events, dat_det) power_peaks = power_peaks[i] spindles = [] for i, one_peak, one_pwr in zip(events, power_peaks, powers): one_spindle = {'start': time[i[0]], 'end': time[i[2] - 1], 'peak_time': time[i[1]], 'peak_val_det': dat_det[i[1]], 'peak_val_orig': dat_orig[i[1]], 'dur': (i[2] - i[0]) / s_freq, 'auc_det': sum(dat_det[i[0]:i[2]]) / s_freq, 'auc_orig': sum(dat_orig[i[0]:i[2]]) / s_freq, 'rms_det': sqrt(mean(square(dat_det[i[0]:i[2]]))), 'rms_orig': sqrt(mean(square(dat_orig[i[0]:i[2]]))), 'power_orig': one_pwr, 'peak_freq': one_peak, 'ptp_det': ptp(dat_det[i[0]:i[2]]), 'ptp_orig': ptp(dat_orig[i[0]:i[2]]) } spindles.append(one_spindle) return spindles
def function[make_spindles, parameter[events, power_peaks, powers, dat_det, dat_orig, time, s_freq]]: constant[Create dict for each spindle, based on events of time points. Parameters ---------- events : ndarray (dtype='int') N x 3 matrix with start, peak, end samples, and peak frequency power_peaks : ndarray (dtype='float') peak in power spectrum for each event powers : ndarray (dtype='float') average power in power spectrum for each event dat_det : ndarray (dtype='float') vector with the data after detection-transformation (to compute peak) dat_orig : ndarray (dtype='float') vector with the raw data on which detection was performed time : ndarray (dtype='float') vector with time points s_freq : float sampling frequency Returns ------- list of dict list of all the spindles, with information about start_time, peak_time, end_time (s), peak_val (signal units), area_under_curve (signal units * s), peak_freq (Hz) ] <ast.Tuple object at 0x7da1b0d55030> assign[=] call[name[_remove_duplicate], parameter[name[events], name[dat_det]]] variable[power_peaks] assign[=] call[name[power_peaks]][name[i]] variable[spindles] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b0d577c0>, <ast.Name object at 0x7da1b0d55840>, <ast.Name object at 0x7da1b0d55330>]]] in starred[call[name[zip], parameter[name[events], name[power_peaks], name[powers]]]] begin[:] variable[one_spindle] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d574f0>, <ast.Constant object at 0x7da1b0d56fe0>, <ast.Constant object at 0x7da1b0d56b90>, <ast.Constant object at 0x7da1b0d574c0>, <ast.Constant object at 0x7da1b0d57ee0>, <ast.Constant object at 0x7da1b0d55ed0>, <ast.Constant object at 0x7da1b0d54c10>, <ast.Constant object at 0x7da1b0d56d10>, <ast.Constant object at 0x7da1b0d54430>, <ast.Constant object at 0x7da1b0d55fc0>, <ast.Constant object at 0x7da1b0d55c60>, <ast.Constant object at 0x7da1b0d56560>, <ast.Constant object at 0x7da1b0d57070>, <ast.Constant object at 0x7da1b0d565f0>], [<ast.Subscript object at 0x7da1b0d56950>, <ast.Subscript object at 0x7da1b0d56050>, <ast.Subscript object at 0x7da1b0d562c0>, <ast.Subscript object at 0x7da1b0d55a50>, <ast.Subscript object at 0x7da1b0d570d0>, <ast.BinOp object at 0x7da1b0d56cb0>, <ast.BinOp object at 0x7da1b0d56770>, <ast.BinOp object at 0x7da1b0d57ac0>, <ast.Call object at 0x7da1b0d56f20>, <ast.Call object at 0x7da1b0d54970>, <ast.Name object at 0x7da1b0d55e40>, <ast.Name object at 0x7da1b0d555d0>, <ast.Call object at 0x7da1b0d566e0>, <ast.Call object at 0x7da1b0d55ff0>]] call[name[spindles].append, parameter[name[one_spindle]]] return[name[spindles]]
keyword[def] identifier[make_spindles] ( identifier[events] , identifier[power_peaks] , identifier[powers] , identifier[dat_det] , identifier[dat_orig] , identifier[time] , identifier[s_freq] ): literal[string] identifier[i] , identifier[events] = identifier[_remove_duplicate] ( identifier[events] , identifier[dat_det] ) identifier[power_peaks] = identifier[power_peaks] [ identifier[i] ] identifier[spindles] =[] keyword[for] identifier[i] , identifier[one_peak] , identifier[one_pwr] keyword[in] identifier[zip] ( identifier[events] , identifier[power_peaks] , identifier[powers] ): identifier[one_spindle] ={ literal[string] : identifier[time] [ identifier[i] [ literal[int] ]], literal[string] : identifier[time] [ identifier[i] [ literal[int] ]- literal[int] ], literal[string] : identifier[time] [ identifier[i] [ literal[int] ]], literal[string] : identifier[dat_det] [ identifier[i] [ literal[int] ]], literal[string] : identifier[dat_orig] [ identifier[i] [ literal[int] ]], literal[string] :( identifier[i] [ literal[int] ]- identifier[i] [ literal[int] ])/ identifier[s_freq] , literal[string] : identifier[sum] ( identifier[dat_det] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]])/ identifier[s_freq] , literal[string] : identifier[sum] ( identifier[dat_orig] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]])/ identifier[s_freq] , literal[string] : identifier[sqrt] ( identifier[mean] ( identifier[square] ( identifier[dat_det] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]]))), literal[string] : identifier[sqrt] ( identifier[mean] ( identifier[square] ( identifier[dat_orig] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]]))), literal[string] : identifier[one_pwr] , literal[string] : identifier[one_peak] , literal[string] : identifier[ptp] ( identifier[dat_det] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]]), literal[string] : identifier[ptp] ( identifier[dat_orig] [ identifier[i] [ literal[int] ]: identifier[i] [ literal[int] ]]) } identifier[spindles] . identifier[append] ( identifier[one_spindle] ) keyword[return] identifier[spindles]
def make_spindles(events, power_peaks, powers, dat_det, dat_orig, time, s_freq): """Create dict for each spindle, based on events of time points. Parameters ---------- events : ndarray (dtype='int') N x 3 matrix with start, peak, end samples, and peak frequency power_peaks : ndarray (dtype='float') peak in power spectrum for each event powers : ndarray (dtype='float') average power in power spectrum for each event dat_det : ndarray (dtype='float') vector with the data after detection-transformation (to compute peak) dat_orig : ndarray (dtype='float') vector with the raw data on which detection was performed time : ndarray (dtype='float') vector with time points s_freq : float sampling frequency Returns ------- list of dict list of all the spindles, with information about start_time, peak_time, end_time (s), peak_val (signal units), area_under_curve (signal units * s), peak_freq (Hz) """ (i, events) = _remove_duplicate(events, dat_det) power_peaks = power_peaks[i] spindles = [] for (i, one_peak, one_pwr) in zip(events, power_peaks, powers): one_spindle = {'start': time[i[0]], 'end': time[i[2] - 1], 'peak_time': time[i[1]], 'peak_val_det': dat_det[i[1]], 'peak_val_orig': dat_orig[i[1]], 'dur': (i[2] - i[0]) / s_freq, 'auc_det': sum(dat_det[i[0]:i[2]]) / s_freq, 'auc_orig': sum(dat_orig[i[0]:i[2]]) / s_freq, 'rms_det': sqrt(mean(square(dat_det[i[0]:i[2]]))), 'rms_orig': sqrt(mean(square(dat_orig[i[0]:i[2]]))), 'power_orig': one_pwr, 'peak_freq': one_peak, 'ptp_det': ptp(dat_det[i[0]:i[2]]), 'ptp_orig': ptp(dat_orig[i[0]:i[2]])} spindles.append(one_spindle) # depends on [control=['for'], data=[]] return spindles
def temp_path(file_name=None): """ Gets a temp path. Kwargs: file_name (str) : if file name is specified, it gets appended to the temp dir. Usage:: temp_file_path = temp_path("myfile") copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile' """ if file_name is None: file_name = generate_timestamped_string("wtf_temp_file") return os.path.join(tempfile.gettempdir(), file_name)
def function[temp_path, parameter[file_name]]: constant[ Gets a temp path. Kwargs: file_name (str) : if file name is specified, it gets appended to the temp dir. Usage:: temp_file_path = temp_path("myfile") copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile' ] if compare[name[file_name] is constant[None]] begin[:] variable[file_name] assign[=] call[name[generate_timestamped_string], parameter[constant[wtf_temp_file]]] return[call[name[os].path.join, parameter[call[name[tempfile].gettempdir, parameter[]], name[file_name]]]]
keyword[def] identifier[temp_path] ( identifier[file_name] = keyword[None] ): literal[string] keyword[if] identifier[file_name] keyword[is] keyword[None] : identifier[file_name] = identifier[generate_timestamped_string] ( literal[string] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[tempfile] . identifier[gettempdir] (), identifier[file_name] )
def temp_path(file_name=None): """ Gets a temp path. Kwargs: file_name (str) : if file name is specified, it gets appended to the temp dir. Usage:: temp_file_path = temp_path("myfile") copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile' """ if file_name is None: file_name = generate_timestamped_string('wtf_temp_file') # depends on [control=['if'], data=['file_name']] return os.path.join(tempfile.gettempdir(), file_name)
def _from_dict(cls, _dict): """Initialize a ValueCollection object from a json dictionary.""" args = {} if 'values' in _dict: args['values'] = [ Value._from_dict(x) for x in (_dict.get('values')) ] else: raise ValueError( 'Required property \'values\' not present in ValueCollection JSON' ) if 'pagination' in _dict: args['pagination'] = Pagination._from_dict(_dict.get('pagination')) else: raise ValueError( 'Required property \'pagination\' not present in ValueCollection JSON' ) return cls(**args)
def function[_from_dict, parameter[cls, _dict]]: constant[Initialize a ValueCollection object from a json dictionary.] variable[args] assign[=] dictionary[[], []] if compare[constant[values] in name[_dict]] begin[:] call[name[args]][constant[values]] assign[=] <ast.ListComp object at 0x7da20c76c040> if compare[constant[pagination] in name[_dict]] begin[:] call[name[args]][constant[pagination]] assign[=] call[name[Pagination]._from_dict, parameter[call[name[_dict].get, parameter[constant[pagination]]]]] return[call[name[cls], parameter[]]]
keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ): literal[string] identifier[args] ={} keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]=[ identifier[Value] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] )) ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[Pagination] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[cls] (** identifier[args] )
def _from_dict(cls, _dict): """Initialize a ValueCollection object from a json dictionary.""" args = {} if 'values' in _dict: args['values'] = [Value._from_dict(x) for x in _dict.get('values')] # depends on [control=['if'], data=['_dict']] else: raise ValueError("Required property 'values' not present in ValueCollection JSON") if 'pagination' in _dict: args['pagination'] = Pagination._from_dict(_dict.get('pagination')) # depends on [control=['if'], data=['_dict']] else: raise ValueError("Required property 'pagination' not present in ValueCollection JSON") return cls(**args)
def find_root_tex_document(base_dir="."): """Find the tex article in the current directory that can be considered a root. We do this by searching contents for ``'\documentclass'``. Parameters ---------- base_dir : str Directory to search for LaTeX documents, relative to the current working directory. Returns ------- tex_path : str Path to the root tex document relative to the current working directory. """ log = logging.getLogger(__name__) for tex_path in iter_tex_documents(base_dir=base_dir): with codecs.open(tex_path, 'r', encoding='utf-8') as f: text = f.read() if len(docclass_pattern.findall(text)) > 0: log.debug("Found root tex {0}".format(tex_path)) return tex_path log.warning("Could not find a root .tex file") raise RootNotFound
def function[find_root_tex_document, parameter[base_dir]]: constant[Find the tex article in the current directory that can be considered a root. We do this by searching contents for ``'\documentclass'``. Parameters ---------- base_dir : str Directory to search for LaTeX documents, relative to the current working directory. Returns ------- tex_path : str Path to the root tex document relative to the current working directory. ] variable[log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] for taget[name[tex_path]] in starred[call[name[iter_tex_documents], parameter[]]] begin[:] with call[name[codecs].open, parameter[name[tex_path], constant[r]]] begin[:] variable[text] assign[=] call[name[f].read, parameter[]] if compare[call[name[len], parameter[call[name[docclass_pattern].findall, parameter[name[text]]]]] greater[>] constant[0]] begin[:] call[name[log].debug, parameter[call[constant[Found root tex {0}].format, parameter[name[tex_path]]]]] return[name[tex_path]] call[name[log].warning, parameter[constant[Could not find a root .tex file]]] <ast.Raise object at 0x7da1b26ac940>
keyword[def] identifier[find_root_tex_document] ( identifier[base_dir] = literal[string] ): literal[string] identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) keyword[for] identifier[tex_path] keyword[in] identifier[iter_tex_documents] ( identifier[base_dir] = identifier[base_dir] ): keyword[with] identifier[codecs] . identifier[open] ( identifier[tex_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] : identifier[text] = identifier[f] . identifier[read] () keyword[if] identifier[len] ( identifier[docclass_pattern] . identifier[findall] ( identifier[text] ))> literal[int] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[tex_path] )) keyword[return] identifier[tex_path] identifier[log] . identifier[warning] ( literal[string] ) keyword[raise] identifier[RootNotFound]
def find_root_tex_document(base_dir='.'): """Find the tex article in the current directory that can be considered a root. We do this by searching contents for ``'\\documentclass'``. Parameters ---------- base_dir : str Directory to search for LaTeX documents, relative to the current working directory. Returns ------- tex_path : str Path to the root tex document relative to the current working directory. """ log = logging.getLogger(__name__) for tex_path in iter_tex_documents(base_dir=base_dir): with codecs.open(tex_path, 'r', encoding='utf-8') as f: text = f.read() if len(docclass_pattern.findall(text)) > 0: log.debug('Found root tex {0}'.format(tex_path)) return tex_path # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['tex_path']] log.warning('Could not find a root .tex file') raise RootNotFound
def tcalc(nf, p): """ t-table for nf degrees of freedom (95% confidence) """ # if p == .05: if nf > 2: t = 4.3027 if nf > 3: t = 3.1824 if nf > 4: t = 2.7765 if nf > 5: t = 2.5706 if nf > 6: t = 2.4469 if nf > 7: t = 2.3646 if nf > 8: t = 2.3060 if nf > 9: t = 2.2622 if nf > 10: t = 2.2281 if nf > 11: t = 2.2010 if nf > 12: t = 2.1788 if nf > 13: t = 2.1604 if nf > 14: t = 2.1448 if nf > 15: t = 2.1315 if nf > 16: t = 2.1199 if nf > 17: t = 2.1098 if nf > 18: t = 2.1009 if nf > 19: t = 2.0930 if nf > 20: t = 2.0860 if nf > 21: t = 2.0796 if nf > 22: t = 2.0739 if nf > 23: t = 2.0687 if nf > 24: t = 2.0639 if nf > 25: t = 2.0595 if nf > 26: t = 2.0555 if nf > 27: t = 2.0518 if nf > 28: t = 2.0484 if nf > 29: t = 2.0452 if nf > 30: t = 2.0423 if nf > 31: t = 2.0395 if nf > 32: t = 2.0369 if nf > 33: t = 2.0345 if nf > 34: t = 2.0322 if nf > 35: t = 2.0301 if nf > 36: t = 2.0281 if nf > 37: t = 2.0262 if nf > 38: t = 2.0244 if nf > 39: t = 2.0227 if nf > 40: t = 2.0211 if nf > 41: t = 2.0195 if nf > 42: t = 2.0181 if nf > 43: t = 2.0167 if nf > 44: t = 2.0154 if nf > 45: t = 2.0141 if nf > 46: t = 2.0129 if nf > 47: t = 2.0117 if nf > 48: t = 2.0106 if nf > 49: t = 2.0096 if nf > 50: t = 2.0086 if nf > 51: t = 2.0076 if nf > 52: t = 2.0066 if nf > 53: t = 2.0057 if nf > 54: t = 2.0049 if nf > 55: t = 2.0040 if nf > 56: t = 2.0032 if nf > 57: t = 2.0025 if nf > 58: t = 2.0017 if nf > 59: t = 2.0010 if nf > 60: t = 2.0003 if nf > 61: t = 1.9996 if nf > 62: t = 1.9990 if nf > 63: t = 1.9983 if nf > 64: t = 1.9977 if nf > 65: t = 1.9971 if nf > 66: t = 1.9966 if nf > 67: t = 1.9960 if nf > 68: t = 1.9955 if nf > 69: t = 1.9949 if nf > 70: t = 1.9944 if nf > 71: t = 1.9939 if nf > 72: t = 1.9935 if nf > 73: t = 1.9930 if nf > 74: t = 1.9925 if nf > 75: t = 1.9921 if nf > 76: t = 1.9917 if nf > 77: t = 1.9913 if nf > 78: t = 1.9908 if nf > 79: t = 1.9905 if nf > 80: t = 1.9901 if nf > 81: t = 1.9897 if nf > 82: t = 1.9893 if nf > 83: t = 1.9890 if nf > 84: t = 1.9886 if nf > 85: t = 1.9883 if nf > 86: t = 1.9879 if nf > 87: t = 1.9876 if nf > 88: t = 1.9873 if nf > 89: t = 1.9870 if nf > 90: t = 1.9867 if nf > 91: t = 1.9864 if nf > 92: t = 1.9861 if nf > 93: t = 1.9858 if nf > 94: t = 1.9855 if nf > 95: t = 1.9852 if nf > 96: t = 1.9850 if nf > 97: t = 1.9847 if nf > 98: t = 1.9845 if nf > 99: t = 1.9842 if nf > 100: t = 1.9840 return t # elif p == .01: if nf > 2: t = 9.9250 if nf > 3: t = 5.8408 if nf > 4: t = 4.6041 if nf > 5: t = 4.0321 if nf > 6: t = 3.7074 if nf > 7: t = 3.4995 if nf > 8: t = 3.3554 if nf > 9: t = 3.2498 if nf > 10: t = 3.1693 if nf > 11: t = 3.1058 if nf > 12: t = 3.0545 if nf > 13: t = 3.0123 if nf > 14: t = 2.9768 if nf > 15: t = 2.9467 if nf > 16: t = 2.9208 if nf > 17: t = 2.8982 if nf > 18: t = 2.8784 if nf > 19: t = 2.8609 if nf > 20: t = 2.8453 if nf > 21: t = 2.8314 if nf > 22: t = 2.8188 if nf > 23: t = 2.8073 if nf > 24: t = 2.7970 if nf > 25: t = 2.7874 if nf > 26: t = 2.7787 if nf > 27: t = 2.7707 if nf > 28: t = 2.7633 if nf > 29: t = 2.7564 if nf > 30: t = 2.7500 if nf > 31: t = 2.7440 if nf > 32: t = 2.7385 if nf > 33: t = 2.7333 if nf > 34: t = 2.7284 if nf > 35: t = 2.7238 if nf > 36: t = 2.7195 if nf > 37: t = 2.7154 if nf > 38: t = 2.7116 if nf > 39: t = 2.7079 if nf > 40: t = 2.7045 if nf > 41: t = 2.7012 if nf > 42: t = 2.6981 if nf > 43: t = 2.6951 if nf > 44: t = 2.6923 if nf > 45: t = 2.6896 if nf > 46: t = 2.6870 if nf > 47: t = 2.6846 if nf > 48: t = 2.6822 if nf > 49: t = 2.6800 if nf > 50: t = 2.6778 if nf > 51: t = 2.6757 if nf > 52: t = 2.6737 if nf > 53: t = 2.6718 if nf > 54: t = 2.6700 if nf > 55: t = 2.6682 if nf > 56: t = 2.6665 if nf > 57: t = 2.6649 if nf > 58: t = 2.6633 if nf > 59: t = 2.6618 if nf > 60: t = 2.6603 if nf > 61: t = 2.6589 if nf > 62: t = 2.6575 if nf > 63: t = 2.6561 if nf > 64: t = 2.6549 if nf > 65: t = 2.6536 if nf > 66: t = 2.6524 if nf > 67: t = 2.6512 if nf > 68: t = 2.6501 if nf > 69: t = 2.6490 if nf > 70: t = 2.6479 if nf > 71: t = 2.6469 if nf > 72: t = 2.6458 if nf > 73: t = 2.6449 if nf > 74: t = 2.6439 if nf > 75: t = 2.6430 if nf > 76: t = 2.6421 if nf > 77: t = 2.6412 if nf > 78: t = 2.6403 if nf > 79: t = 2.6395 if nf > 80: t = 2.6387 if nf > 81: t = 2.6379 if nf > 82: t = 2.6371 if nf > 83: t = 2.6364 if nf > 84: t = 2.6356 if nf > 85: t = 2.6349 if nf > 86: t = 2.6342 if nf > 87: t = 2.6335 if nf > 88: t = 2.6329 if nf > 89: t = 2.6322 if nf > 90: t = 2.6316 if nf > 91: t = 2.6309 if nf > 92: t = 2.6303 if nf > 93: t = 2.6297 if nf > 94: t = 2.6291 if nf > 95: t = 2.6286 if nf > 96: t = 2.6280 if nf > 97: t = 2.6275 if nf > 98: t = 2.6269 if nf > 99: t = 2.6264 if nf > 100: t = 2.6259 return t return t else: return 0
def function[tcalc, parameter[nf, p]]: constant[ t-table for nf degrees of freedom (95% confidence) ] if compare[name[p] equal[==] constant[0.05]] begin[:] if compare[name[nf] greater[>] constant[2]] begin[:] variable[t] assign[=] constant[4.3027] if compare[name[nf] greater[>] constant[3]] begin[:] variable[t] assign[=] constant[3.1824] if compare[name[nf] greater[>] constant[4]] begin[:] variable[t] assign[=] constant[2.7765] if compare[name[nf] greater[>] constant[5]] begin[:] variable[t] assign[=] constant[2.5706] if compare[name[nf] greater[>] constant[6]] begin[:] variable[t] assign[=] constant[2.4469] if compare[name[nf] greater[>] constant[7]] begin[:] variable[t] assign[=] constant[2.3646] if compare[name[nf] greater[>] constant[8]] begin[:] variable[t] assign[=] constant[2.306] if compare[name[nf] greater[>] constant[9]] begin[:] variable[t] assign[=] constant[2.2622] if compare[name[nf] greater[>] constant[10]] begin[:] variable[t] assign[=] constant[2.2281] if compare[name[nf] greater[>] constant[11]] begin[:] variable[t] assign[=] constant[2.201] if compare[name[nf] greater[>] constant[12]] begin[:] variable[t] assign[=] constant[2.1788] if compare[name[nf] greater[>] constant[13]] begin[:] variable[t] assign[=] constant[2.1604] if compare[name[nf] greater[>] constant[14]] begin[:] variable[t] assign[=] constant[2.1448] if compare[name[nf] greater[>] constant[15]] begin[:] variable[t] assign[=] constant[2.1315] if compare[name[nf] greater[>] constant[16]] begin[:] variable[t] assign[=] constant[2.1199] if compare[name[nf] greater[>] constant[17]] begin[:] variable[t] assign[=] constant[2.1098] if compare[name[nf] greater[>] constant[18]] begin[:] variable[t] assign[=] constant[2.1009] if compare[name[nf] greater[>] constant[19]] begin[:] variable[t] assign[=] constant[2.093] if compare[name[nf] greater[>] constant[20]] begin[:] variable[t] assign[=] constant[2.086] if compare[name[nf] greater[>] constant[21]] begin[:] variable[t] assign[=] constant[2.0796] if compare[name[nf] greater[>] constant[22]] begin[:] variable[t] assign[=] constant[2.0739] if compare[name[nf] greater[>] constant[23]] begin[:] variable[t] assign[=] constant[2.0687] if compare[name[nf] greater[>] constant[24]] begin[:] variable[t] assign[=] constant[2.0639] if compare[name[nf] greater[>] constant[25]] begin[:] variable[t] assign[=] constant[2.0595] if compare[name[nf] greater[>] constant[26]] begin[:] variable[t] assign[=] constant[2.0555] if compare[name[nf] greater[>] constant[27]] begin[:] variable[t] assign[=] constant[2.0518] if compare[name[nf] greater[>] constant[28]] begin[:] variable[t] assign[=] constant[2.0484] if compare[name[nf] greater[>] constant[29]] begin[:] variable[t] assign[=] constant[2.0452] if compare[name[nf] greater[>] constant[30]] begin[:] variable[t] assign[=] constant[2.0423] if compare[name[nf] greater[>] constant[31]] begin[:] variable[t] assign[=] constant[2.0395] if compare[name[nf] greater[>] constant[32]] begin[:] variable[t] assign[=] constant[2.0369] if compare[name[nf] greater[>] constant[33]] begin[:] variable[t] assign[=] constant[2.0345] if compare[name[nf] greater[>] constant[34]] begin[:] variable[t] assign[=] constant[2.0322] if compare[name[nf] greater[>] constant[35]] begin[:] variable[t] assign[=] constant[2.0301] if compare[name[nf] greater[>] constant[36]] begin[:] variable[t] assign[=] constant[2.0281] if compare[name[nf] greater[>] constant[37]] begin[:] variable[t] assign[=] constant[2.0262] if compare[name[nf] greater[>] constant[38]] begin[:] variable[t] assign[=] constant[2.0244] if compare[name[nf] greater[>] constant[39]] begin[:] variable[t] assign[=] constant[2.0227] if compare[name[nf] greater[>] constant[40]] begin[:] variable[t] assign[=] constant[2.0211] if compare[name[nf] greater[>] constant[41]] begin[:] variable[t] assign[=] constant[2.0195] if compare[name[nf] greater[>] constant[42]] begin[:] variable[t] assign[=] constant[2.0181] if compare[name[nf] greater[>] constant[43]] begin[:] variable[t] assign[=] constant[2.0167] if compare[name[nf] greater[>] constant[44]] begin[:] variable[t] assign[=] constant[2.0154] if compare[name[nf] greater[>] constant[45]] begin[:] variable[t] assign[=] constant[2.0141] if compare[name[nf] greater[>] constant[46]] begin[:] variable[t] assign[=] constant[2.0129] if compare[name[nf] greater[>] constant[47]] begin[:] variable[t] assign[=] constant[2.0117] if compare[name[nf] greater[>] constant[48]] begin[:] variable[t] assign[=] constant[2.0106] if compare[name[nf] greater[>] constant[49]] begin[:] variable[t] assign[=] constant[2.0096] if compare[name[nf] greater[>] constant[50]] begin[:] variable[t] assign[=] constant[2.0086] if compare[name[nf] greater[>] constant[51]] begin[:] variable[t] assign[=] constant[2.0076] if compare[name[nf] greater[>] constant[52]] begin[:] variable[t] assign[=] constant[2.0066] if compare[name[nf] greater[>] constant[53]] begin[:] variable[t] assign[=] constant[2.0057] if compare[name[nf] greater[>] constant[54]] begin[:] variable[t] assign[=] constant[2.0049] if compare[name[nf] greater[>] constant[55]] begin[:] variable[t] assign[=] constant[2.004] if compare[name[nf] greater[>] constant[56]] begin[:] variable[t] assign[=] constant[2.0032] if compare[name[nf] greater[>] constant[57]] begin[:] variable[t] assign[=] constant[2.0025] if compare[name[nf] greater[>] constant[58]] begin[:] variable[t] assign[=] constant[2.0017] if compare[name[nf] greater[>] constant[59]] begin[:] variable[t] assign[=] constant[2.001] if compare[name[nf] greater[>] constant[60]] begin[:] variable[t] assign[=] constant[2.0003] if compare[name[nf] greater[>] constant[61]] begin[:] variable[t] assign[=] constant[1.9996] if compare[name[nf] greater[>] constant[62]] begin[:] variable[t] assign[=] constant[1.999] if compare[name[nf] greater[>] constant[63]] begin[:] variable[t] assign[=] constant[1.9983] if compare[name[nf] greater[>] constant[64]] begin[:] variable[t] assign[=] constant[1.9977] if compare[name[nf] greater[>] constant[65]] begin[:] variable[t] assign[=] constant[1.9971] if compare[name[nf] greater[>] constant[66]] begin[:] variable[t] assign[=] constant[1.9966] if compare[name[nf] greater[>] constant[67]] begin[:] variable[t] assign[=] constant[1.996] if compare[name[nf] greater[>] constant[68]] begin[:] variable[t] assign[=] constant[1.9955] if compare[name[nf] greater[>] constant[69]] begin[:] variable[t] assign[=] constant[1.9949] if compare[name[nf] greater[>] constant[70]] begin[:] variable[t] assign[=] constant[1.9944] if compare[name[nf] greater[>] constant[71]] begin[:] variable[t] assign[=] constant[1.9939] if compare[name[nf] greater[>] constant[72]] begin[:] variable[t] assign[=] constant[1.9935] if compare[name[nf] greater[>] constant[73]] begin[:] variable[t] assign[=] constant[1.993] if compare[name[nf] greater[>] constant[74]] begin[:] variable[t] assign[=] constant[1.9925] if compare[name[nf] greater[>] constant[75]] begin[:] variable[t] assign[=] constant[1.9921] if compare[name[nf] greater[>] constant[76]] begin[:] variable[t] assign[=] constant[1.9917] if compare[name[nf] greater[>] constant[77]] begin[:] variable[t] assign[=] constant[1.9913] if compare[name[nf] greater[>] constant[78]] begin[:] variable[t] assign[=] constant[1.9908] if compare[name[nf] greater[>] constant[79]] begin[:] variable[t] assign[=] constant[1.9905] if compare[name[nf] greater[>] constant[80]] begin[:] variable[t] assign[=] constant[1.9901] if compare[name[nf] greater[>] constant[81]] begin[:] variable[t] assign[=] constant[1.9897] if compare[name[nf] greater[>] constant[82]] begin[:] variable[t] assign[=] constant[1.9893] if compare[name[nf] greater[>] constant[83]] begin[:] variable[t] assign[=] constant[1.989] if compare[name[nf] greater[>] constant[84]] begin[:] variable[t] assign[=] constant[1.9886] if compare[name[nf] greater[>] constant[85]] begin[:] variable[t] assign[=] constant[1.9883] if compare[name[nf] greater[>] constant[86]] begin[:] variable[t] assign[=] constant[1.9879] if compare[name[nf] greater[>] constant[87]] begin[:] variable[t] assign[=] constant[1.9876] if compare[name[nf] greater[>] constant[88]] begin[:] variable[t] assign[=] constant[1.9873] if compare[name[nf] greater[>] constant[89]] begin[:] variable[t] assign[=] constant[1.987] if compare[name[nf] greater[>] constant[90]] begin[:] variable[t] assign[=] constant[1.9867] if compare[name[nf] greater[>] constant[91]] begin[:] variable[t] assign[=] constant[1.9864] if compare[name[nf] greater[>] constant[92]] begin[:] variable[t] assign[=] constant[1.9861] if compare[name[nf] greater[>] constant[93]] begin[:] variable[t] assign[=] constant[1.9858] if compare[name[nf] greater[>] constant[94]] begin[:] variable[t] assign[=] constant[1.9855] if compare[name[nf] greater[>] constant[95]] begin[:] variable[t] assign[=] constant[1.9852] if compare[name[nf] greater[>] constant[96]] begin[:] variable[t] assign[=] constant[1.985] if compare[name[nf] greater[>] constant[97]] begin[:] variable[t] assign[=] constant[1.9847] if compare[name[nf] greater[>] constant[98]] begin[:] variable[t] assign[=] constant[1.9845] if compare[name[nf] greater[>] constant[99]] begin[:] variable[t] assign[=] constant[1.9842] if compare[name[nf] greater[>] constant[100]] begin[:] variable[t] assign[=] constant[1.984] return[name[t]]
keyword[def] identifier[tcalc] ( identifier[nf] , identifier[p] ): literal[string] keyword[if] identifier[p] == literal[int] : keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[return] identifier[t] keyword[elif] identifier[p] == literal[int] : keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[if] identifier[nf] > literal[int] : identifier[t] = literal[int] keyword[return] identifier[t] keyword[return] identifier[t] keyword[else] : keyword[return] literal[int]
def tcalc(nf, p): """ t-table for nf degrees of freedom (95% confidence) """ # if p == 0.05: if nf > 2: t = 4.3027 # depends on [control=['if'], data=[]] if nf > 3: t = 3.1824 # depends on [control=['if'], data=[]] if nf > 4: t = 2.7765 # depends on [control=['if'], data=[]] if nf > 5: t = 2.5706 # depends on [control=['if'], data=[]] if nf > 6: t = 2.4469 # depends on [control=['if'], data=[]] if nf > 7: t = 2.3646 # depends on [control=['if'], data=[]] if nf > 8: t = 2.306 # depends on [control=['if'], data=[]] if nf > 9: t = 2.2622 # depends on [control=['if'], data=[]] if nf > 10: t = 2.2281 # depends on [control=['if'], data=[]] if nf > 11: t = 2.201 # depends on [control=['if'], data=[]] if nf > 12: t = 2.1788 # depends on [control=['if'], data=[]] if nf > 13: t = 2.1604 # depends on [control=['if'], data=[]] if nf > 14: t = 2.1448 # depends on [control=['if'], data=[]] if nf > 15: t = 2.1315 # depends on [control=['if'], data=[]] if nf > 16: t = 2.1199 # depends on [control=['if'], data=[]] if nf > 17: t = 2.1098 # depends on [control=['if'], data=[]] if nf > 18: t = 2.1009 # depends on [control=['if'], data=[]] if nf > 19: t = 2.093 # depends on [control=['if'], data=[]] if nf > 20: t = 2.086 # depends on [control=['if'], data=[]] if nf > 21: t = 2.0796 # depends on [control=['if'], data=[]] if nf > 22: t = 2.0739 # depends on [control=['if'], data=[]] if nf > 23: t = 2.0687 # depends on [control=['if'], data=[]] if nf > 24: t = 2.0639 # depends on [control=['if'], data=[]] if nf > 25: t = 2.0595 # depends on [control=['if'], data=[]] if nf > 26: t = 2.0555 # depends on [control=['if'], data=[]] if nf > 27: t = 2.0518 # depends on [control=['if'], data=[]] if nf > 28: t = 2.0484 # depends on [control=['if'], data=[]] if nf > 29: t = 2.0452 # depends on [control=['if'], data=[]] if nf > 30: t = 2.0423 # depends on [control=['if'], data=[]] if nf > 31: t = 2.0395 # depends on [control=['if'], data=[]] if nf > 32: t = 2.0369 # depends on [control=['if'], data=[]] if nf > 33: t = 2.0345 # depends on [control=['if'], data=[]] if nf > 34: t = 2.0322 # depends on [control=['if'], data=[]] if nf > 35: t = 2.0301 # depends on [control=['if'], data=[]] if nf > 36: t = 2.0281 # depends on [control=['if'], data=[]] if nf > 37: t = 2.0262 # depends on [control=['if'], data=[]] if nf > 38: t = 2.0244 # depends on [control=['if'], data=[]] if nf > 39: t = 2.0227 # depends on [control=['if'], data=[]] if nf > 40: t = 2.0211 # depends on [control=['if'], data=[]] if nf > 41: t = 2.0195 # depends on [control=['if'], data=[]] if nf > 42: t = 2.0181 # depends on [control=['if'], data=[]] if nf > 43: t = 2.0167 # depends on [control=['if'], data=[]] if nf > 44: t = 2.0154 # depends on [control=['if'], data=[]] if nf > 45: t = 2.0141 # depends on [control=['if'], data=[]] if nf > 46: t = 2.0129 # depends on [control=['if'], data=[]] if nf > 47: t = 2.0117 # depends on [control=['if'], data=[]] if nf > 48: t = 2.0106 # depends on [control=['if'], data=[]] if nf > 49: t = 2.0096 # depends on [control=['if'], data=[]] if nf > 50: t = 2.0086 # depends on [control=['if'], data=[]] if nf > 51: t = 2.0076 # depends on [control=['if'], data=[]] if nf > 52: t = 2.0066 # depends on [control=['if'], data=[]] if nf > 53: t = 2.0057 # depends on [control=['if'], data=[]] if nf > 54: t = 2.0049 # depends on [control=['if'], data=[]] if nf > 55: t = 2.004 # depends on [control=['if'], data=[]] if nf > 56: t = 2.0032 # depends on [control=['if'], data=[]] if nf > 57: t = 2.0025 # depends on [control=['if'], data=[]] if nf > 58: t = 2.0017 # depends on [control=['if'], data=[]] if nf > 59: t = 2.001 # depends on [control=['if'], data=[]] if nf > 60: t = 2.0003 # depends on [control=['if'], data=[]] if nf > 61: t = 1.9996 # depends on [control=['if'], data=[]] if nf > 62: t = 1.999 # depends on [control=['if'], data=[]] if nf > 63: t = 1.9983 # depends on [control=['if'], data=[]] if nf > 64: t = 1.9977 # depends on [control=['if'], data=[]] if nf > 65: t = 1.9971 # depends on [control=['if'], data=[]] if nf > 66: t = 1.9966 # depends on [control=['if'], data=[]] if nf > 67: t = 1.996 # depends on [control=['if'], data=[]] if nf > 68: t = 1.9955 # depends on [control=['if'], data=[]] if nf > 69: t = 1.9949 # depends on [control=['if'], data=[]] if nf > 70: t = 1.9944 # depends on [control=['if'], data=[]] if nf > 71: t = 1.9939 # depends on [control=['if'], data=[]] if nf > 72: t = 1.9935 # depends on [control=['if'], data=[]] if nf > 73: t = 1.993 # depends on [control=['if'], data=[]] if nf > 74: t = 1.9925 # depends on [control=['if'], data=[]] if nf > 75: t = 1.9921 # depends on [control=['if'], data=[]] if nf > 76: t = 1.9917 # depends on [control=['if'], data=[]] if nf > 77: t = 1.9913 # depends on [control=['if'], data=[]] if nf > 78: t = 1.9908 # depends on [control=['if'], data=[]] if nf > 79: t = 1.9905 # depends on [control=['if'], data=[]] if nf > 80: t = 1.9901 # depends on [control=['if'], data=[]] if nf > 81: t = 1.9897 # depends on [control=['if'], data=[]] if nf > 82: t = 1.9893 # depends on [control=['if'], data=[]] if nf > 83: t = 1.989 # depends on [control=['if'], data=[]] if nf > 84: t = 1.9886 # depends on [control=['if'], data=[]] if nf > 85: t = 1.9883 # depends on [control=['if'], data=[]] if nf > 86: t = 1.9879 # depends on [control=['if'], data=[]] if nf > 87: t = 1.9876 # depends on [control=['if'], data=[]] if nf > 88: t = 1.9873 # depends on [control=['if'], data=[]] if nf > 89: t = 1.987 # depends on [control=['if'], data=[]] if nf > 90: t = 1.9867 # depends on [control=['if'], data=[]] if nf > 91: t = 1.9864 # depends on [control=['if'], data=[]] if nf > 92: t = 1.9861 # depends on [control=['if'], data=[]] if nf > 93: t = 1.9858 # depends on [control=['if'], data=[]] if nf > 94: t = 1.9855 # depends on [control=['if'], data=[]] if nf > 95: t = 1.9852 # depends on [control=['if'], data=[]] if nf > 96: t = 1.985 # depends on [control=['if'], data=[]] if nf > 97: t = 1.9847 # depends on [control=['if'], data=[]] if nf > 98: t = 1.9845 # depends on [control=['if'], data=[]] if nf > 99: t = 1.9842 # depends on [control=['if'], data=[]] if nf > 100: t = 1.984 # depends on [control=['if'], data=[]] return t # depends on [control=['if'], data=[]] # elif p == 0.01: if nf > 2: t = 9.925 # depends on [control=['if'], data=[]] if nf > 3: t = 5.8408 # depends on [control=['if'], data=[]] if nf > 4: t = 4.6041 # depends on [control=['if'], data=[]] if nf > 5: t = 4.0321 # depends on [control=['if'], data=[]] if nf > 6: t = 3.7074 # depends on [control=['if'], data=[]] if nf > 7: t = 3.4995 # depends on [control=['if'], data=[]] if nf > 8: t = 3.3554 # depends on [control=['if'], data=[]] if nf > 9: t = 3.2498 # depends on [control=['if'], data=[]] if nf > 10: t = 3.1693 # depends on [control=['if'], data=[]] if nf > 11: t = 3.1058 # depends on [control=['if'], data=[]] if nf > 12: t = 3.0545 # depends on [control=['if'], data=[]] if nf > 13: t = 3.0123 # depends on [control=['if'], data=[]] if nf > 14: t = 2.9768 # depends on [control=['if'], data=[]] if nf > 15: t = 2.9467 # depends on [control=['if'], data=[]] if nf > 16: t = 2.9208 # depends on [control=['if'], data=[]] if nf > 17: t = 2.8982 # depends on [control=['if'], data=[]] if nf > 18: t = 2.8784 # depends on [control=['if'], data=[]] if nf > 19: t = 2.8609 # depends on [control=['if'], data=[]] if nf > 20: t = 2.8453 # depends on [control=['if'], data=[]] if nf > 21: t = 2.8314 # depends on [control=['if'], data=[]] if nf > 22: t = 2.8188 # depends on [control=['if'], data=[]] if nf > 23: t = 2.8073 # depends on [control=['if'], data=[]] if nf > 24: t = 2.797 # depends on [control=['if'], data=[]] if nf > 25: t = 2.7874 # depends on [control=['if'], data=[]] if nf > 26: t = 2.7787 # depends on [control=['if'], data=[]] if nf > 27: t = 2.7707 # depends on [control=['if'], data=[]] if nf > 28: t = 2.7633 # depends on [control=['if'], data=[]] if nf > 29: t = 2.7564 # depends on [control=['if'], data=[]] if nf > 30: t = 2.75 # depends on [control=['if'], data=[]] if nf > 31: t = 2.744 # depends on [control=['if'], data=[]] if nf > 32: t = 2.7385 # depends on [control=['if'], data=[]] if nf > 33: t = 2.7333 # depends on [control=['if'], data=[]] if nf > 34: t = 2.7284 # depends on [control=['if'], data=[]] if nf > 35: t = 2.7238 # depends on [control=['if'], data=[]] if nf > 36: t = 2.7195 # depends on [control=['if'], data=[]] if nf > 37: t = 2.7154 # depends on [control=['if'], data=[]] if nf > 38: t = 2.7116 # depends on [control=['if'], data=[]] if nf > 39: t = 2.7079 # depends on [control=['if'], data=[]] if nf > 40: t = 2.7045 # depends on [control=['if'], data=[]] if nf > 41: t = 2.7012 # depends on [control=['if'], data=[]] if nf > 42: t = 2.6981 # depends on [control=['if'], data=[]] if nf > 43: t = 2.6951 # depends on [control=['if'], data=[]] if nf > 44: t = 2.6923 # depends on [control=['if'], data=[]] if nf > 45: t = 2.6896 # depends on [control=['if'], data=[]] if nf > 46: t = 2.687 # depends on [control=['if'], data=[]] if nf > 47: t = 2.6846 # depends on [control=['if'], data=[]] if nf > 48: t = 2.6822 # depends on [control=['if'], data=[]] if nf > 49: t = 2.68 # depends on [control=['if'], data=[]] if nf > 50: t = 2.6778 # depends on [control=['if'], data=[]] if nf > 51: t = 2.6757 # depends on [control=['if'], data=[]] if nf > 52: t = 2.6737 # depends on [control=['if'], data=[]] if nf > 53: t = 2.6718 # depends on [control=['if'], data=[]] if nf > 54: t = 2.67 # depends on [control=['if'], data=[]] if nf > 55: t = 2.6682 # depends on [control=['if'], data=[]] if nf > 56: t = 2.6665 # depends on [control=['if'], data=[]] if nf > 57: t = 2.6649 # depends on [control=['if'], data=[]] if nf > 58: t = 2.6633 # depends on [control=['if'], data=[]] if nf > 59: t = 2.6618 # depends on [control=['if'], data=[]] if nf > 60: t = 2.6603 # depends on [control=['if'], data=[]] if nf > 61: t = 2.6589 # depends on [control=['if'], data=[]] if nf > 62: t = 2.6575 # depends on [control=['if'], data=[]] if nf > 63: t = 2.6561 # depends on [control=['if'], data=[]] if nf > 64: t = 2.6549 # depends on [control=['if'], data=[]] if nf > 65: t = 2.6536 # depends on [control=['if'], data=[]] if nf > 66: t = 2.6524 # depends on [control=['if'], data=[]] if nf > 67: t = 2.6512 # depends on [control=['if'], data=[]] if nf > 68: t = 2.6501 # depends on [control=['if'], data=[]] if nf > 69: t = 2.649 # depends on [control=['if'], data=[]] if nf > 70: t = 2.6479 # depends on [control=['if'], data=[]] if nf > 71: t = 2.6469 # depends on [control=['if'], data=[]] if nf > 72: t = 2.6458 # depends on [control=['if'], data=[]] if nf > 73: t = 2.6449 # depends on [control=['if'], data=[]] if nf > 74: t = 2.6439 # depends on [control=['if'], data=[]] if nf > 75: t = 2.643 # depends on [control=['if'], data=[]] if nf > 76: t = 2.6421 # depends on [control=['if'], data=[]] if nf > 77: t = 2.6412 # depends on [control=['if'], data=[]] if nf > 78: t = 2.6403 # depends on [control=['if'], data=[]] if nf > 79: t = 2.6395 # depends on [control=['if'], data=[]] if nf > 80: t = 2.6387 # depends on [control=['if'], data=[]] if nf > 81: t = 2.6379 # depends on [control=['if'], data=[]] if nf > 82: t = 2.6371 # depends on [control=['if'], data=[]] if nf > 83: t = 2.6364 # depends on [control=['if'], data=[]] if nf > 84: t = 2.6356 # depends on [control=['if'], data=[]] if nf > 85: t = 2.6349 # depends on [control=['if'], data=[]] if nf > 86: t = 2.6342 # depends on [control=['if'], data=[]] if nf > 87: t = 2.6335 # depends on [control=['if'], data=[]] if nf > 88: t = 2.6329 # depends on [control=['if'], data=[]] if nf > 89: t = 2.6322 # depends on [control=['if'], data=[]] if nf > 90: t = 2.6316 # depends on [control=['if'], data=[]] if nf > 91: t = 2.6309 # depends on [control=['if'], data=[]] if nf > 92: t = 2.6303 # depends on [control=['if'], data=[]] if nf > 93: t = 2.6297 # depends on [control=['if'], data=[]] if nf > 94: t = 2.6291 # depends on [control=['if'], data=[]] if nf > 95: t = 2.6286 # depends on [control=['if'], data=[]] if nf > 96: t = 2.628 # depends on [control=['if'], data=[]] if nf > 97: t = 2.6275 # depends on [control=['if'], data=[]] if nf > 98: t = 2.6269 # depends on [control=['if'], data=[]] if nf > 99: t = 2.6264 # depends on [control=['if'], data=[]] if nf > 100: t = 2.6259 # depends on [control=['if'], data=[]] return t return t # depends on [control=['if'], data=[]] else: return 0
def _use_color(msg, ansi_fmt, output_stream): ''' Based on :data:`~exhale.configs.alwaysColorize`, returns the colorized or non-colorized output when ``output_stream`` is not a TTY (e.g. redirecting to a file). **Parameters** ``msg`` (str) The message that is going to be printed by the caller of this method. ``ansi_fmt`` (str) The ANSI color format to use when coloring is supposed to happen. ``output_stream`` (file) Assumed to be either ``sys.stdout`` or ``sys.stderr``. **Return** ``str`` The message ``msg`` in color, or not, depending on both :data:`~exhale.configs.alwaysColorize` and whether or not the ``output_stream`` is a TTY. ''' if configs._on_rtd or (not configs.alwaysColorize and not output_stream.isatty()): log = msg else: log = colorize(msg, ansi_fmt) return log
def function[_use_color, parameter[msg, ansi_fmt, output_stream]]: constant[ Based on :data:`~exhale.configs.alwaysColorize`, returns the colorized or non-colorized output when ``output_stream`` is not a TTY (e.g. redirecting to a file). **Parameters** ``msg`` (str) The message that is going to be printed by the caller of this method. ``ansi_fmt`` (str) The ANSI color format to use when coloring is supposed to happen. ``output_stream`` (file) Assumed to be either ``sys.stdout`` or ``sys.stderr``. **Return** ``str`` The message ``msg`` in color, or not, depending on both :data:`~exhale.configs.alwaysColorize` and whether or not the ``output_stream`` is a TTY. ] if <ast.BoolOp object at 0x7da1b0668820> begin[:] variable[log] assign[=] name[msg] return[name[log]]
keyword[def] identifier[_use_color] ( identifier[msg] , identifier[ansi_fmt] , identifier[output_stream] ): literal[string] keyword[if] identifier[configs] . identifier[_on_rtd] keyword[or] ( keyword[not] identifier[configs] . identifier[alwaysColorize] keyword[and] keyword[not] identifier[output_stream] . identifier[isatty] ()): identifier[log] = identifier[msg] keyword[else] : identifier[log] = identifier[colorize] ( identifier[msg] , identifier[ansi_fmt] ) keyword[return] identifier[log]
def _use_color(msg, ansi_fmt, output_stream): """ Based on :data:`~exhale.configs.alwaysColorize`, returns the colorized or non-colorized output when ``output_stream`` is not a TTY (e.g. redirecting to a file). **Parameters** ``msg`` (str) The message that is going to be printed by the caller of this method. ``ansi_fmt`` (str) The ANSI color format to use when coloring is supposed to happen. ``output_stream`` (file) Assumed to be either ``sys.stdout`` or ``sys.stderr``. **Return** ``str`` The message ``msg`` in color, or not, depending on both :data:`~exhale.configs.alwaysColorize` and whether or not the ``output_stream`` is a TTY. """ if configs._on_rtd or (not configs.alwaysColorize and (not output_stream.isatty())): log = msg # depends on [control=['if'], data=[]] else: log = colorize(msg, ansi_fmt) return log
def systemInformationType13(): """SYSTEM INFORMATION TYPE 13 Section 9.1.43a""" a = L2PseudoLength(l2pLength=0x00) b = TpPd(pd=0x6) c = MessageType(mesType=0x0) # 00000000 d = Si13RestOctets() packet = a / b / c / d return packet
def function[systemInformationType13, parameter[]]: constant[SYSTEM INFORMATION TYPE 13 Section 9.1.43a] variable[a] assign[=] call[name[L2PseudoLength], parameter[]] variable[b] assign[=] call[name[TpPd], parameter[]] variable[c] assign[=] call[name[MessageType], parameter[]] variable[d] assign[=] call[name[Si13RestOctets], parameter[]] variable[packet] assign[=] binary_operation[binary_operation[binary_operation[name[a] / name[b]] / name[c]] / name[d]] return[name[packet]]
keyword[def] identifier[systemInformationType13] (): literal[string] identifier[a] = identifier[L2PseudoLength] ( identifier[l2pLength] = literal[int] ) identifier[b] = identifier[TpPd] ( identifier[pd] = literal[int] ) identifier[c] = identifier[MessageType] ( identifier[mesType] = literal[int] ) identifier[d] = identifier[Si13RestOctets] () identifier[packet] = identifier[a] / identifier[b] / identifier[c] / identifier[d] keyword[return] identifier[packet]
def systemInformationType13(): """SYSTEM INFORMATION TYPE 13 Section 9.1.43a""" a = L2PseudoLength(l2pLength=0) b = TpPd(pd=6) c = MessageType(mesType=0) # 00000000 d = Si13RestOctets() packet = a / b / c / d return packet
def moving_frequency(self, data_frame): """ This method returns moving frequency :param data_frame: the data frame :type data_frame: pandas.DataFrame :return diff_mov_freq: frequency :rtype diff_mov_freq: float """ f = [] for i in range(0, (data_frame.td[-1].astype('int') - self.window)): f.append(sum(data_frame.action_type[(data_frame.td >= i) & (data_frame.td < (i + self.window))] == 1) / float(self.window)) diff_mov_freq = (np.array(f[1:-1]) - np.array(f[0:-2])) / np.array(f[0:-2]) duration = math.ceil(data_frame.td[-1]) return diff_mov_freq, duration
def function[moving_frequency, parameter[self, data_frame]]: constant[ This method returns moving frequency :param data_frame: the data frame :type data_frame: pandas.DataFrame :return diff_mov_freq: frequency :rtype diff_mov_freq: float ] variable[f] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[call[call[name[data_frame].td][<ast.UnaryOp object at 0x7da1b1b620e0>].astype, parameter[constant[int]]] - name[self].window]]]] begin[:] call[name[f].append, parameter[binary_operation[call[name[sum], parameter[compare[call[name[data_frame].action_type][binary_operation[compare[name[data_frame].td greater_or_equal[>=] name[i]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[data_frame].td less[<] binary_operation[name[i] + name[self].window]]]] equal[==] constant[1]]]] / call[name[float], parameter[name[self].window]]]]] variable[diff_mov_freq] assign[=] binary_operation[binary_operation[call[name[np].array, parameter[call[name[f]][<ast.Slice object at 0x7da1b1b7d150>]]] - call[name[np].array, parameter[call[name[f]][<ast.Slice object at 0x7da1b1b7eb90>]]]] / call[name[np].array, parameter[call[name[f]][<ast.Slice object at 0x7da1b1b7d1b0>]]]] variable[duration] assign[=] call[name[math].ceil, parameter[call[name[data_frame].td][<ast.UnaryOp object at 0x7da1b1b7e350>]]] return[tuple[[<ast.Name object at 0x7da1b1b7fbe0>, <ast.Name object at 0x7da1b1b7cbe0>]]]
keyword[def] identifier[moving_frequency] ( identifier[self] , identifier[data_frame] ): literal[string] identifier[f] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ,( identifier[data_frame] . identifier[td] [- literal[int] ]. identifier[astype] ( literal[string] )- identifier[self] . identifier[window] )): identifier[f] . identifier[append] ( identifier[sum] ( identifier[data_frame] . identifier[action_type] [( identifier[data_frame] . identifier[td] >= identifier[i] )&( identifier[data_frame] . identifier[td] <( identifier[i] + identifier[self] . identifier[window] ))]== literal[int] )/ identifier[float] ( identifier[self] . identifier[window] )) identifier[diff_mov_freq] =( identifier[np] . identifier[array] ( identifier[f] [ literal[int] :- literal[int] ])- identifier[np] . identifier[array] ( identifier[f] [ literal[int] :- literal[int] ]))/ identifier[np] . identifier[array] ( identifier[f] [ literal[int] :- literal[int] ]) identifier[duration] = identifier[math] . identifier[ceil] ( identifier[data_frame] . identifier[td] [- literal[int] ]) keyword[return] identifier[diff_mov_freq] , identifier[duration]
def moving_frequency(self, data_frame): """ This method returns moving frequency :param data_frame: the data frame :type data_frame: pandas.DataFrame :return diff_mov_freq: frequency :rtype diff_mov_freq: float """ f = [] for i in range(0, data_frame.td[-1].astype('int') - self.window): f.append(sum(data_frame.action_type[(data_frame.td >= i) & (data_frame.td < i + self.window)] == 1) / float(self.window)) # depends on [control=['for'], data=['i']] diff_mov_freq = (np.array(f[1:-1]) - np.array(f[0:-2])) / np.array(f[0:-2]) duration = math.ceil(data_frame.td[-1]) return (diff_mov_freq, duration)
def print_hex(data): """Debugging method to print out frames in hex.""" hex_msg = "" for c in data: hex_msg += "\\x" + format(c, "02x") _LOGGER.debug(hex_msg)
def function[print_hex, parameter[data]]: constant[Debugging method to print out frames in hex.] variable[hex_msg] assign[=] constant[] for taget[name[c]] in starred[name[data]] begin[:] <ast.AugAssign object at 0x7da2041d86d0> call[name[_LOGGER].debug, parameter[name[hex_msg]]]
keyword[def] identifier[print_hex] ( identifier[data] ): literal[string] identifier[hex_msg] = literal[string] keyword[for] identifier[c] keyword[in] identifier[data] : identifier[hex_msg] += literal[string] + identifier[format] ( identifier[c] , literal[string] ) identifier[_LOGGER] . identifier[debug] ( identifier[hex_msg] )
def print_hex(data): """Debugging method to print out frames in hex.""" hex_msg = '' for c in data: hex_msg += '\\x' + format(c, '02x') # depends on [control=['for'], data=['c']] _LOGGER.debug(hex_msg)
def get_object_executor(obj, green_mode=None): """Returns the proper executor for the given object. If the object has *_executors* and *_green_mode* members it returns the submit callable for the executor corresponding to the green_mode. Otherwise it returns the global executor for the given green_mode. Note: *None* is a valid object. :returns: submit callable""" # Get green mode if green_mode is None: green_mode = get_object_green_mode(obj) # Get executor executor = None if hasattr(obj, '_executors'): executor = obj._executors.get(green_mode, None) if executor is None: executor = get_executor(green_mode) # Get submitter return executor
def function[get_object_executor, parameter[obj, green_mode]]: constant[Returns the proper executor for the given object. If the object has *_executors* and *_green_mode* members it returns the submit callable for the executor corresponding to the green_mode. Otherwise it returns the global executor for the given green_mode. Note: *None* is a valid object. :returns: submit callable] if compare[name[green_mode] is constant[None]] begin[:] variable[green_mode] assign[=] call[name[get_object_green_mode], parameter[name[obj]]] variable[executor] assign[=] constant[None] if call[name[hasattr], parameter[name[obj], constant[_executors]]] begin[:] variable[executor] assign[=] call[name[obj]._executors.get, parameter[name[green_mode], constant[None]]] if compare[name[executor] is constant[None]] begin[:] variable[executor] assign[=] call[name[get_executor], parameter[name[green_mode]]] return[name[executor]]
keyword[def] identifier[get_object_executor] ( identifier[obj] , identifier[green_mode] = keyword[None] ): literal[string] keyword[if] identifier[green_mode] keyword[is] keyword[None] : identifier[green_mode] = identifier[get_object_green_mode] ( identifier[obj] ) identifier[executor] = keyword[None] keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ): identifier[executor] = identifier[obj] . identifier[_executors] . identifier[get] ( identifier[green_mode] , keyword[None] ) keyword[if] identifier[executor] keyword[is] keyword[None] : identifier[executor] = identifier[get_executor] ( identifier[green_mode] ) keyword[return] identifier[executor]
def get_object_executor(obj, green_mode=None): """Returns the proper executor for the given object. If the object has *_executors* and *_green_mode* members it returns the submit callable for the executor corresponding to the green_mode. Otherwise it returns the global executor for the given green_mode. Note: *None* is a valid object. :returns: submit callable""" # Get green mode if green_mode is None: green_mode = get_object_green_mode(obj) # depends on [control=['if'], data=['green_mode']] # Get executor executor = None if hasattr(obj, '_executors'): executor = obj._executors.get(green_mode, None) # depends on [control=['if'], data=[]] if executor is None: executor = get_executor(green_mode) # depends on [control=['if'], data=['executor']] # Get submitter return executor
def make_model(self, use_name_as_key=False, include_mods=False, include_complexes=False): """Assemble the graph from the assembler's list of INDRA Statements. Parameters ---------- use_name_as_key : boolean If True, uses the name of the agent as the key to the nodes in the network. If False (default) uses the matches_key() of the agent. include_mods : boolean If True, adds Modification statements into the graph as directed edges. Default is False. include_complexes : boolean If True, creates two edges (in both directions) between all pairs of nodes in Complex statements. Default is False. """ self.graph = nx.DiGraph() self._use_name_as_key = use_name_as_key for st in self.stmts: support_all = len(st.evidence) support_pmid = len(set([ev.pmid for ev in st.evidence if ev.pmid is not None])) attr = {'polarity': 'unknown', 'support_all': support_all, 'support_pmid': support_pmid} if isinstance(st, RegulateActivity): attr['polarity'] = ('positive' if st.is_activation else 'negative') self._add_node_edge(st.subj, st.obj, attr) elif include_mods and isinstance(st, Modification): self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_mods and \ (isinstance(st, Gap) or isinstance(st, DecreaseAmount)): attr['polarity'] = 'negative' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_mods and \ (isinstance(st, Gef) or isinstance(st, IncreaseAmount)): attr['polarity'] = 'positive' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_complexes and isinstance(st, Complex): # Create s->t edges between all possible pairs of complex # members for node1, node2 in itertools.permutations(st.members, 2): self._add_node_edge(node1, node2, attr)
def function[make_model, parameter[self, use_name_as_key, include_mods, include_complexes]]: constant[Assemble the graph from the assembler's list of INDRA Statements. Parameters ---------- use_name_as_key : boolean If True, uses the name of the agent as the key to the nodes in the network. If False (default) uses the matches_key() of the agent. include_mods : boolean If True, adds Modification statements into the graph as directed edges. Default is False. include_complexes : boolean If True, creates two edges (in both directions) between all pairs of nodes in Complex statements. Default is False. ] name[self].graph assign[=] call[name[nx].DiGraph, parameter[]] name[self]._use_name_as_key assign[=] name[use_name_as_key] for taget[name[st]] in starred[name[self].stmts] begin[:] variable[support_all] assign[=] call[name[len], parameter[name[st].evidence]] variable[support_pmid] assign[=] call[name[len], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da18ede7370>]]]] variable[attr] assign[=] dictionary[[<ast.Constant object at 0x7da18ede4190>, <ast.Constant object at 0x7da18ede5ed0>, <ast.Constant object at 0x7da18ede63b0>], [<ast.Constant object at 0x7da18ede7460>, <ast.Name object at 0x7da18ede6f50>, <ast.Name object at 0x7da18ede4f40>]] if call[name[isinstance], parameter[name[st], name[RegulateActivity]]] begin[:] call[name[attr]][constant[polarity]] assign[=] <ast.IfExp object at 0x7da18ede4f70> call[name[self]._add_node_edge, parameter[name[st].subj, name[st].obj, name[attr]]]
keyword[def] identifier[make_model] ( identifier[self] , identifier[use_name_as_key] = keyword[False] , identifier[include_mods] = keyword[False] , identifier[include_complexes] = keyword[False] ): literal[string] identifier[self] . identifier[graph] = identifier[nx] . identifier[DiGraph] () identifier[self] . identifier[_use_name_as_key] = identifier[use_name_as_key] keyword[for] identifier[st] keyword[in] identifier[self] . identifier[stmts] : identifier[support_all] = identifier[len] ( identifier[st] . identifier[evidence] ) identifier[support_pmid] = identifier[len] ( identifier[set] ([ identifier[ev] . identifier[pmid] keyword[for] identifier[ev] keyword[in] identifier[st] . identifier[evidence] keyword[if] identifier[ev] . identifier[pmid] keyword[is] keyword[not] keyword[None] ])) identifier[attr] ={ literal[string] : literal[string] , literal[string] : identifier[support_all] , literal[string] : identifier[support_pmid] } keyword[if] identifier[isinstance] ( identifier[st] , identifier[RegulateActivity] ): identifier[attr] [ literal[string] ]=( literal[string] keyword[if] identifier[st] . identifier[is_activation] keyword[else] literal[string] ) identifier[self] . identifier[_add_node_edge] ( identifier[st] . identifier[subj] , identifier[st] . identifier[obj] , identifier[attr] ) keyword[elif] identifier[include_mods] keyword[and] identifier[isinstance] ( identifier[st] , identifier[Modification] ): identifier[self] . identifier[_add_node_edge] ( identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[attr] ) keyword[elif] identifier[include_mods] keyword[and] ( identifier[isinstance] ( identifier[st] , identifier[Gap] ) keyword[or] identifier[isinstance] ( identifier[st] , identifier[DecreaseAmount] )): identifier[attr] [ literal[string] ]= literal[string] identifier[self] . identifier[_add_node_edge] ( identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[attr] ) keyword[elif] identifier[include_mods] keyword[and] ( identifier[isinstance] ( identifier[st] , identifier[Gef] ) keyword[or] identifier[isinstance] ( identifier[st] , identifier[IncreaseAmount] )): identifier[attr] [ literal[string] ]= literal[string] identifier[self] . identifier[_add_node_edge] ( identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[st] . identifier[agent_list] ()[ literal[int] ], identifier[attr] ) keyword[elif] identifier[include_complexes] keyword[and] identifier[isinstance] ( identifier[st] , identifier[Complex] ): keyword[for] identifier[node1] , identifier[node2] keyword[in] identifier[itertools] . identifier[permutations] ( identifier[st] . identifier[members] , literal[int] ): identifier[self] . identifier[_add_node_edge] ( identifier[node1] , identifier[node2] , identifier[attr] )
def make_model(self, use_name_as_key=False, include_mods=False, include_complexes=False): """Assemble the graph from the assembler's list of INDRA Statements. Parameters ---------- use_name_as_key : boolean If True, uses the name of the agent as the key to the nodes in the network. If False (default) uses the matches_key() of the agent. include_mods : boolean If True, adds Modification statements into the graph as directed edges. Default is False. include_complexes : boolean If True, creates two edges (in both directions) between all pairs of nodes in Complex statements. Default is False. """ self.graph = nx.DiGraph() self._use_name_as_key = use_name_as_key for st in self.stmts: support_all = len(st.evidence) support_pmid = len(set([ev.pmid for ev in st.evidence if ev.pmid is not None])) attr = {'polarity': 'unknown', 'support_all': support_all, 'support_pmid': support_pmid} if isinstance(st, RegulateActivity): attr['polarity'] = 'positive' if st.is_activation else 'negative' self._add_node_edge(st.subj, st.obj, attr) # depends on [control=['if'], data=[]] elif include_mods and isinstance(st, Modification): self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) # depends on [control=['if'], data=[]] elif include_mods and (isinstance(st, Gap) or isinstance(st, DecreaseAmount)): attr['polarity'] = 'negative' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) # depends on [control=['if'], data=[]] elif include_mods and (isinstance(st, Gef) or isinstance(st, IncreaseAmount)): attr['polarity'] = 'positive' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) # depends on [control=['if'], data=[]] elif include_complexes and isinstance(st, Complex): # Create s->t edges between all possible pairs of complex # members for (node1, node2) in itertools.permutations(st.members, 2): self._add_node_edge(node1, node2, attr) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['st']]
def config_start(args): '''Invoke a task (method configuration), on given entity in given space''' # Try to use call caching (job avoidance)? Flexibly accept range of answers cache = getattr(args, "cache", True) cache = cache is True or (cache.lower() in ["y", "true", "yes", "t", "1"]) if not args.namespace: args.namespace = fcconfig.method_ns if not args.namespace: raise RuntimeError("namespace not provided, or configured by default") r = fapi.create_submission(args.project, args.workspace,args.namespace, args.config, args.entity, args.entity_type, args.expression, use_callcache=cache) fapi._check_response_code(r, 201) id = r.json()['submissionId'] return ("Started {0}/{1} in {2}/{3}: id={4}".format( args.namespace, args.config, args.project, args.workspace, id)), id
def function[config_start, parameter[args]]: constant[Invoke a task (method configuration), on given entity in given space] variable[cache] assign[=] call[name[getattr], parameter[name[args], constant[cache], constant[True]]] variable[cache] assign[=] <ast.BoolOp object at 0x7da1b1a2d0c0> if <ast.UnaryOp object at 0x7da1b1a2f2e0> begin[:] name[args].namespace assign[=] name[fcconfig].method_ns if <ast.UnaryOp object at 0x7da1b1a2cdc0> begin[:] <ast.Raise object at 0x7da1b1a2ee00> variable[r] assign[=] call[name[fapi].create_submission, parameter[name[args].project, name[args].workspace, name[args].namespace, name[args].config, name[args].entity, name[args].entity_type, name[args].expression]] call[name[fapi]._check_response_code, parameter[name[r], constant[201]]] variable[id] assign[=] call[call[name[r].json, parameter[]]][constant[submissionId]] return[tuple[[<ast.Call object at 0x7da1b1a44880>, <ast.Name object at 0x7da1b1a44130>]]]
keyword[def] identifier[config_start] ( identifier[args] ): literal[string] identifier[cache] = identifier[getattr] ( identifier[args] , literal[string] , keyword[True] ) identifier[cache] = identifier[cache] keyword[is] keyword[True] keyword[or] ( identifier[cache] . identifier[lower] () keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[if] keyword[not] identifier[args] . identifier[namespace] : identifier[args] . identifier[namespace] = identifier[fcconfig] . identifier[method_ns] keyword[if] keyword[not] identifier[args] . identifier[namespace] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[r] = identifier[fapi] . identifier[create_submission] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] , identifier[args] . identifier[namespace] , identifier[args] . identifier[config] , identifier[args] . identifier[entity] , identifier[args] . identifier[entity_type] , identifier[args] . identifier[expression] , identifier[use_callcache] = identifier[cache] ) identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] ) identifier[id] = identifier[r] . identifier[json] ()[ literal[string] ] keyword[return] ( literal[string] . identifier[format] ( identifier[args] . identifier[namespace] , identifier[args] . identifier[config] , identifier[args] . identifier[project] , identifier[args] . identifier[workspace] , identifier[id] )), identifier[id]
def config_start(args): """Invoke a task (method configuration), on given entity in given space""" # Try to use call caching (job avoidance)? Flexibly accept range of answers cache = getattr(args, 'cache', True) cache = cache is True or cache.lower() in ['y', 'true', 'yes', 't', '1'] if not args.namespace: args.namespace = fcconfig.method_ns # depends on [control=['if'], data=[]] if not args.namespace: raise RuntimeError('namespace not provided, or configured by default') # depends on [control=['if'], data=[]] r = fapi.create_submission(args.project, args.workspace, args.namespace, args.config, args.entity, args.entity_type, args.expression, use_callcache=cache) fapi._check_response_code(r, 201) id = r.json()['submissionId'] return ('Started {0}/{1} in {2}/{3}: id={4}'.format(args.namespace, args.config, args.project, args.workspace, id), id)
def sql_reset(app, style, connection): "Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module." return sql_delete(app, style, connection) + sql_all(app, style, connection)
def function[sql_reset, parameter[app, style, connection]]: constant[Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module.] return[binary_operation[call[name[sql_delete], parameter[name[app], name[style], name[connection]]] + call[name[sql_all], parameter[name[app], name[style], name[connection]]]]]
keyword[def] identifier[sql_reset] ( identifier[app] , identifier[style] , identifier[connection] ): literal[string] keyword[return] identifier[sql_delete] ( identifier[app] , identifier[style] , identifier[connection] )+ identifier[sql_all] ( identifier[app] , identifier[style] , identifier[connection] )
def sql_reset(app, style, connection): """Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module.""" return sql_delete(app, style, connection) + sql_all(app, style, connection)
def _get_intermediate_file(self, name, machine_and_compiler_dependent=True, binary=False, fp=True): """ Create or open intermediate file (may be used for caching). Will replace files older than kernel file, machine file or kerncraft version. :param machine_and_compiler_dependent: set to False if file content does not depend on machine file or compiler settings :param fp: if False, will only return file name, not file object :paarm binary: if True, use binary mode for file access :return: (file object or file name, boolean if already existent and up-to-date) """ if self._filename: base_name = os.path.join(os.path.dirname(self._filename), '.' + os.path.basename(self._filename) + '_kerncraft') else: base_name = tempfile.mkdtemp() if not self._keep_intermediates: # Remove directory and all content up on program exit atexit.register(shutil.rmtree, base_name) if machine_and_compiler_dependent: compiler, compiler_args = self._machine.get_compiler() compiler_args = '_'.join(compiler_args).replace('/', '') base_name += '/{}/{}/{}/'.format( self._machine.get_identifier(), compiler, compiler_args) # Create dirs recursively os.makedirs(base_name, exist_ok=True) # Build actual file path file_path = os.path.join(base_name, name) already_exists = False # Check if file exists and is still fresh if os.path.exists(file_path): file_modified = datetime.utcfromtimestamp(os.stat(file_path).st_mtime) if (file_modified < self._machine.get_last_modified_datetime() or file_modified < kerncraft.get_last_modified_datetime() or (self._filename and file_modified < datetime.utcfromtimestamp(os.stat(self._filename).st_mtime))): os.remove(file_path) else: already_exists = True if fp: if already_exists: mode = 'r+' else: mode = 'w' if binary: mode += 'b' f = open(file_path, mode) return f, already_exists else: return reduce_path(file_path), already_exists
def function[_get_intermediate_file, parameter[self, name, machine_and_compiler_dependent, binary, fp]]: constant[ Create or open intermediate file (may be used for caching). Will replace files older than kernel file, machine file or kerncraft version. :param machine_and_compiler_dependent: set to False if file content does not depend on machine file or compiler settings :param fp: if False, will only return file name, not file object :paarm binary: if True, use binary mode for file access :return: (file object or file name, boolean if already existent and up-to-date) ] if name[self]._filename begin[:] variable[base_name] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[self]._filename]], binary_operation[binary_operation[constant[.] + call[name[os].path.basename, parameter[name[self]._filename]]] + constant[_kerncraft]]]] if <ast.UnaryOp object at 0x7da18f00e3b0> begin[:] call[name[atexit].register, parameter[name[shutil].rmtree, name[base_name]]] if name[machine_and_compiler_dependent] begin[:] <ast.Tuple object at 0x7da18bc72cb0> assign[=] call[name[self]._machine.get_compiler, parameter[]] variable[compiler_args] assign[=] call[call[constant[_].join, parameter[name[compiler_args]]].replace, parameter[constant[/], constant[]]] <ast.AugAssign object at 0x7da18bc71420> call[name[os].makedirs, parameter[name[base_name]]] variable[file_path] assign[=] call[name[os].path.join, parameter[name[base_name], name[name]]] variable[already_exists] assign[=] constant[False] if call[name[os].path.exists, parameter[name[file_path]]] begin[:] variable[file_modified] assign[=] call[name[datetime].utcfromtimestamp, parameter[call[name[os].stat, parameter[name[file_path]]].st_mtime]] if <ast.BoolOp object at 0x7da18bc73f40> begin[:] call[name[os].remove, parameter[name[file_path]]] if name[fp] begin[:] if name[already_exists] begin[:] variable[mode] assign[=] constant[r+] if name[binary] begin[:] <ast.AugAssign object at 0x7da18bc72740> variable[f] assign[=] call[name[open], parameter[name[file_path], name[mode]]] return[tuple[[<ast.Name object at 0x7da1b26af160>, <ast.Name object at 0x7da1b26ae170>]]]
keyword[def] identifier[_get_intermediate_file] ( identifier[self] , identifier[name] , identifier[machine_and_compiler_dependent] = keyword[True] , identifier[binary] = keyword[False] , identifier[fp] = keyword[True] ): literal[string] keyword[if] identifier[self] . identifier[_filename] : identifier[base_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[_filename] ), literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[_filename] )+ literal[string] ) keyword[else] : identifier[base_name] = identifier[tempfile] . identifier[mkdtemp] () keyword[if] keyword[not] identifier[self] . identifier[_keep_intermediates] : identifier[atexit] . identifier[register] ( identifier[shutil] . identifier[rmtree] , identifier[base_name] ) keyword[if] identifier[machine_and_compiler_dependent] : identifier[compiler] , identifier[compiler_args] = identifier[self] . identifier[_machine] . identifier[get_compiler] () identifier[compiler_args] = literal[string] . identifier[join] ( identifier[compiler_args] ). identifier[replace] ( literal[string] , literal[string] ) identifier[base_name] += literal[string] . identifier[format] ( identifier[self] . identifier[_machine] . identifier[get_identifier] (), identifier[compiler] , identifier[compiler_args] ) identifier[os] . identifier[makedirs] ( identifier[base_name] , identifier[exist_ok] = keyword[True] ) identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_name] , identifier[name] ) identifier[already_exists] = keyword[False] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_path] ): identifier[file_modified] = identifier[datetime] . identifier[utcfromtimestamp] ( identifier[os] . identifier[stat] ( identifier[file_path] ). identifier[st_mtime] ) keyword[if] ( identifier[file_modified] < identifier[self] . identifier[_machine] . identifier[get_last_modified_datetime] () keyword[or] identifier[file_modified] < identifier[kerncraft] . identifier[get_last_modified_datetime] () keyword[or] ( identifier[self] . identifier[_filename] keyword[and] identifier[file_modified] < identifier[datetime] . identifier[utcfromtimestamp] ( identifier[os] . identifier[stat] ( identifier[self] . identifier[_filename] ). identifier[st_mtime] ))): identifier[os] . identifier[remove] ( identifier[file_path] ) keyword[else] : identifier[already_exists] = keyword[True] keyword[if] identifier[fp] : keyword[if] identifier[already_exists] : identifier[mode] = literal[string] keyword[else] : identifier[mode] = literal[string] keyword[if] identifier[binary] : identifier[mode] += literal[string] identifier[f] = identifier[open] ( identifier[file_path] , identifier[mode] ) keyword[return] identifier[f] , identifier[already_exists] keyword[else] : keyword[return] identifier[reduce_path] ( identifier[file_path] ), identifier[already_exists]
def _get_intermediate_file(self, name, machine_and_compiler_dependent=True, binary=False, fp=True): """ Create or open intermediate file (may be used for caching). Will replace files older than kernel file, machine file or kerncraft version. :param machine_and_compiler_dependent: set to False if file content does not depend on machine file or compiler settings :param fp: if False, will only return file name, not file object :paarm binary: if True, use binary mode for file access :return: (file object or file name, boolean if already existent and up-to-date) """ if self._filename: base_name = os.path.join(os.path.dirname(self._filename), '.' + os.path.basename(self._filename) + '_kerncraft') # depends on [control=['if'], data=[]] else: base_name = tempfile.mkdtemp() if not self._keep_intermediates: # Remove directory and all content up on program exit atexit.register(shutil.rmtree, base_name) # depends on [control=['if'], data=[]] if machine_and_compiler_dependent: (compiler, compiler_args) = self._machine.get_compiler() compiler_args = '_'.join(compiler_args).replace('/', '') base_name += '/{}/{}/{}/'.format(self._machine.get_identifier(), compiler, compiler_args) # depends on [control=['if'], data=[]] # Create dirs recursively os.makedirs(base_name, exist_ok=True) # Build actual file path file_path = os.path.join(base_name, name) already_exists = False # Check if file exists and is still fresh if os.path.exists(file_path): file_modified = datetime.utcfromtimestamp(os.stat(file_path).st_mtime) if file_modified < self._machine.get_last_modified_datetime() or file_modified < kerncraft.get_last_modified_datetime() or (self._filename and file_modified < datetime.utcfromtimestamp(os.stat(self._filename).st_mtime)): os.remove(file_path) # depends on [control=['if'], data=[]] else: already_exists = True # depends on [control=['if'], data=[]] if fp: if already_exists: mode = 'r+' # depends on [control=['if'], data=[]] else: mode = 'w' if binary: mode += 'b' # depends on [control=['if'], data=[]] f = open(file_path, mode) return (f, already_exists) # depends on [control=['if'], data=[]] else: return (reduce_path(file_path), already_exists)
def create_api_model(restApiId, modelName, modelDescription, schema, contentType='application/json', region=None, key=None, keyid=None, profile=None): ''' Create a new model in a given API with a given schema, currently only contentType supported is 'application/json' CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_model restApiId modelName modelDescription '<schema>' 'content-type' ''' try: schema_json = salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) model = conn.create_model(restApiId=restApiId, name=modelName, description=modelDescription, schema=schema_json, contentType=contentType) return {'created': True, 'model': _convert_datetime_str(model)} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def function[create_api_model, parameter[restApiId, modelName, modelDescription, schema, contentType, region, key, keyid, profile]]: constant[ Create a new model in a given API with a given schema, currently only contentType supported is 'application/json' CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_model restApiId modelName modelDescription '<schema>' 'content-type' ] <ast.Try object at 0x7da1b20b95a0>
keyword[def] identifier[create_api_model] ( identifier[restApiId] , identifier[modelName] , identifier[modelDescription] , identifier[schema] , identifier[contentType] = literal[string] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] keyword[try] : identifier[schema_json] = identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[schema] ) keyword[if] identifier[isinstance] ( identifier[schema] , identifier[dict] ) keyword[else] identifier[schema] identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) identifier[model] = identifier[conn] . identifier[create_model] ( identifier[restApiId] = identifier[restApiId] , identifier[name] = identifier[modelName] , identifier[description] = identifier[modelDescription] , identifier[schema] = identifier[schema_json] , identifier[contentType] = identifier[contentType] ) keyword[return] { literal[string] : keyword[True] , literal[string] : identifier[_convert_datetime_str] ( identifier[model] )} keyword[except] identifier[ClientError] keyword[as] identifier[e] : keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
def create_api_model(restApiId, modelName, modelDescription, schema, contentType='application/json', region=None, key=None, keyid=None, profile=None): """ Create a new model in a given API with a given schema, currently only contentType supported is 'application/json' CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_model restApiId modelName modelDescription '<schema>' 'content-type' """ try: schema_json = salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) model = conn.create_model(restApiId=restApiId, name=modelName, description=modelDescription, schema=schema_json, contentType=contentType) return {'created': True, 'model': _convert_datetime_str(model)} # depends on [control=['try'], data=[]] except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
def has_transition(self, state): """ Lookup if any transition exists from current model state using current method """ if state in self.transitions: return True if '*' in self.transitions: return True if '+' in self.transitions and self.transitions['+'].target != state: return True return False
def function[has_transition, parameter[self, state]]: constant[ Lookup if any transition exists from current model state using current method ] if compare[name[state] in name[self].transitions] begin[:] return[constant[True]] if compare[constant[*] in name[self].transitions] begin[:] return[constant[True]] if <ast.BoolOp object at 0x7da1b1eefa60> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[has_transition] ( identifier[self] , identifier[state] ): literal[string] keyword[if] identifier[state] keyword[in] identifier[self] . identifier[transitions] : keyword[return] keyword[True] keyword[if] literal[string] keyword[in] identifier[self] . identifier[transitions] : keyword[return] keyword[True] keyword[if] literal[string] keyword[in] identifier[self] . identifier[transitions] keyword[and] identifier[self] . identifier[transitions] [ literal[string] ]. identifier[target] != identifier[state] : keyword[return] keyword[True] keyword[return] keyword[False]
def has_transition(self, state): """ Lookup if any transition exists from current model state using current method """ if state in self.transitions: return True # depends on [control=['if'], data=[]] if '*' in self.transitions: return True # depends on [control=['if'], data=[]] if '+' in self.transitions and self.transitions['+'].target != state: return True # depends on [control=['if'], data=[]] return False
def get(self, key, filepath): """Get configuration parameter. Reads 'key' configuration parameter from the configuration file given in 'filepath'. Configuration parameter in 'key' must follow the schema <section>.<option> . :param key: key to get :param filepath: configuration file """ if not filepath: raise RuntimeError("Configuration file not given") if not self.__check_config_key(key): raise RuntimeError("%s parameter does not exists" % key) if not os.path.isfile(filepath): raise RuntimeError("%s config file does not exist" % filepath) section, option = key.split('.') config = configparser.SafeConfigParser() config.read(filepath) try: option = config.get(section, option) self.display('config.tmpl', key=key, option=option) except (configparser.NoSectionError, configparser.NoOptionError): pass return CMD_SUCCESS
def function[get, parameter[self, key, filepath]]: constant[Get configuration parameter. Reads 'key' configuration parameter from the configuration file given in 'filepath'. Configuration parameter in 'key' must follow the schema <section>.<option> . :param key: key to get :param filepath: configuration file ] if <ast.UnaryOp object at 0x7da1b0efb430> begin[:] <ast.Raise object at 0x7da1b0efa0e0> if <ast.UnaryOp object at 0x7da1b0ef87c0> begin[:] <ast.Raise object at 0x7da1b0efb9d0> if <ast.UnaryOp object at 0x7da1b0efaa10> begin[:] <ast.Raise object at 0x7da1b0efb700> <ast.Tuple object at 0x7da1b0efb040> assign[=] call[name[key].split, parameter[constant[.]]] variable[config] assign[=] call[name[configparser].SafeConfigParser, parameter[]] call[name[config].read, parameter[name[filepath]]] <ast.Try object at 0x7da1b0efadd0> return[name[CMD_SUCCESS]]
keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[filepath] ): literal[string] keyword[if] keyword[not] identifier[filepath] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[__check_config_key] ( identifier[key] ): keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[key] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filepath] ): keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[filepath] ) identifier[section] , identifier[option] = identifier[key] . identifier[split] ( literal[string] ) identifier[config] = identifier[configparser] . identifier[SafeConfigParser] () identifier[config] . identifier[read] ( identifier[filepath] ) keyword[try] : identifier[option] = identifier[config] . identifier[get] ( identifier[section] , identifier[option] ) identifier[self] . identifier[display] ( literal[string] , identifier[key] = identifier[key] , identifier[option] = identifier[option] ) keyword[except] ( identifier[configparser] . identifier[NoSectionError] , identifier[configparser] . identifier[NoOptionError] ): keyword[pass] keyword[return] identifier[CMD_SUCCESS]
def get(self, key, filepath): """Get configuration parameter. Reads 'key' configuration parameter from the configuration file given in 'filepath'. Configuration parameter in 'key' must follow the schema <section>.<option> . :param key: key to get :param filepath: configuration file """ if not filepath: raise RuntimeError('Configuration file not given') # depends on [control=['if'], data=[]] if not self.__check_config_key(key): raise RuntimeError('%s parameter does not exists' % key) # depends on [control=['if'], data=[]] if not os.path.isfile(filepath): raise RuntimeError('%s config file does not exist' % filepath) # depends on [control=['if'], data=[]] (section, option) = key.split('.') config = configparser.SafeConfigParser() config.read(filepath) try: option = config.get(section, option) self.display('config.tmpl', key=key, option=option) # depends on [control=['try'], data=[]] except (configparser.NoSectionError, configparser.NoOptionError): pass # depends on [control=['except'], data=[]] return CMD_SUCCESS
def option_in_select(browser, select_name, option): """ Returns the Element specified by @option or None Looks at the real <select> not the select2 widget, since that doesn't create the DOM until we click on it. """ select = find_field(browser, 'select', select_name) assert select, "Cannot find a '{}' select.".format(select_name) try: return select.find_element_by_xpath(str( './/option[normalize-space(text())=%s]' % string_literal(option))) except NoSuchElementException: return None
def function[option_in_select, parameter[browser, select_name, option]]: constant[ Returns the Element specified by @option or None Looks at the real <select> not the select2 widget, since that doesn't create the DOM until we click on it. ] variable[select] assign[=] call[name[find_field], parameter[name[browser], constant[select], name[select_name]]] assert[name[select]] <ast.Try object at 0x7da18c4cec80>
keyword[def] identifier[option_in_select] ( identifier[browser] , identifier[select_name] , identifier[option] ): literal[string] identifier[select] = identifier[find_field] ( identifier[browser] , literal[string] , identifier[select_name] ) keyword[assert] identifier[select] , literal[string] . identifier[format] ( identifier[select_name] ) keyword[try] : keyword[return] identifier[select] . identifier[find_element_by_xpath] ( identifier[str] ( literal[string] % identifier[string_literal] ( identifier[option] ))) keyword[except] identifier[NoSuchElementException] : keyword[return] keyword[None]
def option_in_select(browser, select_name, option): """ Returns the Element specified by @option or None Looks at the real <select> not the select2 widget, since that doesn't create the DOM until we click on it. """ select = find_field(browser, 'select', select_name) assert select, "Cannot find a '{}' select.".format(select_name) try: return select.find_element_by_xpath(str('.//option[normalize-space(text())=%s]' % string_literal(option))) # depends on [control=['try'], data=[]] except NoSuchElementException: return None # depends on [control=['except'], data=[]]
def _getitem(self, key): """Return specified page of series from cache or file.""" key = int(key) if key < 0: key %= self._len if len(self._pages) == 1 and 0 < key < self._len: index = self._pages[0].index return self.parent.pages._getitem(index + key) return self._pages[key]
def function[_getitem, parameter[self, key]]: constant[Return specified page of series from cache or file.] variable[key] assign[=] call[name[int], parameter[name[key]]] if compare[name[key] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b1859810> if <ast.BoolOp object at 0x7da1b185a500> begin[:] variable[index] assign[=] call[name[self]._pages][constant[0]].index return[call[name[self].parent.pages._getitem, parameter[binary_operation[name[index] + name[key]]]]] return[call[name[self]._pages][name[key]]]
keyword[def] identifier[_getitem] ( identifier[self] , identifier[key] ): literal[string] identifier[key] = identifier[int] ( identifier[key] ) keyword[if] identifier[key] < literal[int] : identifier[key] %= identifier[self] . identifier[_len] keyword[if] identifier[len] ( identifier[self] . identifier[_pages] )== literal[int] keyword[and] literal[int] < identifier[key] < identifier[self] . identifier[_len] : identifier[index] = identifier[self] . identifier[_pages] [ literal[int] ]. identifier[index] keyword[return] identifier[self] . identifier[parent] . identifier[pages] . identifier[_getitem] ( identifier[index] + identifier[key] ) keyword[return] identifier[self] . identifier[_pages] [ identifier[key] ]
def _getitem(self, key): """Return specified page of series from cache or file.""" key = int(key) if key < 0: key %= self._len # depends on [control=['if'], data=['key']] if len(self._pages) == 1 and 0 < key < self._len: index = self._pages[0].index return self.parent.pages._getitem(index + key) # depends on [control=['if'], data=[]] return self._pages[key]
def add_permission(content_type, permission): """ Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that. """ # build our permission slug codename = "%s_%s" % (content_type.model, permission) # sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name)) # does it already exist if not Permission.objects.filter(content_type=content_type, codename=codename): Permission.objects.create(content_type=content_type, codename=codename, name="Can %s %s" % (permission, content_type.name))
def function[add_permission, parameter[content_type, permission]]: constant[ Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that. ] variable[codename] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0f62080>, <ast.Name object at 0x7da1b0f620e0>]]] if <ast.UnaryOp object at 0x7da1b0f620b0> begin[:] call[name[Permission].objects.create, parameter[]]
keyword[def] identifier[add_permission] ( identifier[content_type] , identifier[permission] ): literal[string] identifier[codename] = literal[string] %( identifier[content_type] . identifier[model] , identifier[permission] ) keyword[if] keyword[not] identifier[Permission] . identifier[objects] . identifier[filter] ( identifier[content_type] = identifier[content_type] , identifier[codename] = identifier[codename] ): identifier[Permission] . identifier[objects] . identifier[create] ( identifier[content_type] = identifier[content_type] , identifier[codename] = identifier[codename] , identifier[name] = literal[string] %( identifier[permission] , identifier[content_type] . identifier[name] ))
def add_permission(content_type, permission): """ Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that. """ # build our permission slug codename = '%s_%s' % (content_type.model, permission) # sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name)) # does it already exist if not Permission.objects.filter(content_type=content_type, codename=codename): Permission.objects.create(content_type=content_type, codename=codename, name='Can %s %s' % (permission, content_type.name)) # depends on [control=['if'], data=[]]
def pci_lookup_name1( access: (IN, ctypes.POINTER(pci_access)), buf: (IN, ctypes.c_char_p), size: (IN, ctypes.c_int), flags: (IN, ctypes.c_int), arg1: (IN, ctypes.c_int), ) -> ctypes.c_char_p: """ Conversion of PCI ID's to names (according to the pci.ids file). char *pci_lookup_name( struct pci_access *a, char *buf, int size, int flags, ... ) PCI_ABI; This is a variant of pci_lookup_name() that gets called with one argument. It is required because ctypes doesn't support varadic functions. """ pass
def function[pci_lookup_name1, parameter[access, buf, size, flags, arg1]]: constant[ Conversion of PCI ID's to names (according to the pci.ids file). char *pci_lookup_name( struct pci_access *a, char *buf, int size, int flags, ... ) PCI_ABI; This is a variant of pci_lookup_name() that gets called with one argument. It is required because ctypes doesn't support varadic functions. ] pass
keyword[def] identifier[pci_lookup_name1] ( identifier[access] :( identifier[IN] , identifier[ctypes] . identifier[POINTER] ( identifier[pci_access] )), identifier[buf] :( identifier[IN] , identifier[ctypes] . identifier[c_char_p] ), identifier[size] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ), identifier[flags] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ), identifier[arg1] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ), )-> identifier[ctypes] . identifier[c_char_p] : literal[string] keyword[pass]
def pci_lookup_name1(access: (IN, ctypes.POINTER(pci_access)), buf: (IN, ctypes.c_char_p), size: (IN, ctypes.c_int), flags: (IN, ctypes.c_int), arg1: (IN, ctypes.c_int)) -> ctypes.c_char_p: """ Conversion of PCI ID's to names (according to the pci.ids file). char *pci_lookup_name( struct pci_access *a, char *buf, int size, int flags, ... ) PCI_ABI; This is a variant of pci_lookup_name() that gets called with one argument. It is required because ctypes doesn't support varadic functions. """ pass
def LinShuReductionFactor(axiPot,R,sigmar,nonaxiPot=None, k=None,m=None,OmegaP=None): """ NAME: LinShuReductionFactor PURPOSE: Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation INPUT: axiPot - The background, axisymmetric potential R - Cylindrical radius (can be Quantity) sigmar - radial velocity dispersion of the population (can be Quantity) Then either provide: 1) m= m in the perturbation's m x phi (number of arms for a spiral) k= wavenumber (see Binney & Tremaine 2008) OmegaP= pattern speed (can be Quantity) 2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber OUTPUT: reduction factor HISTORY: 2014-08-23 - Written - Bovy (IAS) """ axiPot= flatten(axiPot) from galpy.potential import omegac, epifreq if nonaxiPot is None and (OmegaP is None or k is None or m is None): raise IOError("Need to specify either nonaxiPot= or m=, k=, OmegaP= for LinShuReductionFactor") elif not nonaxiPot is None: OmegaP= nonaxiPot.OmegaP() k= nonaxiPot.wavenumber(R) m= nonaxiPot.m() tepif= epifreq(axiPot,R) s= m*(OmegaP-omegac(axiPot,R))/tepif chi= sigmar**2.*k**2./tepif**2. return (1.-s**2.)/nu.sin(nu.pi*s)\ *integrate.quad(lambda t: nu.exp(-chi*(1.+nu.cos(t)))\ *nu.sin(s*t)*nu.sin(t), 0.,nu.pi)[0]
def function[LinShuReductionFactor, parameter[axiPot, R, sigmar, nonaxiPot, k, m, OmegaP]]: constant[ NAME: LinShuReductionFactor PURPOSE: Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation INPUT: axiPot - The background, axisymmetric potential R - Cylindrical radius (can be Quantity) sigmar - radial velocity dispersion of the population (can be Quantity) Then either provide: 1) m= m in the perturbation's m x phi (number of arms for a spiral) k= wavenumber (see Binney & Tremaine 2008) OmegaP= pattern speed (can be Quantity) 2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber OUTPUT: reduction factor HISTORY: 2014-08-23 - Written - Bovy (IAS) ] variable[axiPot] assign[=] call[name[flatten], parameter[name[axiPot]]] from relative_module[galpy.potential] import module[omegac], module[epifreq] if <ast.BoolOp object at 0x7da1b0c42890> begin[:] <ast.Raise object at 0x7da1b0cb5ab0> variable[tepif] assign[=] call[name[epifreq], parameter[name[axiPot], name[R]]] variable[s] assign[=] binary_operation[binary_operation[name[m] * binary_operation[name[OmegaP] - call[name[omegac], parameter[name[axiPot], name[R]]]]] / name[tepif]] variable[chi] assign[=] binary_operation[binary_operation[binary_operation[name[sigmar] ** constant[2.0]] * binary_operation[name[k] ** constant[2.0]]] / binary_operation[name[tepif] ** constant[2.0]]] return[binary_operation[binary_operation[binary_operation[constant[1.0] - binary_operation[name[s] ** constant[2.0]]] / call[name[nu].sin, parameter[binary_operation[name[nu].pi * name[s]]]]] * call[call[name[integrate].quad, parameter[<ast.Lambda object at 0x7da1b0cb47c0>, constant[0.0], name[nu].pi]]][constant[0]]]]
keyword[def] identifier[LinShuReductionFactor] ( identifier[axiPot] , identifier[R] , identifier[sigmar] , identifier[nonaxiPot] = keyword[None] , identifier[k] = keyword[None] , identifier[m] = keyword[None] , identifier[OmegaP] = keyword[None] ): literal[string] identifier[axiPot] = identifier[flatten] ( identifier[axiPot] ) keyword[from] identifier[galpy] . identifier[potential] keyword[import] identifier[omegac] , identifier[epifreq] keyword[if] identifier[nonaxiPot] keyword[is] keyword[None] keyword[and] ( identifier[OmegaP] keyword[is] keyword[None] keyword[or] identifier[k] keyword[is] keyword[None] keyword[or] identifier[m] keyword[is] keyword[None] ): keyword[raise] identifier[IOError] ( literal[string] ) keyword[elif] keyword[not] identifier[nonaxiPot] keyword[is] keyword[None] : identifier[OmegaP] = identifier[nonaxiPot] . identifier[OmegaP] () identifier[k] = identifier[nonaxiPot] . identifier[wavenumber] ( identifier[R] ) identifier[m] = identifier[nonaxiPot] . identifier[m] () identifier[tepif] = identifier[epifreq] ( identifier[axiPot] , identifier[R] ) identifier[s] = identifier[m] *( identifier[OmegaP] - identifier[omegac] ( identifier[axiPot] , identifier[R] ))/ identifier[tepif] identifier[chi] = identifier[sigmar] ** literal[int] * identifier[k] ** literal[int] / identifier[tepif] ** literal[int] keyword[return] ( literal[int] - identifier[s] ** literal[int] )/ identifier[nu] . identifier[sin] ( identifier[nu] . identifier[pi] * identifier[s] )* identifier[integrate] . identifier[quad] ( keyword[lambda] identifier[t] : identifier[nu] . identifier[exp] (- identifier[chi] *( literal[int] + identifier[nu] . identifier[cos] ( identifier[t] )))* identifier[nu] . identifier[sin] ( identifier[s] * identifier[t] )* identifier[nu] . identifier[sin] ( identifier[t] ), literal[int] , identifier[nu] . identifier[pi] )[ literal[int] ]
def LinShuReductionFactor(axiPot, R, sigmar, nonaxiPot=None, k=None, m=None, OmegaP=None): """ NAME: LinShuReductionFactor PURPOSE: Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation INPUT: axiPot - The background, axisymmetric potential R - Cylindrical radius (can be Quantity) sigmar - radial velocity dispersion of the population (can be Quantity) Then either provide: 1) m= m in the perturbation's m x phi (number of arms for a spiral) k= wavenumber (see Binney & Tremaine 2008) OmegaP= pattern speed (can be Quantity) 2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber OUTPUT: reduction factor HISTORY: 2014-08-23 - Written - Bovy (IAS) """ axiPot = flatten(axiPot) from galpy.potential import omegac, epifreq if nonaxiPot is None and (OmegaP is None or k is None or m is None): raise IOError('Need to specify either nonaxiPot= or m=, k=, OmegaP= for LinShuReductionFactor') # depends on [control=['if'], data=[]] elif not nonaxiPot is None: OmegaP = nonaxiPot.OmegaP() k = nonaxiPot.wavenumber(R) m = nonaxiPot.m() # depends on [control=['if'], data=[]] tepif = epifreq(axiPot, R) s = m * (OmegaP - omegac(axiPot, R)) / tepif chi = sigmar ** 2.0 * k ** 2.0 / tepif ** 2.0 return (1.0 - s ** 2.0) / nu.sin(nu.pi * s) * integrate.quad(lambda t: nu.exp(-chi * (1.0 + nu.cos(t))) * nu.sin(s * t) * nu.sin(t), 0.0, nu.pi)[0]
def dispatch(self, request, **kwargs): ''' Entry point for this class, here we decide basic stuff ''' # Check if this is a webservice request self.json_worker = (bool(getattr(self.request, "authtoken", False))) or (self.json is True) self.__authtoken = (bool(getattr(self.request, "authtoken", False))) # Check if this is an AJAX request if (request.is_ajax() or self.json_worker) and request.body: request.POST = QueryDict('').copy() body = request.body if type(request.body) == bytes: body = body.decode("utf-8") post = json.loads(body) for key in post: if type(post[key]) == dict and '__JSON_DATA__' in post[key]: post[key] = json.dumps(post[key]['__JSON_DATA__']) request.POST.update(post) # Set class internal variables self._setup(request) # Call the base implementation return super(GenModify, self).dispatch(request, **kwargs)
def function[dispatch, parameter[self, request]]: constant[ Entry point for this class, here we decide basic stuff ] name[self].json_worker assign[=] <ast.BoolOp object at 0x7da1b0d24c40> name[self].__authtoken assign[=] call[name[bool], parameter[call[name[getattr], parameter[name[self].request, constant[authtoken], constant[False]]]]] if <ast.BoolOp object at 0x7da20c7c9ed0> begin[:] name[request].POST assign[=] call[call[name[QueryDict], parameter[constant[]]].copy, parameter[]] variable[body] assign[=] name[request].body if compare[call[name[type], parameter[name[request].body]] equal[==] name[bytes]] begin[:] variable[body] assign[=] call[name[body].decode, parameter[constant[utf-8]]] variable[post] assign[=] call[name[json].loads, parameter[name[body]]] for taget[name[key]] in starred[name[post]] begin[:] if <ast.BoolOp object at 0x7da20c7cbee0> begin[:] call[name[post]][name[key]] assign[=] call[name[json].dumps, parameter[call[call[name[post]][name[key]]][constant[__JSON_DATA__]]]] call[name[request].POST.update, parameter[name[post]]] call[name[self]._setup, parameter[name[request]]] return[call[call[name[super], parameter[name[GenModify], name[self]]].dispatch, parameter[name[request]]]]
keyword[def] identifier[dispatch] ( identifier[self] , identifier[request] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[json_worker] =( identifier[bool] ( identifier[getattr] ( identifier[self] . identifier[request] , literal[string] , keyword[False] ))) keyword[or] ( identifier[self] . identifier[json] keyword[is] keyword[True] ) identifier[self] . identifier[__authtoken] =( identifier[bool] ( identifier[getattr] ( identifier[self] . identifier[request] , literal[string] , keyword[False] ))) keyword[if] ( identifier[request] . identifier[is_ajax] () keyword[or] identifier[self] . identifier[json_worker] ) keyword[and] identifier[request] . identifier[body] : identifier[request] . identifier[POST] = identifier[QueryDict] ( literal[string] ). identifier[copy] () identifier[body] = identifier[request] . identifier[body] keyword[if] identifier[type] ( identifier[request] . identifier[body] )== identifier[bytes] : identifier[body] = identifier[body] . identifier[decode] ( literal[string] ) identifier[post] = identifier[json] . identifier[loads] ( identifier[body] ) keyword[for] identifier[key] keyword[in] identifier[post] : keyword[if] identifier[type] ( identifier[post] [ identifier[key] ])== identifier[dict] keyword[and] literal[string] keyword[in] identifier[post] [ identifier[key] ]: identifier[post] [ identifier[key] ]= identifier[json] . identifier[dumps] ( identifier[post] [ identifier[key] ][ literal[string] ]) identifier[request] . identifier[POST] . identifier[update] ( identifier[post] ) identifier[self] . identifier[_setup] ( identifier[request] ) keyword[return] identifier[super] ( identifier[GenModify] , identifier[self] ). identifier[dispatch] ( identifier[request] ,** identifier[kwargs] )
def dispatch(self, request, **kwargs): """ Entry point for this class, here we decide basic stuff """ # Check if this is a webservice request self.json_worker = bool(getattr(self.request, 'authtoken', False)) or self.json is True self.__authtoken = bool(getattr(self.request, 'authtoken', False)) # Check if this is an AJAX request if (request.is_ajax() or self.json_worker) and request.body: request.POST = QueryDict('').copy() body = request.body if type(request.body) == bytes: body = body.decode('utf-8') # depends on [control=['if'], data=[]] post = json.loads(body) for key in post: if type(post[key]) == dict and '__JSON_DATA__' in post[key]: post[key] = json.dumps(post[key]['__JSON_DATA__']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] request.POST.update(post) # depends on [control=['if'], data=[]] # Set class internal variables self._setup(request) # Call the base implementation return super(GenModify, self).dispatch(request, **kwargs)
def optimized_binary_search_lower(tab, logsize): """Binary search in a table using bit operations :param tab: boolean monotone table of size :math:`2^\\textrm{logsize}` with tab[0] = False :param int logsize: :returns: last i such that not tab[i] :complexity: O(logsize) """ lo = 0 intervalsize = (1 << logsize) >> 1 while intervalsize > 0: if not tab[lo | intervalsize]: lo |= intervalsize intervalsize >>= 1 return lo
def function[optimized_binary_search_lower, parameter[tab, logsize]]: constant[Binary search in a table using bit operations :param tab: boolean monotone table of size :math:`2^\textrm{logsize}` with tab[0] = False :param int logsize: :returns: last i such that not tab[i] :complexity: O(logsize) ] variable[lo] assign[=] constant[0] variable[intervalsize] assign[=] binary_operation[binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[logsize]] <ast.RShift object at 0x7da2590d6a40> constant[1]] while compare[name[intervalsize] greater[>] constant[0]] begin[:] if <ast.UnaryOp object at 0x7da18bcc8910> begin[:] <ast.AugAssign object at 0x7da18bccabc0> <ast.AugAssign object at 0x7da18bcc9c30> return[name[lo]]
keyword[def] identifier[optimized_binary_search_lower] ( identifier[tab] , identifier[logsize] ): literal[string] identifier[lo] = literal[int] identifier[intervalsize] =( literal[int] << identifier[logsize] )>> literal[int] keyword[while] identifier[intervalsize] > literal[int] : keyword[if] keyword[not] identifier[tab] [ identifier[lo] | identifier[intervalsize] ]: identifier[lo] |= identifier[intervalsize] identifier[intervalsize] >>= literal[int] keyword[return] identifier[lo]
def optimized_binary_search_lower(tab, logsize): """Binary search in a table using bit operations :param tab: boolean monotone table of size :math:`2^\\textrm{logsize}` with tab[0] = False :param int logsize: :returns: last i such that not tab[i] :complexity: O(logsize) """ lo = 0 intervalsize = 1 << logsize >> 1 while intervalsize > 0: if not tab[lo | intervalsize]: lo |= intervalsize # depends on [control=['if'], data=[]] intervalsize >>= 1 # depends on [control=['while'], data=['intervalsize']] return lo
def binary_shrink(image, iterations=-1): """Shrink an image by repeatedly removing pixels which have partners above, to the left, to the right and below until the image doesn't change image - binary image to be manipulated iterations - # of times to shrink, -1 to shrink until idempotent There are horizontal/vertical thinners which detect a pixel on an edge with an interior pixel either horizontally or vertically attached like this: 0 0 0 X 1 X X 1 X and there are much more specific diagonal thinners which detect a pixel on the edge of a diagonal, like this: 0 0 0 0 1 0 0 0 1 Rotate each of these 4x to get the four directions for each """ global erode_table, binary_shrink_ulr_table, binary_shrink_lrl_table global binary_shrink_urb_table, binary_shrink_llt_table if erode_table is None: # # The erode table hits all patterns that can be eroded without # changing the euler_number erode_table = np.array([pattern_of(index)[1,1] and (scind.label(pattern_of(index-16))[1] != 1) for index in range(512)]) erode_table[index_of(np.ones((3,3), bool))] = True # # Each other table is more specific: a specific corner or a specific # edge must be on where the corner and edge are not adjacent # binary_shrink_ulr_table = ( erode_table | (make_table(False, np.array([[0,0,0], [1,1,0], [0,0,0]], bool), np.array([[0,0,0], [1,1,1], [0,0,0]],bool)) & make_table(False, np.array([[1,0,0], [0,1,0], [0,0,0]],bool), np.array([[1,0,0], [0,1,1], [0,1,1]],bool)))) binary_shrink_urb_table = ( erode_table | (make_table(False, np.array([[0,1,0], [0,1,0], [0,0,0]], bool), np.array([[0,1,0], [0,1,0], [0,1,0]],bool)) & make_table(False, np.array([[0,0,1], [0,1,0], [0,0,0]],bool), np.array([[0,0,1], [1,1,0], [1,1,0]],bool)))) binary_shrink_lrl_table = ( erode_table | (make_table(False, np.array([[0,0,0], [0,1,1], [0,0,0]], bool), np.array([[0,0,0], [1,1,1], [0,0,0]],bool)) & make_table(False, np.array([[0,0,0], [0,1,0], [0,0,1]], bool), np.array([[1,1,0], [1,1,0], [0,0,1]], bool)))) binary_shrink_llt_table = ( erode_table | (make_table(False, np.array([[0,0,0], [0,1,0], [0,1,0]], bool), np.array([[0,1,0], [0,1,0], [0,1,0]],bool)) & make_table(False, np.array([[0,0,0], [0,1,0], [1,0,0]], bool), np.array([[0,1,1], [0,1,1], [1,0,0]], bool)))) orig_image = image index_i, index_j, image = prepare_for_index_lookup(image, False) if iterations == -1: iterations = len(index_i) for i in range(iterations): pixel_count = len(index_i) for table in (binary_shrink_ulr_table, binary_shrink_urb_table, binary_shrink_lrl_table, binary_shrink_llt_table): index_i, index_j = index_lookup(index_i, index_j, image, table, 1) if len(index_i) == pixel_count: break image = extract_from_image_lookup(orig_image, index_i, index_j) return image
def function[binary_shrink, parameter[image, iterations]]: constant[Shrink an image by repeatedly removing pixels which have partners above, to the left, to the right and below until the image doesn't change image - binary image to be manipulated iterations - # of times to shrink, -1 to shrink until idempotent There are horizontal/vertical thinners which detect a pixel on an edge with an interior pixel either horizontally or vertically attached like this: 0 0 0 X 1 X X 1 X and there are much more specific diagonal thinners which detect a pixel on the edge of a diagonal, like this: 0 0 0 0 1 0 0 0 1 Rotate each of these 4x to get the four directions for each ] <ast.Global object at 0x7da204960b50> <ast.Global object at 0x7da204962920> if compare[name[erode_table] is constant[None]] begin[:] variable[erode_table] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da204963e20>]] call[name[erode_table]][call[name[index_of], parameter[call[name[np].ones, parameter[tuple[[<ast.Constant object at 0x7da204960460>, <ast.Constant object at 0x7da2049639a0>]], name[bool]]]]]] assign[=] constant[True] variable[binary_shrink_ulr_table] assign[=] binary_operation[name[erode_table] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da204963700>, <ast.List object at 0x7da204960f10>, <ast.List object at 0x7da204962860>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da204962650>, <ast.List object at 0x7da204960610>, <ast.List object at 0x7da204963340>]], name[bool]]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da2049608e0>, <ast.List object at 0x7da204960070>, <ast.List object at 0x7da204960e80>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da204963fd0>, <ast.List object at 0x7da204960670>, <ast.List object at 0x7da2049624d0>]], name[bool]]]]]]] variable[binary_shrink_urb_table] assign[=] binary_operation[name[erode_table] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da1b26ac820>, <ast.List object at 0x7da1b26acd90>, <ast.List object at 0x7da1b26ac130>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da1b26af8b0>, <ast.List object at 0x7da18dc9ac50>, <ast.List object at 0x7da18dc99030>]], name[bool]]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da18dc9baf0>, <ast.List object at 0x7da18dc9a110>, <ast.List object at 0x7da18dc9b2b0>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da18dc990c0>, <ast.List object at 0x7da20e9b2c20>, <ast.List object at 0x7da20e9b1b10>]], name[bool]]]]]]] variable[binary_shrink_lrl_table] assign[=] binary_operation[name[erode_table] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b2680>, <ast.List object at 0x7da20e9b0910>, <ast.List object at 0x7da20e9b3160>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b0040>, <ast.List object at 0x7da20e9b3880>, <ast.List object at 0x7da20e9b05e0>]], name[bool]]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b2020>, <ast.List object at 0x7da20e9b08e0>, <ast.List object at 0x7da20e9b1de0>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b2800>, <ast.List object at 0x7da20e9b2cb0>, <ast.List object at 0x7da20e9b1f30>]], name[bool]]]]]]] variable[binary_shrink_llt_table] assign[=] binary_operation[name[erode_table] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b2ef0>, <ast.List object at 0x7da20e9b1690>, <ast.List object at 0x7da20e9b35b0>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b3e80>, <ast.List object at 0x7da20e9b1990>, <ast.List object at 0x7da20e9b3910>]], name[bool]]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[make_table], parameter[constant[False], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b26e0>, <ast.List object at 0x7da20e9b0fd0>, <ast.List object at 0x7da20e9b3490>]], name[bool]]], call[name[np].array, parameter[list[[<ast.List object at 0x7da20e9b1570>, <ast.List object at 0x7da20e9b0a30>, <ast.List object at 0x7da20e9b2e60>]], name[bool]]]]]]] variable[orig_image] assign[=] name[image] <ast.Tuple object at 0x7da20e9b1c60> assign[=] call[name[prepare_for_index_lookup], parameter[name[image], constant[False]]] if compare[name[iterations] equal[==] <ast.UnaryOp object at 0x7da20e9b0670>] begin[:] variable[iterations] assign[=] call[name[len], parameter[name[index_i]]] for taget[name[i]] in starred[call[name[range], parameter[name[iterations]]]] begin[:] variable[pixel_count] assign[=] call[name[len], parameter[name[index_i]]] for taget[name[table]] in starred[tuple[[<ast.Name object at 0x7da18f58ef20>, <ast.Name object at 0x7da18f58ce80>, <ast.Name object at 0x7da18f58fa00>, <ast.Name object at 0x7da18f58fe80>]]] begin[:] <ast.Tuple object at 0x7da18f58f910> assign[=] call[name[index_lookup], parameter[name[index_i], name[index_j], name[image], name[table], constant[1]]] if compare[call[name[len], parameter[name[index_i]]] equal[==] name[pixel_count]] begin[:] break variable[image] assign[=] call[name[extract_from_image_lookup], parameter[name[orig_image], name[index_i], name[index_j]]] return[name[image]]
keyword[def] identifier[binary_shrink] ( identifier[image] , identifier[iterations] =- literal[int] ): literal[string] keyword[global] identifier[erode_table] , identifier[binary_shrink_ulr_table] , identifier[binary_shrink_lrl_table] keyword[global] identifier[binary_shrink_urb_table] , identifier[binary_shrink_llt_table] keyword[if] identifier[erode_table] keyword[is] keyword[None] : identifier[erode_table] = identifier[np] . identifier[array] ([ identifier[pattern_of] ( identifier[index] )[ literal[int] , literal[int] ] keyword[and] ( identifier[scind] . identifier[label] ( identifier[pattern_of] ( identifier[index] - literal[int] ))[ literal[int] ]!= literal[int] ) keyword[for] identifier[index] keyword[in] identifier[range] ( literal[int] )]) identifier[erode_table] [ identifier[index_of] ( identifier[np] . identifier[ones] (( literal[int] , literal[int] ), identifier[bool] ))]= keyword[True] identifier[binary_shrink_ulr_table] =( identifier[erode_table] | ( identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ))& identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] )))) identifier[binary_shrink_urb_table] =( identifier[erode_table] | ( identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ))& identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] )))) identifier[binary_shrink_lrl_table] =( identifier[erode_table] | ( identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ))& identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] )))) identifier[binary_shrink_llt_table] =( identifier[erode_table] | ( identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ))& identifier[make_table] ( keyword[False] , identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] ), identifier[np] . identifier[array] ([[ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ], [ literal[int] , literal[int] , literal[int] ]], identifier[bool] )))) identifier[orig_image] = identifier[image] identifier[index_i] , identifier[index_j] , identifier[image] = identifier[prepare_for_index_lookup] ( identifier[image] , keyword[False] ) keyword[if] identifier[iterations] ==- literal[int] : identifier[iterations] = identifier[len] ( identifier[index_i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[iterations] ): identifier[pixel_count] = identifier[len] ( identifier[index_i] ) keyword[for] identifier[table] keyword[in] ( identifier[binary_shrink_ulr_table] , identifier[binary_shrink_urb_table] , identifier[binary_shrink_lrl_table] , identifier[binary_shrink_llt_table] ): identifier[index_i] , identifier[index_j] = identifier[index_lookup] ( identifier[index_i] , identifier[index_j] , identifier[image] , identifier[table] , literal[int] ) keyword[if] identifier[len] ( identifier[index_i] )== identifier[pixel_count] : keyword[break] identifier[image] = identifier[extract_from_image_lookup] ( identifier[orig_image] , identifier[index_i] , identifier[index_j] ) keyword[return] identifier[image]
def binary_shrink(image, iterations=-1): """Shrink an image by repeatedly removing pixels which have partners above, to the left, to the right and below until the image doesn't change image - binary image to be manipulated iterations - # of times to shrink, -1 to shrink until idempotent There are horizontal/vertical thinners which detect a pixel on an edge with an interior pixel either horizontally or vertically attached like this: 0 0 0 X 1 X X 1 X and there are much more specific diagonal thinners which detect a pixel on the edge of a diagonal, like this: 0 0 0 0 1 0 0 0 1 Rotate each of these 4x to get the four directions for each """ global erode_table, binary_shrink_ulr_table, binary_shrink_lrl_table global binary_shrink_urb_table, binary_shrink_llt_table if erode_table is None: # # The erode table hits all patterns that can be eroded without # changing the euler_number erode_table = np.array([pattern_of(index)[1, 1] and scind.label(pattern_of(index - 16))[1] != 1 for index in range(512)]) erode_table[index_of(np.ones((3, 3), bool))] = True # # Each other table is more specific: a specific corner or a specific # edge must be on where the corner and edge are not adjacent # binary_shrink_ulr_table = erode_table | make_table(False, np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], bool), np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], bool)) & make_table(False, np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]], bool), np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]], bool)) binary_shrink_urb_table = erode_table | make_table(False, np.array([[0, 1, 0], [0, 1, 0], [0, 0, 0]], bool), np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], bool)) & make_table(False, np.array([[0, 0, 1], [0, 1, 0], [0, 0, 0]], bool), np.array([[0, 0, 1], [1, 1, 0], [1, 1, 0]], bool)) binary_shrink_lrl_table = erode_table | make_table(False, np.array([[0, 0, 0], [0, 1, 1], [0, 0, 0]], bool), np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], bool)) & make_table(False, np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]], bool), np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]], bool)) binary_shrink_llt_table = erode_table | make_table(False, np.array([[0, 0, 0], [0, 1, 0], [0, 1, 0]], bool), np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], bool)) & make_table(False, np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]], bool), np.array([[0, 1, 1], [0, 1, 1], [1, 0, 0]], bool)) # depends on [control=['if'], data=['erode_table']] orig_image = image (index_i, index_j, image) = prepare_for_index_lookup(image, False) if iterations == -1: iterations = len(index_i) # depends on [control=['if'], data=['iterations']] for i in range(iterations): pixel_count = len(index_i) for table in (binary_shrink_ulr_table, binary_shrink_urb_table, binary_shrink_lrl_table, binary_shrink_llt_table): (index_i, index_j) = index_lookup(index_i, index_j, image, table, 1) # depends on [control=['for'], data=['table']] if len(index_i) == pixel_count: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] image = extract_from_image_lookup(orig_image, index_i, index_j) return image
def estimate_column_scales( self, X_centered, row_scales): """ column_scale[j] ** 2 = mean{i in observed[:, j]}{ (X[i, j] - row_center[i] - column_center[j]) ** 2 ------------------------------------------------- row_scale[i] ** 2 } """ n_rows, n_cols = X_centered.shape row_scales = np.asarray(row_scales) if len(row_scales) != n_rows: raise ValueError("Expected length %s, got shape %s" % ( n_rows, row_scales.shape,)) column_variances = np.nanmean( X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)), axis=0) column_variances[column_variances == 0] = 1.0 assert len(column_variances) == n_cols, "%d != %d" % ( len(column_variances), n_cols) return np.sqrt(column_variances)
def function[estimate_column_scales, parameter[self, X_centered, row_scales]]: constant[ column_scale[j] ** 2 = mean{i in observed[:, j]}{ (X[i, j] - row_center[i] - column_center[j]) ** 2 ------------------------------------------------- row_scale[i] ** 2 } ] <ast.Tuple object at 0x7da1b1a66980> assign[=] name[X_centered].shape variable[row_scales] assign[=] call[name[np].asarray, parameter[name[row_scales]]] if compare[call[name[len], parameter[name[row_scales]]] not_equal[!=] name[n_rows]] begin[:] <ast.Raise object at 0x7da1b1a64220> variable[column_variances] assign[=] call[name[np].nanmean, parameter[binary_operation[binary_operation[name[X_centered] ** constant[2]] / call[binary_operation[name[row_scales] ** constant[2]].reshape, parameter[tuple[[<ast.Name object at 0x7da1b1a66230>, <ast.Constant object at 0x7da1b1a65510>]]]]]]] call[name[column_variances]][compare[name[column_variances] equal[==] constant[0]]] assign[=] constant[1.0] assert[compare[call[name[len], parameter[name[column_variances]]] equal[==] name[n_cols]]] return[call[name[np].sqrt, parameter[name[column_variances]]]]
keyword[def] identifier[estimate_column_scales] ( identifier[self] , identifier[X_centered] , identifier[row_scales] ): literal[string] identifier[n_rows] , identifier[n_cols] = identifier[X_centered] . identifier[shape] identifier[row_scales] = identifier[np] . identifier[asarray] ( identifier[row_scales] ) keyword[if] identifier[len] ( identifier[row_scales] )!= identifier[n_rows] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[n_rows] , identifier[row_scales] . identifier[shape] ,)) identifier[column_variances] = identifier[np] . identifier[nanmean] ( identifier[X_centered] ** literal[int] /( identifier[row_scales] ** literal[int] ). identifier[reshape] (( identifier[n_rows] , literal[int] )), identifier[axis] = literal[int] ) identifier[column_variances] [ identifier[column_variances] == literal[int] ]= literal[int] keyword[assert] identifier[len] ( identifier[column_variances] )== identifier[n_cols] , literal[string] %( identifier[len] ( identifier[column_variances] ), identifier[n_cols] ) keyword[return] identifier[np] . identifier[sqrt] ( identifier[column_variances] )
def estimate_column_scales(self, X_centered, row_scales): """ column_scale[j] ** 2 = mean{i in observed[:, j]}{ (X[i, j] - row_center[i] - column_center[j]) ** 2 ------------------------------------------------- row_scale[i] ** 2 } """ (n_rows, n_cols) = X_centered.shape row_scales = np.asarray(row_scales) if len(row_scales) != n_rows: raise ValueError('Expected length %s, got shape %s' % (n_rows, row_scales.shape)) # depends on [control=['if'], data=['n_rows']] column_variances = np.nanmean(X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)), axis=0) column_variances[column_variances == 0] = 1.0 assert len(column_variances) == n_cols, '%d != %d' % (len(column_variances), n_cols) return np.sqrt(column_variances)
def get_docker_tag(platform: str, registry: str) -> str: """:return: docker tag to be used for the container""" platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform) if not registry: registry = "mxnet_local" return "{0}/{1}".format(registry, platform)
def function[get_docker_tag, parameter[platform, registry]]: constant[:return: docker tag to be used for the container] variable[platform] assign[=] <ast.IfExp object at 0x7da1b2064880> if <ast.UnaryOp object at 0x7da1b2066710> begin[:] variable[registry] assign[=] constant[mxnet_local] return[call[constant[{0}/{1}].format, parameter[name[registry], name[platform]]]]
keyword[def] identifier[get_docker_tag] ( identifier[platform] : identifier[str] , identifier[registry] : identifier[str] )-> identifier[str] : literal[string] identifier[platform] = identifier[platform] keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[platform] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] ]) keyword[else] literal[string] . identifier[format] ( identifier[platform] ) keyword[if] keyword[not] identifier[registry] : identifier[registry] = literal[string] keyword[return] literal[string] . identifier[format] ( identifier[registry] , identifier[platform] )
def get_docker_tag(platform: str, registry: str) -> str: """:return: docker tag to be used for the container""" platform = platform if any((x in platform for x in ['build.', 'publish.'])) else 'build.{}'.format(platform) if not registry: registry = 'mxnet_local' # depends on [control=['if'], data=[]] return '{0}/{1}'.format(registry, platform)
def _build_shebang(self, executable, post_interp): """ Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach """ if os.name != 'posix': simple_shebang = True else: # Add 3 for '#!' prefix and newline suffix. shebang_length = len(executable) + len(post_interp) + 3 if sys.platform == 'darwin': max_shebang_length = 512 else: max_shebang_length = 127 simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length)) if simple_shebang: result = b'#!' + executable + post_interp + b'\n' else: result = b'#!/bin/sh\n' result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' result += b"' '''" return result
def function[_build_shebang, parameter[self, executable, post_interp]]: constant[ Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach ] if compare[name[os].name not_equal[!=] constant[posix]] begin[:] variable[simple_shebang] assign[=] constant[True] if name[simple_shebang] begin[:] variable[result] assign[=] binary_operation[binary_operation[binary_operation[constant[b'#!'] + name[executable]] + name[post_interp]] + constant[b'\n']] return[name[result]]
keyword[def] identifier[_build_shebang] ( identifier[self] , identifier[executable] , identifier[post_interp] ): literal[string] keyword[if] identifier[os] . identifier[name] != literal[string] : identifier[simple_shebang] = keyword[True] keyword[else] : identifier[shebang_length] = identifier[len] ( identifier[executable] )+ identifier[len] ( identifier[post_interp] )+ literal[int] keyword[if] identifier[sys] . identifier[platform] == literal[string] : identifier[max_shebang_length] = literal[int] keyword[else] : identifier[max_shebang_length] = literal[int] identifier[simple_shebang] =(( literal[string] keyword[not] keyword[in] identifier[executable] ) keyword[and] ( identifier[shebang_length] <= identifier[max_shebang_length] )) keyword[if] identifier[simple_shebang] : identifier[result] = literal[string] + identifier[executable] + identifier[post_interp] + literal[string] keyword[else] : identifier[result] = literal[string] identifier[result] += literal[string] + identifier[executable] + identifier[post_interp] + literal[string] identifier[result] += literal[string] keyword[return] identifier[result]
def _build_shebang(self, executable, post_interp): """ Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach """ if os.name != 'posix': simple_shebang = True # depends on [control=['if'], data=[]] else: # Add 3 for '#!' prefix and newline suffix. shebang_length = len(executable) + len(post_interp) + 3 if sys.platform == 'darwin': max_shebang_length = 512 # depends on [control=['if'], data=[]] else: max_shebang_length = 127 simple_shebang = b' ' not in executable and shebang_length <= max_shebang_length if simple_shebang: result = b'#!' + executable + post_interp + b'\n' # depends on [control=['if'], data=[]] else: result = b'#!/bin/sh\n' result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' result += b"' '''" return result
def sanity_updates_after_move(self, oldpath, newpath): """ Updates the list of sql statements needed after moving nodes. 1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*) 2. update the number of children of parent nodes """ if ( self.node_cls.get_database_vendor('write') == 'mysql' and len(oldpath) != len(newpath) ): # no words can describe how dumb mysql is # we must update the depth of the branch in a different query self.stmts.append( self.get_mysql_update_depth_in_branch(newpath)) oldparentpath = self.node_cls._get_parent_path_from_path(oldpath) newparentpath = self.node_cls._get_parent_path_from_path(newpath) if ( (not oldparentpath and newparentpath) or (oldparentpath and not newparentpath) or (oldparentpath != newparentpath) ): # node changed parent, updating count if oldparentpath: self.stmts.append( self.get_sql_update_numchild(oldparentpath, 'dec')) if newparentpath: self.stmts.append( self.get_sql_update_numchild(newparentpath, 'inc'))
def function[sanity_updates_after_move, parameter[self, oldpath, newpath]]: constant[ Updates the list of sql statements needed after moving nodes. 1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*) 2. update the number of children of parent nodes ] if <ast.BoolOp object at 0x7da2041d8340> begin[:] call[name[self].stmts.append, parameter[call[name[self].get_mysql_update_depth_in_branch, parameter[name[newpath]]]]] variable[oldparentpath] assign[=] call[name[self].node_cls._get_parent_path_from_path, parameter[name[oldpath]]] variable[newparentpath] assign[=] call[name[self].node_cls._get_parent_path_from_path, parameter[name[newpath]]] if <ast.BoolOp object at 0x7da2041db8e0> begin[:] if name[oldparentpath] begin[:] call[name[self].stmts.append, parameter[call[name[self].get_sql_update_numchild, parameter[name[oldparentpath], constant[dec]]]]] if name[newparentpath] begin[:] call[name[self].stmts.append, parameter[call[name[self].get_sql_update_numchild, parameter[name[newparentpath], constant[inc]]]]]
keyword[def] identifier[sanity_updates_after_move] ( identifier[self] , identifier[oldpath] , identifier[newpath] ): literal[string] keyword[if] ( identifier[self] . identifier[node_cls] . identifier[get_database_vendor] ( literal[string] )== literal[string] keyword[and] identifier[len] ( identifier[oldpath] )!= identifier[len] ( identifier[newpath] ) ): identifier[self] . identifier[stmts] . identifier[append] ( identifier[self] . identifier[get_mysql_update_depth_in_branch] ( identifier[newpath] )) identifier[oldparentpath] = identifier[self] . identifier[node_cls] . identifier[_get_parent_path_from_path] ( identifier[oldpath] ) identifier[newparentpath] = identifier[self] . identifier[node_cls] . identifier[_get_parent_path_from_path] ( identifier[newpath] ) keyword[if] ( ( keyword[not] identifier[oldparentpath] keyword[and] identifier[newparentpath] ) keyword[or] ( identifier[oldparentpath] keyword[and] keyword[not] identifier[newparentpath] ) keyword[or] ( identifier[oldparentpath] != identifier[newparentpath] ) ): keyword[if] identifier[oldparentpath] : identifier[self] . identifier[stmts] . identifier[append] ( identifier[self] . identifier[get_sql_update_numchild] ( identifier[oldparentpath] , literal[string] )) keyword[if] identifier[newparentpath] : identifier[self] . identifier[stmts] . identifier[append] ( identifier[self] . identifier[get_sql_update_numchild] ( identifier[newparentpath] , literal[string] ))
def sanity_updates_after_move(self, oldpath, newpath): """ Updates the list of sql statements needed after moving nodes. 1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*) 2. update the number of children of parent nodes """ if self.node_cls.get_database_vendor('write') == 'mysql' and len(oldpath) != len(newpath): # no words can describe how dumb mysql is # we must update the depth of the branch in a different query self.stmts.append(self.get_mysql_update_depth_in_branch(newpath)) # depends on [control=['if'], data=[]] oldparentpath = self.node_cls._get_parent_path_from_path(oldpath) newparentpath = self.node_cls._get_parent_path_from_path(newpath) if not oldparentpath and newparentpath or (oldparentpath and (not newparentpath)) or oldparentpath != newparentpath: # node changed parent, updating count if oldparentpath: self.stmts.append(self.get_sql_update_numchild(oldparentpath, 'dec')) # depends on [control=['if'], data=[]] if newparentpath: self.stmts.append(self.get_sql_update_numchild(newparentpath, 'inc')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def __get_exception_message(self): """ This method extracts the message from an exception if there was an exception that occurred during the test, assuming that the exception was in a try/except block and not thrown. """ exception_info = sys.exc_info()[1] if hasattr(exception_info, 'msg'): exc_message = exception_info.msg elif hasattr(exception_info, 'message'): exc_message = exception_info.message else: exc_message = '(Unknown Exception)' return exc_message
def function[__get_exception_message, parameter[self]]: constant[ This method extracts the message from an exception if there was an exception that occurred during the test, assuming that the exception was in a try/except block and not thrown. ] variable[exception_info] assign[=] call[call[name[sys].exc_info, parameter[]]][constant[1]] if call[name[hasattr], parameter[name[exception_info], constant[msg]]] begin[:] variable[exc_message] assign[=] name[exception_info].msg return[name[exc_message]]
keyword[def] identifier[__get_exception_message] ( identifier[self] ): literal[string] identifier[exception_info] = identifier[sys] . identifier[exc_info] ()[ literal[int] ] keyword[if] identifier[hasattr] ( identifier[exception_info] , literal[string] ): identifier[exc_message] = identifier[exception_info] . identifier[msg] keyword[elif] identifier[hasattr] ( identifier[exception_info] , literal[string] ): identifier[exc_message] = identifier[exception_info] . identifier[message] keyword[else] : identifier[exc_message] = literal[string] keyword[return] identifier[exc_message]
def __get_exception_message(self): """ This method extracts the message from an exception if there was an exception that occurred during the test, assuming that the exception was in a try/except block and not thrown. """ exception_info = sys.exc_info()[1] if hasattr(exception_info, 'msg'): exc_message = exception_info.msg # depends on [control=['if'], data=[]] elif hasattr(exception_info, 'message'): exc_message = exception_info.message # depends on [control=['if'], data=[]] else: exc_message = '(Unknown Exception)' return exc_message
def send(self, use_open_peers=True, queue=True, **kw): """ send a transaction immediately. Failed transactions are picked up by the TxBroadcaster :param ip: specific peer IP to send tx to :param port: port of specific peer :param use_open_peers: use Arky's broadcast method """ if not use_open_peers: ip = kw.get('ip') port = kw.get('port') peer = 'http://{}:{}'.format(ip, port) res = arky.rest.POST.peer.transactions(peer=peer, transactions=[self.tx.tx]) else: res = arky.core.sendPayload(self.tx.tx) if self.tx.success != '0.0%': self.tx.error = None self.tx.success = True else: self.tx.error = res['messages'] self.tx.success = False self.tx.tries += 1 self.tx.res = res if queue: self.tx.send = True self.__save() return res
def function[send, parameter[self, use_open_peers, queue]]: constant[ send a transaction immediately. Failed transactions are picked up by the TxBroadcaster :param ip: specific peer IP to send tx to :param port: port of specific peer :param use_open_peers: use Arky's broadcast method ] if <ast.UnaryOp object at 0x7da2041dbf10> begin[:] variable[ip] assign[=] call[name[kw].get, parameter[constant[ip]]] variable[port] assign[=] call[name[kw].get, parameter[constant[port]]] variable[peer] assign[=] call[constant[http://{}:{}].format, parameter[name[ip], name[port]]] variable[res] assign[=] call[name[arky].rest.POST.peer.transactions, parameter[]] if compare[name[self].tx.success not_equal[!=] constant[0.0%]] begin[:] name[self].tx.error assign[=] constant[None] name[self].tx.success assign[=] constant[True] <ast.AugAssign object at 0x7da2041dae00> name[self].tx.res assign[=] name[res] if name[queue] begin[:] name[self].tx.send assign[=] constant[True] call[name[self].__save, parameter[]] return[name[res]]
keyword[def] identifier[send] ( identifier[self] , identifier[use_open_peers] = keyword[True] , identifier[queue] = keyword[True] ,** identifier[kw] ): literal[string] keyword[if] keyword[not] identifier[use_open_peers] : identifier[ip] = identifier[kw] . identifier[get] ( literal[string] ) identifier[port] = identifier[kw] . identifier[get] ( literal[string] ) identifier[peer] = literal[string] . identifier[format] ( identifier[ip] , identifier[port] ) identifier[res] = identifier[arky] . identifier[rest] . identifier[POST] . identifier[peer] . identifier[transactions] ( identifier[peer] = identifier[peer] , identifier[transactions] =[ identifier[self] . identifier[tx] . identifier[tx] ]) keyword[else] : identifier[res] = identifier[arky] . identifier[core] . identifier[sendPayload] ( identifier[self] . identifier[tx] . identifier[tx] ) keyword[if] identifier[self] . identifier[tx] . identifier[success] != literal[string] : identifier[self] . identifier[tx] . identifier[error] = keyword[None] identifier[self] . identifier[tx] . identifier[success] = keyword[True] keyword[else] : identifier[self] . identifier[tx] . identifier[error] = identifier[res] [ literal[string] ] identifier[self] . identifier[tx] . identifier[success] = keyword[False] identifier[self] . identifier[tx] . identifier[tries] += literal[int] identifier[self] . identifier[tx] . identifier[res] = identifier[res] keyword[if] identifier[queue] : identifier[self] . identifier[tx] . identifier[send] = keyword[True] identifier[self] . identifier[__save] () keyword[return] identifier[res]
def send(self, use_open_peers=True, queue=True, **kw): """ send a transaction immediately. Failed transactions are picked up by the TxBroadcaster :param ip: specific peer IP to send tx to :param port: port of specific peer :param use_open_peers: use Arky's broadcast method """ if not use_open_peers: ip = kw.get('ip') port = kw.get('port') peer = 'http://{}:{}'.format(ip, port) res = arky.rest.POST.peer.transactions(peer=peer, transactions=[self.tx.tx]) # depends on [control=['if'], data=[]] else: res = arky.core.sendPayload(self.tx.tx) if self.tx.success != '0.0%': self.tx.error = None self.tx.success = True # depends on [control=['if'], data=[]] else: self.tx.error = res['messages'] self.tx.success = False self.tx.tries += 1 self.tx.res = res if queue: self.tx.send = True # depends on [control=['if'], data=[]] self.__save() return res
def parse_branchset(self, branchset_node, depth, number, validate): """ Create :class:`BranchSet` object using data in ``branchset_node``. :param branchset_node: ``etree.Element`` object with tag "logicTreeBranchSet". :param depth: The sequential number of branchset's branching level, based on 0. :param number: Index number of this branchset inside branching level, based on 0. :param validate: Whether or not filters defined in branchset and the branchset itself should be validated. :returns: An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`). """ uncertainty_type = branchset_node.attrib.get('uncertaintyType') filters = dict((filtername, branchset_node.attrib.get(filtername)) for filtername in self.FILTERS if filtername in branchset_node.attrib) if validate: self.validate_filters(branchset_node, uncertainty_type, filters) filters = self.parse_filters(branchset_node, uncertainty_type, filters) branchset = BranchSet(uncertainty_type, filters) if validate: self.validate_branchset(branchset_node, depth, number, branchset) return branchset
def function[parse_branchset, parameter[self, branchset_node, depth, number, validate]]: constant[ Create :class:`BranchSet` object using data in ``branchset_node``. :param branchset_node: ``etree.Element`` object with tag "logicTreeBranchSet". :param depth: The sequential number of branchset's branching level, based on 0. :param number: Index number of this branchset inside branching level, based on 0. :param validate: Whether or not filters defined in branchset and the branchset itself should be validated. :returns: An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`). ] variable[uncertainty_type] assign[=] call[name[branchset_node].attrib.get, parameter[constant[uncertaintyType]]] variable[filters] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20c9907c0>]] if name[validate] begin[:] call[name[self].validate_filters, parameter[name[branchset_node], name[uncertainty_type], name[filters]]] variable[filters] assign[=] call[name[self].parse_filters, parameter[name[branchset_node], name[uncertainty_type], name[filters]]] variable[branchset] assign[=] call[name[BranchSet], parameter[name[uncertainty_type], name[filters]]] if name[validate] begin[:] call[name[self].validate_branchset, parameter[name[branchset_node], name[depth], name[number], name[branchset]]] return[name[branchset]]
keyword[def] identifier[parse_branchset] ( identifier[self] , identifier[branchset_node] , identifier[depth] , identifier[number] , identifier[validate] ): literal[string] identifier[uncertainty_type] = identifier[branchset_node] . identifier[attrib] . identifier[get] ( literal[string] ) identifier[filters] = identifier[dict] (( identifier[filtername] , identifier[branchset_node] . identifier[attrib] . identifier[get] ( identifier[filtername] )) keyword[for] identifier[filtername] keyword[in] identifier[self] . identifier[FILTERS] keyword[if] identifier[filtername] keyword[in] identifier[branchset_node] . identifier[attrib] ) keyword[if] identifier[validate] : identifier[self] . identifier[validate_filters] ( identifier[branchset_node] , identifier[uncertainty_type] , identifier[filters] ) identifier[filters] = identifier[self] . identifier[parse_filters] ( identifier[branchset_node] , identifier[uncertainty_type] , identifier[filters] ) identifier[branchset] = identifier[BranchSet] ( identifier[uncertainty_type] , identifier[filters] ) keyword[if] identifier[validate] : identifier[self] . identifier[validate_branchset] ( identifier[branchset_node] , identifier[depth] , identifier[number] , identifier[branchset] ) keyword[return] identifier[branchset]
def parse_branchset(self, branchset_node, depth, number, validate): """ Create :class:`BranchSet` object using data in ``branchset_node``. :param branchset_node: ``etree.Element`` object with tag "logicTreeBranchSet". :param depth: The sequential number of branchset's branching level, based on 0. :param number: Index number of this branchset inside branching level, based on 0. :param validate: Whether or not filters defined in branchset and the branchset itself should be validated. :returns: An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`). """ uncertainty_type = branchset_node.attrib.get('uncertaintyType') filters = dict(((filtername, branchset_node.attrib.get(filtername)) for filtername in self.FILTERS if filtername in branchset_node.attrib)) if validate: self.validate_filters(branchset_node, uncertainty_type, filters) # depends on [control=['if'], data=[]] filters = self.parse_filters(branchset_node, uncertainty_type, filters) branchset = BranchSet(uncertainty_type, filters) if validate: self.validate_branchset(branchset_node, depth, number, branchset) # depends on [control=['if'], data=[]] return branchset
def message(self, tree, spins, subtheta, auxvars): """Determine the energy of the elimination tree. Args: tree (dict): The current elimination tree spins (dict): The current fixed spins subtheta (dict): Theta with spins fixed. auxvars (dict): The auxiliary variables for the given spins. Returns: The formula for the energy of the tree. """ energy_sources = set() for v, children in tree.items(): aux = auxvars[v] assert all(u in spins for u in self._ancestors[v]) # build an iterable over all of the energies contributions # that we can exactly determine given v and our known spins # in these contributions we assume that v is positive def energy_contributions(): yield subtheta.linear[v] for u, bias in subtheta.adj[v].items(): if u in spins: yield SpinTimes(spins[u], bias) plus_energy = Plus(energy_contributions()) minus_energy = SpinTimes(-1, plus_energy) # if the variable has children, we need to recursively determine their energies if children: # set v to be positive spins[v] = 1 plus_energy = Plus(plus_energy, self.message(children, spins, subtheta, auxvars)) spins[v] = -1 minus_energy = Plus(minus_energy, self.message(children, spins, subtheta, auxvars)) del spins[v] # we now need a real-valued smt variable to be our message m = FreshSymbol(REAL) ancestor_aux = {auxvars[u] if spins[u] > 0 else Not(auxvars[u]) for u in self._ancestors[v]} plus_aux = And({aux}.union(ancestor_aux)) minus_aux = And({Not(aux)}.union(ancestor_aux)) self.assertions.update({LE(m, plus_energy), LE(m, minus_energy), Implies(plus_aux, GE(m, plus_energy)), Implies(minus_aux, GE(m, minus_energy)) }) energy_sources.add(m) return Plus(energy_sources)
def function[message, parameter[self, tree, spins, subtheta, auxvars]]: constant[Determine the energy of the elimination tree. Args: tree (dict): The current elimination tree spins (dict): The current fixed spins subtheta (dict): Theta with spins fixed. auxvars (dict): The auxiliary variables for the given spins. Returns: The formula for the energy of the tree. ] variable[energy_sources] assign[=] call[name[set], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0336cb0>, <ast.Name object at 0x7da1b03378e0>]]] in starred[call[name[tree].items, parameter[]]] begin[:] variable[aux] assign[=] call[name[auxvars]][name[v]] assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b0334a00>]]] def function[energy_contributions, parameter[]]: <ast.Yield object at 0x7da1b0336b30> for taget[tuple[[<ast.Name object at 0x7da1b0335060>, <ast.Name object at 0x7da1b0334190>]]] in starred[call[call[name[subtheta].adj][name[v]].items, parameter[]]] begin[:] if compare[name[u] in name[spins]] begin[:] <ast.Yield object at 0x7da1b03357e0> variable[plus_energy] assign[=] call[name[Plus], parameter[call[name[energy_contributions], parameter[]]]] variable[minus_energy] assign[=] call[name[SpinTimes], parameter[<ast.UnaryOp object at 0x7da1b05f8520>, name[plus_energy]]] if name[children] begin[:] call[name[spins]][name[v]] assign[=] constant[1] variable[plus_energy] assign[=] call[name[Plus], parameter[name[plus_energy], call[name[self].message, parameter[name[children], name[spins], name[subtheta], name[auxvars]]]]] call[name[spins]][name[v]] assign[=] <ast.UnaryOp object at 0x7da1b05fada0> variable[minus_energy] assign[=] call[name[Plus], parameter[name[minus_energy], call[name[self].message, parameter[name[children], name[spins], name[subtheta], name[auxvars]]]]] <ast.Delete object at 0x7da1b05f9240> variable[m] assign[=] call[name[FreshSymbol], parameter[name[REAL]]] variable[ancestor_aux] assign[=] <ast.SetComp object at 0x7da1b05faa70> variable[plus_aux] assign[=] call[name[And], parameter[call[<ast.Set object at 0x7da1b05fa1a0>.union, parameter[name[ancestor_aux]]]]] variable[minus_aux] assign[=] call[name[And], parameter[call[<ast.Set object at 0x7da1b05fabc0>.union, parameter[name[ancestor_aux]]]]] call[name[self].assertions.update, parameter[<ast.Set object at 0x7da1b05f8ac0>]] call[name[energy_sources].add, parameter[name[m]]] return[call[name[Plus], parameter[name[energy_sources]]]]
keyword[def] identifier[message] ( identifier[self] , identifier[tree] , identifier[spins] , identifier[subtheta] , identifier[auxvars] ): literal[string] identifier[energy_sources] = identifier[set] () keyword[for] identifier[v] , identifier[children] keyword[in] identifier[tree] . identifier[items] (): identifier[aux] = identifier[auxvars] [ identifier[v] ] keyword[assert] identifier[all] ( identifier[u] keyword[in] identifier[spins] keyword[for] identifier[u] keyword[in] identifier[self] . identifier[_ancestors] [ identifier[v] ]) keyword[def] identifier[energy_contributions] (): keyword[yield] identifier[subtheta] . identifier[linear] [ identifier[v] ] keyword[for] identifier[u] , identifier[bias] keyword[in] identifier[subtheta] . identifier[adj] [ identifier[v] ]. identifier[items] (): keyword[if] identifier[u] keyword[in] identifier[spins] : keyword[yield] identifier[SpinTimes] ( identifier[spins] [ identifier[u] ], identifier[bias] ) identifier[plus_energy] = identifier[Plus] ( identifier[energy_contributions] ()) identifier[minus_energy] = identifier[SpinTimes] (- literal[int] , identifier[plus_energy] ) keyword[if] identifier[children] : identifier[spins] [ identifier[v] ]= literal[int] identifier[plus_energy] = identifier[Plus] ( identifier[plus_energy] , identifier[self] . identifier[message] ( identifier[children] , identifier[spins] , identifier[subtheta] , identifier[auxvars] )) identifier[spins] [ identifier[v] ]=- literal[int] identifier[minus_energy] = identifier[Plus] ( identifier[minus_energy] , identifier[self] . identifier[message] ( identifier[children] , identifier[spins] , identifier[subtheta] , identifier[auxvars] )) keyword[del] identifier[spins] [ identifier[v] ] identifier[m] = identifier[FreshSymbol] ( identifier[REAL] ) identifier[ancestor_aux] ={ identifier[auxvars] [ identifier[u] ] keyword[if] identifier[spins] [ identifier[u] ]> literal[int] keyword[else] identifier[Not] ( identifier[auxvars] [ identifier[u] ]) keyword[for] identifier[u] keyword[in] identifier[self] . identifier[_ancestors] [ identifier[v] ]} identifier[plus_aux] = identifier[And] ({ identifier[aux] }. identifier[union] ( identifier[ancestor_aux] )) identifier[minus_aux] = identifier[And] ({ identifier[Not] ( identifier[aux] )}. identifier[union] ( identifier[ancestor_aux] )) identifier[self] . identifier[assertions] . identifier[update] ({ identifier[LE] ( identifier[m] , identifier[plus_energy] ), identifier[LE] ( identifier[m] , identifier[minus_energy] ), identifier[Implies] ( identifier[plus_aux] , identifier[GE] ( identifier[m] , identifier[plus_energy] )), identifier[Implies] ( identifier[minus_aux] , identifier[GE] ( identifier[m] , identifier[minus_energy] )) }) identifier[energy_sources] . identifier[add] ( identifier[m] ) keyword[return] identifier[Plus] ( identifier[energy_sources] )
def message(self, tree, spins, subtheta, auxvars): """Determine the energy of the elimination tree. Args: tree (dict): The current elimination tree spins (dict): The current fixed spins subtheta (dict): Theta with spins fixed. auxvars (dict): The auxiliary variables for the given spins. Returns: The formula for the energy of the tree. """ energy_sources = set() for (v, children) in tree.items(): aux = auxvars[v] assert all((u in spins for u in self._ancestors[v])) # build an iterable over all of the energies contributions # that we can exactly determine given v and our known spins # in these contributions we assume that v is positive def energy_contributions(): yield subtheta.linear[v] for (u, bias) in subtheta.adj[v].items(): if u in spins: yield SpinTimes(spins[u], bias) # depends on [control=['if'], data=['u', 'spins']] # depends on [control=['for'], data=[]] plus_energy = Plus(energy_contributions()) minus_energy = SpinTimes(-1, plus_energy) # if the variable has children, we need to recursively determine their energies if children: # set v to be positive spins[v] = 1 plus_energy = Plus(plus_energy, self.message(children, spins, subtheta, auxvars)) spins[v] = -1 minus_energy = Plus(minus_energy, self.message(children, spins, subtheta, auxvars)) del spins[v] # depends on [control=['if'], data=[]] # we now need a real-valued smt variable to be our message m = FreshSymbol(REAL) ancestor_aux = {auxvars[u] if spins[u] > 0 else Not(auxvars[u]) for u in self._ancestors[v]} plus_aux = And({aux}.union(ancestor_aux)) minus_aux = And({Not(aux)}.union(ancestor_aux)) self.assertions.update({LE(m, plus_energy), LE(m, minus_energy), Implies(plus_aux, GE(m, plus_energy)), Implies(minus_aux, GE(m, minus_energy))}) energy_sources.add(m) # depends on [control=['for'], data=[]] return Plus(energy_sources)
def _init(): """Try loading each binding in turn Please note: the entire Qt module is replaced with this code: sys.modules["Qt"] = binding() This means no functions or variables can be called after this has executed. """ preferred = os.getenv("QT_PREFERRED_BINDING") verbose = os.getenv("QT_VERBOSE") is not None if preferred: # Debug mode, used in installer if preferred == "None": sys.modules[__name__].__wrapper_version__ = __version__ return available = { "PySide2": _pyside2, "PySide": _pyside, "PyQt5": _pyqt5, "PyQt4": _pyqt4 } if preferred not in available: raise ImportError("Preferred Qt binding \"%s\" " "not available" % preferred) binding = available[preferred] sys.modules[__name__] = binding() return else: for binding in (_pyside2, _pyqt5, _pyside, _pyqt4): if verbose: sys.stdout.write("Trying %s" % binding.__name__[1:]) try: sys.modules[__name__] = binding() return except ImportError as e: if verbose: sys.stdout.write(" - ImportError(\"%s\")\n" % e) continue # If not binding were found, throw this error raise ImportError("No Qt binding were found.")
def function[_init, parameter[]]: constant[Try loading each binding in turn Please note: the entire Qt module is replaced with this code: sys.modules["Qt"] = binding() This means no functions or variables can be called after this has executed. ] variable[preferred] assign[=] call[name[os].getenv, parameter[constant[QT_PREFERRED_BINDING]]] variable[verbose] assign[=] compare[call[name[os].getenv, parameter[constant[QT_VERBOSE]]] is_not constant[None]] if name[preferred] begin[:] if compare[name[preferred] equal[==] constant[None]] begin[:] call[name[sys].modules][name[__name__]].__wrapper_version__ assign[=] name[__version__] return[None] variable[available] assign[=] dictionary[[<ast.Constant object at 0x7da1b257de10>, <ast.Constant object at 0x7da1b257eda0>, <ast.Constant object at 0x7da1b257f670>, <ast.Constant object at 0x7da1b257d9c0>], [<ast.Name object at 0x7da1b257d540>, <ast.Name object at 0x7da1b257d840>, <ast.Name object at 0x7da1b257ca60>, <ast.Name object at 0x7da1b257c310>]] if compare[name[preferred] <ast.NotIn object at 0x7da2590d7190> name[available]] begin[:] <ast.Raise object at 0x7da1b257e260> variable[binding] assign[=] call[name[available]][name[preferred]] call[name[sys].modules][name[__name__]] assign[=] call[name[binding], parameter[]] return[None] <ast.Raise object at 0x7da1b257c280>
keyword[def] identifier[_init] (): literal[string] identifier[preferred] = identifier[os] . identifier[getenv] ( literal[string] ) identifier[verbose] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[if] identifier[preferred] : keyword[if] identifier[preferred] == literal[string] : identifier[sys] . identifier[modules] [ identifier[__name__] ]. identifier[__wrapper_version__] = identifier[__version__] keyword[return] identifier[available] ={ literal[string] : identifier[_pyside2] , literal[string] : identifier[_pyside] , literal[string] : identifier[_pyqt5] , literal[string] : identifier[_pyqt4] } keyword[if] identifier[preferred] keyword[not] keyword[in] identifier[available] : keyword[raise] identifier[ImportError] ( literal[string] literal[string] % identifier[preferred] ) identifier[binding] = identifier[available] [ identifier[preferred] ] identifier[sys] . identifier[modules] [ identifier[__name__] ]= identifier[binding] () keyword[return] keyword[else] : keyword[for] identifier[binding] keyword[in] ( identifier[_pyside2] , identifier[_pyqt5] , identifier[_pyside] , identifier[_pyqt4] ): keyword[if] identifier[verbose] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] % identifier[binding] . identifier[__name__] [ literal[int] :]) keyword[try] : identifier[sys] . identifier[modules] [ identifier[__name__] ]= identifier[binding] () keyword[return] keyword[except] identifier[ImportError] keyword[as] identifier[e] : keyword[if] identifier[verbose] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] % identifier[e] ) keyword[continue] keyword[raise] identifier[ImportError] ( literal[string] )
def _init(): """Try loading each binding in turn Please note: the entire Qt module is replaced with this code: sys.modules["Qt"] = binding() This means no functions or variables can be called after this has executed. """ preferred = os.getenv('QT_PREFERRED_BINDING') verbose = os.getenv('QT_VERBOSE') is not None if preferred: # Debug mode, used in installer if preferred == 'None': sys.modules[__name__].__wrapper_version__ = __version__ return # depends on [control=['if'], data=[]] available = {'PySide2': _pyside2, 'PySide': _pyside, 'PyQt5': _pyqt5, 'PyQt4': _pyqt4} if preferred not in available: raise ImportError('Preferred Qt binding "%s" not available' % preferred) # depends on [control=['if'], data=['preferred']] binding = available[preferred] sys.modules[__name__] = binding() return # depends on [control=['if'], data=[]] else: for binding in (_pyside2, _pyqt5, _pyside, _pyqt4): if verbose: sys.stdout.write('Trying %s' % binding.__name__[1:]) # depends on [control=['if'], data=[]] try: sys.modules[__name__] = binding() return # depends on [control=['try'], data=[]] except ImportError as e: if verbose: sys.stdout.write(' - ImportError("%s")\n' % e) # depends on [control=['if'], data=[]] continue # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['binding']] # If not binding were found, throw this error raise ImportError('No Qt binding were found.')
def generatePlugins(widgetPath = None, buildPath = None): """ Generates all the plugin files for the system and imports them. :param widgetPath | <str> || None buildPath | <str> || None """ if widgetPath is None: widgetPath = WIDGET_PATH if buildPath is None: buildPath = BUILD_PATH for basepath in widgetPath.split(os.path.pathsep): if not basepath: continue # load packaged widgets for filepath in glob.glob(os.path.join(basepath, '*/__init__.py')): generatePlugins(os.path.dirname(filepath), buildPath) # load module widgets for filepath in glob.glob(os.path.join(basepath, '*.py')): generatePlugin(filepath, buildPath)
def function[generatePlugins, parameter[widgetPath, buildPath]]: constant[ Generates all the plugin files for the system and imports them. :param widgetPath | <str> || None buildPath | <str> || None ] if compare[name[widgetPath] is constant[None]] begin[:] variable[widgetPath] assign[=] name[WIDGET_PATH] if compare[name[buildPath] is constant[None]] begin[:] variable[buildPath] assign[=] name[BUILD_PATH] for taget[name[basepath]] in starred[call[name[widgetPath].split, parameter[name[os].path.pathsep]]] begin[:] if <ast.UnaryOp object at 0x7da204623e50> begin[:] continue for taget[name[filepath]] in starred[call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[basepath], constant[*/__init__.py]]]]]] begin[:] call[name[generatePlugins], parameter[call[name[os].path.dirname, parameter[name[filepath]]], name[buildPath]]] for taget[name[filepath]] in starred[call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[basepath], constant[*.py]]]]]] begin[:] call[name[generatePlugin], parameter[name[filepath], name[buildPath]]]
keyword[def] identifier[generatePlugins] ( identifier[widgetPath] = keyword[None] , identifier[buildPath] = keyword[None] ): literal[string] keyword[if] identifier[widgetPath] keyword[is] keyword[None] : identifier[widgetPath] = identifier[WIDGET_PATH] keyword[if] identifier[buildPath] keyword[is] keyword[None] : identifier[buildPath] = identifier[BUILD_PATH] keyword[for] identifier[basepath] keyword[in] identifier[widgetPath] . identifier[split] ( identifier[os] . identifier[path] . identifier[pathsep] ): keyword[if] keyword[not] identifier[basepath] : keyword[continue] keyword[for] identifier[filepath] keyword[in] identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[basepath] , literal[string] )): identifier[generatePlugins] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filepath] ), identifier[buildPath] ) keyword[for] identifier[filepath] keyword[in] identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[basepath] , literal[string] )): identifier[generatePlugin] ( identifier[filepath] , identifier[buildPath] )
def generatePlugins(widgetPath=None, buildPath=None): """ Generates all the plugin files for the system and imports them. :param widgetPath | <str> || None buildPath | <str> || None """ if widgetPath is None: widgetPath = WIDGET_PATH # depends on [control=['if'], data=['widgetPath']] if buildPath is None: buildPath = BUILD_PATH # depends on [control=['if'], data=['buildPath']] for basepath in widgetPath.split(os.path.pathsep): if not basepath: continue # depends on [control=['if'], data=[]] # load packaged widgets for filepath in glob.glob(os.path.join(basepath, '*/__init__.py')): generatePlugins(os.path.dirname(filepath), buildPath) # depends on [control=['for'], data=['filepath']] # load module widgets for filepath in glob.glob(os.path.join(basepath, '*.py')): generatePlugin(filepath, buildPath) # depends on [control=['for'], data=['filepath']] # depends on [control=['for'], data=['basepath']]
def _format_select(formatter, name): """Modify the query selector by applying any formatters to it. Parameters ---------- formatter : str Hyphen-delimited formatter string where formatters are applied inside-out, e.g. the formatter string SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)). name: str The name of the selector to apply formatters to. Returns ------- str The formatted selector """ for caster in formatter.split('-'): if caster == 'SEC_TO_MICRO': name = "%s*1000000" % name elif ':' in caster: caster, args = caster.split(':') name = "%s(%s,%s)" % (caster, name, args) else: name = "%s(%s)" % (caster, name) return name
def function[_format_select, parameter[formatter, name]]: constant[Modify the query selector by applying any formatters to it. Parameters ---------- formatter : str Hyphen-delimited formatter string where formatters are applied inside-out, e.g. the formatter string SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)). name: str The name of the selector to apply formatters to. Returns ------- str The formatted selector ] for taget[name[caster]] in starred[call[name[formatter].split, parameter[constant[-]]]] begin[:] if compare[name[caster] equal[==] constant[SEC_TO_MICRO]] begin[:] variable[name] assign[=] binary_operation[constant[%s*1000000] <ast.Mod object at 0x7da2590d6920> name[name]] return[name[name]]
keyword[def] identifier[_format_select] ( identifier[formatter] , identifier[name] ): literal[string] keyword[for] identifier[caster] keyword[in] identifier[formatter] . identifier[split] ( literal[string] ): keyword[if] identifier[caster] == literal[string] : identifier[name] = literal[string] % identifier[name] keyword[elif] literal[string] keyword[in] identifier[caster] : identifier[caster] , identifier[args] = identifier[caster] . identifier[split] ( literal[string] ) identifier[name] = literal[string] %( identifier[caster] , identifier[name] , identifier[args] ) keyword[else] : identifier[name] = literal[string] %( identifier[caster] , identifier[name] ) keyword[return] identifier[name]
def _format_select(formatter, name): """Modify the query selector by applying any formatters to it. Parameters ---------- formatter : str Hyphen-delimited formatter string where formatters are applied inside-out, e.g. the formatter string SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)). name: str The name of the selector to apply formatters to. Returns ------- str The formatted selector """ for caster in formatter.split('-'): if caster == 'SEC_TO_MICRO': name = '%s*1000000' % name # depends on [control=['if'], data=[]] elif ':' in caster: (caster, args) = caster.split(':') name = '%s(%s,%s)' % (caster, name, args) # depends on [control=['if'], data=['caster']] else: name = '%s(%s)' % (caster, name) # depends on [control=['for'], data=['caster']] return name
def p_path_sum(self, p): """ path_sum : ctx_path | path_sum PLUS ctx_path""" if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[3]]
def function[p_path_sum, parameter[self, p]]: constant[ path_sum : ctx_path | path_sum PLUS ctx_path] if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:] call[name[p]][constant[0]] assign[=] list[[<ast.Subscript object at 0x7da1b0285150>]]
keyword[def] identifier[p_path_sum] ( identifier[self] , identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]=[ identifier[p] [ literal[int] ]] keyword[else] : identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]+[ identifier[p] [ literal[int] ]]
def p_path_sum(self, p): """ path_sum : ctx_path | path_sum PLUS ctx_path""" if len(p) == 2: p[0] = [p[1]] # depends on [control=['if'], data=[]] else: p[0] = p[1] + [p[3]]
def _update_from_file(self, filename): """ Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. """ if os.path.exists(filename): try: with open(filename, 'r') as config_file: yaml_dict = yaml.safe_load(config_file.read()) if yaml_dict is not None: self._update_dict(self._config, yaml_dict) except IsADirectoryError: raise ConfigLoadError( 'The specified configuration file is a directory not a file') else: raise ConfigLoadError('The config file {} does not exist'.format(filename))
def function[_update_from_file, parameter[self, filename]]: constant[ Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. ] if call[name[os].path.exists, parameter[name[filename]]] begin[:] <ast.Try object at 0x7da1b106d090>
keyword[def] identifier[_update_from_file] ( identifier[self] , identifier[filename] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ): keyword[try] : keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[config_file] : identifier[yaml_dict] = identifier[yaml] . identifier[safe_load] ( identifier[config_file] . identifier[read] ()) keyword[if] identifier[yaml_dict] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_update_dict] ( identifier[self] . identifier[_config] , identifier[yaml_dict] ) keyword[except] identifier[IsADirectoryError] : keyword[raise] identifier[ConfigLoadError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ConfigLoadError] ( literal[string] . identifier[format] ( identifier[filename] ))
def _update_from_file(self, filename): """ Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. """ if os.path.exists(filename): try: with open(filename, 'r') as config_file: yaml_dict = yaml.safe_load(config_file.read()) if yaml_dict is not None: self._update_dict(self._config, yaml_dict) # depends on [control=['if'], data=['yaml_dict']] # depends on [control=['with'], data=['config_file']] # depends on [control=['try'], data=[]] except IsADirectoryError: raise ConfigLoadError('The specified configuration file is a directory not a file') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: raise ConfigLoadError('The config file {} does not exist'.format(filename))
def package_regex_filter(config, message, pattern=None, *args, **kw): """ All packages matching a regular expression Use this rule to include messages that relate to packages that match particular regular expressions (*i.e., (maven|javapackages-tools|maven-surefire)*). """ pattern = kw.get('pattern', pattern) if pattern: packages = fmn.rules.utils.msg2packages(message, **config) regex = fmn.rules.utils.compile_regex(pattern.encode('utf-8')) return any([regex.search(p.encode('utf-8')) for p in packages])
def function[package_regex_filter, parameter[config, message, pattern]]: constant[ All packages matching a regular expression Use this rule to include messages that relate to packages that match particular regular expressions (*i.e., (maven|javapackages-tools|maven-surefire)*). ] variable[pattern] assign[=] call[name[kw].get, parameter[constant[pattern], name[pattern]]] if name[pattern] begin[:] variable[packages] assign[=] call[name[fmn].rules.utils.msg2packages, parameter[name[message]]] variable[regex] assign[=] call[name[fmn].rules.utils.compile_regex, parameter[call[name[pattern].encode, parameter[constant[utf-8]]]]] return[call[name[any], parameter[<ast.ListComp object at 0x7da20c76f490>]]]
keyword[def] identifier[package_regex_filter] ( identifier[config] , identifier[message] , identifier[pattern] = keyword[None] ,* identifier[args] ,** identifier[kw] ): literal[string] identifier[pattern] = identifier[kw] . identifier[get] ( literal[string] , identifier[pattern] ) keyword[if] identifier[pattern] : identifier[packages] = identifier[fmn] . identifier[rules] . identifier[utils] . identifier[msg2packages] ( identifier[message] ,** identifier[config] ) identifier[regex] = identifier[fmn] . identifier[rules] . identifier[utils] . identifier[compile_regex] ( identifier[pattern] . identifier[encode] ( literal[string] )) keyword[return] identifier[any] ([ identifier[regex] . identifier[search] ( identifier[p] . identifier[encode] ( literal[string] )) keyword[for] identifier[p] keyword[in] identifier[packages] ])
def package_regex_filter(config, message, pattern=None, *args, **kw): """ All packages matching a regular expression Use this rule to include messages that relate to packages that match particular regular expressions (*i.e., (maven|javapackages-tools|maven-surefire)*). """ pattern = kw.get('pattern', pattern) if pattern: packages = fmn.rules.utils.msg2packages(message, **config) regex = fmn.rules.utils.compile_regex(pattern.encode('utf-8')) return any([regex.search(p.encode('utf-8')) for p in packages]) # depends on [control=['if'], data=[]]
def tdecode(pkt, *args): """Run tshark to decode and display the packet. If no args defined uses -V""" if not args: args = [ "-V" ] fname = get_temp_file() wrpcap(fname,[pkt]) subprocess.call(["tshark", "-r", fname] + list(args))
def function[tdecode, parameter[pkt]]: constant[Run tshark to decode and display the packet. If no args defined uses -V] if <ast.UnaryOp object at 0x7da1b12abeb0> begin[:] variable[args] assign[=] list[[<ast.Constant object at 0x7da1b12aa1a0>]] variable[fname] assign[=] call[name[get_temp_file], parameter[]] call[name[wrpcap], parameter[name[fname], list[[<ast.Name object at 0x7da1b12a9600>]]]] call[name[subprocess].call, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b12aaa40>, <ast.Constant object at 0x7da1b12a9ff0>, <ast.Name object at 0x7da1b1295990>]] + call[name[list], parameter[name[args]]]]]]
keyword[def] identifier[tdecode] ( identifier[pkt] ,* identifier[args] ): literal[string] keyword[if] keyword[not] identifier[args] : identifier[args] =[ literal[string] ] identifier[fname] = identifier[get_temp_file] () identifier[wrpcap] ( identifier[fname] ,[ identifier[pkt] ]) identifier[subprocess] . identifier[call] ([ literal[string] , literal[string] , identifier[fname] ]+ identifier[list] ( identifier[args] ))
def tdecode(pkt, *args): """Run tshark to decode and display the packet. If no args defined uses -V""" if not args: args = ['-V'] # depends on [control=['if'], data=[]] fname = get_temp_file() wrpcap(fname, [pkt]) subprocess.call(['tshark', '-r', fname] + list(args))
def wrapper(vertices_resources, vertices_applications, nets, net_keys, machine, constraints=[], reserve_monitor=True, align_sdram=True, place=default_place, place_kwargs={}, allocate=default_allocate, allocate_kwargs={}, route=default_route, route_kwargs={}, core_resource=Cores, sdram_resource=SDRAM): """Wrapper for core place-and-route tasks for the common case. At a high level this function essentially takes a set of vertices and nets and produces placements, memory allocations, routing tables and application loading information. .. warning:: This function is deprecated. New users should use :py:func:`.place_and_route_wrapper` along with :py:meth:`rig.machine_control.MachineController.get_system_info` in place of this function. The new wrapper automatically reserves cores and SDRAM already in use in the target machine, improving on the behaviour of this wrapper which blindly reserves certain ranges of resources presuming only core 0 (the monitor processor) is not idle. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} A dictionary from vertex to the required resources for that vertex. This dictionary must include an entry for every vertex in the application. Resource requirements are specified by a dictionary `{resource: quantity, ...}` where `resource` is some resource identifier and `quantity` is a non-negative integer representing the quantity of that resource required. vertices_applications : {vertex: application, ...} A dictionary from vertices to the application binary to load onto cores associated with that vertex. Applications are given as a string containing the file name of the binary to load. nets : [:py:class:`~rig.netlist.Net`, ...] A list (in no particular order) defining the nets connecting vertices. net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...} A dictionary from nets to (key, mask) tuples to be used in SpiNNaker routing tables for routes implementing this net. The key and mask should be given as 32-bit integers. machine : :py:class:`rig.place_and_route.Machine` A data structure which defines the resources available in the target SpiNNaker machine. constraints : [constraint, ...] A list of constraints on placement, allocation and routing. Available constraints are provided in the :py:mod:`rig.place_and_route.constraints` module. reserve_monitor : bool (Default: True) **Optional.** If True, reserve core zero since it will be used as the monitor processor using a :py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`. align_sdram : bool (Default: True) **Optional.** If True, SDRAM allocations will be aligned to 4-byte addresses. Specifically, the supplied constraints will be augmented with an `AlignResourceConstraint(sdram_resource, 4)`. place : function (Default: :py:func:`rig.place_and_route.place`) **Optional.** Placement algorithm to use. place_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the placer. allocate : function (Default: :py:func:`rig.place_and_route.allocate`) **Optional.** Allocation algorithm to use. allocate_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the allocator. route : function (Default: :py:func:`rig.place_and_route.route`) **Optional.** Routing algorithm to use. route_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the router. core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`) **Optional.** The resource identifier used for cores. sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`) **Optional.** The resource identifier used for SDRAM. Returns ------- placements : {vertex: (x, y), ...} A dictionary from vertices to the chip coordinate produced by placement. allocations : {vertex: {resource: slice, ...}, ...} A dictionary from vertices to the resources allocated to it. Resource allocations are dictionaries from resources to a :py:class:`slice` defining the range of the given resource type allocated to the vertex. These :py:class:`slice` objects have `start` <= `end` and `step` set to None. application_map : {application: {(x, y): set([core_num, ...]), ...}, ...} A dictionary from application to the set of cores it should be loaded onto. The set of cores is given as a dictionary from chip to sets of core numbers. routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`, \ ...], ...} The generated routing tables. Provided as a dictionary from chip to a list of routing table entries. """ warnings.warn("rig.place_and_route.wrapper is deprecated " "use rig.place_and_route.place_and_route_wrapper instead in " "new applications.", DeprecationWarning) constraints = constraints[:] # Augment constraints with (historically) commonly used constraints if reserve_monitor: constraints.append( ReserveResourceConstraint(core_resource, slice(0, 1))) if align_sdram: constraints.append(AlignResourceConstraint(sdram_resource, 4)) # Place/Allocate/Route placements = place(vertices_resources, nets, machine, constraints, **place_kwargs) allocations = allocate(vertices_resources, nets, machine, constraints, placements, **allocate_kwargs) routes = route(vertices_resources, nets, machine, constraints, placements, allocations, core_resource, **route_kwargs) # Build data-structures ready to feed to the machine loading functions application_map = build_application_map(vertices_applications, placements, allocations, core_resource) # Build data-structures ready to feed to the machine loading functions from rig.place_and_route.utils import build_routing_tables routing_tables = build_routing_tables(routes, net_keys) return placements, allocations, application_map, routing_tables
def function[wrapper, parameter[vertices_resources, vertices_applications, nets, net_keys, machine, constraints, reserve_monitor, align_sdram, place, place_kwargs, allocate, allocate_kwargs, route, route_kwargs, core_resource, sdram_resource]]: constant[Wrapper for core place-and-route tasks for the common case. At a high level this function essentially takes a set of vertices and nets and produces placements, memory allocations, routing tables and application loading information. .. warning:: This function is deprecated. New users should use :py:func:`.place_and_route_wrapper` along with :py:meth:`rig.machine_control.MachineController.get_system_info` in place of this function. The new wrapper automatically reserves cores and SDRAM already in use in the target machine, improving on the behaviour of this wrapper which blindly reserves certain ranges of resources presuming only core 0 (the monitor processor) is not idle. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} A dictionary from vertex to the required resources for that vertex. This dictionary must include an entry for every vertex in the application. Resource requirements are specified by a dictionary `{resource: quantity, ...}` where `resource` is some resource identifier and `quantity` is a non-negative integer representing the quantity of that resource required. vertices_applications : {vertex: application, ...} A dictionary from vertices to the application binary to load onto cores associated with that vertex. Applications are given as a string containing the file name of the binary to load. nets : [:py:class:`~rig.netlist.Net`, ...] A list (in no particular order) defining the nets connecting vertices. net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...} A dictionary from nets to (key, mask) tuples to be used in SpiNNaker routing tables for routes implementing this net. The key and mask should be given as 32-bit integers. machine : :py:class:`rig.place_and_route.Machine` A data structure which defines the resources available in the target SpiNNaker machine. constraints : [constraint, ...] A list of constraints on placement, allocation and routing. Available constraints are provided in the :py:mod:`rig.place_and_route.constraints` module. reserve_monitor : bool (Default: True) **Optional.** If True, reserve core zero since it will be used as the monitor processor using a :py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`. align_sdram : bool (Default: True) **Optional.** If True, SDRAM allocations will be aligned to 4-byte addresses. Specifically, the supplied constraints will be augmented with an `AlignResourceConstraint(sdram_resource, 4)`. place : function (Default: :py:func:`rig.place_and_route.place`) **Optional.** Placement algorithm to use. place_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the placer. allocate : function (Default: :py:func:`rig.place_and_route.allocate`) **Optional.** Allocation algorithm to use. allocate_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the allocator. route : function (Default: :py:func:`rig.place_and_route.route`) **Optional.** Routing algorithm to use. route_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the router. core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`) **Optional.** The resource identifier used for cores. sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`) **Optional.** The resource identifier used for SDRAM. Returns ------- placements : {vertex: (x, y), ...} A dictionary from vertices to the chip coordinate produced by placement. allocations : {vertex: {resource: slice, ...}, ...} A dictionary from vertices to the resources allocated to it. Resource allocations are dictionaries from resources to a :py:class:`slice` defining the range of the given resource type allocated to the vertex. These :py:class:`slice` objects have `start` <= `end` and `step` set to None. application_map : {application: {(x, y): set([core_num, ...]), ...}, ...} A dictionary from application to the set of cores it should be loaded onto. The set of cores is given as a dictionary from chip to sets of core numbers. routing_tables : {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...} The generated routing tables. Provided as a dictionary from chip to a list of routing table entries. ] call[name[warnings].warn, parameter[constant[rig.place_and_route.wrapper is deprecated use rig.place_and_route.place_and_route_wrapper instead in new applications.], name[DeprecationWarning]]] variable[constraints] assign[=] call[name[constraints]][<ast.Slice object at 0x7da1b19cbc10>] if name[reserve_monitor] begin[:] call[name[constraints].append, parameter[call[name[ReserveResourceConstraint], parameter[name[core_resource], call[name[slice], parameter[constant[0], constant[1]]]]]]] if name[align_sdram] begin[:] call[name[constraints].append, parameter[call[name[AlignResourceConstraint], parameter[name[sdram_resource], constant[4]]]]] variable[placements] assign[=] call[name[place], parameter[name[vertices_resources], name[nets], name[machine], name[constraints]]] variable[allocations] assign[=] call[name[allocate], parameter[name[vertices_resources], name[nets], name[machine], name[constraints], name[placements]]] variable[routes] assign[=] call[name[route], parameter[name[vertices_resources], name[nets], name[machine], name[constraints], name[placements], name[allocations], name[core_resource]]] variable[application_map] assign[=] call[name[build_application_map], parameter[name[vertices_applications], name[placements], name[allocations], name[core_resource]]] from relative_module[rig.place_and_route.utils] import module[build_routing_tables] variable[routing_tables] assign[=] call[name[build_routing_tables], parameter[name[routes], name[net_keys]]] return[tuple[[<ast.Name object at 0x7da1b195eb90>, <ast.Name object at 0x7da1b195ce20>, <ast.Name object at 0x7da1b195dde0>, <ast.Name object at 0x7da1b195eb60>]]]
keyword[def] identifier[wrapper] ( identifier[vertices_resources] , identifier[vertices_applications] , identifier[nets] , identifier[net_keys] , identifier[machine] , identifier[constraints] =[], identifier[reserve_monitor] = keyword[True] , identifier[align_sdram] = keyword[True] , identifier[place] = identifier[default_place] , identifier[place_kwargs] ={}, identifier[allocate] = identifier[default_allocate] , identifier[allocate_kwargs] ={}, identifier[route] = identifier[default_route] , identifier[route_kwargs] ={}, identifier[core_resource] = identifier[Cores] , identifier[sdram_resource] = identifier[SDRAM] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] literal[string] , identifier[DeprecationWarning] ) identifier[constraints] = identifier[constraints] [:] keyword[if] identifier[reserve_monitor] : identifier[constraints] . identifier[append] ( identifier[ReserveResourceConstraint] ( identifier[core_resource] , identifier[slice] ( literal[int] , literal[int] ))) keyword[if] identifier[align_sdram] : identifier[constraints] . identifier[append] ( identifier[AlignResourceConstraint] ( identifier[sdram_resource] , literal[int] )) identifier[placements] = identifier[place] ( identifier[vertices_resources] , identifier[nets] , identifier[machine] , identifier[constraints] , ** identifier[place_kwargs] ) identifier[allocations] = identifier[allocate] ( identifier[vertices_resources] , identifier[nets] , identifier[machine] , identifier[constraints] , identifier[placements] ,** identifier[allocate_kwargs] ) identifier[routes] = identifier[route] ( identifier[vertices_resources] , identifier[nets] , identifier[machine] , identifier[constraints] , identifier[placements] , identifier[allocations] , identifier[core_resource] ,** identifier[route_kwargs] ) identifier[application_map] = identifier[build_application_map] ( identifier[vertices_applications] , identifier[placements] , identifier[allocations] , identifier[core_resource] ) keyword[from] identifier[rig] . identifier[place_and_route] . identifier[utils] keyword[import] identifier[build_routing_tables] identifier[routing_tables] = identifier[build_routing_tables] ( identifier[routes] , identifier[net_keys] ) keyword[return] identifier[placements] , identifier[allocations] , identifier[application_map] , identifier[routing_tables]
def wrapper(vertices_resources, vertices_applications, nets, net_keys, machine, constraints=[], reserve_monitor=True, align_sdram=True, place=default_place, place_kwargs={}, allocate=default_allocate, allocate_kwargs={}, route=default_route, route_kwargs={}, core_resource=Cores, sdram_resource=SDRAM): """Wrapper for core place-and-route tasks for the common case. At a high level this function essentially takes a set of vertices and nets and produces placements, memory allocations, routing tables and application loading information. .. warning:: This function is deprecated. New users should use :py:func:`.place_and_route_wrapper` along with :py:meth:`rig.machine_control.MachineController.get_system_info` in place of this function. The new wrapper automatically reserves cores and SDRAM already in use in the target machine, improving on the behaviour of this wrapper which blindly reserves certain ranges of resources presuming only core 0 (the monitor processor) is not idle. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} A dictionary from vertex to the required resources for that vertex. This dictionary must include an entry for every vertex in the application. Resource requirements are specified by a dictionary `{resource: quantity, ...}` where `resource` is some resource identifier and `quantity` is a non-negative integer representing the quantity of that resource required. vertices_applications : {vertex: application, ...} A dictionary from vertices to the application binary to load onto cores associated with that vertex. Applications are given as a string containing the file name of the binary to load. nets : [:py:class:`~rig.netlist.Net`, ...] A list (in no particular order) defining the nets connecting vertices. net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...} A dictionary from nets to (key, mask) tuples to be used in SpiNNaker routing tables for routes implementing this net. The key and mask should be given as 32-bit integers. machine : :py:class:`rig.place_and_route.Machine` A data structure which defines the resources available in the target SpiNNaker machine. constraints : [constraint, ...] A list of constraints on placement, allocation and routing. Available constraints are provided in the :py:mod:`rig.place_and_route.constraints` module. reserve_monitor : bool (Default: True) **Optional.** If True, reserve core zero since it will be used as the monitor processor using a :py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`. align_sdram : bool (Default: True) **Optional.** If True, SDRAM allocations will be aligned to 4-byte addresses. Specifically, the supplied constraints will be augmented with an `AlignResourceConstraint(sdram_resource, 4)`. place : function (Default: :py:func:`rig.place_and_route.place`) **Optional.** Placement algorithm to use. place_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the placer. allocate : function (Default: :py:func:`rig.place_and_route.allocate`) **Optional.** Allocation algorithm to use. allocate_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the allocator. route : function (Default: :py:func:`rig.place_and_route.route`) **Optional.** Routing algorithm to use. route_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the router. core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`) **Optional.** The resource identifier used for cores. sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`) **Optional.** The resource identifier used for SDRAM. Returns ------- placements : {vertex: (x, y), ...} A dictionary from vertices to the chip coordinate produced by placement. allocations : {vertex: {resource: slice, ...}, ...} A dictionary from vertices to the resources allocated to it. Resource allocations are dictionaries from resources to a :py:class:`slice` defining the range of the given resource type allocated to the vertex. These :py:class:`slice` objects have `start` <= `end` and `step` set to None. application_map : {application: {(x, y): set([core_num, ...]), ...}, ...} A dictionary from application to the set of cores it should be loaded onto. The set of cores is given as a dictionary from chip to sets of core numbers. routing_tables : {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...} The generated routing tables. Provided as a dictionary from chip to a list of routing table entries. """ warnings.warn('rig.place_and_route.wrapper is deprecated use rig.place_and_route.place_and_route_wrapper instead in new applications.', DeprecationWarning) constraints = constraints[:] # Augment constraints with (historically) commonly used constraints if reserve_monitor: constraints.append(ReserveResourceConstraint(core_resource, slice(0, 1))) # depends on [control=['if'], data=[]] if align_sdram: constraints.append(AlignResourceConstraint(sdram_resource, 4)) # depends on [control=['if'], data=[]] # Place/Allocate/Route placements = place(vertices_resources, nets, machine, constraints, **place_kwargs) allocations = allocate(vertices_resources, nets, machine, constraints, placements, **allocate_kwargs) routes = route(vertices_resources, nets, machine, constraints, placements, allocations, core_resource, **route_kwargs) # Build data-structures ready to feed to the machine loading functions application_map = build_application_map(vertices_applications, placements, allocations, core_resource) # Build data-structures ready to feed to the machine loading functions from rig.place_and_route.utils import build_routing_tables routing_tables = build_routing_tables(routes, net_keys) return (placements, allocations, application_map, routing_tables)
def clone(self, ignore=()): """ Clone this dag using a set of substitutions. Traverse the dag in topological order. """ nodes = [ clone(node, self.substitutions, ignore) for node in toposorted(self.nodes, self.edges) ] return DAG(nodes=nodes, substitutions=self.substitutions).build_edges()
def function[clone, parameter[self, ignore]]: constant[ Clone this dag using a set of substitutions. Traverse the dag in topological order. ] variable[nodes] assign[=] <ast.ListComp object at 0x7da1b0c3ef20> return[call[call[name[DAG], parameter[]].build_edges, parameter[]]]
keyword[def] identifier[clone] ( identifier[self] , identifier[ignore] =()): literal[string] identifier[nodes] =[ identifier[clone] ( identifier[node] , identifier[self] . identifier[substitutions] , identifier[ignore] ) keyword[for] identifier[node] keyword[in] identifier[toposorted] ( identifier[self] . identifier[nodes] , identifier[self] . identifier[edges] ) ] keyword[return] identifier[DAG] ( identifier[nodes] = identifier[nodes] , identifier[substitutions] = identifier[self] . identifier[substitutions] ). identifier[build_edges] ()
def clone(self, ignore=()): """ Clone this dag using a set of substitutions. Traverse the dag in topological order. """ nodes = [clone(node, self.substitutions, ignore) for node in toposorted(self.nodes, self.edges)] return DAG(nodes=nodes, substitutions=self.substitutions).build_edges()
def exit_enable_mode(self, exit_command=""): """Exit enable mode. :param exit_command: Command that exits the session from privileged mode :type exit_command: str """ output = "" if self.check_enable_mode(): self.write_channel(self.normalize_cmd(exit_command)) output += self.read_until_prompt() if self.check_enable_mode(): raise ValueError("Failed to exit enable mode.") return output
def function[exit_enable_mode, parameter[self, exit_command]]: constant[Exit enable mode. :param exit_command: Command that exits the session from privileged mode :type exit_command: str ] variable[output] assign[=] constant[] if call[name[self].check_enable_mode, parameter[]] begin[:] call[name[self].write_channel, parameter[call[name[self].normalize_cmd, parameter[name[exit_command]]]]] <ast.AugAssign object at 0x7da1b1f0bb20> if call[name[self].check_enable_mode, parameter[]] begin[:] <ast.Raise object at 0x7da1b1f0b820> return[name[output]]
keyword[def] identifier[exit_enable_mode] ( identifier[self] , identifier[exit_command] = literal[string] ): literal[string] identifier[output] = literal[string] keyword[if] identifier[self] . identifier[check_enable_mode] (): identifier[self] . identifier[write_channel] ( identifier[self] . identifier[normalize_cmd] ( identifier[exit_command] )) identifier[output] += identifier[self] . identifier[read_until_prompt] () keyword[if] identifier[self] . identifier[check_enable_mode] (): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[output]
def exit_enable_mode(self, exit_command=''): """Exit enable mode. :param exit_command: Command that exits the session from privileged mode :type exit_command: str """ output = '' if self.check_enable_mode(): self.write_channel(self.normalize_cmd(exit_command)) output += self.read_until_prompt() if self.check_enable_mode(): raise ValueError('Failed to exit enable mode.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return output
def _extract_features(self): """ Extracts and sets the feature data from the log file necessary for a reduction """ for parsed_line in self.parsed_lines: # If it's ssh, we can handle it if parsed_line.get('program') == 'sshd': result = self._parse_auth_message(parsed_line['message']) # Add the ip if we have it if 'ip' in result: self.features['ips'].append(result['ip']) # If we haven't seen the ip, add it if result['ip'] not in self.ips_to_pids: # Make the value a list of pids self.ips_to_pids[result['ip']] = [parsed_line['processid']] else: # If we have seen the ip before, add the pid if it's a new one if parsed_line['processid'] not in self.ips_to_pids[result['ip']]: self.ips_to_pids[result['ip']].append(parsed_line['processid'])
def function[_extract_features, parameter[self]]: constant[ Extracts and sets the feature data from the log file necessary for a reduction ] for taget[name[parsed_line]] in starred[name[self].parsed_lines] begin[:] if compare[call[name[parsed_line].get, parameter[constant[program]]] equal[==] constant[sshd]] begin[:] variable[result] assign[=] call[name[self]._parse_auth_message, parameter[call[name[parsed_line]][constant[message]]]] if compare[constant[ip] in name[result]] begin[:] call[call[name[self].features][constant[ips]].append, parameter[call[name[result]][constant[ip]]]] if compare[call[name[result]][constant[ip]] <ast.NotIn object at 0x7da2590d7190> name[self].ips_to_pids] begin[:] call[name[self].ips_to_pids][call[name[result]][constant[ip]]] assign[=] list[[<ast.Subscript object at 0x7da18eb566e0>]]
keyword[def] identifier[_extract_features] ( identifier[self] ): literal[string] keyword[for] identifier[parsed_line] keyword[in] identifier[self] . identifier[parsed_lines] : keyword[if] identifier[parsed_line] . identifier[get] ( literal[string] )== literal[string] : identifier[result] = identifier[self] . identifier[_parse_auth_message] ( identifier[parsed_line] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[result] : identifier[self] . identifier[features] [ literal[string] ]. identifier[append] ( identifier[result] [ literal[string] ]) keyword[if] identifier[result] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[ips_to_pids] : identifier[self] . identifier[ips_to_pids] [ identifier[result] [ literal[string] ]]=[ identifier[parsed_line] [ literal[string] ]] keyword[else] : keyword[if] identifier[parsed_line] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[ips_to_pids] [ identifier[result] [ literal[string] ]]: identifier[self] . identifier[ips_to_pids] [ identifier[result] [ literal[string] ]]. identifier[append] ( identifier[parsed_line] [ literal[string] ])
def _extract_features(self): """ Extracts and sets the feature data from the log file necessary for a reduction """ for parsed_line in self.parsed_lines: # If it's ssh, we can handle it if parsed_line.get('program') == 'sshd': result = self._parse_auth_message(parsed_line['message']) # Add the ip if we have it if 'ip' in result: self.features['ips'].append(result['ip']) # If we haven't seen the ip, add it if result['ip'] not in self.ips_to_pids: # Make the value a list of pids self.ips_to_pids[result['ip']] = [parsed_line['processid']] # depends on [control=['if'], data=[]] # If we have seen the ip before, add the pid if it's a new one elif parsed_line['processid'] not in self.ips_to_pids[result['ip']]: self.ips_to_pids[result['ip']].append(parsed_line['processid']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['parsed_line']]
def _do_batched_write_command( namespace, operation, command, docs, check_keys, opts, ctx): """Batched write commands entry point.""" if ctx.sock_info.compression_context: return _batched_write_command_compressed( namespace, operation, command, docs, check_keys, opts, ctx) return _batched_write_command( namespace, operation, command, docs, check_keys, opts, ctx)
def function[_do_batched_write_command, parameter[namespace, operation, command, docs, check_keys, opts, ctx]]: constant[Batched write commands entry point.] if name[ctx].sock_info.compression_context begin[:] return[call[name[_batched_write_command_compressed], parameter[name[namespace], name[operation], name[command], name[docs], name[check_keys], name[opts], name[ctx]]]] return[call[name[_batched_write_command], parameter[name[namespace], name[operation], name[command], name[docs], name[check_keys], name[opts], name[ctx]]]]
keyword[def] identifier[_do_batched_write_command] ( identifier[namespace] , identifier[operation] , identifier[command] , identifier[docs] , identifier[check_keys] , identifier[opts] , identifier[ctx] ): literal[string] keyword[if] identifier[ctx] . identifier[sock_info] . identifier[compression_context] : keyword[return] identifier[_batched_write_command_compressed] ( identifier[namespace] , identifier[operation] , identifier[command] , identifier[docs] , identifier[check_keys] , identifier[opts] , identifier[ctx] ) keyword[return] identifier[_batched_write_command] ( identifier[namespace] , identifier[operation] , identifier[command] , identifier[docs] , identifier[check_keys] , identifier[opts] , identifier[ctx] )
def _do_batched_write_command(namespace, operation, command, docs, check_keys, opts, ctx): """Batched write commands entry point.""" if ctx.sock_info.compression_context: return _batched_write_command_compressed(namespace, operation, command, docs, check_keys, opts, ctx) # depends on [control=['if'], data=[]] return _batched_write_command(namespace, operation, command, docs, check_keys, opts, ctx)
def setbit(self, name, offset, value): """ Flag the ``offset`` in ``name`` as ``value``. Returns a boolean indicating the previous value of ``offset``. """ value = value and 1 or 0 return self.execute_command('SETBIT', name, offset, value)
def function[setbit, parameter[self, name, offset, value]]: constant[ Flag the ``offset`` in ``name`` as ``value``. Returns a boolean indicating the previous value of ``offset``. ] variable[value] assign[=] <ast.BoolOp object at 0x7da18dc9b820> return[call[name[self].execute_command, parameter[constant[SETBIT], name[name], name[offset], name[value]]]]
keyword[def] identifier[setbit] ( identifier[self] , identifier[name] , identifier[offset] , identifier[value] ): literal[string] identifier[value] = identifier[value] keyword[and] literal[int] keyword[or] literal[int] keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[offset] , identifier[value] )
def setbit(self, name, offset, value): """ Flag the ``offset`` in ``name`` as ``value``. Returns a boolean indicating the previous value of ``offset``. """ value = value and 1 or 0 return self.execute_command('SETBIT', name, offset, value)
def _z(self, x): """Standardize input `x` to a unit normal.""" with tf.name_scope("standardize"): return (x - self.loc) / self.scale
def function[_z, parameter[self, x]]: constant[Standardize input `x` to a unit normal.] with call[name[tf].name_scope, parameter[constant[standardize]]] begin[:] return[binary_operation[binary_operation[name[x] - name[self].loc] / name[self].scale]]
keyword[def] identifier[_z] ( identifier[self] , identifier[x] ): literal[string] keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ): keyword[return] ( identifier[x] - identifier[self] . identifier[loc] )/ identifier[self] . identifier[scale]
def _z(self, x): """Standardize input `x` to a unit normal.""" with tf.name_scope('standardize'): return (x - self.loc) / self.scale # depends on [control=['with'], data=[]]
def default(self, obj, **kwargs): """Handles the adapting of special types from mongo""" if isinstance(obj, datetime.datetime): return time.mktime(obj.timetuple()) if isinstance(obj, Timestamp): return obj.time if isinstance(obj, ObjectId): return obj.__str__() return JSONEncoder.default(self, obj)
def function[default, parameter[self, obj]]: constant[Handles the adapting of special types from mongo] if call[name[isinstance], parameter[name[obj], name[datetime].datetime]] begin[:] return[call[name[time].mktime, parameter[call[name[obj].timetuple, parameter[]]]]] if call[name[isinstance], parameter[name[obj], name[Timestamp]]] begin[:] return[name[obj].time] if call[name[isinstance], parameter[name[obj], name[ObjectId]]] begin[:] return[call[name[obj].__str__, parameter[]]] return[call[name[JSONEncoder].default, parameter[name[self], name[obj]]]]
keyword[def] identifier[default] ( identifier[self] , identifier[obj] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[datetime] . identifier[datetime] ): keyword[return] identifier[time] . identifier[mktime] ( identifier[obj] . identifier[timetuple] ()) keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Timestamp] ): keyword[return] identifier[obj] . identifier[time] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[ObjectId] ): keyword[return] identifier[obj] . identifier[__str__] () keyword[return] identifier[JSONEncoder] . identifier[default] ( identifier[self] , identifier[obj] )
def default(self, obj, **kwargs): """Handles the adapting of special types from mongo""" if isinstance(obj, datetime.datetime): return time.mktime(obj.timetuple()) # depends on [control=['if'], data=[]] if isinstance(obj, Timestamp): return obj.time # depends on [control=['if'], data=[]] if isinstance(obj, ObjectId): return obj.__str__() # depends on [control=['if'], data=[]] return JSONEncoder.default(self, obj)
def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None): """This is used by the interact() method. """ while self.isalive(): r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], []) if self.child_fd in r: data = self.__interact_read(self.child_fd) if output_filter: data = output_filter(data) if self.logfile is not None: self.logfile.write (data) self.logfile.flush() os.write(self.STDOUT_FILENO, data) if self.STDIN_FILENO in r: data = self.__interact_read(self.STDIN_FILENO) if input_filter: data = input_filter(data) i = data.rfind(escape_character) if i != -1: data = data[:i] self.__interact_writen(self.child_fd, data) break self.__interact_writen(self.child_fd, data)
def function[__interact_copy, parameter[self, escape_character, input_filter, output_filter]]: constant[This is used by the interact() method. ] while call[name[self].isalive, parameter[]] begin[:] <ast.Tuple object at 0x7da1b021c160> assign[=] call[name[self].__select, parameter[list[[<ast.Attribute object at 0x7da1b021d570>, <ast.Attribute object at 0x7da1b021db40>]], list[[]], list[[]]]] if compare[name[self].child_fd in name[r]] begin[:] variable[data] assign[=] call[name[self].__interact_read, parameter[name[self].child_fd]] if name[output_filter] begin[:] variable[data] assign[=] call[name[output_filter], parameter[name[data]]] if compare[name[self].logfile is_not constant[None]] begin[:] call[name[self].logfile.write, parameter[name[data]]] call[name[self].logfile.flush, parameter[]] call[name[os].write, parameter[name[self].STDOUT_FILENO, name[data]]] if compare[name[self].STDIN_FILENO in name[r]] begin[:] variable[data] assign[=] call[name[self].__interact_read, parameter[name[self].STDIN_FILENO]] if name[input_filter] begin[:] variable[data] assign[=] call[name[input_filter], parameter[name[data]]] variable[i] assign[=] call[name[data].rfind, parameter[name[escape_character]]] if compare[name[i] not_equal[!=] <ast.UnaryOp object at 0x7da1b26ad5a0>] begin[:] variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da1b26af910>] call[name[self].__interact_writen, parameter[name[self].child_fd, name[data]]] break call[name[self].__interact_writen, parameter[name[self].child_fd, name[data]]]
keyword[def] identifier[__interact_copy] ( identifier[self] , identifier[escape_character] = keyword[None] , identifier[input_filter] = keyword[None] , identifier[output_filter] = keyword[None] ): literal[string] keyword[while] identifier[self] . identifier[isalive] (): identifier[r] , identifier[w] , identifier[e] = identifier[self] . identifier[__select] ([ identifier[self] . identifier[child_fd] , identifier[self] . identifier[STDIN_FILENO] ],[],[]) keyword[if] identifier[self] . identifier[child_fd] keyword[in] identifier[r] : identifier[data] = identifier[self] . identifier[__interact_read] ( identifier[self] . identifier[child_fd] ) keyword[if] identifier[output_filter] : identifier[data] = identifier[output_filter] ( identifier[data] ) keyword[if] identifier[self] . identifier[logfile] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[logfile] . identifier[write] ( identifier[data] ) identifier[self] . identifier[logfile] . identifier[flush] () identifier[os] . identifier[write] ( identifier[self] . identifier[STDOUT_FILENO] , identifier[data] ) keyword[if] identifier[self] . identifier[STDIN_FILENO] keyword[in] identifier[r] : identifier[data] = identifier[self] . identifier[__interact_read] ( identifier[self] . identifier[STDIN_FILENO] ) keyword[if] identifier[input_filter] : identifier[data] = identifier[input_filter] ( identifier[data] ) identifier[i] = identifier[data] . identifier[rfind] ( identifier[escape_character] ) keyword[if] identifier[i] !=- literal[int] : identifier[data] = identifier[data] [: identifier[i] ] identifier[self] . identifier[__interact_writen] ( identifier[self] . identifier[child_fd] , identifier[data] ) keyword[break] identifier[self] . identifier[__interact_writen] ( identifier[self] . identifier[child_fd] , identifier[data] )
def __interact_copy(self, escape_character=None, input_filter=None, output_filter=None): """This is used by the interact() method. """ while self.isalive(): (r, w, e) = self.__select([self.child_fd, self.STDIN_FILENO], [], []) if self.child_fd in r: data = self.__interact_read(self.child_fd) if output_filter: data = output_filter(data) # depends on [control=['if'], data=[]] if self.logfile is not None: self.logfile.write(data) self.logfile.flush() # depends on [control=['if'], data=[]] os.write(self.STDOUT_FILENO, data) # depends on [control=['if'], data=[]] if self.STDIN_FILENO in r: data = self.__interact_read(self.STDIN_FILENO) if input_filter: data = input_filter(data) # depends on [control=['if'], data=[]] i = data.rfind(escape_character) if i != -1: data = data[:i] self.__interact_writen(self.child_fd, data) break # depends on [control=['if'], data=['i']] self.__interact_writen(self.child_fd, data) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def pair(self): """ Return tuple (address, port), where address is a string (empty string if self.address() is None) and port is an integer (zero if self.port() is None). Mainly, this tuple is used with python socket module (like in bind method) :return: 2 value tuple of str and int. """ address = str(self.__address) if self.__address is not None else '' port = int(self.__port) if self.__port is not None else 0 return address, port
def function[pair, parameter[self]]: constant[ Return tuple (address, port), where address is a string (empty string if self.address() is None) and port is an integer (zero if self.port() is None). Mainly, this tuple is used with python socket module (like in bind method) :return: 2 value tuple of str and int. ] variable[address] assign[=] <ast.IfExp object at 0x7da2044c29e0> variable[port] assign[=] <ast.IfExp object at 0x7da2044c1570> return[tuple[[<ast.Name object at 0x7da20c6e6740>, <ast.Name object at 0x7da20c6e7220>]]]
keyword[def] identifier[pair] ( identifier[self] ): literal[string] identifier[address] = identifier[str] ( identifier[self] . identifier[__address] ) keyword[if] identifier[self] . identifier[__address] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] identifier[port] = identifier[int] ( identifier[self] . identifier[__port] ) keyword[if] identifier[self] . identifier[__port] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] keyword[return] identifier[address] , identifier[port]
def pair(self): """ Return tuple (address, port), where address is a string (empty string if self.address() is None) and port is an integer (zero if self.port() is None). Mainly, this tuple is used with python socket module (like in bind method) :return: 2 value tuple of str and int. """ address = str(self.__address) if self.__address is not None else '' port = int(self.__port) if self.__port is not None else 0 return (address, port)
def generate_config(directory): """ Generate default config file """ default_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yml") target_config_path = os.path.abspath(os.path.join(directory, 'config.yml')) shutil.copy(default_config, target_config_path) six.print_("Config file has been generated in", target_config_path)
def function[generate_config, parameter[directory]]: constant[ Generate default config file ] variable[default_config] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]], constant[config.yml]]] variable[target_config_path] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[directory], constant[config.yml]]]]] call[name[shutil].copy, parameter[name[default_config], name[target_config_path]]] call[name[six].print_, parameter[constant[Config file has been generated in], name[target_config_path]]]
keyword[def] identifier[generate_config] ( identifier[directory] ): literal[string] identifier[default_config] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] )), literal[string] ) identifier[target_config_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] )) identifier[shutil] . identifier[copy] ( identifier[default_config] , identifier[target_config_path] ) identifier[six] . identifier[print_] ( literal[string] , identifier[target_config_path] )
def generate_config(directory): """ Generate default config file """ default_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.yml') target_config_path = os.path.abspath(os.path.join(directory, 'config.yml')) shutil.copy(default_config, target_config_path) six.print_('Config file has been generated in', target_config_path)
def register_lookup_handler(lookup_type, handler_or_path): """Register a lookup handler. Args: lookup_type (str): Name to register the handler under handler_or_path (OneOf[func, str]): a function or a path to a handler """ handler = handler_or_path if isinstance(handler_or_path, basestring): handler = load_object_from_string(handler_or_path) LOOKUP_HANDLERS[lookup_type] = handler if type(handler) != type: # Hander is a not a new-style handler logger = logging.getLogger(__name__) logger.warning("Registering lookup `%s`: Please upgrade to use the " "new style of Lookups." % lookup_type) warnings.warn( # For some reason, this does not show up... # Leaving it in anyway "Lookup `%s`: Please upgrade to use the new style of Lookups" "." % lookup_type, DeprecationWarning, stacklevel=2, )
def function[register_lookup_handler, parameter[lookup_type, handler_or_path]]: constant[Register a lookup handler. Args: lookup_type (str): Name to register the handler under handler_or_path (OneOf[func, str]): a function or a path to a handler ] variable[handler] assign[=] name[handler_or_path] if call[name[isinstance], parameter[name[handler_or_path], name[basestring]]] begin[:] variable[handler] assign[=] call[name[load_object_from_string], parameter[name[handler_or_path]]] call[name[LOOKUP_HANDLERS]][name[lookup_type]] assign[=] name[handler] if compare[call[name[type], parameter[name[handler]]] not_equal[!=] name[type]] begin[:] variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] call[name[logger].warning, parameter[binary_operation[constant[Registering lookup `%s`: Please upgrade to use the new style of Lookups.] <ast.Mod object at 0x7da2590d6920> name[lookup_type]]]] call[name[warnings].warn, parameter[binary_operation[constant[Lookup `%s`: Please upgrade to use the new style of Lookups.] <ast.Mod object at 0x7da2590d6920> name[lookup_type]], name[DeprecationWarning]]]
keyword[def] identifier[register_lookup_handler] ( identifier[lookup_type] , identifier[handler_or_path] ): literal[string] identifier[handler] = identifier[handler_or_path] keyword[if] identifier[isinstance] ( identifier[handler_or_path] , identifier[basestring] ): identifier[handler] = identifier[load_object_from_string] ( identifier[handler_or_path] ) identifier[LOOKUP_HANDLERS] [ identifier[lookup_type] ]= identifier[handler] keyword[if] identifier[type] ( identifier[handler] )!= identifier[type] : identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[logger] . identifier[warning] ( literal[string] literal[string] % identifier[lookup_type] ) identifier[warnings] . identifier[warn] ( literal[string] literal[string] % identifier[lookup_type] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] , )
def register_lookup_handler(lookup_type, handler_or_path): """Register a lookup handler. Args: lookup_type (str): Name to register the handler under handler_or_path (OneOf[func, str]): a function or a path to a handler """ handler = handler_or_path if isinstance(handler_or_path, basestring): handler = load_object_from_string(handler_or_path) # depends on [control=['if'], data=[]] LOOKUP_HANDLERS[lookup_type] = handler if type(handler) != type: # Hander is a not a new-style handler logger = logging.getLogger(__name__) logger.warning('Registering lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type) # For some reason, this does not show up... # Leaving it in anyway warnings.warn('Lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type, DeprecationWarning, stacklevel=2) # depends on [control=['if'], data=[]]
def add_simple_link(self, issue, object): """Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data. ``object`` should be a dict containing at least ``url`` to the linked external URL and ``title`` to display for the link inside JIRA. For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param issue: the issue to add the remote link to :param object: the dictionary used to create remotelink data """ data = {"object": object} url = self._get_url('issue/' + str(issue) + '/remotelink') r = self._session.post( url, data=json.dumps(data)) simple_link = RemoteLink( self._options, self._session, raw=json_loads(r)) return simple_link
def function[add_simple_link, parameter[self, issue, object]]: constant[Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data. ``object`` should be a dict containing at least ``url`` to the linked external URL and ``title`` to display for the link inside JIRA. For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param issue: the issue to add the remote link to :param object: the dictionary used to create remotelink data ] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e3040>], [<ast.Name object at 0x7da1b21e2c20>]] variable[url] assign[=] call[name[self]._get_url, parameter[binary_operation[binary_operation[constant[issue/] + call[name[str], parameter[name[issue]]]] + constant[/remotelink]]]] variable[r] assign[=] call[name[self]._session.post, parameter[name[url]]] variable[simple_link] assign[=] call[name[RemoteLink], parameter[name[self]._options, name[self]._session]] return[name[simple_link]]
keyword[def] identifier[add_simple_link] ( identifier[self] , identifier[issue] , identifier[object] ): literal[string] identifier[data] ={ literal[string] : identifier[object] } identifier[url] = identifier[self] . identifier[_get_url] ( literal[string] + identifier[str] ( identifier[issue] )+ literal[string] ) identifier[r] = identifier[self] . identifier[_session] . identifier[post] ( identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] )) identifier[simple_link] = identifier[RemoteLink] ( identifier[self] . identifier[_options] , identifier[self] . identifier[_session] , identifier[raw] = identifier[json_loads] ( identifier[r] )) keyword[return] identifier[simple_link]
def add_simple_link(self, issue, object): """Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data. ``object`` should be a dict containing at least ``url`` to the linked external URL and ``title`` to display for the link inside JIRA. For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param issue: the issue to add the remote link to :param object: the dictionary used to create remotelink data """ data = {'object': object} url = self._get_url('issue/' + str(issue) + '/remotelink') r = self._session.post(url, data=json.dumps(data)) simple_link = RemoteLink(self._options, self._session, raw=json_loads(r)) return simple_link
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10., ref_height=120.): """Adds unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Parameters ---------- inst : pysat.Instrument Instrument object that will get unit vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint Returns ------- None unit vectors are added to the passed Instrument object with a naming scheme: 'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z) 'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z) 'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z) """ # add unit vectors for magnetic drifts in ecef coordinates zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'], inst['longitude'], inst['altitude'], inst.data.index, steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height) inst['unit_zon_ecef_x'] = zvx inst['unit_zon_ecef_y'] = zvy inst['unit_zon_ecef_z'] = zvz inst['unit_fa_ecef_x'] = bx inst['unit_fa_ecef_y'] = by inst['unit_fa_ecef_z'] = bz inst['unit_mer_ecef_x'] = mx inst['unit_mer_ecef_y'] = my inst['unit_mer_ecef_z'] = mz inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x', 'desc': 'Zonal unit vector along ECEF-x', 'label': 'Zonal unit vector along ECEF-x', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Zonal unit vector along ECEF-x', 'value_min': -1., 'value_max': 1., } inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y', 'desc': 'Zonal unit vector along ECEF-y', 'label': 'Zonal unit vector along ECEF-y', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Zonal unit vector along ECEF-y', 'value_min': -1., 'value_max': 1., } inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z', 'desc': 'Zonal unit vector along ECEF-z', 'label': 'Zonal unit vector along ECEF-z', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Zonal unit vector along ECEF-z', 'value_min': -1., 'value_max': 1., } inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x', 'desc': 'Field-aligned unit vector along ECEF-x', 'label': 'Field-aligned unit vector along ECEF-x', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Field-aligned unit vector along ECEF-x', 'value_min': -1., 'value_max': 1., } inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y', 'desc': 'Field-aligned unit vector along ECEF-y', 'label': 'Field-aligned unit vector along ECEF-y', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Field-aligned unit vector along ECEF-y', 'value_min': -1., 'value_max': 1., } inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z', 'desc': 'Field-aligned unit vector along ECEF-z', 'label': 'Field-aligned unit vector along ECEF-z', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Field-aligned unit vector along ECEF-z', 'value_min': -1., 'value_max': 1., } inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x', 'desc': 'Meridional unit vector along ECEF-x', 'label': 'Meridional unit vector along ECEF-x', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Meridional unit vector along ECEF-x', 'value_min': -1., 'value_max': 1., } inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y', 'desc': 'Meridional unit vector along ECEF-y', 'label': 'Meridional unit vector along ECEF-y', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Meridional unit vector along ECEF-y', 'value_min': -1., 'value_max': 1., } inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z', 'desc': 'Meridional unit vector along ECEF-z', 'label': 'Meridional unit vector along ECEF-z', 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. ' 'Vector system is calcluated by field-line tracing along IGRF values ' 'down to reference altitudes of 120 km in both the Northern and Southern ' 'hemispheres. These two points, along with the satellite position, are ' 'used to define the magnetic meridian. Vector math from here generates ' 'the orthogonal system.'), 'axis': 'Meridional unit vector along ECEF-z', 'value_min': -1., 'value_max': 1., } return
def function[add_mag_drift_unit_vectors_ecef, parameter[inst, steps, max_steps, step_size, ref_height]]: constant[Adds unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Parameters ---------- inst : pysat.Instrument Instrument object that will get unit vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint Returns ------- None unit vectors are added to the passed Instrument object with a naming scheme: 'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z) 'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z) 'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z) ] <ast.Tuple object at 0x7da1b0b54e50> assign[=] call[name[calculate_mag_drift_unit_vectors_ecef], parameter[call[name[inst]][constant[latitude]], call[name[inst]][constant[longitude]], call[name[inst]][constant[altitude]], name[inst].data.index]] call[name[inst]][constant[unit_zon_ecef_x]] assign[=] name[zvx] call[name[inst]][constant[unit_zon_ecef_y]] assign[=] name[zvy] call[name[inst]][constant[unit_zon_ecef_z]] assign[=] name[zvz] call[name[inst]][constant[unit_fa_ecef_x]] assign[=] name[bx] call[name[inst]][constant[unit_fa_ecef_y]] assign[=] name[by] call[name[inst]][constant[unit_fa_ecef_z]] assign[=] name[bz] call[name[inst]][constant[unit_mer_ecef_x]] assign[=] name[mx] call[name[inst]][constant[unit_mer_ecef_y]] assign[=] name[my] call[name[inst]][constant[unit_mer_ecef_z]] assign[=] name[mz] call[name[inst].meta][constant[unit_zon_ecef_x]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b57700>, <ast.Constant object at 0x7da1b0b543a0>, <ast.Constant object at 0x7da1b0b54dc0>, <ast.Constant object at 0x7da1b0b57640>, <ast.Constant object at 0x7da1b0b55d80>, <ast.Constant object at 0x7da1b0b56260>, <ast.Constant object at 0x7da1b0b57760>], [<ast.Constant object at 0x7da1b0b56320>, <ast.Constant object at 0x7da1b0b54040>, <ast.Constant object at 0x7da1b0b57190>, <ast.Constant object at 0x7da1b0b54610>, <ast.Constant object at 0x7da1b0b558d0>, <ast.UnaryOp object at 0x7da1b0b55ae0>, <ast.Constant object at 0x7da1b0b54af0>]] call[name[inst].meta][constant[unit_zon_ecef_y]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b55a50>, <ast.Constant object at 0x7da1b0b543d0>, <ast.Constant object at 0x7da1b0b548b0>, <ast.Constant object at 0x7da1b0b56530>, <ast.Constant object at 0x7da1b0b560e0>, <ast.Constant object at 0x7da1b0b575b0>, <ast.Constant object at 0x7da1b0b55930>], [<ast.Constant object at 0x7da1b0b54f10>, <ast.Constant object at 0x7da1b0b556f0>, <ast.Constant object at 0x7da1b0b545b0>, <ast.Constant object at 0x7da1b0b558a0>, <ast.Constant object at 0x7da1b0b57400>, <ast.UnaryOp object at 0x7da1b0b54460>, <ast.Constant object at 0x7da1b0b563e0>]] call[name[inst].meta][constant[unit_zon_ecef_z]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b54520>, <ast.Constant object at 0x7da1b0b56920>, <ast.Constant object at 0x7da1b0b54ca0>, <ast.Constant object at 0x7da1b0b56a10>, <ast.Constant object at 0x7da1b0b56380>, <ast.Constant object at 0x7da1b0b55660>, <ast.Constant object at 0x7da1b0b568c0>], [<ast.Constant object at 0x7da1b0b54280>, <ast.Constant object at 0x7da1b0b54a90>, <ast.Constant object at 0x7da1b0b54880>, <ast.Constant object at 0x7da1b0b54910>, <ast.Constant object at 0x7da1b0b55bd0>, <ast.UnaryOp object at 0x7da1b0b54250>, <ast.Constant object at 0x7da1b0b57130>]] call[name[inst].meta][constant[unit_fa_ecef_x]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b56fb0>, <ast.Constant object at 0x7da1b0b57f40>, <ast.Constant object at 0x7da1b0b578b0>, <ast.Constant object at 0x7da1b0b561a0>, <ast.Constant object at 0x7da1b0b57e50>, <ast.Constant object at 0x7da1b0b56950>, <ast.Constant object at 0x7da1b0b569e0>], [<ast.Constant object at 0x7da1b0b55510>, <ast.Constant object at 0x7da1b0b55180>, <ast.Constant object at 0x7da1b0b557b0>, <ast.Constant object at 0x7da1b0b54fd0>, <ast.Constant object at 0x7da1b0b56ec0>, <ast.UnaryOp object at 0x7da1b0b56b00>, <ast.Constant object at 0x7da1b0b57f10>]] call[name[inst].meta][constant[unit_fa_ecef_y]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b55690>, <ast.Constant object at 0x7da1b0b54a30>, <ast.Constant object at 0x7da1b0b57160>, <ast.Constant object at 0x7da1b0b54e80>, <ast.Constant object at 0x7da1b0a6bbb0>, <ast.Constant object at 0x7da1b0a6b9a0>, <ast.Constant object at 0x7da1b0a69f00>], [<ast.Constant object at 0x7da1b0a6a920>, <ast.Constant object at 0x7da1b0a6bfa0>, <ast.Constant object at 0x7da1b0a6a830>, <ast.Constant object at 0x7da1b0a6a3b0>, <ast.Constant object at 0x7da1b0a69030>, <ast.UnaryOp object at 0x7da1b0a6bb50>, <ast.Constant object at 0x7da1b0a6bf40>]] call[name[inst].meta][constant[unit_fa_ecef_z]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a6a410>, <ast.Constant object at 0x7da1b0a6afb0>, <ast.Constant object at 0x7da1b0a69330>, <ast.Constant object at 0x7da1b0a69cf0>, <ast.Constant object at 0x7da1b0a6a1d0>, <ast.Constant object at 0x7da1b0a6b9d0>, <ast.Constant object at 0x7da1b0a69300>], [<ast.Constant object at 0x7da1b0bac220>, <ast.Constant object at 0x7da1b0baf070>, <ast.Constant object at 0x7da1b0baef80>, <ast.Constant object at 0x7da1b0baea40>, <ast.Constant object at 0x7da1b0bace80>, <ast.UnaryOp object at 0x7da1b0bace20>, <ast.Constant object at 0x7da1b0baf040>]] call[name[inst].meta][constant[unit_mer_ecef_x]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0bae9e0>, <ast.Constant object at 0x7da1b0baef50>, <ast.Constant object at 0x7da1b0baf130>, <ast.Constant object at 0x7da1b0a22d10>, <ast.Constant object at 0x7da1b0a23010>, <ast.Constant object at 0x7da1b0a23550>, <ast.Constant object at 0x7da1b0a22680>], [<ast.Constant object at 0x7da1b0a21ae0>, <ast.Constant object at 0x7da1b0a21c60>, <ast.Constant object at 0x7da1b0a21cf0>, <ast.Constant object at 0x7da1b0a23190>, <ast.Constant object at 0x7da1b0a229b0>, <ast.UnaryOp object at 0x7da1b0a22a70>, <ast.Constant object at 0x7da1b0a21360>]] call[name[inst].meta][constant[unit_mer_ecef_y]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a226e0>, <ast.Constant object at 0x7da1b0a21960>, <ast.Constant object at 0x7da1b0a22830>, <ast.Constant object at 0x7da1b0a22d40>, <ast.Constant object at 0x7da1b0a216c0>, <ast.Constant object at 0x7da1b0a23400>, <ast.Constant object at 0x7da1b0a218d0>], [<ast.Constant object at 0x7da1b0a22200>, <ast.Constant object at 0x7da1b0a21fc0>, <ast.Constant object at 0x7da1b0a22260>, <ast.Constant object at 0x7da1b0a23160>, <ast.Constant object at 0x7da1b0a22650>, <ast.UnaryOp object at 0x7da1b0a23310>, <ast.Constant object at 0x7da1b0a21600>]] call[name[inst].meta][constant[unit_mer_ecef_z]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a21f60>, <ast.Constant object at 0x7da1b0a21d80>, <ast.Constant object at 0x7da1b0a21c00>, <ast.Constant object at 0x7da1b0a23520>, <ast.Constant object at 0x7da1b0a22740>, <ast.Constant object at 0x7da1b0a230d0>, <ast.Constant object at 0x7da1b0a22710>], [<ast.Constant object at 0x7da1b0a21630>, <ast.Constant object at 0x7da1b0a221a0>, <ast.Constant object at 0x7da1b0a21810>, <ast.Constant object at 0x7da1b0a23460>, <ast.Constant object at 0x7da1b0a21540>, <ast.UnaryOp object at 0x7da1b0a21cc0>, <ast.Constant object at 0x7da1b0a222c0>]] return[None]
keyword[def] identifier[add_mag_drift_unit_vectors_ecef] ( identifier[inst] , identifier[steps] = keyword[None] , identifier[max_steps] = literal[int] , identifier[step_size] = literal[int] , identifier[ref_height] = literal[int] ): literal[string] identifier[zvx] , identifier[zvy] , identifier[zvz] , identifier[bx] , identifier[by] , identifier[bz] , identifier[mx] , identifier[my] , identifier[mz] = identifier[calculate_mag_drift_unit_vectors_ecef] ( identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] . identifier[data] . identifier[index] , identifier[steps] = identifier[steps] , identifier[max_steps] = identifier[max_steps] , identifier[step_size] = identifier[step_size] , identifier[ref_height] = identifier[ref_height] ) identifier[inst] [ literal[string] ]= identifier[zvx] identifier[inst] [ literal[string] ]= identifier[zvy] identifier[inst] [ literal[string] ]= identifier[zvz] identifier[inst] [ literal[string] ]= identifier[bx] identifier[inst] [ literal[string] ]= identifier[by] identifier[inst] [ literal[string] ]= identifier[bz] identifier[inst] [ literal[string] ]= identifier[mx] identifier[inst] [ literal[string] ]= identifier[my] identifier[inst] [ literal[string] ]= identifier[mz] identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ), literal[string] : literal[string] , literal[string] :- literal[int] , literal[string] : literal[int] , } keyword[return]
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.0, ref_height=120.0): """Adds unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Parameters ---------- inst : pysat.Instrument Instrument object that will get unit vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint Returns ------- None unit vectors are added to the passed Instrument object with a naming scheme: 'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z) 'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z) 'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z) """ # add unit vectors for magnetic drifts in ecef coordinates (zvx, zvy, zvz, bx, by, bz, mx, my, mz) = calculate_mag_drift_unit_vectors_ecef(inst['latitude'], inst['longitude'], inst['altitude'], inst.data.index, steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height) inst['unit_zon_ecef_x'] = zvx inst['unit_zon_ecef_y'] = zvy inst['unit_zon_ecef_z'] = zvz inst['unit_fa_ecef_x'] = bx inst['unit_fa_ecef_y'] = by inst['unit_fa_ecef_z'] = bz inst['unit_mer_ecef_x'] = mx inst['unit_mer_ecef_y'] = my inst['unit_mer_ecef_z'] = mz inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x', 'desc': 'Zonal unit vector along ECEF-x', 'label': 'Zonal unit vector along ECEF-x', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Zonal unit vector along ECEF-x', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y', 'desc': 'Zonal unit vector along ECEF-y', 'label': 'Zonal unit vector along ECEF-y', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Zonal unit vector along ECEF-y', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z', 'desc': 'Zonal unit vector along ECEF-z', 'label': 'Zonal unit vector along ECEF-z', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Zonal unit vector along ECEF-z', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x', 'desc': 'Field-aligned unit vector along ECEF-x', 'label': 'Field-aligned unit vector along ECEF-x', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Field-aligned unit vector along ECEF-x', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y', 'desc': 'Field-aligned unit vector along ECEF-y', 'label': 'Field-aligned unit vector along ECEF-y', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Field-aligned unit vector along ECEF-y', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z', 'desc': 'Field-aligned unit vector along ECEF-z', 'label': 'Field-aligned unit vector along ECEF-z', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Field-aligned unit vector along ECEF-z', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x', 'desc': 'Meridional unit vector along ECEF-x', 'label': 'Meridional unit vector along ECEF-x', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Meridional unit vector along ECEF-x', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y', 'desc': 'Meridional unit vector along ECEF-y', 'label': 'Meridional unit vector along ECEF-y', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Meridional unit vector along ECEF-y', 'value_min': -1.0, 'value_max': 1.0} inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z', 'desc': 'Meridional unit vector along ECEF-z', 'label': 'Meridional unit vector along ECEF-z', 'notes': 'Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. Vector system is calcluated by field-line tracing along IGRF values down to reference altitudes of 120 km in both the Northern and Southern hemispheres. These two points, along with the satellite position, are used to define the magnetic meridian. Vector math from here generates the orthogonal system.', 'axis': 'Meridional unit vector along ECEF-z', 'value_min': -1.0, 'value_max': 1.0} return
def region_size(im): r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. """ if im.dtype == bool: im = spim.label(im)[0] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
def function[region_size, parameter[im]]: constant[ Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. ] if compare[name[im].dtype equal[==] name[bool]] begin[:] variable[im] assign[=] call[call[name[spim].label, parameter[name[im]]]][constant[0]] variable[counts] assign[=] call[name[sp].bincount, parameter[call[name[im].flatten, parameter[]]]] call[name[counts]][constant[0]] assign[=] constant[0] variable[chords] assign[=] call[name[counts]][name[im]] return[name[chords]]
keyword[def] identifier[region_size] ( identifier[im] ): literal[string] keyword[if] identifier[im] . identifier[dtype] == identifier[bool] : identifier[im] = identifier[spim] . identifier[label] ( identifier[im] )[ literal[int] ] identifier[counts] = identifier[sp] . identifier[bincount] ( identifier[im] . identifier[flatten] ()) identifier[counts] [ literal[int] ]= literal[int] identifier[chords] = identifier[counts] [ identifier[im] ] keyword[return] identifier[chords]
def region_size(im): """ Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. """ if im.dtype == bool: im = spim.label(im)[0] # depends on [control=['if'], data=[]] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
def pretty_print_list_info(num_results, page_info=None, suffix=None): """Pretty print list info, with pagination, for user display.""" num_results_fg = "green" if num_results else "red" num_results_text = click.style(str(num_results), fg=num_results_fg) if page_info and page_info.is_valid: page_range = page_info.calculate_range(num_results) page_info_text = "page: %(page)s/%(page_total)s, page size: %(page_size)s" % { "page": click.style(str(page_info.page), bold=True), "page_size": click.style(str(page_info.page_size), bold=True), "page_total": click.style(str(page_info.page_total), bold=True), } range_results_text = "%(from)s-%(to)s (%(num_results)s) of %(total)s" % { "num_results": num_results_text, "from": click.style(str(page_range[0]), fg=num_results_fg), "to": click.style(str(page_range[1]), fg=num_results_fg), "total": click.style(str(page_info.count), fg=num_results_fg), } else: page_info_text = "" range_results_text = num_results_text click.secho( "Results: %(range_results)s %(suffix)s%(page_info)s" % { "range_results": range_results_text, "page_info": " (%s)" % page_info_text if page_info_text else "", "suffix": suffix or "item(s)", } )
def function[pretty_print_list_info, parameter[num_results, page_info, suffix]]: constant[Pretty print list info, with pagination, for user display.] variable[num_results_fg] assign[=] <ast.IfExp object at 0x7da1b19d8ac0> variable[num_results_text] assign[=] call[name[click].style, parameter[call[name[str], parameter[name[num_results]]]]] if <ast.BoolOp object at 0x7da1b19d8a00> begin[:] variable[page_range] assign[=] call[name[page_info].calculate_range, parameter[name[num_results]]] variable[page_info_text] assign[=] binary_operation[constant[page: %(page)s/%(page_total)s, page size: %(page_size)s] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b19d9420>, <ast.Constant object at 0x7da1b19d94e0>, <ast.Constant object at 0x7da1b19d8970>], [<ast.Call object at 0x7da1b19d9060>, <ast.Call object at 0x7da1b19a8d90>, <ast.Call object at 0x7da1b1a1d0f0>]]] variable[range_results_text] assign[=] binary_operation[constant[%(from)s-%(to)s (%(num_results)s) of %(total)s] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b1a1ff10>, <ast.Constant object at 0x7da1b1a1fa60>, <ast.Constant object at 0x7da1b1a1c8e0>, <ast.Constant object at 0x7da1b1a1dae0>], [<ast.Name object at 0x7da1b1a1fd30>, <ast.Call object at 0x7da1b1a1c0a0>, <ast.Call object at 0x7da1b1a1fc40>, <ast.Call object at 0x7da1b1a1fdf0>]]] call[name[click].secho, parameter[binary_operation[constant[Results: %(range_results)s %(suffix)s%(page_info)s] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b1952800>, <ast.Constant object at 0x7da1b1951c90>, <ast.Constant object at 0x7da1b1951c00>], [<ast.Name object at 0x7da1b1950c70>, <ast.IfExp object at 0x7da1b1952e60>, <ast.BoolOp object at 0x7da1b1953fa0>]]]]]
keyword[def] identifier[pretty_print_list_info] ( identifier[num_results] , identifier[page_info] = keyword[None] , identifier[suffix] = keyword[None] ): literal[string] identifier[num_results_fg] = literal[string] keyword[if] identifier[num_results] keyword[else] literal[string] identifier[num_results_text] = identifier[click] . identifier[style] ( identifier[str] ( identifier[num_results] ), identifier[fg] = identifier[num_results_fg] ) keyword[if] identifier[page_info] keyword[and] identifier[page_info] . identifier[is_valid] : identifier[page_range] = identifier[page_info] . identifier[calculate_range] ( identifier[num_results] ) identifier[page_info_text] = literal[string] %{ literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_info] . identifier[page] ), identifier[bold] = keyword[True] ), literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_info] . identifier[page_size] ), identifier[bold] = keyword[True] ), literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_info] . identifier[page_total] ), identifier[bold] = keyword[True] ), } identifier[range_results_text] = literal[string] %{ literal[string] : identifier[num_results_text] , literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_range] [ literal[int] ]), identifier[fg] = identifier[num_results_fg] ), literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_range] [ literal[int] ]), identifier[fg] = identifier[num_results_fg] ), literal[string] : identifier[click] . identifier[style] ( identifier[str] ( identifier[page_info] . identifier[count] ), identifier[fg] = identifier[num_results_fg] ), } keyword[else] : identifier[page_info_text] = literal[string] identifier[range_results_text] = identifier[num_results_text] identifier[click] . identifier[secho] ( literal[string] %{ literal[string] : identifier[range_results_text] , literal[string] : literal[string] % identifier[page_info_text] keyword[if] identifier[page_info_text] keyword[else] literal[string] , literal[string] : identifier[suffix] keyword[or] literal[string] , } )
def pretty_print_list_info(num_results, page_info=None, suffix=None): """Pretty print list info, with pagination, for user display.""" num_results_fg = 'green' if num_results else 'red' num_results_text = click.style(str(num_results), fg=num_results_fg) if page_info and page_info.is_valid: page_range = page_info.calculate_range(num_results) page_info_text = 'page: %(page)s/%(page_total)s, page size: %(page_size)s' % {'page': click.style(str(page_info.page), bold=True), 'page_size': click.style(str(page_info.page_size), bold=True), 'page_total': click.style(str(page_info.page_total), bold=True)} range_results_text = '%(from)s-%(to)s (%(num_results)s) of %(total)s' % {'num_results': num_results_text, 'from': click.style(str(page_range[0]), fg=num_results_fg), 'to': click.style(str(page_range[1]), fg=num_results_fg), 'total': click.style(str(page_info.count), fg=num_results_fg)} # depends on [control=['if'], data=[]] else: page_info_text = '' range_results_text = num_results_text click.secho('Results: %(range_results)s %(suffix)s%(page_info)s' % {'range_results': range_results_text, 'page_info': ' (%s)' % page_info_text if page_info_text else '', 'suffix': suffix or 'item(s)'})
def is_empty(self, resource): """Test if there is no document for resource. :param resource: resource name """ args = self._es_args(resource) res = self.elastic(resource).count(body={'query': {'match_all': {}}}, **args) return res.get('count', 0) == 0
def function[is_empty, parameter[self, resource]]: constant[Test if there is no document for resource. :param resource: resource name ] variable[args] assign[=] call[name[self]._es_args, parameter[name[resource]]] variable[res] assign[=] call[call[name[self].elastic, parameter[name[resource]]].count, parameter[]] return[compare[call[name[res].get, parameter[constant[count], constant[0]]] equal[==] constant[0]]]
keyword[def] identifier[is_empty] ( identifier[self] , identifier[resource] ): literal[string] identifier[args] = identifier[self] . identifier[_es_args] ( identifier[resource] ) identifier[res] = identifier[self] . identifier[elastic] ( identifier[resource] ). identifier[count] ( identifier[body] ={ literal[string] :{ literal[string] :{}}},** identifier[args] ) keyword[return] identifier[res] . identifier[get] ( literal[string] , literal[int] )== literal[int]
def is_empty(self, resource): """Test if there is no document for resource. :param resource: resource name """ args = self._es_args(resource) res = self.elastic(resource).count(body={'query': {'match_all': {}}}, **args) return res.get('count', 0) == 0
def _get_id(self): """Getter because using the id property from within was not working""" ret = None row = self.row if row: ret = row["id"] return ret
def function[_get_id, parameter[self]]: constant[Getter because using the id property from within was not working] variable[ret] assign[=] constant[None] variable[row] assign[=] name[self].row if name[row] begin[:] variable[ret] assign[=] call[name[row]][constant[id]] return[name[ret]]
keyword[def] identifier[_get_id] ( identifier[self] ): literal[string] identifier[ret] = keyword[None] identifier[row] = identifier[self] . identifier[row] keyword[if] identifier[row] : identifier[ret] = identifier[row] [ literal[string] ] keyword[return] identifier[ret]
def _get_id(self): """Getter because using the id property from within was not working""" ret = None row = self.row if row: ret = row['id'] # depends on [control=['if'], data=[]] return ret
def __reorganize_geo(self): """ Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return: """ logger_lpd_noaa.info("enter reorganize_geo") try: # Geo -> Properties for k, v in self.noaa_data_sorted["Site_Information"]['properties'].items(): noaa_key = self.__get_noaa_key(k) self.noaa_geo[noaa_key] = v except KeyError: logger_lpd_noaa.info("reorganize_geo: KeyError: geo properties") try: # Geo -> Geometry self.__reorganize_coordinates() except Exception: logger_lpd_noaa.warning("reorganize_geo: Exception: missing required data: coordinates") # put the temporarily organized data into the self.noaa_data_sorted self.noaa_data_sorted["Site_Information"] = self.noaa_geo return
def function[__reorganize_geo, parameter[self]]: constant[ Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return: ] call[name[logger_lpd_noaa].info, parameter[constant[enter reorganize_geo]]] <ast.Try object at 0x7da18f09dc30> <ast.Try object at 0x7da18f09d210> call[name[self].noaa_data_sorted][constant[Site_Information]] assign[=] name[self].noaa_geo return[None]
keyword[def] identifier[__reorganize_geo] ( identifier[self] ): literal[string] identifier[logger_lpd_noaa] . identifier[info] ( literal[string] ) keyword[try] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[noaa_data_sorted] [ literal[string] ][ literal[string] ]. identifier[items] (): identifier[noaa_key] = identifier[self] . identifier[__get_noaa_key] ( identifier[k] ) identifier[self] . identifier[noaa_geo] [ identifier[noaa_key] ]= identifier[v] keyword[except] identifier[KeyError] : identifier[logger_lpd_noaa] . identifier[info] ( literal[string] ) keyword[try] : identifier[self] . identifier[__reorganize_coordinates] () keyword[except] identifier[Exception] : identifier[logger_lpd_noaa] . identifier[warning] ( literal[string] ) identifier[self] . identifier[noaa_data_sorted] [ literal[string] ]= identifier[self] . identifier[noaa_geo] keyword[return]
def __reorganize_geo(self): """ Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return: """ logger_lpd_noaa.info('enter reorganize_geo') try: # Geo -> Properties for (k, v) in self.noaa_data_sorted['Site_Information']['properties'].items(): noaa_key = self.__get_noaa_key(k) self.noaa_geo[noaa_key] = v # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except KeyError: logger_lpd_noaa.info('reorganize_geo: KeyError: geo properties') # depends on [control=['except'], data=[]] try: # Geo -> Geometry self.__reorganize_coordinates() # depends on [control=['try'], data=[]] except Exception: logger_lpd_noaa.warning('reorganize_geo: Exception: missing required data: coordinates') # depends on [control=['except'], data=[]] # put the temporarily organized data into the self.noaa_data_sorted self.noaa_data_sorted['Site_Information'] = self.noaa_geo return
def duration(self): """ Returns task's current duration in minutes. """ if not self._loaded: return 0 delta = datetime.datetime.now() - self._start_time total_secs = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 return max(0, int(round(total_secs / 60.0)))
def function[duration, parameter[self]]: constant[ Returns task's current duration in minutes. ] if <ast.UnaryOp object at 0x7da1b134b400> begin[:] return[constant[0]] variable[delta] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] - name[self]._start_time] variable[total_secs] assign[=] binary_operation[binary_operation[name[delta].microseconds + binary_operation[binary_operation[name[delta].seconds + binary_operation[binary_operation[name[delta].days * constant[24]] * constant[3600]]] * binary_operation[constant[10] ** constant[6]]]] / binary_operation[constant[10] ** constant[6]]] return[call[name[max], parameter[constant[0], call[name[int], parameter[call[name[round], parameter[binary_operation[name[total_secs] / constant[60.0]]]]]]]]]
keyword[def] identifier[duration] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_loaded] : keyword[return] literal[int] identifier[delta] = identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_start_time] identifier[total_secs] =( identifier[delta] . identifier[microseconds] + ( identifier[delta] . identifier[seconds] + identifier[delta] . identifier[days] * literal[int] * literal[int] )* literal[int] ** literal[int] )/ literal[int] ** literal[int] keyword[return] identifier[max] ( literal[int] , identifier[int] ( identifier[round] ( identifier[total_secs] / literal[int] )))
def duration(self): """ Returns task's current duration in minutes. """ if not self._loaded: return 0 # depends on [control=['if'], data=[]] delta = datetime.datetime.now() - self._start_time total_secs = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 return max(0, int(round(total_secs / 60.0)))
def deprecated(version, version_removed): '''This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.''' def __wrapper(func, *args, **kwargs): '''Warn the user, and then proceed.''' code = six.get_function_code(func) warnings.warn_explicit( "{:s}.{:s}\n\tDeprecated as of JAMS version {:s}." "\n\tIt will be removed in JAMS version {:s}." .format(func.__module__, func.__name__, version, version_removed), category=DeprecationWarning, filename=code.co_filename, lineno=code.co_firstlineno + 1 ) return func(*args, **kwargs) return decorator(__wrapper)
def function[deprecated, parameter[version, version_removed]]: constant[This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.] def function[__wrapper, parameter[func]]: constant[Warn the user, and then proceed.] variable[code] assign[=] call[name[six].get_function_code, parameter[name[func]]] call[name[warnings].warn_explicit, parameter[call[constant[{:s}.{:s} Deprecated as of JAMS version {:s}. It will be removed in JAMS version {:s}.].format, parameter[name[func].__module__, name[func].__name__, name[version], name[version_removed]]]]] return[call[name[func], parameter[<ast.Starred object at 0x7da20c6c6cb0>]]] return[call[name[decorator], parameter[name[__wrapper]]]]
keyword[def] identifier[deprecated] ( identifier[version] , identifier[version_removed] ): literal[string] keyword[def] identifier[__wrapper] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[code] = identifier[six] . identifier[get_function_code] ( identifier[func] ) identifier[warnings] . identifier[warn_explicit] ( literal[string] literal[string] . identifier[format] ( identifier[func] . identifier[__module__] , identifier[func] . identifier[__name__] , identifier[version] , identifier[version_removed] ), identifier[category] = identifier[DeprecationWarning] , identifier[filename] = identifier[code] . identifier[co_filename] , identifier[lineno] = identifier[code] . identifier[co_firstlineno] + literal[int] ) keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[decorator] ( identifier[__wrapper] )
def deprecated(version, version_removed): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.""" def __wrapper(func, *args, **kwargs): """Warn the user, and then proceed.""" code = six.get_function_code(func) warnings.warn_explicit('{:s}.{:s}\n\tDeprecated as of JAMS version {:s}.\n\tIt will be removed in JAMS version {:s}.'.format(func.__module__, func.__name__, version, version_removed), category=DeprecationWarning, filename=code.co_filename, lineno=code.co_firstlineno + 1) return func(*args, **kwargs) return decorator(__wrapper)
def get_profile_name(org_vm, profile_inst): """ Get the org, name, and version from the profile instance and return them as a tuple. Returns: tuple of org, name, vers Raises: TypeError: if invalid property type ValueError: If property value outside range """ try: org = org_vm.tovalues(profile_inst['RegisteredOrganization']) name = profile_inst['RegisteredName'] vers = profile_inst['RegisteredVersion'] return org, name, vers except TypeError as te: print('ORG_VM.TOVALUES FAILED. inst=%s, Exception %s' % (profile_inst, te)) except ValueError as ve: print('ORG_VM.TOVALUES FAILED. inst=%s, Exception %s' % (profile_inst, ve)) return 'ERR', 'ERR', 'ERR'
def function[get_profile_name, parameter[org_vm, profile_inst]]: constant[ Get the org, name, and version from the profile instance and return them as a tuple. Returns: tuple of org, name, vers Raises: TypeError: if invalid property type ValueError: If property value outside range ] <ast.Try object at 0x7da1b0ef5090> return[tuple[[<ast.Constant object at 0x7da1b0ef61d0>, <ast.Constant object at 0x7da1b0ef4460>, <ast.Constant object at 0x7da1b0ef57e0>]]]
keyword[def] identifier[get_profile_name] ( identifier[org_vm] , identifier[profile_inst] ): literal[string] keyword[try] : identifier[org] = identifier[org_vm] . identifier[tovalues] ( identifier[profile_inst] [ literal[string] ]) identifier[name] = identifier[profile_inst] [ literal[string] ] identifier[vers] = identifier[profile_inst] [ literal[string] ] keyword[return] identifier[org] , identifier[name] , identifier[vers] keyword[except] identifier[TypeError] keyword[as] identifier[te] : identifier[print] ( literal[string] % ( identifier[profile_inst] , identifier[te] )) keyword[except] identifier[ValueError] keyword[as] identifier[ve] : identifier[print] ( literal[string] % ( identifier[profile_inst] , identifier[ve] )) keyword[return] literal[string] , literal[string] , literal[string]
def get_profile_name(org_vm, profile_inst): """ Get the org, name, and version from the profile instance and return them as a tuple. Returns: tuple of org, name, vers Raises: TypeError: if invalid property type ValueError: If property value outside range """ try: org = org_vm.tovalues(profile_inst['RegisteredOrganization']) name = profile_inst['RegisteredName'] vers = profile_inst['RegisteredVersion'] return (org, name, vers) # depends on [control=['try'], data=[]] except TypeError as te: print('ORG_VM.TOVALUES FAILED. inst=%s, Exception %s' % (profile_inst, te)) # depends on [control=['except'], data=['te']] except ValueError as ve: print('ORG_VM.TOVALUES FAILED. inst=%s, Exception %s' % (profile_inst, ve)) # depends on [control=['except'], data=['ve']] return ('ERR', 'ERR', 'ERR')
def ParseObjs(self, objs, expression): """Parse one or more objects by testing if it has matching stat results. Args: objs: An iterable of objects that should be checked. expression: A StatFilter expression, e.g.: "uid:>0 gid:=0 file_type:link" Yields: matching objects. """ self.Validate(expression) for obj in objs: if not isinstance(obj, rdf_client_fs.StatEntry): continue # If all match conditions pass, yield the object. for match in self.matchers: if not match(obj): break else: yield obj
def function[ParseObjs, parameter[self, objs, expression]]: constant[Parse one or more objects by testing if it has matching stat results. Args: objs: An iterable of objects that should be checked. expression: A StatFilter expression, e.g.: "uid:>0 gid:=0 file_type:link" Yields: matching objects. ] call[name[self].Validate, parameter[name[expression]]] for taget[name[obj]] in starred[name[objs]] begin[:] if <ast.UnaryOp object at 0x7da1b1c0c130> begin[:] continue for taget[name[match]] in starred[name[self].matchers] begin[:] if <ast.UnaryOp object at 0x7da1b1c0ee00> begin[:] break
keyword[def] identifier[ParseObjs] ( identifier[self] , identifier[objs] , identifier[expression] ): literal[string] identifier[self] . identifier[Validate] ( identifier[expression] ) keyword[for] identifier[obj] keyword[in] identifier[objs] : keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[rdf_client_fs] . identifier[StatEntry] ): keyword[continue] keyword[for] identifier[match] keyword[in] identifier[self] . identifier[matchers] : keyword[if] keyword[not] identifier[match] ( identifier[obj] ): keyword[break] keyword[else] : keyword[yield] identifier[obj]
def ParseObjs(self, objs, expression): """Parse one or more objects by testing if it has matching stat results. Args: objs: An iterable of objects that should be checked. expression: A StatFilter expression, e.g.: "uid:>0 gid:=0 file_type:link" Yields: matching objects. """ self.Validate(expression) for obj in objs: if not isinstance(obj, rdf_client_fs.StatEntry): continue # depends on [control=['if'], data=[]] # If all match conditions pass, yield the object. for match in self.matchers: if not match(obj): break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']] else: yield obj # depends on [control=['for'], data=['obj']]
def cdata(text): """Wraps the input `text` in a ``<![CDATA[ ]]>`` block. If the text contains CDATA sections already, they are stripped and replaced by the application of an outer-most CDATA block. Args: text: A string to wrap in a CDATA block. Returns: The `text` value wrapped in ``<![CDATA[]]>`` """ if not text: return text if is_cdata(text): text = strip_cdata(text) escaped = "{0}{1}{2}".format(CDATA_START, text, CDATA_END) return escaped
def function[cdata, parameter[text]]: constant[Wraps the input `text` in a ``<![CDATA[ ]]>`` block. If the text contains CDATA sections already, they are stripped and replaced by the application of an outer-most CDATA block. Args: text: A string to wrap in a CDATA block. Returns: The `text` value wrapped in ``<![CDATA[]]>`` ] if <ast.UnaryOp object at 0x7da1b25b2020> begin[:] return[name[text]] if call[name[is_cdata], parameter[name[text]]] begin[:] variable[text] assign[=] call[name[strip_cdata], parameter[name[text]]] variable[escaped] assign[=] call[constant[{0}{1}{2}].format, parameter[name[CDATA_START], name[text], name[CDATA_END]]] return[name[escaped]]
keyword[def] identifier[cdata] ( identifier[text] ): literal[string] keyword[if] keyword[not] identifier[text] : keyword[return] identifier[text] keyword[if] identifier[is_cdata] ( identifier[text] ): identifier[text] = identifier[strip_cdata] ( identifier[text] ) identifier[escaped] = literal[string] . identifier[format] ( identifier[CDATA_START] , identifier[text] , identifier[CDATA_END] ) keyword[return] identifier[escaped]
def cdata(text): """Wraps the input `text` in a ``<![CDATA[ ]]>`` block. If the text contains CDATA sections already, they are stripped and replaced by the application of an outer-most CDATA block. Args: text: A string to wrap in a CDATA block. Returns: The `text` value wrapped in ``<![CDATA[]]>`` """ if not text: return text # depends on [control=['if'], data=[]] if is_cdata(text): text = strip_cdata(text) # depends on [control=['if'], data=[]] escaped = '{0}{1}{2}'.format(CDATA_START, text, CDATA_END) return escaped
def decorate_disabled(self): """ Return True if this decoration must be omitted, otherwise - False. This class searches for tags values in environment variable (:attr:`.Verifier.__environment_var__`), Derived class can implement any logic :return: bool """ if len(self._tags) == 0: return False if self._env_var not in os.environ: return True env_tags = os.environ[self._env_var].split(self.__class__.__tags_delimiter__) if '*' in env_tags: return False for tag in self._tags: if tag in env_tags: return False return True
def function[decorate_disabled, parameter[self]]: constant[ Return True if this decoration must be omitted, otherwise - False. This class searches for tags values in environment variable (:attr:`.Verifier.__environment_var__`), Derived class can implement any logic :return: bool ] if compare[call[name[len], parameter[name[self]._tags]] equal[==] constant[0]] begin[:] return[constant[False]] if compare[name[self]._env_var <ast.NotIn object at 0x7da2590d7190> name[os].environ] begin[:] return[constant[True]] variable[env_tags] assign[=] call[call[name[os].environ][name[self]._env_var].split, parameter[name[self].__class__.__tags_delimiter__]] if compare[constant[*] in name[env_tags]] begin[:] return[constant[False]] for taget[name[tag]] in starred[name[self]._tags] begin[:] if compare[name[tag] in name[env_tags]] begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[decorate_disabled] ( identifier[self] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[_tags] )== literal[int] : keyword[return] keyword[False] keyword[if] identifier[self] . identifier[_env_var] keyword[not] keyword[in] identifier[os] . identifier[environ] : keyword[return] keyword[True] identifier[env_tags] = identifier[os] . identifier[environ] [ identifier[self] . identifier[_env_var] ]. identifier[split] ( identifier[self] . identifier[__class__] . identifier[__tags_delimiter__] ) keyword[if] literal[string] keyword[in] identifier[env_tags] : keyword[return] keyword[False] keyword[for] identifier[tag] keyword[in] identifier[self] . identifier[_tags] : keyword[if] identifier[tag] keyword[in] identifier[env_tags] : keyword[return] keyword[False] keyword[return] keyword[True]
def decorate_disabled(self): """ Return True if this decoration must be omitted, otherwise - False. This class searches for tags values in environment variable (:attr:`.Verifier.__environment_var__`), Derived class can implement any logic :return: bool """ if len(self._tags) == 0: return False # depends on [control=['if'], data=[]] if self._env_var not in os.environ: return True # depends on [control=['if'], data=[]] env_tags = os.environ[self._env_var].split(self.__class__.__tags_delimiter__) if '*' in env_tags: return False # depends on [control=['if'], data=[]] for tag in self._tags: if tag in env_tags: return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']] return True
def _roundSlist(slist): """ Rounds a signed list over the last element and removes it. """ slist[-1] = 60 if slist[-1] >= 30 else 0 for i in range(len(slist)-1, 1, -1): if slist[i] == 60: slist[i] = 0 slist[i-1] += 1 return slist[:-1]
def function[_roundSlist, parameter[slist]]: constant[ Rounds a signed list over the last element and removes it. ] call[name[slist]][<ast.UnaryOp object at 0x7da1b11a2a70>] assign[=] <ast.IfExp object at 0x7da1b11a24d0> for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[slist]]] - constant[1]], constant[1], <ast.UnaryOp object at 0x7da1b11a1690>]]] begin[:] if compare[call[name[slist]][name[i]] equal[==] constant[60]] begin[:] call[name[slist]][name[i]] assign[=] constant[0] <ast.AugAssign object at 0x7da1b11a04c0> return[call[name[slist]][<ast.Slice object at 0x7da1b11a1c90>]]
keyword[def] identifier[_roundSlist] ( identifier[slist] ): literal[string] identifier[slist] [- literal[int] ]= literal[int] keyword[if] identifier[slist] [- literal[int] ]>= literal[int] keyword[else] literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[slist] )- literal[int] , literal[int] ,- literal[int] ): keyword[if] identifier[slist] [ identifier[i] ]== literal[int] : identifier[slist] [ identifier[i] ]= literal[int] identifier[slist] [ identifier[i] - literal[int] ]+= literal[int] keyword[return] identifier[slist] [:- literal[int] ]
def _roundSlist(slist): """ Rounds a signed list over the last element and removes it. """ slist[-1] = 60 if slist[-1] >= 30 else 0 for i in range(len(slist) - 1, 1, -1): if slist[i] == 60: slist[i] = 0 slist[i - 1] += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return slist[:-1]
def fn_int16(self, value): """ Return the value cast to an 16-bit signed integer (numpy array) or a Python int (single value) :param value: The number or array :return: The number or array as int/int8 """ if is_ndarray(value) or isinstance(value, (list, tuple)): return self._to_ndarray(value).astype(numpy.int16) else: return int(value)
def function[fn_int16, parameter[self, value]]: constant[ Return the value cast to an 16-bit signed integer (numpy array) or a Python int (single value) :param value: The number or array :return: The number or array as int/int8 ] if <ast.BoolOp object at 0x7da18f58da50> begin[:] return[call[call[name[self]._to_ndarray, parameter[name[value]]].astype, parameter[name[numpy].int16]]]
keyword[def] identifier[fn_int16] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[is_ndarray] ( identifier[value] ) keyword[or] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )): keyword[return] identifier[self] . identifier[_to_ndarray] ( identifier[value] ). identifier[astype] ( identifier[numpy] . identifier[int16] ) keyword[else] : keyword[return] identifier[int] ( identifier[value] )
def fn_int16(self, value): """ Return the value cast to an 16-bit signed integer (numpy array) or a Python int (single value) :param value: The number or array :return: The number or array as int/int8 """ if is_ndarray(value) or isinstance(value, (list, tuple)): return self._to_ndarray(value).astype(numpy.int16) # depends on [control=['if'], data=[]] else: return int(value)
def p_var_decl_ini(p): """ var_decl : DIM idlist typedef EQ expr | CONST idlist typedef EQ expr """ p[0] = None if len(p[2]) != 1: syntax_error(p.lineno(1), "Initialized variables must be declared one by one.") return if p[5] is None: return if not is_static(p[5]): if isinstance(p[5], symbols.UNARY): p[5] = make_constexpr(p.lineno(4), p[5]) # Delayed constant evaluation if p[3].implicit: p[3] = symbols.TYPEREF(p[5].type_, p.lexer.lineno, implicit=True) value = make_typecast(p[3], p[5], p.lineno(4)) defval = value if is_static(p[5]) else None if p[1] == 'DIM': SYMBOL_TABLE.declare_variable(p[2][0][0], p[2][0][1], p[3], default_value=defval) else: SYMBOL_TABLE.declare_const(p[2][0][0], p[2][0][1], p[3], default_value=defval) if defval is None: # Okay do a delayed initialization p[0] = make_sentence('LET', SYMBOL_TABLE.access_var(p[2][0][0], p.lineno(1)), value)
def function[p_var_decl_ini, parameter[p]]: constant[ var_decl : DIM idlist typedef EQ expr | CONST idlist typedef EQ expr ] call[name[p]][constant[0]] assign[=] constant[None] if compare[call[name[len], parameter[call[name[p]][constant[2]]]] not_equal[!=] constant[1]] begin[:] call[name[syntax_error], parameter[call[name[p].lineno, parameter[constant[1]]], constant[Initialized variables must be declared one by one.]]] return[None] if compare[call[name[p]][constant[5]] is constant[None]] begin[:] return[None] if <ast.UnaryOp object at 0x7da1b06f9450> begin[:] if call[name[isinstance], parameter[call[name[p]][constant[5]], name[symbols].UNARY]] begin[:] call[name[p]][constant[5]] assign[=] call[name[make_constexpr], parameter[call[name[p].lineno, parameter[constant[4]]], call[name[p]][constant[5]]]] if call[name[p]][constant[3]].implicit begin[:] call[name[p]][constant[3]] assign[=] call[name[symbols].TYPEREF, parameter[call[name[p]][constant[5]].type_, name[p].lexer.lineno]] variable[value] assign[=] call[name[make_typecast], parameter[call[name[p]][constant[3]], call[name[p]][constant[5]], call[name[p].lineno, parameter[constant[4]]]]] variable[defval] assign[=] <ast.IfExp object at 0x7da1b06f8670> if compare[call[name[p]][constant[1]] equal[==] constant[DIM]] begin[:] call[name[SYMBOL_TABLE].declare_variable, parameter[call[call[call[name[p]][constant[2]]][constant[0]]][constant[0]], call[call[call[name[p]][constant[2]]][constant[0]]][constant[1]], call[name[p]][constant[3]]]] if compare[name[defval] is constant[None]] begin[:] call[name[p]][constant[0]] assign[=] call[name[make_sentence], parameter[constant[LET], call[name[SYMBOL_TABLE].access_var, parameter[call[call[call[name[p]][constant[2]]][constant[0]]][constant[0]], call[name[p].lineno, parameter[constant[1]]]]], name[value]]]
keyword[def] identifier[p_var_decl_ini] ( identifier[p] ): literal[string] identifier[p] [ literal[int] ]= keyword[None] keyword[if] identifier[len] ( identifier[p] [ literal[int] ])!= literal[int] : identifier[syntax_error] ( identifier[p] . identifier[lineno] ( literal[int] ), literal[string] ) keyword[return] keyword[if] identifier[p] [ literal[int] ] keyword[is] keyword[None] : keyword[return] keyword[if] keyword[not] identifier[is_static] ( identifier[p] [ literal[int] ]): keyword[if] identifier[isinstance] ( identifier[p] [ literal[int] ], identifier[symbols] . identifier[UNARY] ): identifier[p] [ literal[int] ]= identifier[make_constexpr] ( identifier[p] . identifier[lineno] ( literal[int] ), identifier[p] [ literal[int] ]) keyword[if] identifier[p] [ literal[int] ]. identifier[implicit] : identifier[p] [ literal[int] ]= identifier[symbols] . identifier[TYPEREF] ( identifier[p] [ literal[int] ]. identifier[type_] , identifier[p] . identifier[lexer] . identifier[lineno] , identifier[implicit] = keyword[True] ) identifier[value] = identifier[make_typecast] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] . identifier[lineno] ( literal[int] )) identifier[defval] = identifier[value] keyword[if] identifier[is_static] ( identifier[p] [ literal[int] ]) keyword[else] keyword[None] keyword[if] identifier[p] [ literal[int] ]== literal[string] : identifier[SYMBOL_TABLE] . identifier[declare_variable] ( identifier[p] [ literal[int] ][ literal[int] ][ literal[int] ], identifier[p] [ literal[int] ][ literal[int] ][ literal[int] ], identifier[p] [ literal[int] ], identifier[default_value] = identifier[defval] ) keyword[else] : identifier[SYMBOL_TABLE] . identifier[declare_const] ( identifier[p] [ literal[int] ][ literal[int] ][ literal[int] ], identifier[p] [ literal[int] ][ literal[int] ][ literal[int] ], identifier[p] [ literal[int] ], identifier[default_value] = identifier[defval] ) keyword[if] identifier[defval] keyword[is] keyword[None] : identifier[p] [ literal[int] ]= identifier[make_sentence] ( literal[string] , identifier[SYMBOL_TABLE] . identifier[access_var] ( identifier[p] [ literal[int] ][ literal[int] ][ literal[int] ], identifier[p] . identifier[lineno] ( literal[int] )), identifier[value] )
def p_var_decl_ini(p): """ var_decl : DIM idlist typedef EQ expr | CONST idlist typedef EQ expr """ p[0] = None if len(p[2]) != 1: syntax_error(p.lineno(1), 'Initialized variables must be declared one by one.') return # depends on [control=['if'], data=[]] if p[5] is None: return # depends on [control=['if'], data=[]] if not is_static(p[5]): if isinstance(p[5], symbols.UNARY): p[5] = make_constexpr(p.lineno(4), p[5]) # Delayed constant evaluation # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if p[3].implicit: p[3] = symbols.TYPEREF(p[5].type_, p.lexer.lineno, implicit=True) # depends on [control=['if'], data=[]] value = make_typecast(p[3], p[5], p.lineno(4)) defval = value if is_static(p[5]) else None if p[1] == 'DIM': SYMBOL_TABLE.declare_variable(p[2][0][0], p[2][0][1], p[3], default_value=defval) # depends on [control=['if'], data=[]] else: SYMBOL_TABLE.declare_const(p[2][0][0], p[2][0][1], p[3], default_value=defval) if defval is None: # Okay do a delayed initialization p[0] = make_sentence('LET', SYMBOL_TABLE.access_var(p[2][0][0], p.lineno(1)), value) # depends on [control=['if'], data=[]]
def get_attribute_data(attr_ids, node_ids, **kwargs): """ For a given attribute or set of attributes, return all the resources and resource scenarios in the network """ node_attrs = db.DBSession.query(ResourceAttr).\ options(joinedload_all('attr')).\ filter(ResourceAttr.node_id.in_(node_ids), ResourceAttr.attr_id.in_(attr_ids)).all() ra_ids = [] for ra in node_attrs: ra_ids.append(ra.id) resource_scenarios = db.DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all() for rs in resource_scenarios: if rs.dataset.hidden == 'Y': try: rs.dataset.check_read_permission(kwargs.get('user_id')) except: rs.dataset.value = None db.DBSession.expunge(rs) return node_attrs, resource_scenarios
def function[get_attribute_data, parameter[attr_ids, node_ids]]: constant[ For a given attribute or set of attributes, return all the resources and resource scenarios in the network ] variable[node_attrs] assign[=] call[call[call[call[name[db].DBSession.query, parameter[name[ResourceAttr]]].options, parameter[call[name[joinedload_all], parameter[constant[attr]]]]].filter, parameter[call[name[ResourceAttr].node_id.in_, parameter[name[node_ids]]], call[name[ResourceAttr].attr_id.in_, parameter[name[attr_ids]]]]].all, parameter[]] variable[ra_ids] assign[=] list[[]] for taget[name[ra]] in starred[name[node_attrs]] begin[:] call[name[ra_ids].append, parameter[name[ra].id]] variable[resource_scenarios] assign[=] call[call[call[call[call[call[name[db].DBSession.query, parameter[name[ResourceScenario]]].filter, parameter[call[name[ResourceScenario].resource_attr_id.in_, parameter[name[ra_ids]]]]].options, parameter[call[name[joinedload], parameter[constant[resourceattr]]]]].options, parameter[call[name[joinedload_all], parameter[constant[dataset.metadata]]]]].order_by, parameter[name[ResourceScenario].scenario_id]].all, parameter[]] for taget[name[rs]] in starred[name[resource_scenarios]] begin[:] if compare[name[rs].dataset.hidden equal[==] constant[Y]] begin[:] <ast.Try object at 0x7da20c795330> call[name[db].DBSession.expunge, parameter[name[rs]]] return[tuple[[<ast.Name object at 0x7da20c796680>, <ast.Name object at 0x7da20c796860>]]]
keyword[def] identifier[get_attribute_data] ( identifier[attr_ids] , identifier[node_ids] ,** identifier[kwargs] ): literal[string] identifier[node_attrs] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceAttr] ). identifier[options] ( identifier[joinedload_all] ( literal[string] )). identifier[filter] ( identifier[ResourceAttr] . identifier[node_id] . identifier[in_] ( identifier[node_ids] ), identifier[ResourceAttr] . identifier[attr_id] . identifier[in_] ( identifier[attr_ids] )). identifier[all] () identifier[ra_ids] =[] keyword[for] identifier[ra] keyword[in] identifier[node_attrs] : identifier[ra_ids] . identifier[append] ( identifier[ra] . identifier[id] ) identifier[resource_scenarios] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceScenario] ). identifier[filter] ( identifier[ResourceScenario] . identifier[resource_attr_id] . identifier[in_] ( identifier[ra_ids] )). identifier[options] ( identifier[joinedload] ( literal[string] )). identifier[options] ( identifier[joinedload_all] ( literal[string] )). identifier[order_by] ( identifier[ResourceScenario] . identifier[scenario_id] ). identifier[all] () keyword[for] identifier[rs] keyword[in] identifier[resource_scenarios] : keyword[if] identifier[rs] . identifier[dataset] . identifier[hidden] == literal[string] : keyword[try] : identifier[rs] . identifier[dataset] . identifier[check_read_permission] ( identifier[kwargs] . identifier[get] ( literal[string] )) keyword[except] : identifier[rs] . identifier[dataset] . identifier[value] = keyword[None] identifier[db] . identifier[DBSession] . identifier[expunge] ( identifier[rs] ) keyword[return] identifier[node_attrs] , identifier[resource_scenarios]
def get_attribute_data(attr_ids, node_ids, **kwargs): """ For a given attribute or set of attributes, return all the resources and resource scenarios in the network """ node_attrs = db.DBSession.query(ResourceAttr).options(joinedload_all('attr')).filter(ResourceAttr.node_id.in_(node_ids), ResourceAttr.attr_id.in_(attr_ids)).all() ra_ids = [] for ra in node_attrs: ra_ids.append(ra.id) # depends on [control=['for'], data=['ra']] resource_scenarios = db.DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all() for rs in resource_scenarios: if rs.dataset.hidden == 'Y': try: rs.dataset.check_read_permission(kwargs.get('user_id')) # depends on [control=['try'], data=[]] except: rs.dataset.value = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] db.DBSession.expunge(rs) # depends on [control=['for'], data=['rs']] return (node_attrs, resource_scenarios)
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False): """ Register a signal handler for `signal.SIGHUP` that checks for modified files and only acts if at least one modified file is found. @type signal_or_callable: str, int or callable @param signal_or_callable: You can pass either a signal or a callable. The signal can be specified by name or number. If specifying by name, the 'SIG' portion is optional. For example, valid values for SIGINT include 'INT', 'SIGINT' and `signal.SIGINT`. Alternatively, you can pass a callable that will be called with the list of changed files. So the call signature should be `func(list)`. The return value of the callable is ignored. @type verbose: bool or callable @param verbose: Defaults to False. True indicates that a message should be printed. You can also pass a callable such as log.info. """ #noinspection PyUnusedLocal def handle_hup(signum, frame): changed = modified() if changed: if callable(signal_or_callable): func = signal_or_callable args = (changed,) op = 'Calling' try: name = signal_or_callable.__name__ except Exception: name = str(signal_or_callable) else: if isinstance(signal_or_callable, int): name = str(signal_or_callable) signum = signal_or_callable if verbose: for item in dir(signal): if item.startswith('SIG') and getattr(signal, item) == signal_or_callable: name = item break else: name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable signum = getattr(signal, name) func = os.kill args = (os.getpid(), signum) op = 'Sending' if verbose: more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else '' message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more) if callable(verbose): #noinspection PyCallingNonCallable verbose(message) else: print(message) func(*args) files() signal.signal(signal.SIGHUP, handle_hup) signal.siginterrupt(signal.SIGHUP, False)
def function[hup_hook, parameter[signal_or_callable, verbose]]: constant[ Register a signal handler for `signal.SIGHUP` that checks for modified files and only acts if at least one modified file is found. @type signal_or_callable: str, int or callable @param signal_or_callable: You can pass either a signal or a callable. The signal can be specified by name or number. If specifying by name, the 'SIG' portion is optional. For example, valid values for SIGINT include 'INT', 'SIGINT' and `signal.SIGINT`. Alternatively, you can pass a callable that will be called with the list of changed files. So the call signature should be `func(list)`. The return value of the callable is ignored. @type verbose: bool or callable @param verbose: Defaults to False. True indicates that a message should be printed. You can also pass a callable such as log.info. ] def function[handle_hup, parameter[signum, frame]]: variable[changed] assign[=] call[name[modified], parameter[]] if name[changed] begin[:] if call[name[callable], parameter[name[signal_or_callable]]] begin[:] variable[func] assign[=] name[signal_or_callable] variable[args] assign[=] tuple[[<ast.Name object at 0x7da204344430>]] variable[op] assign[=] constant[Calling] <ast.Try object at 0x7da204347370> if name[verbose] begin[:] variable[more] assign[=] <ast.IfExp object at 0x7da18f09d780> variable[message] assign[=] call[constant[{0} {1} because {2}{3} changed].format, parameter[name[op], name[name], call[name[changed]][constant[0]], name[more]]] if call[name[callable], parameter[name[verbose]]] begin[:] call[name[verbose], parameter[name[message]]] call[name[func], parameter[<ast.Starred object at 0x7da18f09f0a0>]] call[name[files], parameter[]] call[name[signal].signal, parameter[name[signal].SIGHUP, name[handle_hup]]] call[name[signal].siginterrupt, parameter[name[signal].SIGHUP, constant[False]]]
keyword[def] identifier[hup_hook] ( identifier[signal_or_callable] = identifier[signal] . identifier[SIGTERM] , identifier[verbose] = keyword[False] ): literal[string] keyword[def] identifier[handle_hup] ( identifier[signum] , identifier[frame] ): identifier[changed] = identifier[modified] () keyword[if] identifier[changed] : keyword[if] identifier[callable] ( identifier[signal_or_callable] ): identifier[func] = identifier[signal_or_callable] identifier[args] =( identifier[changed] ,) identifier[op] = literal[string] keyword[try] : identifier[name] = identifier[signal_or_callable] . identifier[__name__] keyword[except] identifier[Exception] : identifier[name] = identifier[str] ( identifier[signal_or_callable] ) keyword[else] : keyword[if] identifier[isinstance] ( identifier[signal_or_callable] , identifier[int] ): identifier[name] = identifier[str] ( identifier[signal_or_callable] ) identifier[signum] = identifier[signal_or_callable] keyword[if] identifier[verbose] : keyword[for] identifier[item] keyword[in] identifier[dir] ( identifier[signal] ): keyword[if] identifier[item] . identifier[startswith] ( literal[string] ) keyword[and] identifier[getattr] ( identifier[signal] , identifier[item] )== identifier[signal_or_callable] : identifier[name] = identifier[item] keyword[break] keyword[else] : identifier[name] = identifier[signal_or_callable] keyword[if] identifier[signal_or_callable] . identifier[startswith] ( literal[string] ) keyword[else] literal[string] + identifier[signal_or_callable] identifier[signum] = identifier[getattr] ( identifier[signal] , identifier[name] ) identifier[func] = identifier[os] . identifier[kill] identifier[args] =( identifier[os] . identifier[getpid] (), identifier[signum] ) identifier[op] = literal[string] keyword[if] identifier[verbose] : identifier[more] = literal[string] . identifier[format] ( identifier[len] ( identifier[changed] )) keyword[if] identifier[len] ( identifier[changed] )> literal[int] keyword[else] literal[string] identifier[message] = literal[string] . identifier[format] ( identifier[op] , identifier[name] , identifier[changed] [ literal[int] ], identifier[more] ) keyword[if] identifier[callable] ( identifier[verbose] ): identifier[verbose] ( identifier[message] ) keyword[else] : identifier[print] ( identifier[message] ) identifier[func] (* identifier[args] ) identifier[files] () identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGHUP] , identifier[handle_hup] ) identifier[signal] . identifier[siginterrupt] ( identifier[signal] . identifier[SIGHUP] , keyword[False] )
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False): """ Register a signal handler for `signal.SIGHUP` that checks for modified files and only acts if at least one modified file is found. @type signal_or_callable: str, int or callable @param signal_or_callable: You can pass either a signal or a callable. The signal can be specified by name or number. If specifying by name, the 'SIG' portion is optional. For example, valid values for SIGINT include 'INT', 'SIGINT' and `signal.SIGINT`. Alternatively, you can pass a callable that will be called with the list of changed files. So the call signature should be `func(list)`. The return value of the callable is ignored. @type verbose: bool or callable @param verbose: Defaults to False. True indicates that a message should be printed. You can also pass a callable such as log.info. """ #noinspection PyUnusedLocal def handle_hup(signum, frame): changed = modified() if changed: if callable(signal_or_callable): func = signal_or_callable args = (changed,) op = 'Calling' try: name = signal_or_callable.__name__ # depends on [control=['try'], data=[]] except Exception: name = str(signal_or_callable) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: if isinstance(signal_or_callable, int): name = str(signal_or_callable) signum = signal_or_callable if verbose: for item in dir(signal): if item.startswith('SIG') and getattr(signal, item) == signal_or_callable: name = item break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable signum = getattr(signal, name) func = os.kill args = (os.getpid(), signum) op = 'Sending' if verbose: more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else '' message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more) if callable(verbose): #noinspection PyCallingNonCallable verbose(message) # depends on [control=['if'], data=[]] else: print(message) # depends on [control=['if'], data=[]] func(*args) # depends on [control=['if'], data=[]] files() signal.signal(signal.SIGHUP, handle_hup) signal.siginterrupt(signal.SIGHUP, False)
def getMessage(self): """Returns a colorized log message based on the log level. If the platform is windows the original message will be returned without colorization windows escape codes are crazy. :returns: ``str`` """ msg = str(self.msg) if self.args: msg = msg % self.args if platform.system().lower() == 'windows' or self.levelno < 10: return msg elif self.levelno >= 50: return utils.return_colorized(msg, 'critical') elif self.levelno >= 40: return utils.return_colorized(msg, 'error') elif self.levelno >= 30: return utils.return_colorized(msg, 'warn') elif self.levelno >= 20: return utils.return_colorized(msg, 'info') else: return utils.return_colorized(msg, 'debug')
def function[getMessage, parameter[self]]: constant[Returns a colorized log message based on the log level. If the platform is windows the original message will be returned without colorization windows escape codes are crazy. :returns: ``str`` ] variable[msg] assign[=] call[name[str], parameter[name[self].msg]] if name[self].args begin[:] variable[msg] assign[=] binary_operation[name[msg] <ast.Mod object at 0x7da2590d6920> name[self].args] if <ast.BoolOp object at 0x7da18c4cd180> begin[:] return[name[msg]]
keyword[def] identifier[getMessage] ( identifier[self] ): literal[string] identifier[msg] = identifier[str] ( identifier[self] . identifier[msg] ) keyword[if] identifier[self] . identifier[args] : identifier[msg] = identifier[msg] % identifier[self] . identifier[args] keyword[if] identifier[platform] . identifier[system] (). identifier[lower] ()== literal[string] keyword[or] identifier[self] . identifier[levelno] < literal[int] : keyword[return] identifier[msg] keyword[elif] identifier[self] . identifier[levelno] >= literal[int] : keyword[return] identifier[utils] . identifier[return_colorized] ( identifier[msg] , literal[string] ) keyword[elif] identifier[self] . identifier[levelno] >= literal[int] : keyword[return] identifier[utils] . identifier[return_colorized] ( identifier[msg] , literal[string] ) keyword[elif] identifier[self] . identifier[levelno] >= literal[int] : keyword[return] identifier[utils] . identifier[return_colorized] ( identifier[msg] , literal[string] ) keyword[elif] identifier[self] . identifier[levelno] >= literal[int] : keyword[return] identifier[utils] . identifier[return_colorized] ( identifier[msg] , literal[string] ) keyword[else] : keyword[return] identifier[utils] . identifier[return_colorized] ( identifier[msg] , literal[string] )
def getMessage(self): """Returns a colorized log message based on the log level. If the platform is windows the original message will be returned without colorization windows escape codes are crazy. :returns: ``str`` """ msg = str(self.msg) if self.args: msg = msg % self.args # depends on [control=['if'], data=[]] if platform.system().lower() == 'windows' or self.levelno < 10: return msg # depends on [control=['if'], data=[]] elif self.levelno >= 50: return utils.return_colorized(msg, 'critical') # depends on [control=['if'], data=[]] elif self.levelno >= 40: return utils.return_colorized(msg, 'error') # depends on [control=['if'], data=[]] elif self.levelno >= 30: return utils.return_colorized(msg, 'warn') # depends on [control=['if'], data=[]] elif self.levelno >= 20: return utils.return_colorized(msg, 'info') # depends on [control=['if'], data=[]] else: return utils.return_colorized(msg, 'debug')
def _read_field(self): ''' Read a single byte for field type, then read the value. ''' ftype = self._input[self._pos] self._pos += 1 reader = self.field_type_map.get(ftype) if reader: return reader(self) raise Reader.FieldError('Unknown field type %s', ftype)
def function[_read_field, parameter[self]]: constant[ Read a single byte for field type, then read the value. ] variable[ftype] assign[=] call[name[self]._input][name[self]._pos] <ast.AugAssign object at 0x7da18f09d030> variable[reader] assign[=] call[name[self].field_type_map.get, parameter[name[ftype]]] if name[reader] begin[:] return[call[name[reader], parameter[name[self]]]] <ast.Raise object at 0x7da18f09eb90>
keyword[def] identifier[_read_field] ( identifier[self] ): literal[string] identifier[ftype] = identifier[self] . identifier[_input] [ identifier[self] . identifier[_pos] ] identifier[self] . identifier[_pos] += literal[int] identifier[reader] = identifier[self] . identifier[field_type_map] . identifier[get] ( identifier[ftype] ) keyword[if] identifier[reader] : keyword[return] identifier[reader] ( identifier[self] ) keyword[raise] identifier[Reader] . identifier[FieldError] ( literal[string] , identifier[ftype] )
def _read_field(self): """ Read a single byte for field type, then read the value. """ ftype = self._input[self._pos] self._pos += 1 reader = self.field_type_map.get(ftype) if reader: return reader(self) # depends on [control=['if'], data=[]] raise Reader.FieldError('Unknown field type %s', ftype)
def filter_styles(style, group, other_groups, blacklist=[]): """ Filters styles which are specific to a particular artist, e.g. for a GraphPlot this will filter options specific to the nodes and edges. Arguments --------- style: dict Dictionary of styles and values group: str Group within the styles to filter for other_groups: list Other groups to filter out blacklist: list (optional) List of options to filter out Returns ------- filtered: dict Filtered dictionary of styles """ group = group+'_' filtered = {} for k, v in style.items(): if (any(k.startswith(p) for p in other_groups) or k.startswith(group) or k in blacklist): continue filtered[k] = v for k, v in style.items(): if not k.startswith(group) or k in blacklist: continue filtered[k[len(group):]] = v return filtered
def function[filter_styles, parameter[style, group, other_groups, blacklist]]: constant[ Filters styles which are specific to a particular artist, e.g. for a GraphPlot this will filter options specific to the nodes and edges. Arguments --------- style: dict Dictionary of styles and values group: str Group within the styles to filter for other_groups: list Other groups to filter out blacklist: list (optional) List of options to filter out Returns ------- filtered: dict Filtered dictionary of styles ] variable[group] assign[=] binary_operation[name[group] + constant[_]] variable[filtered] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da20cabd930>, <ast.Name object at 0x7da20cabdc30>]]] in starred[call[name[style].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da20cabce80> begin[:] continue call[name[filtered]][name[k]] assign[=] name[v] for taget[tuple[[<ast.Name object at 0x7da18f09d360>, <ast.Name object at 0x7da18f09cd00>]]] in starred[call[name[style].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da18f09e2c0> begin[:] continue call[name[filtered]][call[name[k]][<ast.Slice object at 0x7da18f00f0d0>]] assign[=] name[v] return[name[filtered]]
keyword[def] identifier[filter_styles] ( identifier[style] , identifier[group] , identifier[other_groups] , identifier[blacklist] =[]): literal[string] identifier[group] = identifier[group] + literal[string] identifier[filtered] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[style] . identifier[items] (): keyword[if] ( identifier[any] ( identifier[k] . identifier[startswith] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[other_groups] ) keyword[or] identifier[k] . identifier[startswith] ( identifier[group] ) keyword[or] identifier[k] keyword[in] identifier[blacklist] ): keyword[continue] identifier[filtered] [ identifier[k] ]= identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[style] . identifier[items] (): keyword[if] keyword[not] identifier[k] . identifier[startswith] ( identifier[group] ) keyword[or] identifier[k] keyword[in] identifier[blacklist] : keyword[continue] identifier[filtered] [ identifier[k] [ identifier[len] ( identifier[group] ):]]= identifier[v] keyword[return] identifier[filtered]
def filter_styles(style, group, other_groups, blacklist=[]): """ Filters styles which are specific to a particular artist, e.g. for a GraphPlot this will filter options specific to the nodes and edges. Arguments --------- style: dict Dictionary of styles and values group: str Group within the styles to filter for other_groups: list Other groups to filter out blacklist: list (optional) List of options to filter out Returns ------- filtered: dict Filtered dictionary of styles """ group = group + '_' filtered = {} for (k, v) in style.items(): if any((k.startswith(p) for p in other_groups)) or k.startswith(group) or k in blacklist: continue # depends on [control=['if'], data=[]] filtered[k] = v # depends on [control=['for'], data=[]] for (k, v) in style.items(): if not k.startswith(group) or k in blacklist: continue # depends on [control=['if'], data=[]] filtered[k[len(group):]] = v # depends on [control=['for'], data=[]] return filtered
def task_pause_info(task_id): """ Executor for `globus task pause-info` """ client = get_client() res = client.task_pause_info(task_id) def _custom_text_format(res): explicit_pauses = [ field for field in EXPLICIT_PAUSE_MSG_FIELDS # n.b. some keys are absent for completed tasks if res.get(field[1]) ] effective_pause_rules = res["pause_rules"] if not explicit_pauses and not effective_pause_rules: safeprint("Task {} is not paused.".format(task_id)) click.get_current_context().exit(0) if explicit_pauses: formatted_print( res, fields=explicit_pauses, text_format=FORMAT_TEXT_RECORD, text_preamble="This task has been explicitly paused.\n", text_epilog="\n" if effective_pause_rules else None, ) if effective_pause_rules: formatted_print( effective_pause_rules, fields=PAUSE_RULE_DISPLAY_FIELDS, text_preamble=( "The following pause rules are effective on this task:\n" ), ) formatted_print(res, text_format=_custom_text_format)
def function[task_pause_info, parameter[task_id]]: constant[ Executor for `globus task pause-info` ] variable[client] assign[=] call[name[get_client], parameter[]] variable[res] assign[=] call[name[client].task_pause_info, parameter[name[task_id]]] def function[_custom_text_format, parameter[res]]: variable[explicit_pauses] assign[=] <ast.ListComp object at 0x7da1b03db940> variable[effective_pause_rules] assign[=] call[name[res]][constant[pause_rules]] if <ast.BoolOp object at 0x7da1b03dab30> begin[:] call[name[safeprint], parameter[call[constant[Task {} is not paused.].format, parameter[name[task_id]]]]] call[call[name[click].get_current_context, parameter[]].exit, parameter[constant[0]]] if name[explicit_pauses] begin[:] call[name[formatted_print], parameter[name[res]]] if name[effective_pause_rules] begin[:] call[name[formatted_print], parameter[name[effective_pause_rules]]] call[name[formatted_print], parameter[name[res]]]
keyword[def] identifier[task_pause_info] ( identifier[task_id] ): literal[string] identifier[client] = identifier[get_client] () identifier[res] = identifier[client] . identifier[task_pause_info] ( identifier[task_id] ) keyword[def] identifier[_custom_text_format] ( identifier[res] ): identifier[explicit_pauses] =[ identifier[field] keyword[for] identifier[field] keyword[in] identifier[EXPLICIT_PAUSE_MSG_FIELDS] keyword[if] identifier[res] . identifier[get] ( identifier[field] [ literal[int] ]) ] identifier[effective_pause_rules] = identifier[res] [ literal[string] ] keyword[if] keyword[not] identifier[explicit_pauses] keyword[and] keyword[not] identifier[effective_pause_rules] : identifier[safeprint] ( literal[string] . identifier[format] ( identifier[task_id] )) identifier[click] . identifier[get_current_context] (). identifier[exit] ( literal[int] ) keyword[if] identifier[explicit_pauses] : identifier[formatted_print] ( identifier[res] , identifier[fields] = identifier[explicit_pauses] , identifier[text_format] = identifier[FORMAT_TEXT_RECORD] , identifier[text_preamble] = literal[string] , identifier[text_epilog] = literal[string] keyword[if] identifier[effective_pause_rules] keyword[else] keyword[None] , ) keyword[if] identifier[effective_pause_rules] : identifier[formatted_print] ( identifier[effective_pause_rules] , identifier[fields] = identifier[PAUSE_RULE_DISPLAY_FIELDS] , identifier[text_preamble] =( literal[string] ), ) identifier[formatted_print] ( identifier[res] , identifier[text_format] = identifier[_custom_text_format] )
def task_pause_info(task_id): """ Executor for `globus task pause-info` """ client = get_client() res = client.task_pause_info(task_id) def _custom_text_format(res): # n.b. some keys are absent for completed tasks explicit_pauses = [field for field in EXPLICIT_PAUSE_MSG_FIELDS if res.get(field[1])] effective_pause_rules = res['pause_rules'] if not explicit_pauses and (not effective_pause_rules): safeprint('Task {} is not paused.'.format(task_id)) click.get_current_context().exit(0) # depends on [control=['if'], data=[]] if explicit_pauses: formatted_print(res, fields=explicit_pauses, text_format=FORMAT_TEXT_RECORD, text_preamble='This task has been explicitly paused.\n', text_epilog='\n' if effective_pause_rules else None) # depends on [control=['if'], data=[]] if effective_pause_rules: formatted_print(effective_pause_rules, fields=PAUSE_RULE_DISPLAY_FIELDS, text_preamble='The following pause rules are effective on this task:\n') # depends on [control=['if'], data=[]] formatted_print(res, text_format=_custom_text_format)
def StartHuntFlowOnClient(client_id, hunt_id): """Starts a flow corresponding to a given hunt on a given client.""" hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) hunt_obj = CompleteHuntIfExpirationTimeReached(hunt_obj) # There may be a little race between foreman rules being removed and # foreman scheduling a client on an (already) paused hunt. Making sure # we don't lose clients in such a race by accepting clients for paused # hunts. if not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_obj.hunt_state): return if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD: hunt_args = hunt_obj.args.standard if hunt_obj.client_rate > 0: # Given that we use caching in _GetNumClients and hunt_obj may be updated # in another process, we have to account for cases where num_clients_diff # may go below 0. num_clients_diff = max( 0, _GetNumClients(hunt_obj.hunt_id) - hunt_obj.num_clients_at_start_time) next_client_due_msecs = int( num_clients_diff / hunt_obj.client_rate * 60e6) start_at = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch( hunt_obj.last_start_time.AsMicrosecondsSinceEpoch() + next_client_due_msecs) else: start_at = None # TODO(user): remove client_rate support when AFF4 is gone. # In REL_DB always work as if client rate is 0. flow_cls = registry.FlowRegistry.FlowClassByName(hunt_args.flow_name) flow_args = hunt_args.flow_args if hunt_args.HasField("flow_args") else None flow.StartFlow( client_id=client_id, creator=hunt_obj.creator, cpu_limit=hunt_obj.per_client_cpu_limit, network_bytes_limit=hunt_obj.per_client_network_bytes_limit, flow_cls=flow_cls, flow_args=flow_args, start_at=start_at, parent_hunt_id=hunt_id) if hunt_obj.client_limit: if _GetNumClients(hunt_obj.hunt_id) >= hunt_obj.client_limit: PauseHunt(hunt_id) elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE: raise NotImplementedError() else: raise UnknownHuntTypeError("Can't determine hunt type when starting " "hunt %s on client %s." % (client_id, hunt_id))
def function[StartHuntFlowOnClient, parameter[client_id, hunt_id]]: constant[Starts a flow corresponding to a given hunt on a given client.] variable[hunt_obj] assign[=] call[name[data_store].REL_DB.ReadHuntObject, parameter[name[hunt_id]]] variable[hunt_obj] assign[=] call[name[CompleteHuntIfExpirationTimeReached], parameter[name[hunt_obj]]] if <ast.UnaryOp object at 0x7da1b1cc29b0> begin[:] return[None] if compare[name[hunt_obj].args.hunt_type equal[==] name[hunt_obj].args.HuntType.STANDARD] begin[:] variable[hunt_args] assign[=] name[hunt_obj].args.standard if compare[name[hunt_obj].client_rate greater[>] constant[0]] begin[:] variable[num_clients_diff] assign[=] call[name[max], parameter[constant[0], binary_operation[call[name[_GetNumClients], parameter[name[hunt_obj].hunt_id]] - name[hunt_obj].num_clients_at_start_time]]] variable[next_client_due_msecs] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[num_clients_diff] / name[hunt_obj].client_rate] * constant[60000000.0]]]] variable[start_at] assign[=] call[name[rdfvalue].RDFDatetime.FromMicrosecondsSinceEpoch, parameter[binary_operation[call[name[hunt_obj].last_start_time.AsMicrosecondsSinceEpoch, parameter[]] + name[next_client_due_msecs]]]] variable[flow_cls] assign[=] call[name[registry].FlowRegistry.FlowClassByName, parameter[name[hunt_args].flow_name]] variable[flow_args] assign[=] <ast.IfExp object at 0x7da1b1cc0310> call[name[flow].StartFlow, parameter[]] if name[hunt_obj].client_limit begin[:] if compare[call[name[_GetNumClients], parameter[name[hunt_obj].hunt_id]] greater_or_equal[>=] name[hunt_obj].client_limit] begin[:] call[name[PauseHunt], parameter[name[hunt_id]]]
keyword[def] identifier[StartHuntFlowOnClient] ( identifier[client_id] , identifier[hunt_id] ): literal[string] identifier[hunt_obj] = identifier[data_store] . identifier[REL_DB] . identifier[ReadHuntObject] ( identifier[hunt_id] ) identifier[hunt_obj] = identifier[CompleteHuntIfExpirationTimeReached] ( identifier[hunt_obj] ) keyword[if] keyword[not] identifier[rdf_hunt_objects] . identifier[IsHuntSuitableForFlowProcessing] ( identifier[hunt_obj] . identifier[hunt_state] ): keyword[return] keyword[if] identifier[hunt_obj] . identifier[args] . identifier[hunt_type] == identifier[hunt_obj] . identifier[args] . identifier[HuntType] . identifier[STANDARD] : identifier[hunt_args] = identifier[hunt_obj] . identifier[args] . identifier[standard] keyword[if] identifier[hunt_obj] . identifier[client_rate] > literal[int] : identifier[num_clients_diff] = identifier[max] ( literal[int] , identifier[_GetNumClients] ( identifier[hunt_obj] . identifier[hunt_id] )- identifier[hunt_obj] . identifier[num_clients_at_start_time] ) identifier[next_client_due_msecs] = identifier[int] ( identifier[num_clients_diff] / identifier[hunt_obj] . identifier[client_rate] * literal[int] ) identifier[start_at] = identifier[rdfvalue] . identifier[RDFDatetime] . identifier[FromMicrosecondsSinceEpoch] ( identifier[hunt_obj] . identifier[last_start_time] . identifier[AsMicrosecondsSinceEpoch] ()+ identifier[next_client_due_msecs] ) keyword[else] : identifier[start_at] = keyword[None] identifier[flow_cls] = identifier[registry] . identifier[FlowRegistry] . identifier[FlowClassByName] ( identifier[hunt_args] . identifier[flow_name] ) identifier[flow_args] = identifier[hunt_args] . identifier[flow_args] keyword[if] identifier[hunt_args] . identifier[HasField] ( literal[string] ) keyword[else] keyword[None] identifier[flow] . identifier[StartFlow] ( identifier[client_id] = identifier[client_id] , identifier[creator] = identifier[hunt_obj] . identifier[creator] , identifier[cpu_limit] = identifier[hunt_obj] . identifier[per_client_cpu_limit] , identifier[network_bytes_limit] = identifier[hunt_obj] . identifier[per_client_network_bytes_limit] , identifier[flow_cls] = identifier[flow_cls] , identifier[flow_args] = identifier[flow_args] , identifier[start_at] = identifier[start_at] , identifier[parent_hunt_id] = identifier[hunt_id] ) keyword[if] identifier[hunt_obj] . identifier[client_limit] : keyword[if] identifier[_GetNumClients] ( identifier[hunt_obj] . identifier[hunt_id] )>= identifier[hunt_obj] . identifier[client_limit] : identifier[PauseHunt] ( identifier[hunt_id] ) keyword[elif] identifier[hunt_obj] . identifier[args] . identifier[hunt_type] == identifier[hunt_obj] . identifier[args] . identifier[HuntType] . identifier[VARIABLE] : keyword[raise] identifier[NotImplementedError] () keyword[else] : keyword[raise] identifier[UnknownHuntTypeError] ( literal[string] literal[string] %( identifier[client_id] , identifier[hunt_id] ))
def StartHuntFlowOnClient(client_id, hunt_id): """Starts a flow corresponding to a given hunt on a given client.""" hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) hunt_obj = CompleteHuntIfExpirationTimeReached(hunt_obj) # There may be a little race between foreman rules being removed and # foreman scheduling a client on an (already) paused hunt. Making sure # we don't lose clients in such a race by accepting clients for paused # hunts. if not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_obj.hunt_state): return # depends on [control=['if'], data=[]] if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD: hunt_args = hunt_obj.args.standard if hunt_obj.client_rate > 0: # Given that we use caching in _GetNumClients and hunt_obj may be updated # in another process, we have to account for cases where num_clients_diff # may go below 0. num_clients_diff = max(0, _GetNumClients(hunt_obj.hunt_id) - hunt_obj.num_clients_at_start_time) next_client_due_msecs = int(num_clients_diff / hunt_obj.client_rate * 60000000.0) start_at = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(hunt_obj.last_start_time.AsMicrosecondsSinceEpoch() + next_client_due_msecs) # depends on [control=['if'], data=[]] else: start_at = None # TODO(user): remove client_rate support when AFF4 is gone. # In REL_DB always work as if client rate is 0. flow_cls = registry.FlowRegistry.FlowClassByName(hunt_args.flow_name) flow_args = hunt_args.flow_args if hunt_args.HasField('flow_args') else None flow.StartFlow(client_id=client_id, creator=hunt_obj.creator, cpu_limit=hunt_obj.per_client_cpu_limit, network_bytes_limit=hunt_obj.per_client_network_bytes_limit, flow_cls=flow_cls, flow_args=flow_args, start_at=start_at, parent_hunt_id=hunt_id) if hunt_obj.client_limit: if _GetNumClients(hunt_obj.hunt_id) >= hunt_obj.client_limit: PauseHunt(hunt_id) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE: raise NotImplementedError() # depends on [control=['if'], data=[]] else: raise UnknownHuntTypeError("Can't determine hunt type when starting hunt %s on client %s." % (client_id, hunt_id))
def create_socket(self): """Create a socket for the daemon, depending on the directory location. Args: config_dir (str): The absolute path to the config directory used by the daemon. Returns: socket.socket: The daemon socket. Clients connect to this socket. """ socket_path = os.path.join(self.config_dir, 'pueue.sock') # Create Socket and exit with 1, if socket can't be created try: if os.path.exists(socket_path): os.remove(socket_path) self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(socket_path) self.socket.setblocking(0) self.socket.listen(0) # Set file permissions os.chmod(socket_path, stat.S_IRWXU) except Exception: self.logger.error("Daemon couldn't socket. Aborting") self.logger.exception() sys.exit(1) return self.socket
def function[create_socket, parameter[self]]: constant[Create a socket for the daemon, depending on the directory location. Args: config_dir (str): The absolute path to the config directory used by the daemon. Returns: socket.socket: The daemon socket. Clients connect to this socket. ] variable[socket_path] assign[=] call[name[os].path.join, parameter[name[self].config_dir, constant[pueue.sock]]] <ast.Try object at 0x7da1b0e7c160> return[name[self].socket]
keyword[def] identifier[create_socket] ( identifier[self] ): literal[string] identifier[socket_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[config_dir] , literal[string] ) keyword[try] : keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[socket_path] ): identifier[os] . identifier[remove] ( identifier[socket_path] ) identifier[self] . identifier[socket] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_UNIX] , identifier[socket] . identifier[SOCK_STREAM] ) identifier[self] . identifier[socket] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_REUSEADDR] , literal[int] ) identifier[self] . identifier[socket] . identifier[bind] ( identifier[socket_path] ) identifier[self] . identifier[socket] . identifier[setblocking] ( literal[int] ) identifier[self] . identifier[socket] . identifier[listen] ( literal[int] ) identifier[os] . identifier[chmod] ( identifier[socket_path] , identifier[stat] . identifier[S_IRWXU] ) keyword[except] identifier[Exception] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] ) identifier[self] . identifier[logger] . identifier[exception] () identifier[sys] . identifier[exit] ( literal[int] ) keyword[return] identifier[self] . identifier[socket]
def create_socket(self): """Create a socket for the daemon, depending on the directory location. Args: config_dir (str): The absolute path to the config directory used by the daemon. Returns: socket.socket: The daemon socket. Clients connect to this socket. """ socket_path = os.path.join(self.config_dir, 'pueue.sock') # Create Socket and exit with 1, if socket can't be created try: if os.path.exists(socket_path): os.remove(socket_path) # depends on [control=['if'], data=[]] self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(socket_path) self.socket.setblocking(0) self.socket.listen(0) # Set file permissions os.chmod(socket_path, stat.S_IRWXU) # depends on [control=['try'], data=[]] except Exception: self.logger.error("Daemon couldn't socket. Aborting") self.logger.exception() sys.exit(1) # depends on [control=['except'], data=[]] return self.socket
def stat_article_detail_list(self, page=1, start_date=str(date.today()+timedelta(days=-30)), end_date=str(date.today())): """ 获取图文分析数据 返回JSON示例 :: { "hasMore": true, // 说明是否可以增加 page 页码来获取数据 "data": [ { "index": [ "20,816", // 送达人数 "1,944", // 图文页阅读人数 "2,554", // 图文页阅读次数 "9.34%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "47", // 分享转发人数 "61", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-21", "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205104027_1\",\"Title\":\"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5feb\\u6765\\u5e26\\u6211\\u56de\\u5bb6\",\"RefDate\":\"20150121\",\"TargetUser\":\"20,816\",\"IntPageReadUser\":\"1,944\",\"IntPageReadCount\":\"2,554\",\"OriPageReadUser\":\"0\",\"OriPageReadCount\":\"0\",\"ShareUser\":\"47\",\"ShareCount\":\"61\",\"AddToFavUser\":\"1\",\"Conversion\":\"0%\",\"PageConversion\":\"9.34%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}", "id": "205104027_1", "title": "回家大作战 | 快来带我回家" }, { "index": [ "20,786", // 送达人数 "2,598", // 图文页阅读人数 "3,368", // 图文页阅读次数 "12.5%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "73", // 分享转发人数 "98", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-20", "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205066833_1\",\"Title\":\"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5982\\u4f55\\u4f18\\u96c5\\u5730\\u53bb\\u5f80\\u8f66\\u7ad9\\u548c\\u673a\\u573a\",\"RefDate\":\"20150120\",\"TargetUser\":\"20,786\",\"IntPageReadUser\":\"2,598\",\"IntPageReadCount\":\"3,368\",\"OriPageReadUser\":\"0\",\"OriPageReadCount\":\"0\",\"ShareUser\":\"73\",\"ShareCount\":\"98\",\"AddToFavUser\":\"1\",\"Conversion\":\"0%\",\"PageConversion\":\"12.5%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}", "id": "205066833_1", "title": "回家大作战 | 如何优雅地去往车站和机场" }, { "index": [ "20,745", // 送达人数 "1,355", // 图文页阅读人数 "1,839", // 图文页阅读次数 "6.53%", // (图文页阅读人数 / 送达人数) "145", // 原文页阅读人数 "184", // 原文页阅读次数 "10.7%", // (原文页阅读人数 / 图文页阅读人数) "48", // 分享转发人数 "64", // 分享转发次数 "5" // 微信收藏人数 ], "time": "2015-01-19", "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205028693_1\",\"Title\":\"\\u5145\\u7535\\u65f6\\u95f4 | \\u542c\\u542c\\u7535\\u53f0\\uff0c\\u4f18\\u96c5\\u5730\\u63d0\\u5347\\u5b66\\u4e60\\u6548\\u7387\",\"RefDate\":\"20150119\",\"TargetUser\":\"20,745\",\"IntPageReadUser\":\"1,355\",\"IntPageReadCount\":\"1,839\",\"OriPageReadUser\":\"145\",\"OriPageReadCount\":\"184\",\"ShareUser\":\"48\",\"ShareCount\":\"64\",\"AddToFavUser\":\"5\",\"Conversion\":\"10.7%\",\"PageConversion\":\"6.53%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}", "id": "205028693_1", "title": "充电时间 | 听听电台,优雅地提升学习效率" } ] } :param page: 页码 (由于腾讯接口限制,page 从 1 开始,3 条数据为 1 页) :param start_date: 开始时间,默认是今天-30天 (类型: str 格式示例: "2015-01-15") :param end_date: 结束时间,默认是今天 (类型: str 格式示例: "2015-02-01") :return: 返回的 JSON 数据,具体的各项内容解释参见上面的 JSON 返回示例 :raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据 """ self._init_plugin_token_appid() url = 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format( page=page, appid=self.__appid, token=self.__plugin_token, rnd=int(time.time()), start_date=start_date, end_date=end_date, ) headers = { 'x-requested-with': 'XMLHttpRequest', 'referer': 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format( page=page, appid=self.__appid, token=self.__plugin_token, rnd=int(time.time()), start_date=start_date, end_date=end_date, ), 'cookie': self.__cookies, } r = requests.get(url, headers=headers) if not re.search(r'wechat_token', self.__cookies): for cookie in r.cookies: self.__cookies += cookie.name + '=' + cookie.value + ';' try: data = json.loads(r.text) if data.get('is_session_expire'): raise NeedLoginError(r.text) message = json.dumps(data, ensure_ascii=False) except (KeyError, ValueError): raise NeedLoginError(r.text) return message
def function[stat_article_detail_list, parameter[self, page, start_date, end_date]]: constant[ 获取图文分析数据 返回JSON示例 :: { "hasMore": true, // 说明是否可以增加 page 页码来获取数据 "data": [ { "index": [ "20,816", // 送达人数 "1,944", // 图文页阅读人数 "2,554", // 图文页阅读次数 "9.34%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "47", // 分享转发人数 "61", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-21", "table_data": "{"fields":{"TargetUser":{"thText":"\u9001\u8fbe\u4eba\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\u56fe\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\u539f\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205104027_1","Title":"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5feb\u6765\u5e26\u6211\u56de\u5bb6","RefDate":"20150121","TargetUser":"20,816","IntPageReadUser":"1,944","IntPageReadCount":"2,554","OriPageReadUser":"0","OriPageReadCount":"0","ShareUser":"47","ShareCount":"61","AddToFavUser":"1","Conversion":"0%","PageConversion":"9.34%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\u9001\u8fbe\u4eba\u6570","rowSpan":2,"colSpan":1},{"thText":"\u56fe\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u539f\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u5206\u4eab\u8f6c\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\u4eba\u6570"},{"field":"IntPageReadCount","thText":"\u6b21\u6570"},{"field":"PageConversion","thText":"\u56fe\u6587\u8f6c\u5316\u7387"},{"field":"OriPageReadUser","thText":"\u4eba\u6570"},{"field":"OriPageReadCount","thText":"\u6b21\u6570"},{"field":"Conversion","thText":"\u539f\u6587\u8f6c\u5316\u7387"},{"field":"ShareUser","thText":"\u4eba\u6570"},{"field":"ShareCount","thText":"\u6b21\u6570"}]]}", "id": "205104027_1", "title": "回家大作战 | 快来带我回家" }, { "index": [ "20,786", // 送达人数 "2,598", // 图文页阅读人数 "3,368", // 图文页阅读次数 "12.5%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "73", // 分享转发人数 "98", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-20", "table_data": "{"fields":{"TargetUser":{"thText":"\u9001\u8fbe\u4eba\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\u56fe\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\u539f\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205066833_1","Title":"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5982\u4f55\u4f18\u96c5\u5730\u53bb\u5f80\u8f66\u7ad9\u548c\u673a\u573a","RefDate":"20150120","TargetUser":"20,786","IntPageReadUser":"2,598","IntPageReadCount":"3,368","OriPageReadUser":"0","OriPageReadCount":"0","ShareUser":"73","ShareCount":"98","AddToFavUser":"1","Conversion":"0%","PageConversion":"12.5%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\u9001\u8fbe\u4eba\u6570","rowSpan":2,"colSpan":1},{"thText":"\u56fe\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u539f\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u5206\u4eab\u8f6c\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\u4eba\u6570"},{"field":"IntPageReadCount","thText":"\u6b21\u6570"},{"field":"PageConversion","thText":"\u56fe\u6587\u8f6c\u5316\u7387"},{"field":"OriPageReadUser","thText":"\u4eba\u6570"},{"field":"OriPageReadCount","thText":"\u6b21\u6570"},{"field":"Conversion","thText":"\u539f\u6587\u8f6c\u5316\u7387"},{"field":"ShareUser","thText":"\u4eba\u6570"},{"field":"ShareCount","thText":"\u6b21\u6570"}]]}", "id": "205066833_1", "title": "回家大作战 | 如何优雅地去往车站和机场" }, { "index": [ "20,745", // 送达人数 "1,355", // 图文页阅读人数 "1,839", // 图文页阅读次数 "6.53%", // (图文页阅读人数 / 送达人数) "145", // 原文页阅读人数 "184", // 原文页阅读次数 "10.7%", // (原文页阅读人数 / 图文页阅读人数) "48", // 分享转发人数 "64", // 分享转发次数 "5" // 微信收藏人数 ], "time": "2015-01-19", "table_data": "{"fields":{"TargetUser":{"thText":"\u9001\u8fbe\u4eba\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\u56fe\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\u539f\u6587\u8f6c\u5316\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\u6b21\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205028693_1","Title":"\u5145\u7535\u65f6\u95f4 | \u542c\u542c\u7535\u53f0\uff0c\u4f18\u96c5\u5730\u63d0\u5347\u5b66\u4e60\u6548\u7387","RefDate":"20150119","TargetUser":"20,745","IntPageReadUser":"1,355","IntPageReadCount":"1,839","OriPageReadUser":"145","OriPageReadCount":"184","ShareUser":"48","ShareCount":"64","AddToFavUser":"5","Conversion":"10.7%","PageConversion":"6.53%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\u9001\u8fbe\u4eba\u6570","rowSpan":2,"colSpan":1},{"thText":"\u56fe\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u539f\u6587\u9875\u9605\u8bfb","colSpan":3},{"thText":"\u5206\u4eab\u8f6c\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\u5fae\u4fe1\u6536\u85cf\u4eba\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\u4eba\u6570"},{"field":"IntPageReadCount","thText":"\u6b21\u6570"},{"field":"PageConversion","thText":"\u56fe\u6587\u8f6c\u5316\u7387"},{"field":"OriPageReadUser","thText":"\u4eba\u6570"},{"field":"OriPageReadCount","thText":"\u6b21\u6570"},{"field":"Conversion","thText":"\u539f\u6587\u8f6c\u5316\u7387"},{"field":"ShareUser","thText":"\u4eba\u6570"},{"field":"ShareCount","thText":"\u6b21\u6570"}]]}", "id": "205028693_1", "title": "充电时间 | 听听电台,优雅地提升学习效率" } ] } :param page: 页码 (由于腾讯接口限制,page 从 1 开始,3 条数据为 1 页) :param start_date: 开始时间,默认是今天-30天 (类型: str 格式示例: "2015-01-15") :param end_date: 结束时间,默认是今天 (类型: str 格式示例: "2015-02-01") :return: 返回的 JSON 数据,具体的各项内容解释参见上面的 JSON 返回示例 :raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据 ] call[name[self]._init_plugin_token_appid, parameter[]] variable[url] assign[=] call[constant[http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1].format, parameter[]] variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e6da0>, <ast.Constant object at 0x7da20c6e7a60>, <ast.Constant object at 0x7da20c6e72e0>], [<ast.Constant object at 0x7da20c6e4bb0>, <ast.Call object at 0x7da20c6e7160>, <ast.Attribute object at 0x7da20c6e7f70>]] variable[r] assign[=] call[name[requests].get, parameter[name[url]]] if <ast.UnaryOp object at 0x7da20c6e66b0> begin[:] for taget[name[cookie]] in starred[name[r].cookies] begin[:] <ast.AugAssign object at 0x7da20c6e6080> <ast.Try object at 0x7da20c6e6740> return[name[message]]
keyword[def] identifier[stat_article_detail_list] ( identifier[self] , identifier[page] = literal[int] , identifier[start_date] = identifier[str] ( identifier[date] . identifier[today] ()+ identifier[timedelta] ( identifier[days] =- literal[int] )), identifier[end_date] = identifier[str] ( identifier[date] . identifier[today] ())): literal[string] identifier[self] . identifier[_init_plugin_token_appid] () identifier[url] = literal[string] . identifier[format] ( identifier[page] = identifier[page] , identifier[appid] = identifier[self] . identifier[__appid] , identifier[token] = identifier[self] . identifier[__plugin_token] , identifier[rnd] = identifier[int] ( identifier[time] . identifier[time] ()), identifier[start_date] = identifier[start_date] , identifier[end_date] = identifier[end_date] , ) identifier[headers] ={ literal[string] : literal[string] , literal[string] : literal[string] . identifier[format] ( identifier[page] = identifier[page] , identifier[appid] = identifier[self] . identifier[__appid] , identifier[token] = identifier[self] . identifier[__plugin_token] , identifier[rnd] = identifier[int] ( identifier[time] . identifier[time] ()), identifier[start_date] = identifier[start_date] , identifier[end_date] = identifier[end_date] , ), literal[string] : identifier[self] . identifier[__cookies] , } identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] ) keyword[if] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[self] . identifier[__cookies] ): keyword[for] identifier[cookie] keyword[in] identifier[r] . identifier[cookies] : identifier[self] . identifier[__cookies] += identifier[cookie] . identifier[name] + literal[string] + identifier[cookie] . identifier[value] + literal[string] keyword[try] : identifier[data] = identifier[json] . identifier[loads] ( identifier[r] . identifier[text] ) keyword[if] identifier[data] . identifier[get] ( literal[string] ): keyword[raise] identifier[NeedLoginError] ( identifier[r] . identifier[text] ) identifier[message] = identifier[json] . identifier[dumps] ( identifier[data] , identifier[ensure_ascii] = keyword[False] ) keyword[except] ( identifier[KeyError] , identifier[ValueError] ): keyword[raise] identifier[NeedLoginError] ( identifier[r] . identifier[text] ) keyword[return] identifier[message]
def stat_article_detail_list(self, page=1, start_date=str(date.today() + timedelta(days=-30)), end_date=str(date.today())): """ 获取图文分析数据 返回JSON示例 :: { "hasMore": true, // 说明是否可以增加 page 页码来获取数据 "data": [ { "index": [ "20,816", // 送达人数 "1,944", // 图文页阅读人数 "2,554", // 图文页阅读次数 "9.34%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "47", // 分享转发人数 "61", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-21", "table_data": "{"fields":{"TargetUser":{"thText":"\\u9001\\u8fbe\\u4eba\\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205104027_1","Title":"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5feb\\u6765\\u5e26\\u6211\\u56de\\u5bb6","RefDate":"20150121","TargetUser":"20,816","IntPageReadUser":"1,944","IntPageReadCount":"2,554","OriPageReadUser":"0","OriPageReadCount":"0","ShareUser":"47","ShareCount":"61","AddToFavUser":"1","Conversion":"0%","PageConversion":"9.34%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\\u9001\\u8fbe\\u4eba\\u6570","rowSpan":2,"colSpan":1},{"thText":"\\u56fe\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u539f\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u5206\\u4eab\\u8f6c\\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\\u4eba\\u6570"},{"field":"IntPageReadCount","thText":"\\u6b21\\u6570"},{"field":"PageConversion","thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387"},{"field":"OriPageReadUser","thText":"\\u4eba\\u6570"},{"field":"OriPageReadCount","thText":"\\u6b21\\u6570"},{"field":"Conversion","thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387"},{"field":"ShareUser","thText":"\\u4eba\\u6570"},{"field":"ShareCount","thText":"\\u6b21\\u6570"}]]}", "id": "205104027_1", "title": "回家大作战 | 快来带我回家" }, { "index": [ "20,786", // 送达人数 "2,598", // 图文页阅读人数 "3,368", // 图文页阅读次数 "12.5%", // (图文页阅读人数 / 送达人数) "0", // 原文页阅读人数 "0", // 原文页阅读次数 "0%", // (原文页阅读人数 / 图文页阅读人数) "73", // 分享转发人数 "98", // 分享转发次数 "1" // 微信收藏人数 ], "time": "2015-01-20", "table_data": "{"fields":{"TargetUser":{"thText":"\\u9001\\u8fbe\\u4eba\\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205066833_1","Title":"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5982\\u4f55\\u4f18\\u96c5\\u5730\\u53bb\\u5f80\\u8f66\\u7ad9\\u548c\\u673a\\u573a","RefDate":"20150120","TargetUser":"20,786","IntPageReadUser":"2,598","IntPageReadCount":"3,368","OriPageReadUser":"0","OriPageReadCount":"0","ShareUser":"73","ShareCount":"98","AddToFavUser":"1","Conversion":"0%","PageConversion":"12.5%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\\u9001\\u8fbe\\u4eba\\u6570","rowSpan":2,"colSpan":1},{"thText":"\\u56fe\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u539f\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u5206\\u4eab\\u8f6c\\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\\u4eba\\u6570"},{"field":"IntPageReadCount","thText":"\\u6b21\\u6570"},{"field":"PageConversion","thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387"},{"field":"OriPageReadUser","thText":"\\u4eba\\u6570"},{"field":"OriPageReadCount","thText":"\\u6b21\\u6570"},{"field":"Conversion","thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387"},{"field":"ShareUser","thText":"\\u4eba\\u6570"},{"field":"ShareCount","thText":"\\u6b21\\u6570"}]]}", "id": "205066833_1", "title": "回家大作战 | 如何优雅地去往车站和机场" }, { "index": [ "20,745", // 送达人数 "1,355", // 图文页阅读人数 "1,839", // 图文页阅读次数 "6.53%", // (图文页阅读人数 / 送达人数) "145", // 原文页阅读人数 "184", // 原文页阅读次数 "10.7%", // (原文页阅读人数 / 图文页阅读人数) "48", // 分享转发人数 "64", // 分享转发次数 "5" // 微信收藏人数 ], "time": "2015-01-19", "table_data": "{"fields":{"TargetUser":{"thText":"\\u9001\\u8fbe\\u4eba\\u6570","number":false,"colAlign":"center","needOrder":false,"precision":0},"IntPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"IntPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"PageConversion":{"thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"OriPageReadUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"OriPageReadCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"Conversion":{"thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387","number":true,"colAlign":"right","needOrder":false,"precision":"2"},"ShareUser":{"thText":"\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"ShareCount":{"thText":"\\u6b21\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0},"AddToFavUser":{"thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","number":true,"colAlign":"right","needOrder":false,"precision":0}},"data":[{"MsgId":"205028693_1","Title":"\\u5145\\u7535\\u65f6\\u95f4 | \\u542c\\u542c\\u7535\\u53f0\\uff0c\\u4f18\\u96c5\\u5730\\u63d0\\u5347\\u5b66\\u4e60\\u6548\\u7387","RefDate":"20150119","TargetUser":"20,745","IntPageReadUser":"1,355","IntPageReadCount":"1,839","OriPageReadUser":"145","OriPageReadCount":"184","ShareUser":"48","ShareCount":"64","AddToFavUser":"5","Conversion":"10.7%","PageConversion":"6.53%"}],"fixedRow":false,"cssSetting":{"":""},"complexHeader":[[{"field":"TargetUser","thText":"\\u9001\\u8fbe\\u4eba\\u6570","rowSpan":2,"colSpan":1},{"thText":"\\u56fe\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u539f\\u6587\\u9875\\u9605\\u8bfb","colSpan":3},{"thText":"\\u5206\\u4eab\\u8f6c\\u53d1","colSpan":2},{"field":"AddToFavUser","thText":"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570","rowSpan":2,"enable":true}],[{"field":"IntPageReadUser","thText":"\\u4eba\\u6570"},{"field":"IntPageReadCount","thText":"\\u6b21\\u6570"},{"field":"PageConversion","thText":"\\u56fe\\u6587\\u8f6c\\u5316\\u7387"},{"field":"OriPageReadUser","thText":"\\u4eba\\u6570"},{"field":"OriPageReadCount","thText":"\\u6b21\\u6570"},{"field":"Conversion","thText":"\\u539f\\u6587\\u8f6c\\u5316\\u7387"},{"field":"ShareUser","thText":"\\u4eba\\u6570"},{"field":"ShareCount","thText":"\\u6b21\\u6570"}]]}", "id": "205028693_1", "title": "充电时间 | 听听电台,优雅地提升学习效率" } ] } :param page: 页码 (由于腾讯接口限制,page 从 1 开始,3 条数据为 1 页) :param start_date: 开始时间,默认是今天-30天 (类型: str 格式示例: "2015-01-15") :param end_date: 结束时间,默认是今天 (类型: str 格式示例: "2015-02-01") :return: 返回的 JSON 数据,具体的各项内容解释参见上面的 JSON 返回示例 :raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据 """ self._init_plugin_token_appid() url = 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format(page=page, appid=self.__appid, token=self.__plugin_token, rnd=int(time.time()), start_date=start_date, end_date=end_date) headers = {'x-requested-with': 'XMLHttpRequest', 'referer': 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format(page=page, appid=self.__appid, token=self.__plugin_token, rnd=int(time.time()), start_date=start_date, end_date=end_date), 'cookie': self.__cookies} r = requests.get(url, headers=headers) if not re.search('wechat_token', self.__cookies): for cookie in r.cookies: self.__cookies += cookie.name + '=' + cookie.value + ';' # depends on [control=['for'], data=['cookie']] # depends on [control=['if'], data=[]] try: data = json.loads(r.text) if data.get('is_session_expire'): raise NeedLoginError(r.text) # depends on [control=['if'], data=[]] message = json.dumps(data, ensure_ascii=False) # depends on [control=['try'], data=[]] except (KeyError, ValueError): raise NeedLoginError(r.text) # depends on [control=['except'], data=[]] return message
def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str: """ Truncate a string and add an ellipsis (three dots) to the end if it was too long :param s: string to possibly truncate :param length: length to truncate the string to """ if len(s) > length: s = s[: length - 1] + '…' return s
def function[truncate, parameter[s, length]]: constant[ Truncate a string and add an ellipsis (three dots) to the end if it was too long :param s: string to possibly truncate :param length: length to truncate the string to ] if compare[call[name[len], parameter[name[s]]] greater[>] name[length]] begin[:] variable[s] assign[=] binary_operation[call[name[s]][<ast.Slice object at 0x7da1b07968f0>] + constant[…]] return[name[s]]
keyword[def] identifier[truncate] ( identifier[s] : identifier[str] , identifier[length] : identifier[int] = identifier[DEFAULT_CURTAIL] )-> identifier[str] : literal[string] keyword[if] identifier[len] ( identifier[s] )> identifier[length] : identifier[s] = identifier[s] [: identifier[length] - literal[int] ]+ literal[string] keyword[return] identifier[s]
def truncate(s: str, length: int=DEFAULT_CURTAIL) -> str: """ Truncate a string and add an ellipsis (three dots) to the end if it was too long :param s: string to possibly truncate :param length: length to truncate the string to """ if len(s) > length: s = s[:length - 1] + '…' # depends on [control=['if'], data=['length']] return s
def get_if_addr6(iff): """ Returns the main global unicast address associated with provided interface, in human readable form. If no global address is found, None is returned. """ return next((x[0] for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
def function[get_if_addr6, parameter[iff]]: constant[ Returns the main global unicast address associated with provided interface, in human readable form. If no global address is found, None is returned. ] return[call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b21aeaa0>, constant[None]]]]
keyword[def] identifier[get_if_addr6] ( identifier[iff] ): literal[string] keyword[return] identifier[next] (( identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[in6_getifaddr] () keyword[if] identifier[x] [ literal[int] ]== identifier[iff] keyword[and] identifier[x] [ literal[int] ]== identifier[IPV6_ADDR_GLOBAL] ), keyword[None] )
def get_if_addr6(iff): """ Returns the main global unicast address associated with provided interface, in human readable form. If no global address is found, None is returned. """ return next((x[0] for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
def confirm(self, question, default=False, true_answer_regex="(?i)^y"): """ Confirm a question with the user. """ return self._io.confirm(question, default, true_answer_regex)
def function[confirm, parameter[self, question, default, true_answer_regex]]: constant[ Confirm a question with the user. ] return[call[name[self]._io.confirm, parameter[name[question], name[default], name[true_answer_regex]]]]
keyword[def] identifier[confirm] ( identifier[self] , identifier[question] , identifier[default] = keyword[False] , identifier[true_answer_regex] = literal[string] ): literal[string] keyword[return] identifier[self] . identifier[_io] . identifier[confirm] ( identifier[question] , identifier[default] , identifier[true_answer_regex] )
def confirm(self, question, default=False, true_answer_regex='(?i)^y'): """ Confirm a question with the user. """ return self._io.confirm(question, default, true_answer_regex)
def adj_nodes_ali(ali_nodes): """Adjust details specific to AliCloud.""" for node in ali_nodes: node.cloud = "alicloud" node.cloud_disp = "AliCloud" node.private_ips = ip_to_str(node.extra['vpc_attributes']['private_ip_address']) node.public_ips = ip_to_str(node.public_ips) node.zone = node.extra['zone_id'] node.size = node.extra['instance_type'] if node.size.startswith('ecs.'): node.size = node.size[len('ecs.'):] return ali_nodes
def function[adj_nodes_ali, parameter[ali_nodes]]: constant[Adjust details specific to AliCloud.] for taget[name[node]] in starred[name[ali_nodes]] begin[:] name[node].cloud assign[=] constant[alicloud] name[node].cloud_disp assign[=] constant[AliCloud] name[node].private_ips assign[=] call[name[ip_to_str], parameter[call[call[name[node].extra][constant[vpc_attributes]]][constant[private_ip_address]]]] name[node].public_ips assign[=] call[name[ip_to_str], parameter[name[node].public_ips]] name[node].zone assign[=] call[name[node].extra][constant[zone_id]] name[node].size assign[=] call[name[node].extra][constant[instance_type]] if call[name[node].size.startswith, parameter[constant[ecs.]]] begin[:] name[node].size assign[=] call[name[node].size][<ast.Slice object at 0x7da1b26771f0>] return[name[ali_nodes]]
keyword[def] identifier[adj_nodes_ali] ( identifier[ali_nodes] ): literal[string] keyword[for] identifier[node] keyword[in] identifier[ali_nodes] : identifier[node] . identifier[cloud] = literal[string] identifier[node] . identifier[cloud_disp] = literal[string] identifier[node] . identifier[private_ips] = identifier[ip_to_str] ( identifier[node] . identifier[extra] [ literal[string] ][ literal[string] ]) identifier[node] . identifier[public_ips] = identifier[ip_to_str] ( identifier[node] . identifier[public_ips] ) identifier[node] . identifier[zone] = identifier[node] . identifier[extra] [ literal[string] ] identifier[node] . identifier[size] = identifier[node] . identifier[extra] [ literal[string] ] keyword[if] identifier[node] . identifier[size] . identifier[startswith] ( literal[string] ): identifier[node] . identifier[size] = identifier[node] . identifier[size] [ identifier[len] ( literal[string] ):] keyword[return] identifier[ali_nodes]
def adj_nodes_ali(ali_nodes): """Adjust details specific to AliCloud.""" for node in ali_nodes: node.cloud = 'alicloud' node.cloud_disp = 'AliCloud' node.private_ips = ip_to_str(node.extra['vpc_attributes']['private_ip_address']) node.public_ips = ip_to_str(node.public_ips) node.zone = node.extra['zone_id'] node.size = node.extra['instance_type'] if node.size.startswith('ecs.'): node.size = node.size[len('ecs.'):] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] return ali_nodes