code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
field_value = None
pattern = r'^' + field + r'=(.*)'
try:
    with open("/etc/armbian-release", 'r') as release_file:
        armbian = release_file.read().split('\n')
        for line in armbian:
            match = re.search(pattern, line)
            if match:
                field_value = match.group(1)
except FileNotFoundError:
    pass
return field_value
def get_armbian_release_field(self, field)
Search /etc/armbian-release, if it exists, for a field and return its value if found; otherwise return None.
2.481901
2.268967
1.093846
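A minimal, self-contained sketch of how this parser behaves; the file contents below are hypothetical and the standalone helper simply mirrors the method's logic.

import re

def get_armbian_release_field(field):
    # Standalone version of the method above; returns None when the file
    # or the field is absent.
    field_value = None
    pattern = r'^' + field + r'=(.*)'
    try:
        with open("/etc/armbian-release", 'r') as release_file:
            for line in release_file.read().split('\n'):
                match = re.search(pattern, line)
                if match:
                    field_value = match.group(1)
    except FileNotFoundError:
        pass
    return field_value

# Hypothetical /etc/armbian-release contents:
#   BOARD=bananapi
#   VERSION=5.38
# get_armbian_release_field('BOARD') would then return 'bananapi'.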
# Create X-Ray headers
xray_header = construct_xray_header(request.headers)
# Get name of service or generate a dynamic one from host
name = calculate_segment_name(request.headers['host'].split(':', 1)[0], xray_recorder)

sampling_req = {
    'host': request.headers['host'],
    'method': request.method,
    'path': request.path,
    'service': name,
}
sampling_decision = calculate_sampling_decision(
    trace_header=xray_header,
    recorder=xray_recorder,
    sampling_req=sampling_req,
)

# Start a segment
segment = xray_recorder.begin_segment(
    name=name,
    traceid=xray_header.root,
    parent_id=xray_header.parent,
    sampling=sampling_decision,
)

segment.save_origin_trace_header(xray_header)
# Store request metadata in the current segment
segment.put_http_meta(http.URL, str(request.url))
segment.put_http_meta(http.METHOD, request.method)

if 'User-Agent' in request.headers:
    segment.put_http_meta(http.USER_AGENT, request.headers['User-Agent'])

if 'X-Forwarded-For' in request.headers:
    segment.put_http_meta(http.CLIENT_IP, request.headers['X-Forwarded-For'])
    segment.put_http_meta(http.X_FORWARDED_FOR, True)
elif 'remote_addr' in request.headers:
    segment.put_http_meta(http.CLIENT_IP, request.headers['remote_addr'])
else:
    segment.put_http_meta(http.CLIENT_IP, request.remote)

try:
    # Call next middleware or request handler
    response = await handler(request)
except HTTPException as exc:
    # Non 2XX responses are raised as HTTPExceptions
    response = exc
    raise
except Exception as err:
    # Store exception information including the stacktrace to the segment
    response = None
    segment.put_http_meta(http.STATUS, 500)
    stack = stacktrace.get_stacktrace(limit=xray_recorder.max_trace_back)
    segment.add_exception(err, stack)
    raise
finally:
    if response is not None:
        segment.put_http_meta(http.STATUS, response.status)
        if 'Content-Length' in response.headers:
            length = int(response.headers['Content-Length'])
            segment.put_http_meta(http.CONTENT_LENGTH, length)
        header_str = prepare_response_header(xray_header, segment)
        response.headers[http.XRAY_HEADER] = header_str
    xray_recorder.end_segment()

return response
async def middleware(request, handler)
Main middleware function; handles all of the X-Ray segment logic.
2.547749
2.483724
1.025778
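A minimal wiring sketch, assuming the middleware above ships as aws_xray_sdk.ext.aiohttp.middleware.middleware; names and import paths are conventional for the SDK but not verified against a specific version.

from aiohttp import web

from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.ext.aiohttp.middleware import middleware

xray_recorder.configure(service='my-aiohttp-app')  # fallback segment name

async def handle(request):
    return web.Response(text='ok')

app = web.Application(middlewares=[middleware])
app.router.add_get('/', handle)
# web.run_app(app) would serve with every request traced.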
return "%s%s%s%s%s" % (TraceId.VERSION, TraceId.DELIMITER, format(self.start_time, 'x'), TraceId.DELIMITER, self.__number)
def to_id(self)
Convert TraceId object to a string.
6.949367
5.122518
1.356631
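For reference, assuming TraceId.VERSION is 1 and TraceId.DELIMITER is '-', the string produced here has the documented X-Ray trace id shape: version, start time as lowercase hex, and a random number.

import time

# to_id() output shape: '<version>-<start time as lowercase hex>-<number>',
# e.g. '1-5759e988-bd862e3fe1be46a994272793' (hex values illustrative).
start_time = int(time.time())
trace_id = '1-%s-%s' % (format(start_time, 'x'), 'bd862e3fe1be46a994272793')
print(trace_id)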
if getattr(httplib, PATCH_FLAG, False):
    return
# we set an attribute to avoid multiple wrapping
setattr(httplib, PATCH_FLAG, True)

wrapt.wrap_function_wrapper(
    httplib_client_module,
    'HTTPConnection._send_request',
    _send_request
)

wrapt.wrap_function_wrapper(
    httplib_client_module,
    'HTTPConnection.getresponse',
    _xray_traced_http_getresponse
)

wrapt.wrap_function_wrapper(
    httplib_client_module,
    'HTTPResponse.read',
    _xray_traced_http_client_read
)
def patch()
Patch the built-in `urllib/httplib/http.client` methods for tracing.
3.26999
2.918393
1.120476
_PATCHED_MODULES.discard('httplib')
setattr(httplib, PATCH_FLAG, False)
# _send_request encapsulates putrequest, putheader[s], and endheaders
unwrap(httplib.HTTPConnection, '_send_request')
unwrap(httplib.HTTPConnection, 'getresponse')
unwrap(httplib.HTTPResponse, 'read')
def unpatch()
Unpatch any previously patched modules. This operation is idempotent.
7.205779
7.32867
0.983232
task = asyncio.Task(coro, loop=loop)
if task._source_traceback:  # flake8: noqa
    del task._source_traceback[-1]  # flake8: noqa

# Share context with new task if possible
current_task = asyncio.Task.current_task(loop=loop)
if current_task is not None and hasattr(current_task, 'context'):
    setattr(task, 'context', current_task.context)

return task
def task_factory(loop, coro)
Task factory function. Closely mirrors the logic inside asyncio.BaseEventLoop.create_task. If there is a current task and that task has a context, share the context with the new task.
2.618603
2.628238
0.996334
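A hedged usage sketch, assuming task_factory above is in scope and an asyncio version where Task.current_task(loop=...) still exists.

import asyncio

loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)  # every create_task() now shares context

async def child():
    await asyncio.sleep(0)

async def parent():
    # This task is built by task_factory; if the parent task carries a
    # `context` attribute, the child inherits it.
    await loop.create_task(child())

loop.run_until_complete(parent())
loop.close()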
if sampling_req is None:
    return False

host = sampling_req.get('host', None)
method = sampling_req.get('method', None)
path = sampling_req.get('path', None)
service = sampling_req.get('service', None)
service_type = sampling_req.get('service_type', None)

return (not host or wildcard_match(self._host, host)) \
    and (not method or wildcard_match(self._method, method)) \
    and (not path or wildcard_match(self._path, path)) \
    and (not service or wildcard_match(self._service, service)) \
    and (not service_type or wildcard_match(self._service_type, service_type))
def match(self, sampling_req)
Determines whether or not this sampling rule applies to the incoming request based on some of the request's parameters. Any ``None`` parameter provided will be considered an implicit match.
1.681175
1.621491
1.036808
with self._lock:
    stats = {
        'request_count': self.request_count,
        'borrow_count': self.borrow_count,
        'sampled_count': self.sampled_count,
    }
    self._reset_statistics()
    return stats
def snapshot_statistics(self)
Take a snapshot of request/borrow/sampled count for reporting back to X-Ray back-end by ``TargetPoller`` and reset those counters.
4.443988
2.408727
1.844953
with self._lock:
    self._request_count = rule.request_count
    self._borrow_count = rule.borrow_count
    self._sampled_count = rule.sampled_count
    self._reservoir = rule.reservoir
    rule.reservoir = None
def merge(self, rule)
Migrate all stateful attributes from the old rule
4.477679
4.21191
1.0631
with self._lock:
    return self._borrow_or_take(now, can_borrow)
def borrow_or_take(self, now, can_borrow)
Decide whether to borrow or take one quota from the reservoir. Return ``False`` if it can neither borrow nor take. This method is thread-safe.
4.07251
3.148052
1.293661
if quota is not None:
    self._quota = quota
if TTL is not None:
    self._TTL = TTL
if interval is not None:
    self._report_interval = interval / 10
def load_quota(self, quota, TTL, interval)
Load a new quota with a TTL. If the input is None, the reservoir will continue using the old quota until it expires or a non-None quota/TTL arrives in a future load.
2.953205
3.045338
0.969746
self._check_ended()
if end_time:
    self.end_time = end_time
else:
    self.end_time = time.time()
self.in_progress = False
def close(self, end_time=None)
Close the trace entity by setting `end_time` and flipping the in_progress flag to False. :param int end_time: epoch in seconds. If not specified, the current time will be used.
3.079129
3.027952
1.016901
self._check_ended()
subsegment.parent_id = self.id
self.subsegments.append(subsegment)
def add_subsegment(self, subsegment)
Add input subsegment as a child subsegment.
4.03246
3.496565
1.153263
self._check_ended()

if value is None:
    return

if key == http.STATUS:
    if isinstance(value, string_types):
        value = int(value)
    self.apply_status_code(value)

if key in http.request_keys:
    if 'request' not in self.http:
        self.http['request'] = {}
    self.http['request'][key] = value
elif key in http.response_keys:
    if 'response' not in self.http:
        self.http['response'] = {}
    self.http['response'][key] = value
else:
    log.warning("ignoring unsupported key %s in http meta.", key)
def put_http_meta(self, key, value)
Add http related metadata. :param str key: currently supported keys are: url, method, user_agent, client_ip, status and content_length. :param value: status and content_length are int; the other supported keys take a string.
2.618772
2.718283
0.963392
self._check_ended()

if not isinstance(key, string_types):
    log.warning("ignoring non string type annotation key with type %s.", type(key))
    return

if not isinstance(value, annotation_value_types):
    log.warning("ignoring unsupported annotation value type %s.", type(value))
    return

if any(character not in _valid_annotation_key_characters for character in key):
    log.warning("ignoring annotation with unsupported characters in key: '%s'.", key)
    return

self.annotations[key] = value
def put_annotation(self, key, value)
Annotate segment or subsegment with a key-value pair. Annotations will be indexed for later search query. :param str key: annotation key :param object value: annotation value. Any type other than string/number/bool will be dropped
3.911125
4.121302
0.949002
self._check_ended()

if not isinstance(namespace, string_types):
    log.warning("ignoring non string type metadata namespace")
    return

if namespace.startswith('AWS.'):
    log.warning("Prefix 'AWS.' is reserved, drop metadata with namespace %s", namespace)
    return

if self.metadata.get(namespace, None):
    self.metadata[namespace][key] = value
else:
    self.metadata[namespace] = {key: value}
def put_metadata(self, key, value, namespace='default')
Add metadata to segment or subsegment. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string
4.526893
4.140889
1.093218
self._check_ended()
if not status_code:
    return

if status_code >= 500:
    self.add_fault_flag()
elif status_code == 429:
    self.add_throttle_flag()
    self.add_error_flag()
elif status_code >= 400:
    self.add_error_flag()
def apply_status_code(self, status_code)
When a trace entity is generated under the http context, the status code will affect this entity's fault/error/throttle flags. Flip these flags based on status code.
3.324056
2.429614
1.368142
self._check_ended()
self.add_fault_flag()

if hasattr(exception, '_recorded'):
    setattr(self, 'cause', getattr(exception, '_cause_id'))
    return

exceptions = []
exceptions.append(Throwable(exception, stack, remote))

self.cause['exceptions'] = exceptions
self.cause['working_directory'] = os.getcwd()
def add_exception(self, exception, stack, remote=False)
Add an exception to trace entities. :param Exception exception: the caught exception. :param list stack: the output from the python built-in `traceback.extract_stack()`. :param bool remote: If False it means it's a client error rather than a downstream service error.
8.297276
9.080528
0.913744
try:
    return jsonpickle.encode(self, unpicklable=False)
except Exception:
    log.exception("got an exception during serialization")
def serialize(self)
Serialize to JSON document that can be accepted by the X-Ray backend service. It uses jsonpickle to perform serialization.
4.96028
4.085402
1.214147
if not self.parent_id:
    del properties['parent_id']
if not self.subsegments:
    del properties['subsegments']
if not self.aws:
    del properties['aws']
if not self.http:
    del properties['http']
if not self.cause:
    del properties['cause']
if not self.annotations:
    del properties['annotations']
if not self.metadata:
    del properties['metadata']
properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None)
del properties['sampled']
def _delete_empty_properties(self, properties)
Delete empty properties before serialization to avoid extra keys with empty values in the output json.
3.68491
3.42385
1.076247
global settings

setting, value = kwargs['setting'], kwargs['value']
if setting == XRAY_NAMESPACE:
    settings = XRaySettings(value)
def reload_settings(*args, **kwargs)
Reload X-Ray user settings upon Django server hot restart
6.671116
5.027569
1.326907
super(Segment, self).add_subsegment(subsegment)
self.increment()
def add_subsegment(self, subsegment)
Add input subsegment as a child subsegment and increment reference counter and total subsegments counter.
6.343923
4.174916
1.519533
super(Segment, self).remove_subsegment(subsegment)
self.decrement_subsegments_size()
def remove_subsegment(self, subsegment)
Remove the reference to the input subsegment.
6.014486
5.446035
1.104379
super(Segment, self)._check_ended()
self.user = user
def set_user(self, user)
Set the user of a segment. One segment can only have one user. The user is indexed and can be queried later.
22.018892
12.324076
1.786657
if not self.aws.get('xray', None):
    self.aws['xray'] = {}
self.aws['xray']['sampling_rule_name'] = rule_name
def set_rule_name(self, rule_name)
Add the matched centralized sampling rule name if a segment is sampled because of that rule. This method should be only used by the recorder.
4.340129
4.267535
1.017011
if not entity:
    return

if hasattr(entity, 'type') and entity.type == 'subsegment':
    header = entity.parent_segment.get_origin_trace_header()
else:
    header = entity.get_origin_trace_header()
data = header.data if header else None

to_insert = TraceHeader(
    root=entity.trace_id,
    parent=entity.id,
    sampled=entity.sampled,
    data=data,
)
value = to_insert.to_header_str()
headers[http.XRAY_HEADER] = value
def inject_trace_header(headers, entity)
Extract the trace id, entity id and sampling decision from the input entity and inject this information into headers. :param dict headers: http headers to inject :param Entity entity: trace entity from which the trace header value is generated.
4.560088
4.337383
1.051345
if trace_header.sampled is not None and trace_header.sampled != '?':
    return trace_header.sampled
elif not recorder.sampling:
    return 1
else:
    decision = recorder.sampler.should_trace(sampling_req)
    return decision if decision else 0
def calculate_sampling_decision(trace_header, recorder, sampling_req)
Return 1 or the matched rule name if the request should be sampled and 0 if it should not. The sampling decision coming from ``trace_header`` always has the highest precedence. If the ``trace_header`` doesn't contain a sampling decision, it checks whether sampling is enabled in the recorder. If not enabled, it returns 1. Otherwise it uses the user-defined sampling rules to decide.
5.597052
4.572433
1.224086
header_str = headers.get(http.XRAY_HEADER) or headers.get(http.ALT_XRAY_HEADER)
if header_str:
    return TraceHeader.from_header_str(header_str)
else:
    return TraceHeader()
def construct_xray_header(headers)
Construct a ``TraceHeader`` object from the dictionary headers of the incoming request. This method should always return a ``TraceHeader`` object regardless of the tracing header's presence in the incoming request.
4.518916
3.582501
1.261386
if recorder.dynamic_naming:
    return recorder.dynamic_naming.get_name(host_name)
else:
    return recorder.service
def calculate_segment_name(host_name, recorder)
Returns the segment name based on recorder configuration and input host name. This is a helper generally used in web framework middleware where a host name is available from incoming request's headers.
5.896443
5.348003
1.10255
if origin_header and origin_header.sampled == '?':
    new_header = TraceHeader(root=segment.trace_id, sampled=segment.sampled)
else:
    new_header = TraceHeader(root=segment.trace_id)

return new_header.to_header_str()
def prepare_response_header(origin_header, segment)
Prepare a trace header to be inserted into response based on original header and the request segment.
4.760053
4.324334
1.10076
s1 = first_cap_re.sub(r'\1_\2', name)
# handle acronym words
return all_cap_re.sub(r'\1_\2', s1).lower()
def to_snake_case(name)
Convert the input string to a snake-cased string.
3.977476
4.043583
0.983651
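The two module-level regexes this helper relies on are not shown in the row; a plausible reconstruction (the classic camel-case-splitting pair, an assumption here) makes the snippet self-contained.

import re

# Assumed definitions for the regexes used above.
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')

def to_snake_case(name):
    s1 = first_cap_re.sub(r'\1_\2', name)
    # handle acronym words
    return all_cap_re.sub(r'\1_\2', s1).lower()

print(to_snake_case('HTTPResponseCode'))  # http_response_code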
f = getattr(obj, attr, None)
if f and isinstance(f, wrapt.ObjectProxy) and hasattr(f, '__wrapped__'):
    setattr(obj, attr, f.__wrapped__)
def unwrap(obj, attr)
Will unwrap a `wrapt` attribute :param obj: base object :param attr: attribute on `obj` to unwrap
2.96675
3.093943
0.95889
if hasattr(botocore.client, '_xray_enabled'):
    return
setattr(botocore.client, '_xray_enabled', True)

wrapt.wrap_function_wrapper(
    'botocore.client',
    'BaseClient._make_api_call',
    _xray_traced_botocore,
)

wrapt.wrap_function_wrapper(
    'botocore.endpoint',
    'Endpoint.prepare_request',
    inject_header,
)
def patch()
Patch botocore client so it generates subsegments when calling AWS services.
3.103131
2.838586
1.093196
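A usage sketch assuming the SDK's standard entry points (patch/patch_all and xray_recorder in aws_xray_sdk.core); the patch() above is what ultimately runs for the 'botocore' target.

import boto3

from aws_xray_sdk.core import patch, xray_recorder

patch(('botocore',))  # or patch_all() to patch every supported library

xray_recorder.begin_segment('botocore-demo')
s3 = boto3.client('s3')
# Each API call now produces a subsegment on the current segment.
s3.list_buckets()
xray_recorder.end_segment()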
seg_name = name or self.service
if not seg_name:
    raise SegmentNameMissingException("Segment name is required.")

# Sampling decision is None if not sampled.
# In a sampled case it could be either a string or 1
# depending on if centralized or local sampling rule takes effect.
decision = True

# To disable the recorder, we set the sampling decision to always be false.
# This way, when segments are generated, they become dummy segments and are ultimately never sent.
# The call to self._sampler.should_trace() is never called either so the poller threads are never started.
if not global_sdk_config.sdk_enabled():
    sampling = 0

# we respect the input sampling decision
# regardless of recorder configuration.
if sampling == 0:
    decision = False
elif sampling:
    decision = sampling
elif self.sampling:
    decision = self._sampler.should_trace()

if not decision:
    segment = DummySegment(seg_name)
else:
    segment = Segment(name=seg_name, traceid=traceid, parent_id=parent_id)
    self._populate_runtime_context(segment, decision)

self.context.put_segment(segment)

return segment
def begin_segment(self, name=None, traceid=None, parent_id=None, sampling=None)
Begin a segment on the current thread and return it. The recorder only keeps one segment at a time. Creating a second one without closing the existing one will overwrite it. :param str name: the name of the segment :param str traceid: trace id of the segment :param int sampling: 0 means not sampled, 1 means sampled
7.49618
7.819087
0.958703
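A minimal sketch of manual segment management with the recorder above; the service name is illustrative and the default emitter is assumed to reach a local X-Ray daemon.

from aws_xray_sdk.core import xray_recorder

xray_recorder.configure(service='demo-service')

segment = xray_recorder.begin_segment('demo-segment')
segment.put_annotation('user_id', '12345')  # indexed for search
xray_recorder.end_segment()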
self.context.end_segment(end_time)
segment = self.current_segment()
if segment and segment.ready_to_send():
    self._send_segment()
def end_segment(self, end_time=None)
End the current segment and send it to the X-Ray daemon if it is ready to send. Ready means the segment and all of its subsegments are closed. :param float end_time: segment completion time in unix epoch seconds.
5.18889
5.162652
1.005082
entity = self.get_trace_entity()
if self._is_subsegment(entity):
    return entity.parent_segment
else:
    return entity
def current_segment(self)
Return the currently active segment. In a multithreading environment, this will make sure the segment returned is the one created by the same thread.
7.206015
6.230449
1.15658
segment = self.current_segment()
if not segment:
    log.warning("No segment found, cannot begin subsegment %s." % name)
    return None

if not segment.sampled:
    subsegment = DummySubsegment(segment, name)
else:
    subsegment = Subsegment(name, namespace, segment)

self.context.put_subsegment(subsegment)
return subsegment
def begin_subsegment(self, name, namespace='local')
Begin a new subsegment. If there is an open subsegment, the newly created subsegment will be the child of the latest open subsegment. If not, it will be the child of the current open segment. :param str name: the name of the subsegment. :param str namespace: currently can only be 'local', 'remote' or 'aws'.
3.652588
3.954668
0.923614
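A hedged sketch pairing the subsegment API with the segment API above; do_work is a hypothetical stand-in workload.

from aws_xray_sdk.core import xray_recorder

def do_work():
    return sum(range(1000))  # hypothetical stand-in workload

xray_recorder.begin_segment('demo-segment')
subsegment = xray_recorder.begin_subsegment('expensive-call', namespace='local')
try:
    do_work()
finally:
    xray_recorder.end_subsegment()
    xray_recorder.end_segment()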
if not self.context.end_subsegment(end_time):
    return

# if segment is already close, we check if we can send entire segment
# otherwise we check if we need to stream some subsegments
if self.current_segment().ready_to_send():
    self._send_segment()
else:
    self.stream_subsegments()
def end_subsegment(self, end_time=None)
End the current active subsegment. If it is the last one open under its parent segment, the entire segment will be sent. :param float end_time: subsegment completion time in unix epoch seconds.
7.094139
7.303096
0.971388
entity = self.get_trace_entity()
if entity and entity.sampled:
    entity.put_annotation(key, value)
def put_annotation(self, key, value)
Annotate current active trace entity with a key-value pair. Annotations will be indexed for later search query. :param str key: annotation key :param object value: annotation value. Any type other than string/number/bool will be dropped
7.247387
7.051033
1.027848
entity = self.get_trace_entity()
if entity and entity.sampled:
    entity.put_metadata(key, value, namespace)
def put_metadata(self, key, value, namespace='default')
Add metadata to the current active trace entity. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string
7.572202
6.357509
1.191064
segment = self.current_segment()
if self.streaming.is_eligible(segment):
    self.streaming.stream(segment, self._stream_subsegment_out)
def stream_subsegments(self)
Stream all closed subsegments to the daemon and remove the reference to the parent segment. No-op for an unsampled segment.
9.087377
8.165819
1.112855
segment = self.current_segment()
if not segment:
    return

if segment.sampled:
    self.emitter.send_entity(segment)
self.clear_trace_entities()
def _send_segment(self)
Send the current segment to X-Ray daemon if it is present and sampled, then clean up context storage. The emitter will handle failures.
10.967988
6.214489
1.764906
return (not host or wildcard_match(self.host, host)) \
    and (not method or wildcard_match(self.method, method)) \
    and (not path or wildcard_match(self.path, path))
def applies(self, host, method, path)
Determines whether or not this sampling rule applies to the incoming request based on some of the request's parameters. Any None parameters provided will be considered an implicit match.
2.358289
2.309316
1.021207
if not settings.AWS_XRAY_TRACING_NAME:
    raise SegmentNameMissingException('Segment name is required.')

xray_recorder.configure(
    daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
    sampling=settings.SAMPLING,
    sampling_rules=settings.SAMPLING_RULES,
    context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
    plugins=settings.PLUGINS,
    service=settings.AWS_XRAY_TRACING_NAME,
    dynamic_naming=settings.DYNAMIC_NAMING,
    streaming_threshold=settings.STREAMING_THRESHOLD,
    max_trace_back=settings.MAX_TRACE_BACK,
    stream_sql=settings.STREAM_SQL,
)

if settings.PATCH_MODULES:
    if settings.AUTO_PATCH_PARENT_SEGMENT_NAME is not None:
        with xray_recorder.in_segment(settings.AUTO_PATCH_PARENT_SEGMENT_NAME):
            patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
    else:
        patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)

# if turned on subsegment will be generated on
# built-in database and template rendering
if settings.AUTO_INSTRUMENT:
    try:
        patch_db()
    except Exception:
        log.debug('failed to patch Django built-in database')
    try:
        patch_template()
    except Exception:
        log.debug('failed to patch Django built-in template engine')
def ready(self)
Configure the global X-Ray recorder based on Django settings under the XRAY_RECORDER namespace. This method could be called twice during server startup because of the base command and the reload command, so it must be idempotent.
3.604891
3.529072
1.021484
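A hedged settings.py sketch built from the keys this app config reads (the XRAY_RECORDER namespace matches the docstring above); values are illustrative and defaults vary by SDK version.

XRAY_RECORDER = {
    'AWS_XRAY_TRACING_NAME': 'my-django-app',  # required, else SegmentNameMissingException
    'AWS_XRAY_DAEMON_ADDRESS': '127.0.0.1:2000',
    'SAMPLING': True,
    'AUTO_INSTRUMENT': True,  # also patch Django DB queries and template rendering
    'PATCH_MODULES': ['botocore'],
}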
if not global_sdk_config.sdk_enabled():
    return

with self._lock:
    if not self._started:
        self._rule_poller.start()
        self._target_poller.start()
        self._started = True
def start(self)
Start the rule poller and target poller once the X-Ray daemon address and context manager are in place.
6.69452
3.981416
1.681442
if not global_sdk_config.sdk_enabled():
    return False

if not self._started:
    self.start()  # only front-end that actually uses the sampler spawns poller threads

now = int(time.time())
if sampling_req and not sampling_req.get('service_type', None):
    sampling_req['service_type'] = self._origin
elif sampling_req is None:
    sampling_req = {'service_type': self._origin}

matched_rule = self._cache.get_matched_rule(sampling_req, now)

if matched_rule:
    log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
    return self._process_matched_rule(matched_rule, now)
else:
    log.info('No effective centralized sampling rule match. Fallback to local rules.')
    return self._local_sampler.should_trace(sampling_req)
def should_trace(self, sampling_req=None)
Return the matched sampling rule name if the sampler finds one and decides to sample. If no sampling rule matches, it falls back to the local sampler's ``should_trace`` implementation. All optional arguments are extracted from incoming requests by the X-Ray middleware to perform path-based sampling.
5.131562
4.671525
1.098477
self._connector.setup_xray_client(
    ip=daemon_config.tcp_ip,
    port=daemon_config.tcp_port,
    client=self.xray_client,
)

self._connector.context = context
self._origin = origin
def load_settings(self, daemon_config, context, origin=None)
The pollers depend on the context manager of the X-Ray recorder. They will respect the customer-specified X-Ray client to poll sampling rules/targets; otherwise they fall back to using the same X-Ray daemon as the emitter.
6.716172
5.206697
1.28991
import pynamodb

if hasattr(botocore.vendored.requests.sessions, '_xray_enabled'):
    return
setattr(botocore.vendored.requests.sessions, '_xray_enabled', True)

wrapt.wrap_function_wrapper(
    'botocore.vendored.requests.sessions',
    'Session.send',
    _xray_traced_pynamodb,
)
def patch()
Patch PynamoDB so it generates subsegments when calling DynamoDB.
3.950928
3.549209
1.113186
entity = self.get_trace_entity()
if not entity:
    log.warning("No segment to end")
    return
if self._is_subsegment(entity):
    entity.parent_segment.close(end_time)
else:
    entity.close(end_time)
def end_segment(self, end_time=None)
End the current active segment. :param int end_time: epoch in seconds. If not specified, the current system time will be used.
4.579146
5.263142
0.87004
entity = self.get_trace_entity()
if not entity:
    log.warning("Active segment or subsegment not found. Discarded %s." % subsegment.name)
    return

entity.add_subsegment(subsegment)
self._local.entities.append(subsegment)
def put_subsegment(self, subsegment)
Store the subsegment created by ``xray_recorder`` to the context. If you put a new subsegment while there is already an open subsegment, the new subsegment becomes the child of the existing subsegment.
6.887415
6.360003
1.082926
subsegment = self.get_trace_entity()
if self._is_subsegment(subsegment):
    subsegment.close(end_time)
    self._local.entities.pop()
    return True
else:
    log.warning("No subsegment to end.")
    return False
def end_subsegment(self, end_time=None)
End the current active subsegment. Return False if there is no subsegment to end. :param int end_time: epoch in seconds. If not specified, the current system time will be used.
5.229008
5.698328
0.917639
if not getattr(self._local, 'entities', None):
    return self.handle_context_missing()

return self._local.entities[-1]
def get_trace_entity(self)
Return the current trace entity (segment/subsegment). If there is none, it behaves based on the pre-defined ``context_missing`` strategy.
11.724184
6.867766
1.707132
if self.context_missing == 'RUNTIME_ERROR':
    log.error(MISSING_SEGMENT_MSG)
    raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
else:
    log.error(MISSING_SEGMENT_MSG)
def handle_context_missing(self)
Called whenever there is no trace entity to access or mutate.
5.723638
5.326294
1.0746
if not os.getenv(LAMBDA_TASK_ROOT_KEY):
    return None

try:
    os.mkdir(TOUCH_FILE_DIR)
except OSError:
    log.debug('directory %s already exists', TOUCH_FILE_DIR)

try:
    f = open(TOUCH_FILE_PATH, 'w+')
    f.close()
    # utime force second parameter in python2.7
    os.utime(TOUCH_FILE_PATH, None)
except (IOError, OSError):
    log.warning("Unable to write to %s. Failed to signal SDK initialization." % TOUCH_FILE_PATH)

return LambdaContext()
def check_in_lambda()
Return None if SDK is not loaded in AWS Lambda worker. Otherwise drop a touch file and return a lambda context.
4.459353
3.777045
1.180646
current_entity = self.get_trace_entity()

if not self._is_subsegment(current_entity) and current_entity.initializing:
    if global_sdk_config.sdk_enabled():
        log.warning("Subsegment %s discarded due to Lambda worker still initializing" % subsegment.name)
    return

current_entity.add_subsegment(subsegment)
self._local.entities.append(subsegment)
def put_subsegment(self, subsegment)
Refresh the facade segment every time this function is invoked to prevent a new subsegment from being attached to a leaked segment/subsegment.
7.491597
7.311981
1.024565
header_str = os.getenv(LAMBDA_TRACE_HEADER_KEY)
trace_header = TraceHeader.from_header_str(header_str)
if not global_sdk_config.sdk_enabled():
    trace_header._sampled = False

segment = getattr(self._local, 'segment', None)
if segment:
    # Ensure customers don't have leaked subsegments across invocations
    if not trace_header.root or trace_header.root == segment.trace_id:
        return
    else:
        self._initialize_context(trace_header)
else:
    self._initialize_context(trace_header)
def _refresh_context(self)
Get the current facade segment. To prevent resource leaking in the Lambda worker, every time there is a segment present we compare its trace id to the current environment variables. If it is different, we create a new facade segment and clean up stored subsegments.
6.10194
4.944127
1.234179
sampled = None
if not global_sdk_config.sdk_enabled():
    # Force subsequent subsegments to be disabled and turned into DummySegments.
    sampled = False
elif trace_header.sampled == 0:
    sampled = False
elif trace_header.sampled == 1:
    sampled = True

segment = FacadeSegment(
    name='facade',
    traceid=trace_header.root,
    entityid=trace_header.parent,
    sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', [])
def _initialize_context(self, trace_header)
Create a facade segment based on environment variables set by AWS Lambda and initialize storage for subsegments.
7.104588
5.438732
1.306295
# Environment Variables take precedence over hardcoded configurations.
if cls.XRAY_ENABLED_KEY in os.environ:
    cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
else:
    if type(value) == bool:
        cls.__SDK_ENABLED = value
    else:
        cls.__SDK_ENABLED = True
        log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...")
def set_sdk_enabled(cls, value)
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set; otherwise sets the enabled flag to the value of the environment variable. If the env variable is an invalid string boolean, it will default to true. :param bool value: flag to set whether the SDK is enabled or disabled. The environment variable AWS_XRAY_SDK_ENABLED overrides the argument value.
4.570665
4.427252
1.032393
our_args = list(copy.copy(args))
if len(our_args) == 2 and isinstance(our_args[1], (XRayTracedConn, XRayTracedCursor)):
    our_args[1] = our_args[1].__wrapped__

return wrapped(*our_args, **kwargs)
def _xray_register_type_fix(wrapped, instance, args, kwargs)
Send the actual connection or cursor to register type.
4.136135
3.52402
1.173698
if not header:
    return cls()

try:
    params = header.strip().split(HEADER_DELIMITER)
    header_dict = {}
    data = {}

    for param in params:
        entry = param.split('=')
        key = entry[0]
        if key in (ROOT, PARENT, SAMPLE):
            header_dict[key] = entry[1]
        # Ignore any "Self=" trace ids injected from ALB.
        elif key != SELF:
            data[key] = entry[1]

    return cls(
        root=header_dict.get(ROOT, None),
        parent=header_dict.get(PARENT, None),
        sampled=header_dict.get(SAMPLE, None),
        data=data,
    )
except Exception:
    log.warning("malformed tracing header %s, ignore.", header)
    return cls()
def from_header_str(cls, header)
Create a TraceHeader object from a tracing header string extracted from an incoming http request's headers.
4.31268
4.05944
1.062383
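An illustration of the parse/serialize round trip, assuming the usual header constants (ROOT='Root', PARENT='Parent', SAMPLE='Sampled', HEADER_DELIMITER=';') and the documented X-Amzn-Trace-Id layout; the import path is the SDK's conventional one but is an assumption here.

from aws_xray_sdk.core.models.trace_header import TraceHeader

header = 'Root=1-5759e988-bd862e3fe1be46a994272793;Parent=53995c3f42cd8ad8;Sampled=1'
trace_header = TraceHeader.from_header_str(header)
assert trace_header.root == '1-5759e988-bd862e3fe1be46a994272793'
assert trace_header.parent == '53995c3f42cd8ad8'
# Fields serialize back in Root, Parent, Sampled order.
assert trace_header.to_header_str() == header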
h_parts = []
if self.root:
    h_parts.append(ROOT + '=' + self.root)
if self.parent:
    h_parts.append(PARENT + '=' + self.parent)
if self.sampled is not None:
    h_parts.append(SAMPLE + '=' + str(self.sampled))
if self.data:
    for key in self.data:
        h_parts.append(key + '=' + self.data[key])

return HEADER_DELIMITER.join(h_parts)
def to_header_str(self)
Convert to a tracing header string that can be injected into outgoing http request headers.
2.594834
2.519656
1.029837
if wildcard_match(self._pattern, host_name):
    return host_name
else:
    return self._fallback
def get_name(self, host_name)
Returns the segment name based on the input host name.
6.340034
6.84939
0.925635
message = "%s%s%s" % (PROTOCOL_HEADER, PROTOCOL_DELIMITER, entity.serialize()) log.debug("sending: %s to %s:%s." % (message, self._ip, self._port)) self._send_data(message)
def send_entity(self, entity)
Serializes a segment/subsegment and sends it to the X-Ray daemon over UDP. By default it doesn't retry on failures. :param entity: a trace entity to send to the X-Ray daemon
4.907573
5.451824
0.900171
if address:
    daemon_config = DaemonConfig(address)
    self._ip, self._port = daemon_config.udp_ip, daemon_config.udp_port
def set_daemon_address(self, address)
Set up the UDP ip and port from the raw daemon address string using ``DaemonConfig`` class utilities.
5.066297
3.161166
1.602667
if pattern is None or text is None:
    return False

pattern_len = len(pattern)
text_len = len(text)
if pattern_len == 0:
    return text_len == 0

# Check the special case of a single * pattern, as it's common
if pattern == '*':
    return True

if case_insensitive:
    pattern = pattern.lower()
    text = text.lower()

# Infix globs are relatively rare, and the below search is expensive.
# Check for infix globs and, in their absence, do the simple thing.
if '*' not in pattern or pattern.index('*') == len(pattern) - 1:
    return _simple_wildcard_match(pattern, text)

# The res[i] is used to record if there is a match between
# the first i chars in text and the first j chars in pattern.
# So will return res[textLength+1] in the end
# Loop from the beginning of the pattern
# case not '*': if text[i]==pattern[j] or pattern[j] is '?',
# and res[i] is true, set res[i+1] to true, otherwise false.
# case '*': since '*' can match any globing, as long as there is a true
# in res before i, all the res[i+1], res[i+2],...,res[textLength]
# could be true
res = [None] * (text_len + 1)
res[0] = True
for j in range(0, pattern_len):
    p = pattern[j]
    if p != '*':
        for i in range(text_len - 1, -1, -1):
            res[i + 1] = res[i] and (p == '?' or (p == text[i]))
    else:
        i = 0
        while i <= text_len and not res[i]:
            i += 1
        for m in range(i, text_len + 1):
            res[m] = True
    res[0] = res[0] and (p == '*')

return res[text_len]
def wildcard_match(pattern, text, case_insensitive=True)
Performs a case-insensitive wildcard match between two strings. This method works with pseudo-regex chars; specifically ? and * are supported. An asterisk (*) represents any combination of characters. A question mark (?) represents any single character. :param str pattern: the regex-like pattern to be compared against :param str text: the string to compare against the pattern :param boolean case_insensitive: default is True :returns: whether the text matches the pattern
3.902402
3.882088
1.005233
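A few behavior checks for the matcher above (assuming its _simple_wildcard_match helper is in scope); matching is case-insensitive by default.

assert wildcard_match('*', 'anything')
assert wildcard_match('GET', 'get')            # case-insensitive
assert wildcard_match('h?st', 'host')          # '?' matches one char
assert wildcard_match('/api/*/detail', '/api/users/detail')  # infix glob
assert not wildcard_match('/api/*', '/health')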
candidates = []
for rule in all_rules:
    if rule.ever_matched() and rule.time_to_report():
        candidates.append(rule)

return candidates
def _get_candidates(self, all_rules)
Don't report a rule's statistics if any of these conditions is met: 1. The report time hasn't come (some rules might have larger report intervals). 2. The rule has never been matched.
5.595045
3.386732
1.652048
if hasattr(aiobotocore.client, '_xray_enabled'):
    return
setattr(aiobotocore.client, '_xray_enabled', True)

wrapt.wrap_function_wrapper(
    'aiobotocore.client',
    'AioBaseClient._make_api_call',
    _xray_traced_aiobotocore,
)

wrapt.wrap_function_wrapper(
    'aiobotocore.endpoint',
    'AioEndpoint.prepare_request',
    inject_header,
)
def patch()
Patch aiobotocore client so it generates subsegments when calling AWS services.
3.131427
2.800329
1.118235
if not plugins:
    raise MissingPluginNames("input plugin names are required")

modules = []
for plugin in plugins:
    short_name = PLUGIN_MAPPING.get(plugin.lower(), plugin.lower())
    full_path = '%s%s' % (module_prefix, short_name)
    modules.append(importlib.import_module(full_path))

return tuple(modules)
def get_plugin_modules(plugins)
Get plugin modules from input strings :param tuple plugins: a tuple of plugin names in str
4.05773
3.893077
1.042294
if not segment or not segment.sampled:
    return False

return segment.get_total_subsegments_size() > self.streaming_threshold
def is_eligible(self, segment)
A segment is eligible to have its children subsegments streamed if it is sampled and it breaches the streaming threshold.
13.515385
4.869309
2.775627
with self._lock:
    self._stream(entity, callback)
def stream(self, entity, callback)
Stream out all eligible children of the input entity. :param entity: The target entity to be streamed. :param callback: The function that takes the node and actually sends it out.
6.110276
9.429703
0.647982
if isinstance(bind, Connection):
    engine = bind.engine
else:
    engine = bind
m = re.match(r"Engine\((.*?)\)", str(engine))
if m is not None:
    u = urlparse(m.group(1))
    # Add Scheme to uses_netloc or // will be missing from url.
    uses_netloc.append(u.scheme)
    safe_url = ""
    if u.password is None:
        safe_url = u.geturl()
    else:
        # Strip password from URL
        host_info = u.netloc.rpartition('@')[-1]
        parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
        safe_url = parts.geturl()
    sql = {}
    sql['database_type'] = u.scheme
    sql['url'] = safe_url
    if u.username is not None:
        sql['user'] = "{}".format(u.username)
    return sql
def parse_bind(bind)
Parses a connection string and creates SQL trace metadata
4.355972
4.233685
1.028884
super(Subsegment, self).add_subsegment(subsegment)
self.parent_segment.increment()
def add_subsegment(self, subsegment)
Add input subsegment as a child subsegment and increment reference counter and total subsegments counter of the parent segment.
6.299418
3.998648
1.575387
super(Subsegment, self).remove_subsegment(subsegment)
self.parent_segment.decrement_subsegments_size()
def remove_subsegment(self, subsegment)
Remove the input subsegment from the child subsegments and decrement the parent segment's total subsegments count. :param Subsegment subsegment: subsegment to remove.
5.653508
5.274222
1.071913
super(Subsegment, self).close(end_time)
self.parent_segment.decrement_ref_counter()
def close(self, end_time=None)
Close the trace entity by setting `end_time` and flipping the in_progress flag to False. Also decrement the parent segment's ref counter by 1. :param int end_time: epoch in seconds. If not specified, the current time will be used.
9.622422
5.695611
1.689445
def wrapper(self, *args, **kargs):
    if type(self.context).__name__ == 'AsyncContext':
        return func(self, *args, **kargs)

    segment = DummySegment()
    self.context.set_trace_entity(segment)
    result = func(self, *args, **kargs)
    self.context.clear_trace_entities()
    return result

return wrapper
def _context_wrapped(func)
Wrap boto calls with a dummy segment. This is because botocore has two dependencies (requests and httplib) that might be monkey-patched in user code to capture subsegments. The wrapper makes sure there is always a non-sampled segment present when the connector makes an AWS API call using botocore. This context wrapper doesn't work with an asyncio-based context as the event loop is not thread-safe.
3.578621
2.820987
1.26857
new_rules = []

resp = self._xray_client.get_sampling_rules()
records = resp['SamplingRuleRecords']

for record in records:
    rule_def = record['SamplingRule']
    if self._is_rule_valid(rule_def):
        rule = SamplingRule(
            name=rule_def['RuleName'],
            priority=rule_def['Priority'],
            rate=rule_def['FixedRate'],
            reservoir_size=rule_def['ReservoirSize'],
            host=rule_def['Host'],
            service=rule_def['ServiceName'],
            method=rule_def['HTTPMethod'],
            path=rule_def['URLPath'],
            service_type=rule_def['ServiceType'],
        )
        new_rules.append(rule)

return new_rules
def fetch_sampling_rules(self)
Use X-Ray botocore client to get the centralized sampling rules from X-Ray service. The call is proxied and signed by X-Ray Daemon.
2.778486
2.363462
1.1756
now = int(time.time())
report_docs = self._generate_reporting_docs(rules, now)
resp = self._xray_client.get_sampling_targets(
    SamplingStatisticsDocuments=report_docs
)
new_docs = resp['SamplingTargetDocuments']

targets_mapping = {}
for doc in new_docs:
    TTL = self._dt_to_epoch(doc['ReservoirQuotaTTL']) if doc.get('ReservoirQuotaTTL', None) else None
    target = {
        'rate': doc['FixedRate'],
        'quota': doc.get('ReservoirQuota', None),
        'TTL': TTL,
        'interval': doc.get('Interval', None),
    }
    targets_mapping[doc['RuleName']] = target

return targets_mapping, self._dt_to_epoch(resp['LastRuleModification'])
def fetch_sampling_target(self, rules)
Report the current statistics of sampling rules and get back the newly assigned quota/TTL from the X-Ray service. The call is proxied and signed via the X-Ray daemon.
5.02116
4.283933
1.172091
if not client:
    client = self._create_xray_client(ip, port)

self._xray_client = client
def setup_xray_client(self, ip, port, client)
Set up the X-Ray client based on ip and port. If a preset client is specified, ip and port will be ignored.
3.00372
2.978892
1.008335
if PY2:
    # The input datetime is from botocore unmarshalling and it is
    # offset-aware so the timedelta of subtracting this time
    # to 01/01/1970 using the same tzinfo gives us
    # Unix Time (also known as POSIX Time).
    time_delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
    return int(time_delta.total_seconds())
else:
    # Added in python 3.3+ and directly returns POSIX time.
    return int(dt.timestamp())
def _dt_to_epoch(self, dt)
Convert an offset-aware datetime to POSIX time.
7.733725
6.935596
1.115077
def _trace_config_ctx_factory(trace_request_ctx):
    return SimpleNamespace(
        name=name,
        trace_request_ctx=trace_request_ctx
    )

trace_config = aiohttp.TraceConfig(trace_config_ctx_factory=_trace_config_ctx_factory)
trace_config.on_request_start.append(begin_subsegment)
trace_config.on_request_end.append(end_subsegment)
trace_config.on_request_exception.append(end_subsegment_with_exception)
return trace_config
def aws_xray_trace_config(name=None)
:param name: name used to identify the subsegment; if None, the URL will be used as the identifier internally. :returns: TraceConfig.
2.751017
2.631029
1.045605
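A client-side usage sketch: attaching the returned TraceConfig to an aiohttp session so each outbound request is recorded as a subsegment. It assumes an already-open segment on the default recorder.

import aiohttp

async def fetch(url):
    trace_config = aws_xray_trace_config()  # subsegments named after the URL
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get(url) as resp:
            return await resp.text()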
global runtime_context

try:
    runtime_context = {}

    r = urlopen('http://169.254.169.254/latest/meta-data/instance-id', timeout=1)
    runtime_context['instance_id'] = r.read().decode('utf-8')

    r = urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone', timeout=1)
    runtime_context['availability_zone'] = r.read().decode('utf-8')
except Exception:
    runtime_context = None
    log.warning("failed to get ec2 instance metadata.")
def initialize()
Try to get the EC2 instance-id and AZ if running on EC2 by querying http://169.254.169.254/latest/meta-data/. If not, continue.
2.137408
2.001571
1.067865
if limit is not None and limit == 0:
    # Nothing to return. This is consistent with the behavior of the
    # functions in the `traceback` module.
    return []

stack = traceback.extract_stack()
# Remove this `get_stacktrace()` function call from the stack info.
# For what we want to report, this is superfluous information and arguably
# adds garbage to the report.
# Also drop the `traceback.extract_stack()` call above from the returned
# stack info, since this is also superfluous.
stack = stack[:-2]

_exc_type, _exc, exc_traceback = sys.exc_info()
if exc_traceback is not None:
    # If and only if there is a currently triggered exception, combine the
    # exception traceback information with the current stack state to get a
    # complete trace.
    exc_stack = traceback.extract_tb(exc_traceback)
    stack += exc_stack

# Limit the stack trace size, if a limit was specified:
if limit is not None:
    # Copy the behavior of `traceback` functions with a `limit` argument.
    # See https://docs.python.org/3/library/traceback.html.
    if limit > 0:
        # limit > 0: include the last `limit` items
        stack = stack[-limit:]
    else:
        # limit < 0: include the first `abs(limit)` items
        stack = stack[:abs(limit)]

return stack
def get_stacktrace(limit=None)
Get a full stacktrace for the current state of execution. Include the current state of the stack, minus this function. If there is an active exception, include the stacktrace information from the exception as well. :param int limit: Optionally limit stack trace size results. This parameter has the same meaning as the `limit` parameter in `traceback.print_stack`. :returns: List of stack trace objects, in the same form as `traceback.extract_stack`.
4.598826
4.397521
1.045777
if sampling_req is None:
    return self._should_trace(self._default_rule)

host = sampling_req.get('host', None)
method = sampling_req.get('method', None)
path = sampling_req.get('path', None)

for rule in self._rules:
    if rule.applies(host, method, path):
        return self._should_trace(rule)

return self._should_trace(self._default_rule)
def should_trace(self, sampling_req=None)
Return True if the sampler decides to sample based on the input information and sampling rules. It will first check whether any custom rule should be applied; if not, it falls back to the default sampling rule. All optional arguments are extracted from incoming requests by the X-Ray middleware to perform path-based sampling.
2.209301
2.022868
1.092163
if self.in_lambda_ctx:
    segment = xray_recorder.current_subsegment()
else:
    segment = xray_recorder.current_segment()

segment.put_http_meta(http.STATUS, 500)
stack = stacktrace.get_stacktrace(limit=xray_recorder._max_trace_back)
segment.add_exception(exception, stack)
def process_exception(self, request, exception)
Add exception information and fault flag to the current segment.
4.520859
3.985353
1.134368
with self._lock:
    now = int(time.time())
    if now != self.this_sec:
        self.used_this_sec = 0
        self.this_sec = now

    if self.used_this_sec >= self.traces_per_sec:
        return False

    self.used_this_sec = self.used_this_sec + 1
    return True
def take(self)
Returns True if there are segments left within the current second, otherwise returns False.
3.506823
3.102356
1.130374
tid = id(threading.current_thread())
conn = _conn_holder.get(tid)
if not conn:
    with _rlock:
        # No other thread would insert a value in our slot, so no need
        # to recheck existence inside the lock.
        if 'project_endpoint' not in _options and 'project_id' not in _options:
            _options['project_endpoint'] = helper.get_project_endpoint_from_env()
        if 'credentials' not in _options:
            _options['credentials'] = helper.get_credentials_from_env()
        # We still need the lock when caching the thread local connection so we
        # don't race with _conn_holder.clear() in set_options().
        _conn_holder[tid] = conn = connection.Datastore(**_options)
return conn
def get_default_connection()
Returns the default datastore connection. Defaults endpoint to helper.get_project_endpoint_from_env() and credentials to helper.get_credentials_from_env(). Use set_options to override defaults.
5.99009
4.677497
1.280619
req = datastore.RunQueryRequest()
q = req.query
set_kind(q, kind='Todo')
add_property_orders(q, 'created')

resp = datastore.run_query(req)
todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results]
return todos
def get_all(cls)
Query for all Todo items ordered by creation date. This method is eventually consistent to avoid the need for an extra index.
6.220217
4.861076
1.279597
req = datastore.BeginTransactionRequest()
resp = datastore.begin_transaction(req)
tx = resp.transaction

req = datastore.RunQueryRequest()
req.read_options.transaction = tx
q = req.query
set_kind(q, kind='Todo')
add_projection(q, '__key__')
set_composite_filter(
    q.filter,
    datastore.CompositeFilter.AND,
    set_property_filter(
        datastore.Filter(),
        'done', datastore.PropertyFilter.EQUAL, True),
    set_property_filter(
        datastore.Filter(),
        '__key__', datastore.PropertyFilter.HAS_ANCESTOR,
        default_todo_list.key))
resp = datastore.run_query(req)

req = datastore.CommitRequest()
req.transaction = tx
for result in resp.batch.entity_results:
    req.mutations.add().delete.CopyFrom(result.entity.key)
resp = datastore.commit(req)

return ''
def archive(cls)
Delete all Todo items that are done.
3.509199
3.309734
1.060266
req = datastore.CommitRequest()
req.mode = datastore.CommitRequest.NON_TRANSACTIONAL
req.mutations.add().upsert.CopyFrom(self.to_proto())
resp = datastore.commit(req)
if not self.id:
    self.id = resp.mutation_results[0].key.path[-1].id
return self
def save(self)
Update or insert a Todo item.
3.797815
3.348713
1.134112
if project_id in self._emulators:
    return self._emulators[project_id]

emulator = self.Create(project_id)
self._emulators[project_id] = emulator
return emulator
def Get(self, project_id)
Returns an existing emulator instance for the provided project_id. If an emulator instance doesn't yet exist, it creates one. Args: project_id: project ID Returns: a DatastoreEmulator
2.762261
2.773504
0.995946
return DatastoreEmulator(self._emulator_cmd, self._working_directory, project_id, deadline, start_options)
def Create(self, project_id, start_options=None, deadline=10)
Creates an emulator instance. This method will wait for up to 'deadline' seconds for the emulator to start. Args: project_id: project ID start_options: a list of additional command-line options to pass to the emulator 'start' command deadline: number of seconds to wait for the datastore to respond Returns: a DatastoreEmulator Raises: IOError: if the emulator could not be started within the deadline
11.820178
8.316751
1.42125
start = time.time()
sleep = 0.05

def Elapsed():
    return time.time() - start

while True:
    try:
        response, _ = self._http.request(self._host)
        if response.status == 200:
            logging.info('emulator responded after %f seconds', Elapsed())
            return True
    except (socket.error, httplib.ResponseNotReady):
        pass
    if Elapsed() >= deadline:
        # Out of time; give up.
        return False
    else:
        time.sleep(sleep)
        sleep *= 2
def _WaitForStartup(self, deadline)
Waits for the emulator to start. Args: deadline: deadline in seconds Returns: True if the emulator responds within the deadline, False otherwise.
3.652428
3.454338
1.057345
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/reset' % self._host, method='POST',
                                 headers=headers)
if response.status == 200:
    return True
else:
    logging.warning('failed to clear emulator; response was: %s', response)
def Clear(self)
Clears all data from the emulator instance. Returns: True if the data was successfully cleared, False otherwise.
5.243671
3.979491
1.317674
if not self.__running:
    return
logging.info('shutting down the emulator running at %s', self._host)
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/shutdown' % self._host, method='POST',
                                 headers=headers)
if response.status != 200:
    logging.warning('failed to shut down emulator; response: %s', response)

self.__running = False
# Delete temp files.
shutil.rmtree(self._tmp_dir)
def Stop(self)
Stops the emulator instance.
4.121263
3.703185
1.112897
payload = req.SerializeToString()
headers = {
    'Content-Type': 'application/x-protobuf',
    'Content-Length': str(len(payload)),
    'X-Goog-Api-Format-Version': '2'
}
response, content = self._http.request(
    '%s:%s' % (self._url, method),
    method='POST', body=payload, headers=headers)
if response.status != 200:
    raise _make_rpc_error(method, response, content)
resp = resp_class()
resp.ParseFromString(content)
return resp
def _call_method(self, method, req, resp_class)
_call_method calls the given RPC method over HTTP. It uses the given protobuf message request as the payload and returns the deserialized protobuf message response. Args: method: RPC method name to be called. req: protobuf message for the RPC request. resp_class: protobuf message class for the RPC response. Returns: Deserialized resp_class protobuf message instance. Raises: RPCError: The rpc method call failed.
2.296834
2.216814
1.036097
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
    logging.info('connecting without credentials because %s is set.',
                 _DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
    return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
    logging.info('connecting without credentials because %s is set.',
                 _DATASTORE_EMULATOR_HOST_ENV)
    return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
        and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
    with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
        key = f.read()
    credentials = client.SignedJwtAssertionCredentials(
        os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
    logging.info('connecting using private key file.')
    return credentials
try:
    credentials = client.GoogleCredentials.get_application_default()
    credentials = credentials.create_scoped(SCOPE)
    logging.info('connecting using Google Application Default Credentials.')
    return credentials
except client.ApplicationDefaultCredentialsError, e:
    logging.error('Unable to find any credentials to use. '
                  'If you are running locally, make sure to set the '
                  '%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
    raise e
def get_credentials_from_env()
Get credentials from environment variables. Preference of credentials is: - No credentials if DATASTORE_EMULATOR_HOST is set. - Google APIs Signed JWT credentials based on the DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE environment variables - Google Application Default https://developers.google.com/identity/protocols/application-default-credentials Returns: credentials or None.
2.360494
2.24062
1.053501
project_id = project_id or os.getenv(_DATASTORE_PROJECT_ID_ENV)
if not project_id:
    raise ValueError('project_id was not provided. Either pass it in '
                     'directly or set DATASTORE_PROJECT_ID.')

# DATASTORE_HOST is deprecated.
if os.getenv(_DATASTORE_HOST_ENV):
    logging.warning('Ignoring value of environment variable DATASTORE_HOST. '
                    'To point datastore to a host running locally, use the '
                    'environment variable DATASTORE_EMULATOR_HOST')

url_override = os.getenv(_DATASTORE_URL_OVERRIDE_ENV)
if url_override:
    return '%s/projects/%s' % (url_override, project_id)

localhost = os.getenv(_DATASTORE_EMULATOR_HOST_ENV)
if localhost:
    return ('http://%s/%s/projects/%s'
            % (localhost, API_VERSION, project_id))

host = host or GOOGLEAPIS_HOST
return 'https://%s/%s/projects/%s' % (host, API_VERSION, project_id)
def get_project_endpoint_from_env(project_id=None, host=None)
Get Datastore project endpoint from environment variables. Args: project_id: The Cloud project, defaults to the environment variable DATASTORE_PROJECT_ID. host: The Cloud Datastore API host to use. Returns: the endpoint to use, for example https://datastore.googleapis.com/v1/projects/my-project Raises: ValueError: if the wrong environment variable was set or a project_id was not provided.
2.921142
2.873128
1.016711
for i in range(0, len(path_elements), 2):
    pair = path_elements[i:i+2]
    elem = key_proto.path.add()
    elem.kind = pair[0]
    if len(pair) == 1:
        return  # incomplete key
    id_or_name = pair[1]
    if isinstance(id_or_name, (int, long)):
        elem.id = id_or_name
    elif isinstance(id_or_name, basestring):
        elem.name = id_or_name
    else:
        raise TypeError(
            'Expected an integer id or string name as argument %d; '
            'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
return key_proto
def add_key_path(key_proto, *path_elements)
Add path elements to the given datastore.Key proto message. Args: key_proto: datastore.Key proto message. *path_elements: list of ancestors to add to the key (kind1, id1/name1, ..., kindN, idN/nameN); the last 2 elements represent the entity key; if there is no terminating id/name, the key will be incomplete. Raises: TypeError: the given id or name has the wrong type. Returns: the same datastore.Key. Usage: >>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name datastore.Key(...) >>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id datastore.Key(...) >>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete datastore.Key(...) >>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete datastore.Key(...)
2.428026
2.525833
0.961277
for name, value in property_dict.iteritems():
    set_property(entity_proto.properties, name, value, exclude_from_indexes)
def add_properties(entity_proto, property_dict, exclude_from_indexes=None)
Add values to the given datastore.Entity proto message. Args: entity_proto: datastore.Entity proto message. property_dict: a dictionary from property name to either a python object or datastore.Value. exclude_from_indexes: if the value should be excluded from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Usage: >>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]}) Raises: TypeError: if a given property value type is not supported.
3.058407
5.110655
0.598437
set_value(property_map[name], value, exclude_from_indexes)
def set_property(property_map, name, value, exclude_from_indexes=None)
Set property value in the given datastore.Property proto message. Args: property_map: a string->datastore.Value protobuf map. name: name of the property. value: python object or datastore.Value. exclude_from_indexes: if the value should be excluded from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Usage: >>> set_property(property_proto, 'foo', u'a') Raises: TypeError: if the given value type is not supported.
5.385183
9.636455
0.558835