code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
value_proto.Clear() if isinstance(value, (list, tuple)): for sub_value in value: set_value(value_proto.array_value.values.add(), sub_value, exclude_from_indexes) return # do not set indexed for a list property. if isinstance(value, entity_pb2.Value): value_proto.MergeFrom(value) elif isinstance(value, unicode): value_proto.string_value = value elif isinstance(value, str): value_proto.blob_value = value elif isinstance(value, bool): value_proto.boolean_value = value elif isinstance(value, (int, long)): value_proto.integer_value = value elif isinstance(value, float): value_proto.double_value = value elif isinstance(value, datetime.datetime): to_timestamp(value, value_proto.timestamp_value) elif isinstance(value, entity_pb2.Key): value_proto.key_value.CopyFrom(value) elif isinstance(value, entity_pb2.Entity): value_proto.entity_value.CopyFrom(value) else: raise TypeError('value type: %r not supported' % (value,)) if exclude_from_indexes is not None: value_proto.exclude_from_indexes = exclude_from_indexes
def set_value(value_proto, value, exclude_from_indexes=None)
Set the corresponding datastore.Value _value field for the given arg. Args: value_proto: datastore.Value proto message. value: python object or datastore.Value. (unicode value will set a datastore string value, str value will set a blob string value). Undefined behavior if value is/contains value_proto. exclude_from_indexes: if the value should be excluded from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Raises: TypeError: if the given value type is not supported.
1.917216
1.941218
0.987636
field = value_proto.WhichOneof('value_type') if field in __native_value_types: return getattr(value_proto, field) if field == 'timestamp_value': return from_timestamp(value_proto.timestamp_value) if field == 'array_value': return [get_value(sub_value) for sub_value in value_proto.array_value.values] return None
def get_value(value_proto)
Gets the python object equivalent for the given value proto. Args: value_proto: datastore.Value proto message. Returns: the corresponding python object value. timestamps are converted to datetime, and datastore.Value is returned for blob_key_value.
2.775925
3.13
0.886877
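Illustrative round trip using the two helpers above (a hedged sketch; entity_pb2 comes from whichever datastore proto package these helpers were written against):

value_proto = entity_pb2.Value()
set_value(value_proto, u'hello')                        # unicode -> string_value
assert get_value(value_proto) == u'hello'
set_value(value_proto, 42, exclude_from_indexes=True)   # int -> integer_value, not indexed
assert get_value(value_proto) == 42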
return dict((p.key, p.value) for p in entity_proto.property)
def get_property_dict(entity_proto)
Convert datastore.Entity to a dict of property name -> datastore.Value. Args: entity_proto: datastore.Entity proto message. Usage: >>> get_property_dict(entity_proto) {'foo': {string_value='a'}, 'bar': {integer_value=2}} Returns: dict of entity properties.
4.20826
10.438185
0.40316
del query_proto.kind[:] query_proto.kind.add().name = kind
def set_kind(query_proto, kind)
Set the kind constraint for the given datastore.Query proto message.
4.626729
5.423687
0.85306
for order in orders: proto = query_proto.order.add() if order[0] == '-': order = order[1:] proto.direction = query_pb2.PropertyOrder.DESCENDING else: proto.direction = query_pb2.PropertyOrder.ASCENDING proto.property.name = order
def add_property_orders(query_proto, *orders)
Add ordering constraint for the given datastore.Query proto message. Args: query_proto: datastore.Query proto message. orders: list of property name strings; defaults to ascending order, or descending if prefixed with '-'. Usage: >>> add_property_orders(query_proto, 'foo') # sort by foo asc >>> add_property_orders(query_proto, '-bar') # sort by bar desc
2.11826
2.453463
0.863376
for p in projection: proto = query_proto.projection.add() proto.property.name = p
def add_projection(query_proto, *projection)
Add projection properties to the given datastore.Query proto message.
4.213609
3.581075
1.176632
filter_proto.Clear() pf = filter_proto.property_filter pf.property.name = name pf.op = op set_value(pf.value, value) return filter_proto
def set_property_filter(filter_proto, name, op, value)
Set property filter constraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: >>> set_property_filter(filter_proto, 'foo', ... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
2.904203
5.488219
0.52917
filter_proto.Clear() cf = filter_proto.composite_filter cf.op = op for f in filters: cf.filters.add().CopyFrom(f) return filter_proto
def set_composite_filter(filter_proto, op, *filters)
Set composite filter constraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message op: datastore.CompositeFilter.Operation filters: vararg list of datastore.Filter Returns: the same datastore.Filter. Usage: >>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND, ... set_property_filter(datastore.Filter(), ...), ... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
2.859089
3.476175
0.822481
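A minimal sketch combining the query helpers above, following the usage shown in their docstrings (query_pb2 and the datastore module are whatever proto package these helpers ship with; the kind and property names are made up):

query = query_pb2.Query()
set_kind(query, 'Foo')
add_property_orders(query, '-bar', 'foo')    # ORDER BY bar DESC, foo ASC
add_projection(query, 'foo', 'bar')          # SELECT foo, bar
set_property_filter(query.filter, 'foo',
                    datastore.PropertyFilter.EQUAL, 'a')   # WHERE foo = 'a'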
seconds = long(micros / _MICROS_PER_SECOND) micro_remainder = micros % _MICROS_PER_SECOND timestamp.seconds = seconds timestamp.nanos = micro_remainder * _NANOS_PER_MICRO
def micros_to_timestamp(micros, timestamp)
Convert microseconds from the UTC epoch to a google.protobuf.Timestamp. Args: micros: a long, number of microseconds since the UTC epoch. timestamp: a google.protobuf.timestamp.Timestamp to populate.
3.018973
3.177508
0.950107
if dt.tzinfo: # this is an "aware" datetime with an explicit timezone. Throw an error. raise TypeError('Cannot store a timezone aware datetime. ' 'Convert to UTC and store the naive datetime.') timestamp.seconds = calendar.timegm(dt.timetuple()) timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO
def to_timestamp(dt, timestamp)
Convert datetime to google.protobuf.Timestamp. Args: dt: a timezone naive datetime. timestamp: a google.protobuf.Timestamp to populate. Raises: TypeError: if a timezone aware datetime was provided.
5.968946
5.24809
1.137356
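A short sketch of to_timestamp with a timezone-naive UTC datetime (using the standard google.protobuf Timestamp message):

import datetime
from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp()
to_timestamp(datetime.datetime(2020, 1, 1, 12, 30, 0), ts)   # naive datetime, interpreted as UTC
# ts.seconds holds the epoch seconds, ts.nanos the sub-second remainder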
init_params = dict() fit_params = dict() produce_params = dict() for name, param in hyperparameters.get('fixed', dict()).items(): if name in kwargs: value = kwargs.pop(name) elif 'default' in param: value = param['default'] else: raise TypeError("{} required argument '{}' not found".format(self.name, name)) init_params[name] = value for name, param in hyperparameters.get('tunable', dict()).items(): if name in kwargs: init_params[name] = kwargs.pop(name) fit_args = [arg['name'] for arg in self.fit_args] produce_args = [arg['name'] for arg in self.produce_args] for name in list(kwargs.keys()): if name in fit_args: fit_params[name] = kwargs.pop(name) elif name in produce_args: produce_params[name] = kwargs.pop(name) if kwargs: error = "Unexpected hyperparameters '{}'".format(', '.join(kwargs.keys())) raise TypeError(error) return init_params, fit_params, produce_params
def _extract_params(self, kwargs, hyperparameters)
Extract init, fit and produce params from kwargs. The `init_params`, `fit_params` and `produce_params` are extracted from the passed `kwargs` taking the metadata hyperparameters as a reference. During this extraction, make sure that all the required hyperparameters have been given and that nothing unexpected exists in the input. Args: kwargs (dict): dict containing the Keyword arguments that have been passed to the `__init__` method upon initialization. hyperparameters (dict): hyperparameters dictionary, as found in the JSON annotation. Raises: TypeError: A `TypeError` is raised if a required argument is not found in the `kwargs` dict, or if an unexpected argument has been given.
2.069815
2.071754
0.999064
self._hyperparameters.update(hyperparameters) if self._class: LOGGER.debug('Creating a new primitive instance for %s', self.name) self.instance = self.primitive(**self._hyperparameters)
def set_hyperparameters(self, hyperparameters)
Set new hyperparameters. Only the specified hyperparameters are modified, so any other hyperparameter keeps the value that had been previously given. If necessary, a new instance of the primitive is created. Args: hyperparameters (dict): Dictionary containing as keys the name of the hyperparameters and as values the values to be used.
6.13718
5.957629
1.030138
if self.fit_method is not None: fit_args = self._fit_params.copy() fit_args.update(kwargs) getattr(self.instance, self.fit_method)(**fit_args)
def fit(self, **kwargs)
Call the fit method of the primitive. The given keyword arguments will be passed directly to the `fit` method of the primitive instance specified in the JSON annotation. If any of the arguments expected by the fit method had been given during the MLBlock initialization, they will be passed as well. If the fit method was not specified in the JSON annotation, or if the primitive is a simple function, this will be a noop. Args: **kwargs: Any given keyword argument will be directly passed to the primitive fit method. Raises: TypeError: A `TypeError` might be raised if any argument not expected by the primitive fit method is given.
3.192302
3.34518
0.954299
produce_args = self._produce_params.copy() produce_args.update(kwargs) if self._class: return getattr(self.instance, self.produce_method)(**produce_args) produce_args.update(self._hyperparameters) return self.primitive(**produce_args)
def produce(self, **kwargs)
Call the primitive function, or the predict method of the primitive. The given keyword arguments will be passed directly to the primitive, if it is a simple function, or to the `produce` method of the primitive instance specified in the JSON annotation, if it is a class. If any of the arguments expected by the produce method had been given during the MLBlock initialization, they will be passed as well. Returns: The output of the call to the primitive function or primitive produce method.
4.813023
4.667489
1.03118
hyperparameters = {} for block_name, block in self.blocks.items(): hyperparameters[block_name] = block.get_hyperparameters() return hyperparameters
def get_hyperparameters(self)
Get the current hyperparameters of each block. Returns: dict: A dictionary containing the block names as keys and the current block hyperparameters dictionary as values.
2.884434
2.853836
1.010722
for block_name, block_hyperparams in hyperparameters.items(): self.blocks[block_name].set_hyperparameters(block_hyperparams)
def set_hyperparameters(self, hyperparameters)
Set new hyperparameter values for some blocks. Args: hyperparameters (dict): A dictionary containing the block names as keys and the new hyperparameters dictionary as values.
2.966812
2.739384
1.083022
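A hypothetical call, using the "<primitive>#<n>" block-name convention shown in the to_dict example below:

pipeline.set_hyperparameters({
    'a_primitive#1': {
        'an_argument': 'a_value',
    },
})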
context = { 'X': X, 'y': y } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Fitting block %s", block_name) try: fit_args = self._get_block_args(block_name, block.fit_args, context) block.fit(**fit_args) except Exception: LOGGER.exception("Exception caught fitting MLBlock %s", block_name) raise if block_name != last_block_name: LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise
def fit(self, X=None, y=None, **kwargs)
Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs of each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks.
2.208714
2.160726
1.022209
context = { 'X': X } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) if block_name != last_block_name: output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise return outputs
def predict(self, X=None, **kwargs)
Produce predictions using the blocks of this pipeline. Sequentially call the `produce` method of each block, capturing the outputs before calling the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `produce` calls will be taken. Args: X: Data which the pipeline will use to make predictions. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks.
3.171851
3.108792
1.020284
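A minimal end-to-end sketch, assuming pipeline is an MLPipeline and X_train, y_train, X_test are data its blocks accept:

pipeline.fit(X_train, y_train)           # fits every block, producing intermediate outputs
predictions = pipeline.predict(X_test)   # returns the output of the last block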
return { 'primitives': self.primitives, 'init_params': self.init_params, 'input_names': self.input_names, 'output_names': self.output_names, 'hyperparameters': self.get_hyperparameters(), 'tunable_hyperparameters': self._tunable_hyperparameters }
def to_dict(self)
Return all the details of this MLPipeline in a dict. The dict structure contains all the `__init__` arguments of the MLPipeline, as well as the current hyperparameter values and the specification of the tunable_hyperparameters:: { "primitives": [ "a_primitive", "another_primitive" ], "init_params": { "a_primitive": { "an_argument": "a_value" } }, "hyperparameters": { "a_primitive#1": { "an_argument": "a_value", "another_argument": "another_value", }, "another_primitive#1": { "yet_another_argument": "yet_another_value" } }, "tunable_hyperparameters": { "another_primitive#1": { "yet_another_argument": { "type": "str", "default": "a_default_value", "values": [ "a_default_value", "yet_another_value" ] } } } }
2.922543
2.00847
1.455109
with open(path, 'w') as out_file: json.dump(self.to_dict(), out_file, indent=4)
def save(self, path)
Save the specification of this MLPipeline in a JSON file. The content of the JSON file is the dict returned by the `to_dict` method. Args: path (str): Path to the JSON file to write.
2.499715
2.683434
0.931536
hyperparameters = metadata.get('hyperparameters') tunable = metadata.get('tunable_hyperparameters') pipeline = cls( metadata['primitives'], metadata.get('init_params'), metadata.get('input_names'), metadata.get('output_names'), ) if hyperparameters: pipeline.set_hyperparameters(hyperparameters) if tunable is not None: pipeline._tunable_hyperparameters = tunable return pipeline
def from_dict(cls, metadata)
Create a new MLPipeline from a dict specification. The dict structure is the same as the one created by the `to_dict` method. Args: metadata (dict): Dictionary containing the pipeline specification. Returns: MLPipeline: A new MLPipeline instance with the details found in the given specification dictionary.
3.240684
3.307728
0.979731
with open(path, 'r') as in_file: metadata = json.load(in_file) return cls.from_dict(metadata)
def load(cls, path)
Create a new MLPipeline from a JSON specification. The JSON file format is the same as the one created by the `to_dict` method. Args: path (str): Path of the JSON file to load. Returns: MLPipeline: A new MLPipeline instance with the specification found in the JSON file.
3.095593
4.106612
0.753807
if path not in _PRIMITIVES_PATHS: if not os.path.isdir(path): raise ValueError('Invalid path: {}'.format(path)) LOGGER.debug('Adding new primitives path %s', path) _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
def add_primitives_path(path)
Add a new path to look for primitives. The new path will be inserted in the first place of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. Args: path (str): path to add Raises: ValueError: A `ValueError` will be raised if the path is not valid.
2.881899
2.968743
0.970747
primitives_paths = list() entry_points = pkg_resources.iter_entry_points('mlprimitives') for entry_point in entry_points: if entry_point.name == 'jsons_path': path = entry_point.load() primitives_paths.append(path) return _PRIMITIVES_PATHS + primitives_paths
def get_primitives_paths()
Get the list of folders where the primitives will be looked for. This list will include the value of any `entry_point` named `jsons_path` published under the name `mlprimitives`. An example of such an entry point would be:: entry_points = { 'mlprimitives': [ 'jsons_path=some_module:SOME_VARIABLE' ] } where the module `some_module` contains a variable such as:: SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons') Returns: list: The list of folders.
3.548731
2.776801
1.277992
for base_path in get_primitives_paths(): parts = name.split('.') number_of_parts = len(parts) for folder_parts in range(number_of_parts): folder = os.path.join(base_path, *parts[:folder_parts]) filename = '.'.join(parts[folder_parts:]) + '.json' json_path = os.path.join(folder, filename) if os.path.isfile(json_path): with open(json_path, 'r') as json_file: LOGGER.debug('Loading primitive %s from %s', name, json_path) return json.load(json_file) raise ValueError("Unknown primitive: {}".format(name))
def load_primitive(name)
Locate and load the JSON annotation of the given primitive. All the paths returned by `get_primitives_paths` will be scanned to find a JSON file with the given name, and as soon as a JSON with the given name is found it is returned. Args: name (str): name of the primitive to look for. The name should correspond to the primitive, not to the filename, as the `.json` extension will be added dynamically. Returns: dict: The content of the JSON annotation file loaded into a dict. Raises: ValueError: A `ValueError` will be raised if the primitive cannot be found.
2.348722
2.273463
1.033103
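A sketch tying the primitive-path helpers together (the folder and primitive name are hypothetical):

add_primitives_path('/path/to/my/primitives')          # new path takes precedence
print(get_primitives_paths())
annotation = load_primitive('my_module.MyPrimitive')   # finds my_module/MyPrimitive.json or my_module.MyPrimitive.json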
dataset_path = _load('usps') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.label.values return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True)
def load_usps()
USPS Digits Dataset. The data of this dataset is a numpy array containing 9298 224x224 RGB photos of handwritten digits, each one a 3d array with shape (224, 224, 3), and the target is a 1d numpy integer array containing the label of the digit represented in the image.
5.107259
6.120235
0.834487
dataset_path = _load('handgeometry') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.target.values return Dataset(load_handgeometry.__doc__, X, y, r2_score)
def load_handgeometry()
Hand Geometry Dataset. The data of this dataset is a numpy array containing 112 224x224 RGB photos of hands, each one a 3d array with shape (224, 224, 3), and the target is a 1d numpy float array containing the width of the wrist in centimeters.
5.508379
5.796592
0.950279
dataset_path = _load('personae') X = _load_csv(dataset_path, 'data') y = X.pop('label').values return Dataset(load_personae.__doc__, X, y, accuracy_score, stratify=True)
def load_personae()
Personae Dataset. The data of this dataset is a 2d numpy array containing 145 entries that include texts written by Dutch users on Twitter, with some additional information about the author, and the target is a 1d numpy binary integer array indicating whether the author is extroverted or not.
8.684189
10.742241
0.808415
dataset_path = _load('umls') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml'))) return Dataset(load_umls.__doc__, X, y, accuracy_score, stratify=True, graph=graph)
def load_umls()
UMLS Dataset. The data consists of information about a 135-node graph and the relations between its nodes, given as a DataFrame with three columns (source, target and type) indicating which nodes are related and with which type of link. The target is a 1d numpy binary integer array indicating whether the indicated link exists or not.
5.938552
6.303275
0.942138
dataset_path = _load('dic28') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml'))) graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml'))) graph = graph1.copy() graph.add_nodes_from(graph2.nodes(data=True)) graph.add_edges_from(graph2.edges) graph.add_edges_from(X[['graph1', 'graph2']].values) graphs = { 'graph1': graph1, 'graph2': graph2, } return Dataset(load_dic28.__doc__, X, y, accuracy_score, stratify=True, graph=graph, graphs=graphs)
def load_dic28()
DIC28 Dataset from Pajek. This network represents connections among English words in a dictionary. It was generated from Knuth's dictionary. Two words are connected by an edge if we can reach one from the other by - changing a single character (e.g., work - word) - adding / removing a single character (e.g., ever - fever). There are 52,652 words (vertices in the network) having 2 to 8 characters in the dictionary. The obtained network has 89038 edges.
2.895092
2.988257
0.968823
dataset_path = _load('amazon') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml'))) return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph)
def load_amazon()
Amazon product co-purchasing network and ground-truth communities. The network was collected by crawling the Amazon website. It is based on the 'Customers Who Bought This Item Also Bought' feature of the Amazon website. If a product i is frequently co-purchased with product j, the graph contains an undirected edge from i to j. Each product category provided by Amazon defines a ground-truth community.
5.889113
6.654912
0.884927
dataset_path = _load('jester') X = _load_csv(dataset_path, 'data') y = X.pop('rating').values return Dataset(load_jester.__doc__, X, y, r2_score)
def load_jester()
Ratings from the Jester Online Joke Recommender System. This dataset consists of over 1.7 million instances of (user_id, item_id, rating) triples, which is split 50-50 into train and test data. source: "University of California Berkeley, CA" sourceURI: "http://eigentaste.berkeley.edu/dataset/"
8.803698
9.366721
0.939891
# noqa dataset_path = _load('wikiqa') data = _load_csv(dataset_path, 'data', set_index=True) questions = _load_csv(dataset_path, 'questions', set_index=True) sentences = _load_csv(dataset_path, 'sentences', set_index=True) vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True) entities = { 'data': (data, 'd3mIndex', None), 'questions': (questions, 'qIndex', None), 'sentences': (sentences, 'sIndex', None), 'vocabulary': (vocabulary, 'index', None) } relationships = [ ('questions', 'qIndex', 'data', 'qIndex'), ('sentences', 'sIndex', 'data', 'sIndex') ] target = data.pop('isAnswer').values return Dataset(load_wikiqa.__doc__, data, target, accuracy_score, stratify=True, entities=entities, relationships=relationships)
def load_wikiqa()
A Challenge Dataset for Open-Domain Question Answering. WikiQA dataset is a publicly available set of question and sentence (QS) pairs, collected and annotated for research on open-domain question answering. source: "Microsoft" sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/#"
3.432835
3.512619
0.977286
dataset = datasets.fetch_20newsgroups() return Dataset(load_newsgroups.__doc__, np.array(dataset.data), dataset.target, accuracy_score, stratify=True)
def load_newsgroups()
20 News Groups Dataset. The data of this dataset is a 1d numpy array vector containing the texts from 11314 newsgroups posts, and the target is a 1d numpy integer array containing the label of one of the 20 topics that they are about.
6.824507
8.255876
0.826624
dataset = datasets.load_iris() return Dataset(load_iris.__doc__, dataset.data, dataset.target, accuracy_score, stratify=True)
def load_iris()
Iris Dataset.
6.598617
6.008321
1.098246
dataset = datasets.load_boston() return Dataset(load_boston.__doc__, dataset.data, dataset.target, r2_score)
def load_boston()
Boston House Prices Dataset.
5.248477
5.150688
1.018986
if n_splits == 1: stratify = self.target if self._stratify else None return train_test_split( self.data, self.target, shuffle=self._shuffle, stratify=stratify ) else: cv_class = StratifiedKFold if self._stratify else KFold cv = cv_class(n_splits=n_splits, shuffle=self._shuffle) splits = list() for train, test in cv.split(self.data, self.target): X_train = self._get_split(self.data, train) y_train = self._get_split(self.target, train) X_test = self._get_split(self.data, test) y_test = self._get_split(self.target, test) splits.append((X_train, X_test, y_train, y_test)) return splits
def get_splits(self, n_splits=1)
Return splits of this dataset ready for Cross Validation. If n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split. Args: n_splits (int): Number of times that the data needs to be split. Returns: tuple or list: if n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split.
1.772042
1.82608
0.970407
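A short usage sketch, where dataset is a Dataset instance such as the loaders above return:

X_train, X_test, y_train, y_test = dataset.get_splits()        # single train/test split
for X_train, X_test, y_train, y_test in dataset.get_splits(5): # 5-fold cross validation
    pass                                                       # train and score here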
tr = TrackedRequest.instance() tr.tag(key, value)
def add(key, value)
Adds context to the currently executing request. :key: Any String identifying the request context. Example: "user_ip", "plan", "alert_count" :value: Any json-serializable type. Example: "1.1.1.1", "free", 100 :returns: nothing.
22.648445
26.551863
0.852989
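A sketch using the example keys and values from the docstring; how this helper is exposed by the scout_apm package is not shown here, so the bare calls are illustrative:

add('user_ip', '1.1.1.1')
add('plan', 'free')
add('alert_count', 100)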
system_name = platform.system() if system_name == "Linux": libc = cls.libc() return "unknown-linux-{libc}".format(libc=libc) elif system_name == "Darwin": return "apple-darwin" else: return "unknown"
def platform(cls)
What Operating System (and sub-system like glibc / musl)
3.663481
3.324401
1.101997
try: output = subprocess.check_output( ["ldd", "--version"], stderr=subprocess.STDOUT ) except (OSError, subprocess.CalledProcessError): return "gnu" else: if b"musl" in output: return "musl" else: return "gnu"
def libc(cls)
Alpine Linux uses a non-glibc version of the standard library: the stripped-down musl. The core agent can be built against it, but which one is running must be detected. Shelling out to `ldd` appears to be the most reliable way to do this.
2.988533
2.226736
1.342114
try: self._connect() self._register() while True: try: body = self.command_queue.get(block=True, timeout=1 * SECOND) except queue.Empty: body = None if body is not None: result = self._send(body) if result: self.command_queue.task_done() else: # Something was wrong with the socket. self._disconnect() self._connect() self._register() # Check for stop event after a read from the queue. This is to # allow you to open a socket, immediately send to it, and then # stop it. We do this in the Metadata send at application start # time if self._stop_event.is_set(): logger.debug("CoreAgentSocket thread stopping.") break except Exception: logger.debug("CoreAgentSocket thread exception.") finally: self._started_event.clear() self._stop_event.clear() self._stopped_event.set() logger.debug("CoreAgentSocket thread stopped.")
def run(self)
Called by the threading system
4.708503
4.543182
1.036389
global SCOUT_PYTHON_VALUES for key, value in kwargs.items(): SCOUT_PYTHON_VALUES[key] = value
def set(cls, **kwargs)
Sets a configuration value for the Scout agent. Values set here will not override values set in ENV.
7.215427
5.431571
1.328424
if isinstance(value, text_type): return value elif isinstance(value, bytes): return text_type(value, encoding, errors) else: return text_type(value)
def text(value, encoding="utf-8", errors="strict")
Convert a value to str on Python 3 and unicode on Python 2.
2.10601
1.882515
1.118721
@monkeypatch_method(BaseDatabaseWrapper) def cursor(original, self, *args, **kwargs): result = original(*args, **kwargs) return _DetailedTracingCursorWrapper(result, self) logger.debug("Monkey patched SQL")
def install()
Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor` method of BaseDatabaseWrapper, to return a wrapper that instruments any calls going through it.
9.401546
5.687968
1.652883
# A single address, set by this server, returned as an Array remote_addr = cls.ips_from(headers.get("REMOTE_ADDR")) # Could be a CSV list and/or repeated headers that were concatenated. forwarded_ips = cls.ips_from(headers.get("HTTP_X_FORWARDED_FOR")) client_ips = cls.ips_from(headers.get("HTTP_CLIENT_IP")) # We assume these things about the IP headers: # # - X-Forwarded-For will be a list of IPs, one per proxy, or blank. # in order: `client,proxy1,proxy2` # - Client-Ip is propagated from the outermost proxy, or is blank # - REMOTE_ADDR will be the IP that made the request to this server # # X-Forwarded-For and Client-Ip shouldn't be set at the same time, but # if they are, use the one in Forwarded ips = forwarded_ips + client_ips + remote_addr try: return ips[0] except IndexError: return None
def lookup_from_headers(cls, headers)
Given a dictionary of headers (WSGI request.META for instance), look up the user's most likely IP.
6.558312
6.274439
1.045243
from django.conf import settings # If MIDDLEWARE is set, update that, with handling of tuple vs array forms if getattr(settings, "MIDDLEWARE", None) is not None: if isinstance(settings.MIDDLEWARE, tuple): settings.MIDDLEWARE = ( ("scout_apm.django.middleware.MiddlewareTimingMiddleware",) + settings.MIDDLEWARE + ("scout_apm.django.middleware.ViewTimingMiddleware",) ) else: settings.MIDDLEWARE.insert( 0, "scout_apm.django.middleware.MiddlewareTimingMiddleware" ) settings.MIDDLEWARE.append( "scout_apm.django.middleware.ViewTimingMiddleware" ) # Otherwise, we're doing old style middleware, do the same thing with # the same handling of tuple vs array forms else: if isinstance(settings.MIDDLEWARE_CLASSES, tuple): settings.MIDDLEWARE_CLASSES = ( ("scout_apm.django.middleware.OldStyleMiddlewareTimingMiddleware",) + settings.MIDDLEWARE_CLASSES + ("scout_apm.django.middleware.OldStyleViewMiddleware",) ) else: settings.MIDDLEWARE_CLASSES.insert( 0, "scout_apm.django.middleware.OldStyleMiddlewareTimingMiddleware" ) settings.MIDDLEWARE_CLASSES.append( "scout_apm.django.middleware.OldStyleViewMiddleware" )
def install_middleware(self)
Attempts to insert the ScoutApm middleware as the first middleware (first on incoming requests, last on outgoing responses).
2.263692
2.199758
1.029064
configs = {} configs["application_root"] = self.app.instance_path for name in current_app.config: if name.startswith("SCOUT_"): value = current_app.config[name] clean_name = name.replace("SCOUT_", "").lower() configs[clean_name] = value ScoutConfig.set(**configs)
def extract_flask_settings(self)
Copies SCOUT_* settings in the app into Scout's config lookup
3.812532
2.948016
1.293253
req = _request_ctx_stack.top.request app = current_app # Return flask's default options response. See issue #40 if req.method == "OPTIONS": return app.make_default_options_response() if req.routing_exception is not None: app.raise_routing_exception(req) # The routing rule has some handy attributes to extract how Flask found # this endpoint rule = req.url_rule # Wrap the real view_func view_func = self.wrap_view_func( app, rule, req, app.view_functions[rule.endpoint], req.view_args ) return view_func(**req.view_args)
def dispatch_request(self)
Modified version of Flask.dispatch_request to call process_view.
5.928931
5.536903
1.070803
operation = view_func.__module__ + "." + view_func.__name__ return self.trace_view_function( view_func, ("Controller", {"path": req.path, "name": operation}) )
def wrap_view_func(self, app, rule, req, view_func, view_kwargs)
This method is called just before the flask view is called. This is done by the dispatch_request method.
6.795243
6.916584
0.982457
try: if ignore_path(request.path): TrackedRequest.instance().tag("ignore_transaction", True) view_name = request.resolver_match._func_path span = TrackedRequest.instance().current_span() if span is not None: span.operation = "Controller/" + view_name Context.add("path", request.path) Context.add("user_ip", RemoteIp.lookup_from_headers(request.META)) if getattr(request, "user", None) is not None: Context.add("username", request.user.get_username()) except Exception: pass
def process_view(self, request, view_func, view_args, view_kwargs)
Capture details about the view_func that is about to execute
5.004458
4.932122
1.014666
counter = count() xml_config = DEFAULT_NETWORK_XML if not {'configuration', 'dynamic_address'} & set(configuration.keys()): raise RuntimeError( "Either configuration or dynamic_address must be specified") if 'configuration' in configuration: with open(configuration['configuration']) as xml_file: xml_config = xml_file.read() while True: if 'dynamic_address' in configuration: address = generate_address(hypervisor, configuration['dynamic_address']) xml_string = network_xml(identifier, xml_config, address=address) else: xml_string = network_xml(identifier, xml_config) try: return hypervisor.networkCreateXML(xml_string) except libvirt.libvirtError as error: if next(counter) > MAX_ATTEMPTS: raise RuntimeError( "Exceeded failed attempts ({}) to get IP address.".format( MAX_ATTEMPTS), "Last error: {}".format(error))
def create(hypervisor, identifier, configuration)
Creates a virtual network according to the given configuration. @param hypervisor: (libvirt.virConnect) connection to libvirt hypervisor. @param identifier: (str) UUID for the virtual network. @param configuration: (dict) network configuration. @return: (libvirt.virNetwork) virtual network.
3.644252
3.581446
1.017536
xml = domain.XMLDesc(0) element = etree.fromstring(xml) subelm = element.find('.//interface[@type="network"]') if subelm is not None: network = subelm.find('.//source').get('network') hypervisor = domain.connect() return hypervisor.networkLookupByName(network) return None
def lookup(domain)
Find the virNetwork object associated to the domain. If the domain has more than one network interface, the first one is returned. None is returned if the domain is not attached to any network.
4.596214
3.954392
1.162306
try: network.destroy() except libvirt.libvirtError as error: raise RuntimeError("Unable to destroy network: {}".format(error))
def delete(network)
libvirt network cleanup. @raise: libvirt.libvirtError.
4.258682
4.277369
0.995631
netname = identifier[:8] network = etree.fromstring(xml) subelement(network, './/name', 'name', identifier) subelement(network, './/uuid', 'uuid', identifier) subelement(network, './/bridge', 'bridge', None, name='virbr-%s' % netname) if address is not None: set_address(network, address) return etree.tostring(network).decode('utf-8')
def network_xml(identifier, xml, address=None)
Fills the XML file with the required fields. * name * uuid * bridge * ip ** dhcp
3.847864
4.116882
0.934655
if network.find('.//ip') is not None: raise RuntimeError("Address already specified in XML configuration.") netmask = str(address.netmask) ipv4 = str(address[1]) dhcp_start = str(address[2]) dhcp_end = str(address[-2]) ip = etree.SubElement(network, 'ip', address=ipv4, netmask=netmask) dhcp = etree.SubElement(ip, 'dhcp') etree.SubElement(dhcp, 'range', start=dhcp_start, end=dhcp_end)
def set_address(network, address)
Sets the given address to the network XML element. Libvirt bridge will have address and DHCP server configured according to the subnet prefix length.
3.438329
3.153723
1.090244
ipv4 = configuration['ipv4'] prefix = configuration['prefix'] subnet_prefix = configuration['subnet_prefix'] subnet_address = ipaddress.IPv4Network(u'/'.join((str(ipv4), str(prefix)))) net_address_pool = subnet_address.subnets(new_prefix=subnet_prefix) return address_lookup(hypervisor, net_address_pool)
def generate_address(hypervisor, configuration)
Generate a valid IP address according to the configuration.
3.606491
3.502601
1.029661
address_pool = set(address_pool) active_addresses = set(active_network_addresses(hypervisor)) try: return random.choice(tuple(address_pool - active_addresses)) except IndexError: raise RuntimeError("All IP addresses are in use")
def address_lookup(hypervisor, address_pool)
Retrieves a valid and available network IP address.
4.343728
4.057517
1.070538
active = [] for network in hypervisor.listNetworks(): try: xml = hypervisor.networkLookupByName(network).XMLDesc(0) except libvirt.libvirtError: # network has been destroyed meanwhile continue else: ip_element = etree.fromstring(xml).find('.//ip') address = ip_element.get('address') netmask = ip_element.get('netmask') active.append(ipaddress.IPv4Network(u'/'.join((address, netmask)), strict=False)) return active
def active_network_addresses(hypervisor)
Query libvirt for the already reserved addresses.
3.0749
2.833452
1.085213
for interface in interfaces.values(): if interface.get('hwaddr') == hwaddr: for address in interface.get('addrs'): if address.get('type') == address_type: return address.get('addr')
def interface_lookup(interfaces, hwaddr, address_type)
Search the address within the interface list.
2.485578
2.357925
1.054138
if self._mac_address is None: self._mac_address = self._get_mac_address() return self._mac_address
def mac_address(self)
Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned.
2.783409
2.928194
0.950555
if self._ip4_address is None and self.network is not None: self._ip4_address = self._get_ip_address( libvirt.VIR_IP_ADDR_TYPE_IPV4) return self._ip4_address
def ip4_address(self)
Returns the IPv4 address of the network interface. If multiple interfaces are provided, the address of the first found is returned.
3.443293
3.605504
0.95501
if self._ip6_address is None and self.network is not None: self._ip6_address = self._get_ip_address( libvirt.VIR_IP_ADDR_TYPE_IPV6) return self._ip6_address
def ip6_address(self)
Returns the IPv6 address of the network interface. If multiple interfaces are provided, the address of the first found is returned.
3.626551
3.7004
0.980043
self._assert_transition('shutdown') self.trigger('pre_shutdown', **kwargs) self._execute_command(self.domain.shutdown) self._wait_for_shutdown(timeout) self.trigger('post_shutdown', **kwargs)
def shutdown(self, timeout=None, **kwargs)
Shuts down the Context. Sends an ACPI request to the OS for a clean shutdown. Triggered events:: * pre_shutdown * post_shutdown .. note:: The Guest OS needs to support ACPI requests sent from the host; the completion of the operation is not ensured by the platform. If the Guest OS is still running after the given timeout, a RuntimeError will be raised. @param timeout: (int) amount of seconds to wait for the machine shutdown. @param kwargs: keyword arguments to pass along with the events.
4.756782
4.147872
1.146801
self._assert_transition(event) self.trigger('pre_%s' % event, **kwargs) self._execute_command(command, *args) self.trigger('post_%s' % event, **kwargs)
def _command(self, event, command, *args, **kwargs)
Context state controller. Checks whether the transition is possible, executes it and triggers the Hooks with the pre_* and post_* events. @param event: (str) event generated by the command. @param command: (virDomain.method) state transition to impose. @raise: RuntimeError.
4.247084
3.117357
1.362399
state = self.domain.state()[0] if event not in STATES_MAP[state]: raise RuntimeError("State transition %s not allowed" % event)
def _assert_transition(self, event)
Asserts the state transition validity.
9.218894
7.344261
1.255251
try: command(*args) except libvirt.libvirtError as error: raise RuntimeError("Unable to execute command. %s" % error)
def _execute_command(self, command, *args)
Execute the state transition command.
5.163534
4.600577
1.122366
create_folder(folder_path) name = snapshot.getName() path = os.path.join(folder_path, '%s.qcow2' % name) process = launch_process(QEMU_IMG, "convert", "-f", "qcow2", "-o", "backing_file=%s" % volume_backing_path(volume), "-O", "qcow2", "-s", name, volume_path(volume), path) collect_process_output(process) return path
def snapshot_to_checkpoint(volume, snapshot, folder_path)
Turns a QEMU internal snapshot into a QCOW file.
3.674767
3.490705
1.052729
with DiskComparator(disk0, disk1) as comparator: results = comparator.compare( size=configuration.get('get_file_size', False), identify=configuration.get('identify_files', False), concurrent=configuration.get('use_concurrency', False)) if configuration.get('extract_files', False): extract = results['created_files'] + results['modified_files'] files = comparator.extract(1, extract, path=configuration['results_folder']) results.update(files) if configuration.get('compare_registries', False): results['registry'] = comparator.compare_registry( concurrent=configuration.get('use_concurrency', False)) return results
def compare_disks(disk0, disk1, configuration)
Compares two disks according to the given configuration.
4.552958
4.566401
0.997056
results_path = os.path.join(self.configuration['results_folder'], "filesystem.json") self.logger.debug("Event %s: start comparing %s with %s.", event, self.checkpoints[0], self.checkpoints[1]) results = compare_disks(self.checkpoints[0], self.checkpoints[1], self.configuration) with open(results_path, 'w') as results_file: json.dump(results, results_file) self.processing_done.set()
def start_processing_handler(self, event)
Asynchronous handler starting the disk analysis process.
3.634585
3.335551
1.08965
module_name, class_name = str(fully_qualified_name).rsplit(".", 1) module = __import__(module_name, globals(), locals(), [class_name], 0) Class = getattr(module, class_name) if not inspect.isclass(Class): raise TypeError( "%s is not of type class: %s" % (class_name, type(Class))) return Class
def lookup_class(fully_qualified_name)
Given its fully qualified name, finds the desired class and imports it. Returns the Class object if found.
2.33329
2.438673
0.956787
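A quick sketch with a standard-library class so the example stays self-contained:

OrderedDict = lookup_class('collections.OrderedDict')
instance = OrderedDict(a=1, b=2)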
handler = lambda _, buff, file_handler: file_handler.write(buff) string = BytesIO() stream = context.domain.connect().newStream(0) context.domain.screenshot(stream, 0, 0) stream.recvAll(handler, string) return string.getvalue()
def screenshot(context)
Takes a screenshot of the VNC connection of the guest. The resulting image file will be in Portable Pixmap format (PPM). @param context: (see.Context) context of the Environment. @return: (str) binary stream containing the screenshot.
8.422255
10.306993
0.81714
if not isinstance(event, Event): event = Event(event, source=source, **kwargs) return event
def prime_event(event, source, **kwargs)
Returns the event ready to be triggered. If the given event is a string an Event instance is generated from it.
3.674876
3.550336
1.035078
thread = Thread(target=synchronous, args=(function, event)) thread.daemon = True thread.start()
def asynchronous(function, event)
Runs the function asynchronously taking care of exceptions.
3.049858
3.337593
0.91379
try: function(event) except Exception as error: logger = get_function_logger(function) logger.exception(error)
def synchronous(function, event)
Runs the function synchronously taking care of exceptions.
3.962158
3.956533
1.001422
self._handlers.sync_handlers[event].append(handler)
def subscribe(self, event, handler)
Subscribes a Handler for the given Event. @param event: (str|see.Event) event to react to. @param handler: (callable) function or method to subscribe.
13.9158
20.627878
0.674611
self._handlers.async_handlers[event].append(handler)
def subscribe_async(self, event, handler)
Subscribes an asynchronous Handler for the given Event. An asynchronous handler is executed concurrently to the others without blocking the Events flow. @param event: (str|see.Event) event to react to. @param handler: (callable) function or method to subscribe.
10.54573
11.204544
0.941201
try: self._handlers.sync_handlers[event].remove(handler) except ValueError: self._handlers.async_handlers[event].remove(handler) else: try: self._handlers.async_handlers[event].remove(handler) except ValueError: pass
def unsubscribe(self, event, handler)
Unsubscribes the Handler from the given Event. Both synchronous and asynchronous handlers are removed. @param event: (str|see.Event) event to which the handler is subscribed. @param handler: (callable) function or method to unsubscribe.
2.667598
2.870424
0.92934
with self._handlers.trigger_mutex: event = prime_event(event, self.__class__.__name__, **kwargs) for handler in self._handlers.async_handlers[event]: asynchronous(handler, event) for handler in self._handlers.sync_handlers[event]: synchronous(handler, event)
def trigger(self, event, **kwargs)
Triggers an event. All subscribed handlers will be executed, asynchronous ones won't block this call. @param event: (str|see.Event) event intended to be raised.
5.166384
6.002249
0.860741
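A sketch of the subscribe/trigger flow; `hooks` stands for whatever object exposes these methods, and the event name and payload are made up:

def on_start(event):
    print('handled', event)

hooks.subscribe('post_start', on_start)    # synchronous handler
hooks.trigger('post_start', foo='bar')     # primes an Event and runs the handler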
domain = etree.fromstring(xml) subelement(domain, './/name', 'name', identifier) subelement(domain, './/uuid', 'uuid', identifier) devices = subelement(domain, './/devices', 'devices', None) disk = subelement(devices, './/disk', 'disk', None, type='file', device='disk') subelement(disk, './/source', 'source', None, file=disk_path) return etree.tostring(domain).decode('utf-8')
def domain_xml(identifier, xml, disk_path)
Fills the XML file with the required fields. * name * uuid * devices
2.446355
2.683906
0.911491
with open(configuration['configuration']) as config_file: domain_config = config_file.read() xml = domain_xml(identifier, domain_config, disk_path) return hypervisor.defineXML(xml)
def domain_create(hypervisor, identifier, configuration, disk_path)
libvirt Domain definition. @raise: ConfigError, IOError, libvirt.libvirtError.
3.88526
4.031852
0.963642
if domain is not None: try: if domain.isActive(): domain.destroy() except libvirt.libvirtError: logger.exception("Unable to destroy the domain.") try: domain.undefine() except libvirt.libvirtError: try: domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA) # domain with snapshots except libvirt.libvirtError: logger.exception("Unable to undefine the domain.")
def domain_delete(domain, logger)
libvirt domain undefinition. @raise: libvirt.libvirtError.
2.951015
2.63922
1.118139
disk_path = self.provider_image self._hypervisor = libvirt.open( self.configuration.get('hypervisor', 'vbox:///session')) self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], disk_path)
def allocate(self)
Initializes libvirt resources.
10.969505
8.455391
1.297339
if self._domain is not None: domain_delete(self._domain, self.logger) if self._hypervisor is not None: try: self._hypervisor.close() except Exception: self.logger.exception("Unable to close hypervisor connection.")
def deallocate(self)
Releases all resources.
4.118324
3.780924
1.089238
if self._image is None: if isinstance(self.configuration['disk']['image'], dict): ProviderClass = lookup_provider_class( self.configuration['disk']['image']['provider']) self._image = ProviderClass( self.configuration['disk']['image']).image else: # If image is not a dictionary, return it as is for backwards # compatibility self._image = self.configuration['disk']['image'] return self._image
def provider_image(self)
Image path getter. This method uses a pluggable image provider to retrieve an image's path.
3.667223
3.639618
1.007585
logging.info("Executing %s command %s.", asynchronous and 'asynchronous' or 'synchronous', args) process = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: timeout = asynchronous and 1 or None output = process.communicate(timeout=timeout)[0].decode('utf8') except subprocess.TimeoutExpired: pass if asynchronous: return PopenOutput(None, 'Asynchronous call.') else: return PopenOutput(process.returncode, output)
def run_command(args, asynchronous=False)
Executes a command returning its exit code and output.
3.438476
3.466051
0.992044
logging.debug("New GET request.") query = parse_qs(urlparse(self.path).query) command = query['command'][0].split(' ') async = bool(int(query.get('async', [False])[0])) output = run_command(command, asynchronous=async) self.respond(output)
def do_GET(self)
Run simple command with parameters.
4.331907
3.968002
1.09171
logging.debug("New POST request.") query = parse_qs(urlparse(self.path).query) sample = query['sample'][0] async = bool(int(query.get('async', [False])[0])) path = self.store_file(mkdtemp(), sample) command = query['command'][0].format(sample=path).split(' ') output = run_command(command, asynchronous=async) self.respond(output)
def do_POST(self)
Upload a file and execute a command.
5.408769
4.962191
1.089996
response = {'exit_code': output.code, 'command_output': output.log} self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write(bytes(json.dumps(response), "utf8"))
def respond(self, output)
Generates server response.
2.98984
2.88507
1.036314
path = os.path.join(folder, name) length = self.headers['content-length'] with open(path, 'wb') as sample: sample.write(self.rfile.read(int(length))) return path
def store_file(self, folder, name)
Stores the uploaded file in the given path.
3.149229
2.783839
1.131254
self.logger.debug("Event %s: starting Volatility process(es).", event) for snapshot in self.snapshots: self.process_snapshot(snapshot) self.processing_done.set()
def start_processing_handler(self, event)
Asynchronous handler starting the Volatility processes.
7.908901
4.753393
1.663843
pool = etree.fromstring(pool_xml) base_volume = etree.fromstring(base_volume_xml) pool_path = pool.find('.//path').text base_path = base_volume.find('.//target/path').text target_path = os.path.join(pool_path, '%s.qcow2' % identifier) volume_xml = VOLUME_DEFAULT_CONFIG.format(identifier, target_path) volume = etree.fromstring(volume_xml) base_volume_capacity = base_volume.find(".//capacity") volume.append(base_volume_capacity) if cow: backing_xml = BACKING_STORE_DEFAULT_CONFIG.format(base_path) backing_store = etree.fromstring(backing_xml) volume.append(backing_store) return etree.tostring(volume).decode('utf-8')
def disk_xml(identifier, pool_xml, base_volume_xml, cow)
Clones volume_xml updating the required fields. * name * target path * backingStore
2.355055
2.397985
0.982098
path = os.path.join(pool_path, identifier) if not os.path.exists(path): os.makedirs(path) xml = POOL_DEFAULT_CONFIG.format(identifier, path) return hypervisor.storagePoolCreateXML(xml, 0)
def pool_create(hypervisor, identifier, pool_path)
Storage pool creation. The following values are set in the XML configuration: * name * target/path * target/permission/label
3.337357
4.143159
0.80551
try: volume = hypervisor.storageVolLookupByPath(disk_path) return volume.storagePoolLookupByVolume() except libvirt.libvirtError: return None
def pool_lookup(hypervisor, disk_path)
Storage pool lookup. Retrieves the virStoragePool which contains the disk at the given path.
3.936948
4.475796
0.879608
path = etree.fromstring(storage_pool.XMLDesc(0)).find('.//path').text volumes_delete(storage_pool, logger) try: storage_pool.destroy() except libvirt.libvirtError: logger.exception("Unable to delete storage pool.") try: if os.path.exists(path): shutil.rmtree(path) except EnvironmentError: logger.exception("Unable to delete storage pool folder.")
def pool_delete(storage_pool, logger)
Storage Pool deletion, removes all the created disk images within the pool and the pool itself.
3.103454
3.050797
1.01726
try: for vol_name in storage_pool.listVolumes(): try: vol = storage_pool.storageVolLookupByName(vol_name) vol.delete(0) except libvirt.libvirtError: logger.exception( "Unable to delete storage volume %s.", vol_name) except libvirt.libvirtError: logger.exception("Unable to delete storage volumes.")
def volumes_delete(storage_pool, logger)
Deletes all storage volume disks contained in the given storage pool.
2.372797
2.315449
1.024767
cow = configuration.get('copy_on_write', False) try: volume = hypervisor.storageVolLookupByPath(image) except libvirt.libvirtError: if os.path.exists(image): pool_path = os.path.dirname(image) logger.info("LibVirt pool does not exist, creating {} pool".format( pool_path.replace('/', '_'))) pool = hypervisor.storagePoolDefineXML(BASE_POOL_CONFIG.format( pool_path.replace('/', '_'), pool_path)) pool.setAutostart(True) pool.create() pool.refresh() volume = hypervisor.storageVolLookupByPath(image) else: raise RuntimeError( "%s disk does not exist." % image) xml = disk_xml(identifier, storage_pool.XMLDesc(0), volume.XMLDesc(0), cow) if cow: storage_pool.createXML(xml, 0) else: storage_pool.createXMLFrom(xml, volume, 0)
def disk_clone(hypervisor, identifier, storage_pool, configuration, image, logger)
Disk image cloning. Given an original disk image it clones it into a new one, the clone will be created within the storage pool. The following values are set into the disk XML configuration: * name * target/path * target/permission/label * backingStore/path if copy on write is enabled
3.189228
3.380327
0.943467
network_name = None self._hypervisor = libvirt.open( self.configuration.get('hypervisor', 'qemu:///system')) self._storage_pool = self._retrieve_pool() if 'network' in self.configuration: self._network = network.create(self._hypervisor, self.identifier, self.configuration['network']) network_name = self._network.name() disk_path = self._retrieve_disk_path() if self._storage_pool is not None: self._storage_pool.refresh() self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], disk_path, network_name=network_name) if self._network is None: self._network = network.lookup(self._domain)
def allocate(self)
Initializes libvirt resources.
3.429466
3.149791
1.088792
if self._domain is not None: domain_delete(self._domain, self.logger) if self._network is not None: self._network_delete() if self._storage_pool is not None: self._storage_pool_delete() if self._hypervisor is not None: self._hypervisor_delete()
def deallocate(self)
Releases all resources.
3.238744
3.069095
1.055277
disk_clone(self._hypervisor, self.identifier, self._storage_pool, configuration, self.provider_image, self.logger) disk_name = self._storage_pool.listVolumes()[0] return self._storage_pool.storageVolLookupByName(disk_name).path()
def _clone_disk(self, configuration)
Clones the disk and returns the path to the new disk.
6.305408
5.940499
1.061427
if isinstance(configuration, dict): return configuration else: with open(configuration) as configfile: return json.load(configfile)
def load_configuration(configuration)
Returns a dictionary, accepts a dictionary or a path to a JSON file.
3.003639
2.243208
1.338993