code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
---|---|---|---|---|---|
string | string | string | float64 | float64 | float64 |
if old_labels is None:
old_labels = set(mapping)
if new_labels is None:
new_labels = set(itervalues(mapping))
# counter will be used to generate the intermediate labels; as an easy
# optimization we start the counter at a high number, because variables
# are often labeled by integers starting from 0
counter = itertools.count(2 * len(mapping))
old_to_intermediate = {}
intermediate_to_new = {}
for old, new in iteritems(mapping):
if old == new:
# we can remove self-labels
continue
if old in new_labels or new in old_labels:
# try to get a new unique label
lbl = next(counter)
while lbl in new_labels or lbl in old_labels:
lbl = next(counter)
# add it to the mapping
old_to_intermediate[old] = lbl
intermediate_to_new[lbl] = new
else:
old_to_intermediate[old] = new
# don't need to add it to intermediate_to_new because it is a self-label
return old_to_intermediate, intermediate_to_new | def resolve_label_conflict(mapping, old_labels=None, new_labels=None) | Resolve a self-labeling conflict by creating an intermediate labeling.
Args:
mapping (dict):
A dict mapping the current variable labels to new ones.
old_labels (set, optional, default=None):
The keys of mapping. Can be passed in for performance reasons. These are not checked.
new_labels (set, optional, default=None):
The values of mapping. Can be passed in for performance reasons. These are not checked.
Returns:
tuple: A 2-tuple containing:
dict: A map from the keys of mapping to an intermediate labeling
dict: A map from the intermediate labeling to the values of mapping. | 3.371398 | 3.091483 | 1.090544 |
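For illustration, a minimal sketch of the two-stage relabeling on a swap conflict; the exact integer intermediate labels are an implementation detail of the counter.

    # A mapping that swaps two labels cannot be applied in one pass, because
    # relabeling 'a' -> 'b' would collide with the existing variable 'b'.
    mapping = {'a': 'b', 'b': 'a'}

    old_to_intermediate, intermediate_to_new = resolve_label_conflict(mapping)

    # Applying old_to_intermediate first and intermediate_to_new second
    # performs the swap without any collision.
    print(old_to_intermediate)  # e.g. {'a': 4, 'b': 5}
    print(intermediate_to_new)  # e.g. {4: 'b', 5: 'a'}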
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed | def fix_variables(bqm, sampling_mode=True) | Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstrained Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225 | 3.786476 | 3.529959 | 1.072668 |
if _is_sampleset_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return BinaryQuadraticModel.from_serializable(obj)
return obj | def dimod_object_hook(obj) | JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders. | 3.668188 | 4.271035 | 0.858852 |
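A minimal sketch of the intended usage with the standard json module, assuming the hook is importable from dimod.serialization.json:

    import json
    import dimod
    from dimod.serialization.json import dimod_object_hook  # assumed location

    sampleset = dimod.SampleSet.from_samples({'a': -1, 'b': +1}, dimod.SPIN,
                                             energy=-1.0)

    # encode via the object's serializable form, decode with the hook
    s = json.dumps(sampleset.to_serializable())
    decoded = json.loads(s, object_hook=dimod_object_hook)
    assert decoded == sampleset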
if isinstance(label, list):
return tuple(_decode_label(v) for v in label)
return label | def _decode_label(label) | Convert a list label into a tuple. Works recursively on nested lists. | 3.627795 | 2.178777 | 1.66506 |
if isinstance(label, tuple):
return [_encode_label(v) for v in label]
return label | def _encode_label(label) | Convert a tuple label into a list. Works recursively on nested tuples. | 3.798506 | 2.267231 | 1.675394 |
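The two helpers form an inverse pair: tuples become JSON-friendly lists on encoding and are restored on decoding. A quick sketch:

    label = ('a', (0, 1), 'b')        # a nested tuple label
    encoded = _encode_label(label)    # ['a', [0, 1], 'b'], safe for JSON
    assert _decode_label(encoded) == label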
multiplier, multiplicand, product, aux = variables
return BinaryQuadraticModel({multiplier: -.5,
multiplicand: -.5,
product: -.5,
aux: -1.},
{(multiplier, multiplicand): .5,
(multiplier, product): .5,
(multiplier, aux): 1.,
(multiplicand, product): .5,
(multiplicand, aux): 1.,
(product, aux): 1.},
2.,
Vartype.SPIN) | def _spin_product(variables) | Create a bqm with a gap of 2 that represents the product of two variables.
Note that spin-product requires an auxiliary variable.
Args:
variables (list):
multiplier, multiplicand, product, aux
Returns:
:obj:`.BinaryQuadraticModel` | 3.355827 | 2.659509 | 1.261822 |
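A brute-force sanity check, sketched with dimod.ExactSolver: every ground state of the penalty bqm encodes the spin product correctly.

    import dimod

    bqm = _spin_product(['u', 'v', 'p', 'aux'])
    sampleset = dimod.ExactSolver().sample(bqm)

    # every lowest-energy sample satisfies p == u * v
    for sample in sampleset.lowest().samples():
        assert sample['p'] == sample['u'] * sample['v']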
multiplier, multiplicand, product = variables
return BinaryQuadraticModel({multiplier: 0.0,
multiplicand: 0.0,
product: 3.0},
{(multiplier, multiplicand): 1.0,
(multiplier, product): -2.0,
(multiplicand, product): -2.0},
0.0,
Vartype.BINARY) | def _binary_product(variables) | Create a bqm with a gap of 2 that represents the product of two variables.
Args:
variables (list):
multiplier, multiplicand, product
Returns:
:obj:`.BinaryQuadraticModel` | 3.574762 | 2.952704 | 1.210674 |
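Similarly for the binary version, a sketch that enumerates all eight assignments and checks the penalty energies directly: consistent assignments (p == x*y) have energy 0, violating ones are penalized with positive energy.

    import itertools

    bqm = _binary_product(['x', 'y', 'p'])
    for x, y, p in itertools.product((0, 1), repeat=3):
        energy = bqm.energy({'x': x, 'y': y, 'p': p})
        if p == x * y:
            assert energy == 0
        else:
            assert energy >= 1  # violations cost at least 1 before scaling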
if bqm is None:
if vartype is None:
raise ValueError("one of vartype and bqm must be provided")
bqm = BinaryQuadraticModel.empty(vartype)
else:
if not isinstance(bqm, BinaryQuadraticModel):
raise TypeError('bqm must be a BinaryQuadraticModel')
if vartype is not None and vartype is not bqm.vartype:
raise ValueError("given vartype does not match the vartype of the given bqm")
bqm.info['reduction'] = {}
new_poly = {}
for term, bias in iteritems(poly):
if len(term) == 0:
bqm.add_offset(bias)
elif len(term) == 1:
v, = term
bqm.add_variable(v, bias)
else:
new_poly[term] = bias
return _reduce_degree(bqm, new_poly, vartype, strength) | def make_quadratic(poly, strength, vartype=None, bqm=None) | Create a binary quadratic model from a higher order polynomial.
Args:
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of
variables and `bias` the associated bias.
strength (float):
Strength of the reduction constraint. Insufficient strength can result in the
binary quadratic model not having the same minima as the polynomial.
vartype (:class:`.Vartype`, optional):
Vartype of the polynomial. If `bqm` is provided, vartype is not required.
bqm (:class:`.BinaryQuadraticModel`, optional):
The terms of the reduced polynomial are added to this binary quadratic model.
If not provided, a new binary quadratic model is created.
Returns:
:class:`.BinaryQuadraticModel`
Examples:
>>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}
>>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN) | 2.671674 | 2.706834 | 0.987011 |
if all(len(term) <= 2 for term in poly):
# termination criteria, we are already quadratic
bqm.add_interactions_from(poly)
return bqm
# determine which pair of variables appear most often
paircounter = Counter()
for term in poly:
if len(term) > 2:
for u, v in itertools.combinations(term, 2):
pair = frozenset((u, v))
paircounter[pair] += 1
pair, __ = paircounter.most_common(1)[0]
u, v = pair
# make a new product variable and aux variable and add constraint that u*v == p
p = '{}*{}'.format(u, v)
while p in bqm.linear:
p = '_' + p
if vartype is Vartype.BINARY:
constraint = _binary_product([u, v, p])
bqm.info['reduction'][(u, v)] = {'product': p}
else:
aux = 'aux{},{}'.format(u, v)
while aux in bqm.linear:
aux = '_' + aux
constraint = _spin_product([u, v, p, aux])
bqm.info['reduction'][(u, v)] = {'product': p, 'auxiliary': aux}
constraint.scale(scale)
bqm.update(constraint)
new_poly = {}
for interaction, bias in poly.items():
if u in interaction and v in interaction:
if len(interaction) == 2:
# in this case we are reducing a quadratic bias, so it becomes linear
# and can be removed
bqm.add_variable(p, bias)
continue
interaction = tuple(s for s in interaction if s not in pair)
interaction += (p,)
if interaction in new_poly:
new_poly[interaction] += bias
else:
new_poly[interaction] = bias
return _reduce_degree(bqm, new_poly, vartype, scale) | def _reduce_degree(bqm, poly, vartype, scale) | helper function for make_quadratic | 3.526964 | 3.466671 | 1.017392 |
msg = ("poly_energy is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energy")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energy(sample_like) | def poly_energy(sample_like, poly) | Calculates energy of a sample from a higher order polynomial.
Args:
sample (samples_like):
A raw sample. `samples_like` is an extension of NumPy's
array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias.
Returns:
float: The energy of the sample. | 6.815462 | 7.397756 | 0.921288 |
msg = ("poly_energies is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energies")
warnings.warn(msg, DeprecationWarning)
# dev note the vartype is not used in the energy calculation and this will
# be deprecated in the future
return BinaryPolynomial(poly, 'SPIN').energies(samples_like) | def poly_energies(samples_like, poly) | Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s). | 6.596929 | 8.055868 | 0.818897 |
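Since both functions are deprecated, a sketch of the recommended replacement, calling BinaryPolynomial directly:

    import dimod

    poly = dimod.BinaryPolynomial({('a',): -1, ('a', 'b', 'c'): 1}, dimod.SPIN)

    # energy of a single sample: -1*(1) + 1*(1 * -1 * 1) == -2
    energy = poly.energy({'a': 1, 'b': -1, 'c': 1})

    # energies of several samples at once
    energies = poly.energies([{'a': 1, 'b': 1, 'c': 1},
                              {'a': -1, 'b': 1, 'c': 1}])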
# step through idx values in adj to pick a random one, random.choice does not work on dicts
n = random_state.randint(len(adj))
for idx, v in enumerate(adj):
if idx == n:
break
start = v
walk = [start]
visited = {start: 0}
while True:
if len(walk) > 1:
# as long as we don't step back to the previous node, we won't repeat any edges
previous = walk[-2]
neighbors = [u for u in adj[walk[-1]] if u != previous]
else:
neighbors = list(adj[walk[-1]])
if not neighbors:
# we've walked into a dead end
return None
# get a random neighbor
u = random_state.choice(neighbors)
if u in visited:
# if we've seen this neighbor, then we have a cycle starting from it
return walk[visited[u]:]
else:
# add to walk and keep moving
walk.append(u)
visited[u] = len(visited) | def _random_cycle(adj, random_state) | Find a cycle using a random graph walk. | 4.299936 | 4.085408 | 1.052511 |
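A sketch of calling the helper on a small adjacency dict; random_state is assumed to be a numpy.random.RandomState (anything providing randint and choice works). On graphs with dead ends the helper can return None, so callers typically retry.

    import numpy as np

    # a 4-cycle: 0 - 1 - 2 - 3 - 0
    adj = {0: {1, 3}, 1: {0, 2}, 2: {1, 3}, 3: {0, 2}}

    cycle = _random_cycle(adj, np.random.RandomState(42))
    print(cycle)  # some rotation of the 4-cycle, e.g. [1, 2, 3, 0]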
if spin_reversal_variables is not None:
# this kwarg does not actually make sense for multiple SRTs. To
# get the same functionality a user should apply them by hand
# to their BQM before submitting.
import warnings
warnings.warn("'spin_reversal_variables' kwarg is deprecated and no longer functions.",
DeprecationWarning)
# make a main response
responses = []
flipped_bqm = bqm.copy()
transform = {v: False for v in bqm.variables}
for ii in range(num_spin_reversal_transforms):
# flip each variable with a 50% chance
for v in bqm:
if random() > .5:
transform[v] = not transform[v]
flipped_bqm.flip_variable(v)
flipped_response = self.child.sample(flipped_bqm, **kwargs)
tf_idxs = [flipped_response.variables.index(v)
for v, flip in transform.items() if flip]
if bqm.vartype is Vartype.SPIN:
flipped_response.record.sample[:, tf_idxs] = -1 * flipped_response.record.sample[:, tf_idxs]
else:
flipped_response.record.sample[:, tf_idxs] = 1 - flipped_response.record.sample[:, tf_idxs]
responses.append(flipped_response)
return concatenate(responses) | def sample(self, bqm, num_spin_reversal_transforms=2, spin_reversal_variables=None, **kwargs) | Sample from the binary quadratic model.
Args:
bqm (:obj:`~dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_spin_reversal_transforms (integer, optional, default=2):
Number of spin reversal transform runs.
spin_reversal_variables (list/dict, optional):
Deprecated and no longer functional.
Returns:
:obj:`.SampleSet`
Examples:
This example runs 100 spin reversals applied to one variable of a QUBO problem.
>>> import dimod
...
>>> base_sampler = dimod.ExactSolver()
>>> composed_sampler = dimod.SpinReversalTransformComposite(base_sampler)
>>> Q = {('a', 'a'): -1, ('b', 'b'): -1, ('a', 'b'): 2}
>>> response = composed_sampler.sample_qubo(Q,
... num_spin_reversal_transforms=100,
... spin_reversal_variables={'a'})
>>> len(response)
400
>>> print(next(response.data())) # doctest: +SKIP
Sample(sample={'a': 0, 'b': 1}, energy=-1.0) | 3.53583 | 3.602665 | 0.981448 |
self.items_length += len(header)
if _left:
self.deque.appendleft((header, f))
else:
self.deque.append((header, f)) | def append(self, header, f, _left=False) | Add a column to the table.
Args:
header (str):
Column header
f (function(datum)->str):
Makes the row string from the datum. The string returned by f should
have the same width as the header.
width = len(str(num_rows - 1))
def f(datum):
return str(datum.idx).ljust(width)
header = ' '*width
self.append(header, f) | def append_index(self, num_rows) | Add an index column.
Left justified, width is determined by the space needed to print the
largest index. | 8.429655 | 7.398877 | 1.139315 |
vstr = str(v).rjust(2) # pad the variable label, which is typically a single character
length = len(vstr)
if vartype is dimod.SPIN:
def f(datum):
return _spinstr(datum.sample[v], rjust=length)
else:
def f(datum):
return _binarystr(datum.sample[v], rjust=length)
self.append(vstr, f, _left=_left) | def append_sample(self, v, vartype, _left=False) | Add a sample column | 5.563852 | 5.624817 | 0.989161 |
if np.issubdtype(vector.dtype, np.integer):
# determine the length we need
largest = str(max(vector.max(), vector.min(), key=abs))
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return str(getattr(datum, name)).rjust(length)
elif np.issubdtype(vector.dtype, np.floating):
largest = np.format_float_positional(max(vector.max(), vector.min(), key=abs),
precision=6, trim='0')
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
return np.format_float_positional(getattr(datum, name),
precision=6, trim='0',
).rjust(length)
else:
length = 7
if len(name) > length:
header = name[:length-1] + '.'
else:
header = name.rjust(length)
def f(datum):
r = repr(getattr(datum, name))
if len(r) > length:
r = r[:length-3] + '...'
return r.rjust(length)
self.append(header, f, _left=_left) | def append_vector(self, name, vector, _left=False) | Add a data vectors column. | 2.21829 | 2.170679 | 1.021934 |
sio = StringIO()
self.fprint(obj, stream=sio, **kwargs)
return sio.getvalue() | def format(self, obj, **kwargs) | Return the formatted representation of the object as a string. | 4.451285 | 3.827863 | 1.162864 |
if stream is None:
stream = sys.stdout
options = self.options
options.update(kwargs)
if isinstance(obj, dimod.SampleSet):
self._print_sampleset(obj, stream, **options)
return
raise TypeError("cannot format type {}".format(type(obj))) | def fprint(self, obj, stream=None, **kwargs) | Prints the formatted representation of the object on stream | 3.67695 | 3.532861 | 1.040785 |
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError("samplesets must contain at least one SampleSet")
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
# dev note: I was able to get ~2x performance boost when trying to
# implement the same functionality here by hand (I didn't know that
# this function existed then). However I think it is better to use
# numpy's function and rely on their testing etc. If however this becomes
# a performance bottleneck in the future, it might be worth changing.
record = recfunctions.stack_arrays(records, defaults=defaults,
asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype) | def concatenate(samplesets, defaults=None) | Combine SampleSets.
Args:
samplesets (iterable[:obj:`.SampleSet`):
An iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8) | 7.200865 | 7.296465 | 0.986898 |
if aggregate_samples:
return cls.from_samples(samples_like, vartype, energy,
info=info, num_occurrences=num_occurrences,
aggregate_samples=False,
**vectors).aggregate()
# get the samples, variable labels
samples, variables = as_samples(samples_like)
if sort_labels and variables: # need something to sort
try:
reindex, new_variables = zip(*sorted(enumerate(variables),
key=lambda tup: tup[1]))
except TypeError:
# unlike types are not sortable in python3, so we do nothing
pass
else:
if new_variables != variables:
# avoid the copy if possible
samples = samples[:, reindex]
variables = new_variables
num_samples, num_variables = samples.shape
energy = np.asarray(energy)
# num_occurrences
if num_occurrences is None:
num_occurrences = np.ones(num_samples, dtype=int)
else:
num_occurrences = np.asarray(num_occurrences)
# now construct the record
datatypes = [('sample', samples.dtype, (num_variables,)),
('energy', energy.dtype),
('num_occurrences', num_occurrences.dtype)]
for key, vector in vectors.items():
vectors[key] = vector = np.asarray(vector)
datatypes.append((key, vector.dtype, vector.shape[1:]))
record = np.rec.array(np.zeros(num_samples, dtype=datatypes))
record['sample'] = samples
record['energy'] = energy
record['num_occurrences'] = num_occurrences
for key, vector in vectors.items():
record[key] = vector
if info is None:
info = {}
return cls(record, variables, info, vartype) | def from_samples(cls, samples_like, vartype, energy, info=None,
num_occurrences=None, aggregate_samples=False,
sort_labels=True, **vectors) | Build a :class:`SampleSet` from raw samples.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like_.
See :func:`.as_samples`.
vartype (:class:`.Vartype`/str/set):
Variable type for the :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
energy (array_like):
Vector of energies.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If true, returned :obj:`.SampleSet` will have all unique samples.
sort_labels (bool, optional, default=True):
If true, :attr:`.SampleSet.variables` will be in sorted-order.
Note that mixed types are not sortable in which case the given
order will be maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
This example creates a SampleSet out of a samples_like object (a dict).
>>> import dimod
>>> import numpy as np
...
>>> dimod.SampleSet.from_samples(dimod.as_samples({'a': 0, 'b': 1, 'c': 0}),
... 'BINARY', 0) # doctest: +SKIP
SampleSet(rec.array([([0, 1, 0], 0, 1)],
... dtype=[('sample', 'i1', (3,)), ('energy', '<i4'), ('num_occurrences', '<i4')]),
... ['a', 'b', 'c'], {}, 'BINARY')
.. _array_like: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays | 2.632097 | 2.652561 | 0.992285 |
# more performant to do this once, here rather than again in bqm.energies
# and in cls.from_samples
samples_like = as_samples(samples_like)
energies = bqm.energies(samples_like)
return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs) | def from_samples_bqm(cls, samples_like, bqm, **kwargs) | Build a SampleSet from raw samples using a BinaryQuadraticModel to get energies and vartype.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model. It is used to calculate the energies
and set the vartype.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If true, returned :obj:`.SampleSet` will have all unique samples.
sort_labels (bool, optional, default=True):
If true, :attr:`.SampleSet.variables` will be in sorted-order.
Note that mixed types are not sortable in which case the given
order will be maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> samples = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm) | 4.757988 | 7.889197 | 0.603102 |
obj = cls.__new__(cls)
obj._future = future
if result_hook is None:
def result_hook(future):
return future.result()
elif not callable(result_hook):
raise TypeError("expected result_hook to be callable")
obj._result_hook = result_hook
return obj | def from_future(cls, future, result_hook=None) | Construct a :class:`SampleSet` referencing the result of a future computation.
Args:
future (object):
Object that contains or will contain the information needed to construct a
:class:`SampleSet`. If `future` has a :meth:`~concurrent.futures.Future.done` method,
this determines the value returned by :meth:`.SampleSet.done`.
result_hook (callable, optional):
A function that is called to resolve the future. Must accept the future and return
a :obj:`.SampleSet`. If not provided, set to
.. code-block:: python
def result_hook(future):
return future.result()
Returns:
:obj:`.SampleSet`
Notes:
The future is resolved on the first read of any of the :class:`SampleSet` properties.
Examples:
Run a dimod sampler on a single thread and load the returned future into :class:`SampleSet`.
>>> import dimod
>>> from concurrent.futures import ThreadPoolExecutor
...
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> with ThreadPoolExecutor(max_workers=1) as executor:
... future = executor.submit(dimod.ExactSolver().sample, bqm)
... sampleset = dimod.SampleSet.from_future(future)
>>> sampleset.record
rec.array([([-1, -1], -1., 1), ([ 1, -1], 1., 1), ([ 1, 1], -1., 1),
([-1, 1], 1., 1)],
dtype=[('sample', 'i1', (2,)), ('energy', '<f8'), ('num_occurrences', '<i8')]) | 2.242282 | 3.638209 | 0.616315 |
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'} | def data_vectors(self) | The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1]) | 7.874732 | 6.88102 | 1.144414 |
try:
return next(self.data(sorted_by='energy', name='Sample'))
except StopIteration:
raise ValueError('{} is empty'.format(self.__class__.__name__)) | def first(self) | Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1) | 8.964645 | 7.071824 | 1.267657 |
return (not hasattr(self, '_future')) or (not hasattr(self._future, 'done')) or self._future.done() | def done(self) | Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> import dimod
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.record.sample
array([[-1],
[ 1]], dtype=int8) | 5.670046 | 5.792067 | 0.978933 |
if n is not None:
return self.samples(sorted_by=sorted_by)[:n]
if sorted_by is None:
samples = self.record.sample
else:
order = np.argsort(self.record[sorted_by])
samples = self.record.sample[order]
return SamplesArray(samples, self.variables) | def samples(self, n=None, sorted_by='energy') | Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples():
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8) | 3.173551 | 3.121147 | 1.01679 |
record = self.record
if fields is None:
# make sure that sample, energy is first
fields = self._REQUIRED_FIELDS + [field for field in record.dtype.fields
if field not in self._REQUIRED_FIELDS]
if index:
fields.append('idx')
if sorted_by is None:
order = np.arange(len(self))
elif index:
# we want a stable sort but it can be slower
order = np.argsort(record[sorted_by], kind='stable')
else:
order = np.argsort(record[sorted_by])
if reverse:
order = np.flip(order)
if name is None:
# yielding a tuple
def _pack(values):
return tuple(values)
else:
# yielding a named tuple
SampleTuple = namedtuple(name, fields)
def _pack(values):
return SampleTuple(*values)
def _values(idx):
for field in fields:
if field == 'sample':
sample = SampleView(record.sample[idx, :], self.variables)
if sample_dict_cast:
sample = dict(sample)
yield sample
elif field == 'idx':
yield idx
else:
yield record[field][idx]
for idx in order:
yield _pack(_values(idx)) | def data(self, fields=None, sorted_by='energy', name='Sample', reverse=False,
sample_dict_cast=True, index=False) | Iterate over the data in the :class:`SampleSet`.
Args:
fields (list, optional, default=None):
If specified, only these fields are included in the yielded tuples.
The special field name 'sample' can be used to view the samples.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None, the samples are yielded
in record order.
name (str/None, optional, default='Sample'):
Name of the yielded namedtuples or None to yield regular tuples.
reverse (bool, optional, default=False):
If True, yield in reverse order.
sample_dict_cast (bool, optional, default=False):
If True, samples are returned as dicts rather than
:class:`.SampleView`. Note that this can lead to very heavy memory
usage.
index (bool, optional, default=False):
If True, `datum.idx` gives the corresponding index of the
:attr:`.SampleSet.record`.
Yields:
namedtuple/tuple: The data in the :class:`SampleSet`, in the order specified by the input
`fields`.
Examples:
>>> import dimod
...
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> for datum in sampleset.data(fields=['sample', 'energy']): # doctest: +SKIP
... print(datum)
Sample(sample={'a': -1, 'b': -1}, energy=-1.5)
Sample(sample={'a': 1, 'b': -1}, energy=-0.5)
Sample(sample={'a': 1, 'b': 1}, energy=-0.5)
Sample(sample={'a': -1, 'b': 1}, energy=2.5)
>>> for energy, in sampleset.data(fields=['energy'], sorted_by='energy'):
... print(energy)
...
-1.5
-0.5
-0.5
2.5
>>> print(next(sampleset.data(fields=['energy'], name='ExactSolverSample')))
ExactSolverSample(energy=-1.5) | 3.119273 | 3.075504 | 1.014232 |
return self.__class__(self.record.copy(),
self.variables, # a new one is made in all cases
self.info.copy(),
self.vartype) | def copy(self) | Create a shallow copy. | 13.650075 | 11.927248 | 1.144445 |
if not inplace:
return self.copy().change_vartype(vartype, energy_offset, inplace=True)
if energy_offset:
self.record.energy = self.record.energy + energy_offset
if vartype is self.vartype:
return self # we're done!
if vartype is Vartype.SPIN and self.vartype is Vartype.BINARY:
self.record.sample = 2 * self.record.sample - 1
self._vartype = vartype
elif vartype is Vartype.BINARY and self.vartype is Vartype.SPIN:
self.record.sample = (self.record.sample + 1) // 2
self._vartype = vartype
else:
raise ValueError("Cannot convert from {} to {}".format(self.vartype, vartype))
return self | def change_vartype(self, vartype, energy_offset=0.0, inplace=True) | Return the :class:`SampleSet` with the given vartype.
Args:
vartype (:class:`.Vartype`/str/set):
Variable type to use for the new :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
energy_offset (number, optional, defaul=0.0):
Constant value applied to the 'energy' field of :attr:`SampleSet.record`.
inplace (bool, optional, default=True):
If True, the instantiated :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:obj:`.SampleSet`: SampleSet with changed vartype. If `inplace` is True, returns itself.
Examples:
This example creates a binary copy of a spin-valued :class:`SampleSet`.
>>> import dimod
...
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> sampleset_binary = sampleset.change_vartype(dimod.BINARY, energy_offset=1.0, inplace=False)
>>> sampleset_binary.vartype is dimod.BINARY
True
>>> for datum in sampleset_binary.data(fields=['sample', 'energy', 'num_occurrences']): # doctest: +SKIP
... print(datum)
Sample(sample={'a': 0, 'b': 0}, energy=-0.5, num_occurrences=1)
Sample(sample={'a': 1, 'b': 0}, energy=0.5, num_occurrences=1)
Sample(sample={'a': 1, 'b': 1}, energy=0.5, num_occurrences=1)
Sample(sample={'a': 0, 'b': 1}, energy=3.5, num_occurrences=1) | 2.061487 | 2.104606 | 0.979512 |
if not inplace:
return self.copy().relabel_variables(mapping, inplace=True)
self.variables.relabel(mapping)
return self | def relabel_variables(self, mapping, inplace=True) | Relabel the variables of a :class:`SampleSet` according to the specified mapping.
Args:
mapping (dict):
Mapping from current variable labels to new, as a dict. If incomplete mapping is
specified, unmapped variables keep their current labels.
inplace (bool, optional, default=True):
If True, the current :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:class:`.SampleSet`: SampleSet with relabeled variables. If `inplace` is True, returns
itself.
Examples:
This example creates a relabeled copy of a :class:`SampleSet`.
>>> import dimod
...
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> new_sampleset = sampleset.relabel_variables({'a': 0, 'b': 1}, inplace=False)
>>> new_sampleset.variable_labels # doctest: +SKIP
[0, 1]
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
new_idx = order[new_idx]
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | def aggregate(self) | Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are. | 5.179663 | 4.598803 | 1.126307 |
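A brief sketch: duplicate rows collapse into one, with their num_occurrences summed.

    import dimod

    sampleset = dimod.SampleSet.from_samples(
        [{'a': -1}, {'a': -1}, {'a': +1}], dimod.SPIN, energy=[-1, -1, 1])

    agg = sampleset.aggregate()
    print(agg.record.num_occurrences)  # [2 1]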
samples, labels = as_samples(samples_like)
num_samples = len(self)
# we don't handle multiple values
if samples.shape[0] == num_samples:
# we don't need to do anything, it's already the correct shape
pass
elif samples.shape[0] == 1 and num_samples:
samples = np.repeat(samples, num_samples, axis=0)
else:
msg = ("mismatched shape. The samples to append should either be "
"a single sample or should match the length of the sample "
"set. Empty sample sets cannot be appended to.")
raise ValueError(msg)
# append requires the new variables to be unique
variables = self.variables
if any(v in variables for v in labels):
msg = "Appended samples cannot contain variables in sample set"
raise ValueError(msg)
new_variables = list(variables) + labels
new_samples = np.hstack((self.record.sample, samples))
return type(self).from_samples((new_samples, new_variables),
self.vartype,
info=copy.deepcopy(self.info), # make a copy
sort_labels=sort_labels,
**self.data_vectors) | def append_variables(self, samples_like, sort_labels=True) | Create a new sampleset with the given variables with values added.
Not defined for empty sample sets. Note that when `samples_like` is
a :obj:`.SampleSet`, the data vectors and info are ignored.
Args:
samples_like:
Samples to add to the sample set. Should either be a single
sample or should match the length of the sample set. See
:func:`.as_samples` for what is allowed to be `samples_like`.
sort_labels (bool, optional, default=True):
If true, returned :attr:`.SampleSet.variables` will be in
sorted-order. Note that mixed types are not sortable in which
case the given order will be maintained.
Returns:
:obj:`.SampleSet`: A new sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = sampleset.append_variables({'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sampleset to the original above. Note
that the energies do not change.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = sampleset.append_variables(another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables] | 4.405931 | 3.998599 | 1.101869 |
if len(self) == 0:
# empty so all are lowest
return self.copy()
record = self.record
# want all the rows within tolerance of the minimal energy
close = np.isclose(record.energy,
np.min(record.energy),
rtol=rtol, atol=atol)
record = record[close]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | def lowest(self, rtol=1.e-5, atol=1.e-8) | Return a sample set containing the lowest-energy samples.
A sample is included if its energy is within tolerance of the lowest
energy in the sample set. The following equation is used to determine
if two values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
See :func:`numpy.isclose` for additional details and caveats.
Args:
rtol (float, optional, default=1.e-5):
The relative tolerance (see above).
atol (float, optional, default=1.e-8):
The absolute tolerance (see above).
Returns:
:obj:`.SampleSet`: A new sample set containing the lowest energy
samples as delimited by configured tolerances from the lowest energy
sample in the current sample set.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': .001},
... {('a', 'b'): -1})
>>> print(sampleset.lowest())
a b energy num_oc.
0 -1 -1 -1.001 1
['SPIN', 1 rows, 1 samples, 2 variables]
>>> print(sampleset.lowest(atol=.1))
a b energy num_oc.
0 -1 -1 -1.001 1
1 +1 +1 -0.999 1
['SPIN', 2 rows, 2 samples, 2 variables]
Note:
"Lowest energy" is the lowest energy in the sample set. This is not
always the "ground energy" which is the lowest energy possible
for a binary quadratic model. | 5.807034 | 4.922888 | 1.179599 |
schema_version = "2.0.0"
record = {name: array2bytes(vector)
for name, vector in self.data_vectors.items()}
record['sample'] = array2bytes(np.packbits(self.record.sample > 0))
if not use_bytes:
for name in record:
record[name] = base64.b64encode(record[name]).decode("UTF-8")
return {"basetype": "SampleSet",
"type": type(self).__name__,
"record": record,
"sample_dtype": str(self.record.sample.dtype), # need this to unpack
"sample_shape": self.record.sample.shape, # need this to unpack
"variable_type": self.vartype.name,
"info": self.info,
"version": {"dimod": __version__,
"sampleset_schema": schema_version},
"variable_labels": list(self.variables),
"use_bytes": bool(use_bytes)} | def to_serializable(self, use_bytes=False, bytes_type=bytes) | Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import dimod
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable` | 4.341419 | 4.145173 | 1.047343 |
schema_version = "2.0.0"
if obj["version"]['sampleset_schema'] == "1.0.0":
import warnings
msg = ("sampleset is serialized with a deprecated format and will no longer "
"work in dimod 0.9.0.")
warnings.warn(msg)
from dimod.serialization.json import sampleset_decode_hook
return sampleset_decode_hook(obj, cls=cls)
elif obj["version"]['sampleset_schema'] != schema_version:
raise ValueError("cannot load legacy serialization formats")
vartype = Vartype[obj['variable_type']]
if obj['use_bytes']:
record = obj['record']
else:
record = {name: base64.b64decode(vector) for name, vector in obj['record'].items()}
vectors = {name: bytes2array(vector) for name, vector in record.items()}
# get the samples and unpack them
shape = obj['sample_shape']
dtype = obj['sample_dtype']
sample = np.unpackbits(vectors.pop('sample'))[:shape[0]*shape[1]].astype(dtype).reshape(shape)
# convert to the correct dtype
if vartype is Vartype.SPIN:
sample = np.asarray(2*sample-1, dtype=dtype)
variables = [tuple(v) if isinstance(v, list) else v
for v in obj["variable_labels"]]
info = obj['info']
return cls.from_samples((sample, variables), vartype, info=info,
**vectors) | def from_serializable(cls, obj) | Deserialize a :class:`SampleSet`.
Args:
obj (dict):
A :class:`SampleSet` serialized by :meth:`~.SampleSet.to_serializable`.
Returns:
:obj:`.SampleSet`
Examples:
This example encodes and decodes using JSON.
>>> import dimod
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
>>> new_samples = dimod.SampleSet.from_serializable(json.loads(s))
See also:
:meth:`~.SampleSet.to_serializable` | 4.221476 | 4.331661 | 0.974563 |
import pandas as pd
if sample_column:
df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
else:
# work directly with the record, it's much faster
df = pd.DataFrame(self.record.sample, columns=self.variables)
for field in sorted(self.record.dtype.fields): # sort for consistency
if field == 'sample':
continue
df.loc[:, field] = self.record[field]
return df | def to_pandas_dataframe(self, sample_column=False) | Convert a SampleSet to a Pandas DataFrame
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1 | 6.493738 | 6.159123 | 1.054328 |
record = response.record
label_dict = response.variables.index
if len(bqm.info['reduction']) == 0:
return np.array([1] * len(record.sample))
penalty_vector = np.prod([record.sample[:, label_dict[qi]] *
record.sample[:, label_dict[qj]]
== record.sample[:,
label_dict[valdict['product']]]
for (qi, qj), valdict in
bqm.info['reduction'].items()], axis=0)
return penalty_vector | def penalty_satisfaction(response, bqm) | Creates a penalty satisfaction list
Given a sample set and a bqm object, create a binary array indicating,
for each sample in the sample set, whether the penalties introduced
during degree reduction are satisfied.
Args:
response (:obj:`.SampleSet`): Samples corresponding to provided bqm
bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains
its reduction info.
Returns:
:obj:`numpy.ndarray`: a binary array of penalty satisfaction information | 5.501566 | 5.2005 | 1.057892 |
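A sketch of typical use, assuming the bqm came from make_quadratic so that bqm.info['reduction'] is populated:

    import dimod

    poly = {('a', 'b', 'c'): -1}
    bqm = dimod.make_quadratic(poly, 5.0, dimod.BINARY)

    response = dimod.ExactSolver().sample(bqm)
    mask = penalty_satisfaction(response, bqm)
    # mask[i] is 1 when sample i satisfies the product constraint(s), else 0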
record = response.record
penalty_vector = penalty_satisfaction(response, bqm)
original_variables = bqm.variables
if discard_unsatisfied:
samples_to_keep = list(map(bool, list(penalty_vector)))
penalty_vector = np.array([True] * np.sum(samples_to_keep))
else:
samples_to_keep = list(map(bool, [1] * len(record.sample)))
samples = record.sample[samples_to_keep]
energy_vector = poly.energies((samples, response.variables))
if not keep_penalty_variables:
original_variables = poly.variables
idxs = [response.variables.index[v] for v in original_variables]
samples = np.asarray(samples[:, idxs])
num_samples, num_variables = np.shape(samples)
datatypes = [('sample', np.dtype(np.int8), (num_variables,)),
('energy', energy_vector.dtype),
('penalty_satisfaction',
penalty_vector.dtype)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names if
name not in {'sample',
'energy'})
data = np.rec.array(np.empty(num_samples, dtype=datatypes))
data.sample = samples
data.energy = energy_vector
for name in record.dtype.names:
if name not in {'sample', 'energy'}:
data[name] = record[name][samples_to_keep]
data['penalty_satisfaction'] = penalty_vector
response.info['reduction'] = bqm.info['reduction']
if penalty_strength is not None:
response.info['penalty_strength'] = penalty_strength
return SampleSet(data, original_variables, response.info,
response.vartype) | def polymorph_response(response, poly, bqm,
penalty_strength=None,
keep_penalty_variables=True,
discard_unsatisfied=False) | Transforms the sampleset for the higher order problem.
Given a response to a penalized HUBO, this function creates a new sample set
that takes the penalty information into account and calculates the
energies of the samples for the higher-order problem.
Args:
response (:obj:`.SampleSet`): response for a penalized hubo.
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
reduced problem.
penalty_strength (float, optional): default is None. If provided,
it is added to the info field of the returned sample set.
keep_penalty_variables (bool, optional): default is True. If False,
the variables used for penalties are removed from the samples.
discard_unsatisfied (bool, optional): default is False. If True,
samples that do not satisfy the penalty conditions are discarded.
Returns:
(:obj:`.SampleSet`): A sample set that carries additional penalty
information. The energies of the samples are calculated for the HUBO,
ignoring the penalty variables.
bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
response = self.child.sample(bqm, **parameters)
return polymorph_response(response, poly, bqm,
penalty_strength=penalty_strength,
keep_penalty_variables=keep_penalty_variables,
discard_unsatisfied=discard_unsatisfied) | def sample_poly(self, poly, penalty_strength=1.0,
keep_penalty_variables=False,
discard_unsatisfied=False, **parameters) | Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minima as the polynomial.
keep_penalty_variables (bool, optional): default is False. If False,
the variables used for penalties are removed from the samples.
discard_unsatisfied (bool, optional): default is False. If True,
samples that do not satisfy the penalty conditions are discarded.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet` | 3.095687 | 4.10334 | 0.754431 |
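A sketch of end-to-end use, assuming the surrounding class is dimod's HigherOrderComposite:

    import dimod

    sampler = dimod.HigherOrderComposite(dimod.ExactSolver())
    poly = dimod.BinaryPolynomial({('a',): -1, ('a', 'b', 'c'): 1}, dimod.SPIN)

    sampleset = sampler.sample_poly(poly, penalty_strength=2.0,
                                    discard_unsatisfied=True)
    print(sampleset.first.energy)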
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {frozenset(term) for term in ignored_terms}
# scale and normalize happen in-place so we need to make a copy
original, poly = poly, poly.copy()
if scalar is not None:
poly.scale(scalar, ignored_terms=ignored_terms)
else:
poly.normalize(bias_range=bias_range, poly_range=poly_range,
ignored_terms=ignored_terms)
# we need to know how much we scaled by, which we can do by looking
# at the biases
try:
v = next(v for v, bias in original.items()
if bias and v not in ignored_terms)
except StopIteration:
# nothing to scale
scalar = 1
else:
scalar = poly[v] / original[v]
sampleset = self.child.sample_poly(poly, **parameters)
if ignored_terms:
# we need to recalculate the energy
sampleset.record.energy = original.energies((sampleset.record.sample,
sampleset.variables))
else:
sampleset.record.energy /= scalar
return sampleset | def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None,
ignored_terms=None, **parameters) | Scale and sample from the given binary polynomial.
If `scalar` is not given, the problem is scaled based on the bias and
polynomial ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize all the biases, or, if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler. | 3.665095 | 3.452867 | 1.061464 |
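A sketch, assuming the surrounding class is dimod's PolyScaleComposite wrapping a polynomial sampler such as ExactPolySolver:

    import dimod

    sampler = dimod.PolyScaleComposite(dimod.ExactPolySolver())
    poly = dimod.BinaryPolynomial({('a',): 4, ('a', 'b', 'c'): -8}, dimod.SPIN)

    # biases are normalized into bias_range before sampling; the returned
    # energies are rescaled back to match the original polynomial
    sampleset = sampler.sample_poly(poly, bias_range=1)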
tkw = self._truncate_kwargs
if self._aggregate:
return self.child.sample_poly(poly, **kwargs).aggregate().truncate(**tkw)
else:
return self.child.sample_poly(poly, **kwargs).truncate(**tkw) | def sample_poly(self, poly, **kwargs) | Sample from the binary polynomial and truncate output.
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet` | 5.012397 | 5.5065 | 0.910269 |
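A sketch, assuming the surrounding class is dimod's PolyTruncateComposite, whose constructor is assumed to take the child sampler and the truncation arguments:

    import dimod

    # keep only the 3 lowest-energy rows, aggregating duplicates first
    sampler = dimod.PolyTruncateComposite(dimod.ExactPolySolver(), 3,
                                          aggregate=True)
    poly = dimod.BinaryPolynomial({('a', 'b'): -1, ('b', 'c'): 1}, dimod.SPIN)

    sampleset = sampler.sample_poly(poly)
    assert len(sampleset) <= 3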
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels | def _samples_dicts_to_array(samples_dicts, labels) | Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels if they are None.
if not len(sample):
# if samples are empty
sample = np.zeros((0, 0), dtype=np.int8)
else:
sample = np.asarray(sample, dtype=np.int8)
if sample.ndim < 2:
sample = np.expand_dims(sample, 0)
num_samples, num_variables = sample.shape
if 'num_occurrences' not in vectors:
vectors['num_occurrences'] = [1] * num_samples
datavectors = {}
datatypes = [('sample', np.dtype(np.int8), (num_variables,))]
for kwarg, vector in vectors.items():
dtype = float if kwarg == 'energy' else None
datavectors[kwarg] = vector = np.asarray(vector, dtype)
if len(vector.shape) < 1 or vector.shape[0] != num_samples:
msg = ('{} and sample have a mismatched shape {}, {}. They must have the same size '
'in the first axis.').format(kwarg, vector.shape, sample.shape)
raise ValueError(msg)
datatypes.append((kwarg, vector.dtype, vector.shape[1:]))
if 'energy' not in datavectors:
# consistent error with the one thrown in python3
raise TypeError('data_struct_array() needs keyword-only argument energy')
elif datavectors['energy'].shape != (num_samples,):
raise ValueError('energy should be a vector of length {}'.format(num_samples))
data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
data['sample'] = sample
for kwarg, vector in datavectors.items():
data[kwarg] = vector
return data | def data_struct_array(sample, **vectors): # data_struct_array(sample, *, energy, **vectors) | Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs] | 2.977496 | 2.913404 | 1.021999 |
# there is no np.is_array_like so we use a try-except block
try:
# trying to cast it to int8 rules out list of dictionaries. If we didn't try to cast
# then it would just create a vector of np.object
samples = np.asarray(samples_like, dtype=np.int8)
except TypeError:
# if labels are None, they are set here
samples, variable_labels = _samples_dicts_to_array(samples_like, variable_labels)
assert samples.dtype == np.int8, 'sanity check'
record = data_struct_array(samples, **vectors)
# if labels are still None, set them here. We could do this in an else in the try-except
# block, but the samples-array might not have the correct shape
if variable_labels is None:
__, num_variables = record.sample.shape
variable_labels = list(range(num_variables))
return cls(record, variable_labels, info, vartype) | def from_samples(cls, samples_like, vectors, info, vartype, variable_labels=None) | Build a response from samples.
Args:
samples_like:
A collection of samples. 'samples_like' is an extension of NumPy's array_like
to include an iterable of sample dictionaries (as returned by
:meth:`.Response.samples`).
data_vectors (dict[field, :obj:`numpy.array`/list]):
Additional per-sample data as a dict of vectors. Each vector is the
same length as `samples_matrix`. The key 'energy' and it's vector is required.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_labels (list, optional):
Determines the variable labels if samples_like is not an iterable of dictionaries.
If samples_like is not an iterable of dictionaries and if variable_labels is not
provided then index labels are used.
Returns:
:obj:`.Response`
Examples:
From dicts
>>> import dimod
...
>>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}]
>>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN)
From an array
>>> import dimod
>>> import numpy as np
...
>>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables
>>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {},
... dimod.SPIN, variable_labels=['a', 'b', 'c']) | 6.841155 | 7.531393 | 0.908352 |
'''
.. todo:: add documentation of this method
``config``: doxygen input we're looking for
``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present
'''
re_template = r"\s*{config}\s*=.*".format(config=config)
found = re.search(re_template, configs.exhaleDoxygenStdin)
if required:
return found is not None
else:
return found is None | def _valid_config(config, required) | .. todo:: add documentation of this method
``config``: doxygen input we're looking for
``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present | 10.233698 | 3.802764 | 2.691121 |
'''
This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output.
'''
# Quick sanity check to make sure the bare minimum have been set in the configs
err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
if configs.containmentFolder is None:
raise RuntimeError(err_msg.format(config="containmentFolder"))
if configs.rootFileName is None:
raise RuntimeError(err_msg.format(config="rootFileName"))
if configs.rootFileTitle is None:
raise RuntimeError(err_msg.format(config="rootFileTitle"))
if configs.doxygenStripFromPath is None:
raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
# From here on, we assume that everything else has been checked / configured.
try:
textRoot = ExhaleRoot()
except:
utils.fancyError("Unable to create an `ExhaleRoot` object:")
try:
sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
start = utils.get_time()
textRoot.parse()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while parsing:")
try:
sys.stdout.write("{0}\n".format(
utils.info("Exhale: generating reStructuredText documents.")
))
start = utils.get_time()
textRoot.generateFullAPI()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while generating:")
# << verboseBuild
# toConsole only prints if verbose mode is enabled
textRoot.toConsole()
# allow access to the result after-the-fact
configs._the_app.exhale_root = textRoot | def explode() | This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output. | 3.99386 | 2.263959 | 1.764105 |
if self.kind == "function":
# TODO: breathe bug with templates and overloads, don't know what to do...
return "{name}({parameters})".format(
name=self.name,
parameters=", ".join(self.parameters)
)
return self.name | def breathe_identifier(self) | The unique identifier for breathe directives.
.. note::
This method is currently assumed to only be called for nodes that are
in :data:`exhale.utils.LEAF_LIKE_KINDS` (see also
:func:`exhale.graph.ExhaleRoot.generateSingleNodeRST` where it is used).
**Return**
:class:`python:str`
Usually, this will just be ``self.name``. However, for functions in
particular the signature must be included to distinguish overloads. | 6.325151 | 5.751553 | 1.099729 |
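To see why the parameter list matters here, consider two overloads sharing a name; only the parenthesized parameter types distinguish them for Breathe. A minimal sketch (the ``Node`` class below is a hypothetical stand-in, not exhale's ``ExhaleNode``):

class Node:
    # Minimal hypothetical stand-in exposing only the fields used by breathe_identifier.
    def __init__(self, kind, name, parameters):
        self.kind = kind
        self.name = name
        self.parameters = parameters

    def breathe_identifier(self):
        if self.kind == "function":
            return "{name}({parameters})".format(
                name=self.name, parameters=", ".join(self.parameters))
        return self.name

print(Node("function", "clamp", ["int", "int"]).breathe_identifier())        # clamp(int, int)
print(Node("function", "clamp", ["double", "double"]).breathe_identifier())  # clamp(double, double)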
if self.kind == "function":
return "{template}{return_type} {name}({parameters})".format(
template="template <{0}> ".format(", ".join(self.template)) if self.template else "",
return_type=self.return_type,
name=self.name,
parameters=", ".join(self.parameters)
)
raise RuntimeError(
"full_signature may only be called for a 'function', but {name} is a '{kind}' node.".format(
name=self.name, kind=self.kind
)
) | def full_signature(self) | The full signature of a ``"function"`` node.
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != "function"``. | 2.86436 | 2.404504 | 1.191248 |
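For a templated overload the pieces assemble as follows; a standalone sketch with hypothetical values, mirroring the format string above:

template = ["typename T"]
return_type = "T"
name = "clamp"
parameters = ["T", "T", "T"]

signature = "{template}{return_type} {name}({parameters})".format(
    template="template <{0}> ".format(", ".join(template)) if template else "",
    return_type=return_type,
    name=name,
    parameters=", ".join(parameters))
print(signature)  # template <typename T> T clamp(T, T, T)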
'''
.. todo::
document this, create another method for creating this without the need for
generating links, to be used in making the node titles and labels
'''
if not self.template_params:
return None
else:
param_stream = StringIO()
for param_t, decl_n, def_n in self.template_params:
refid, typeid = param_t
# Say you wanted a custom link text 'custom', and somewhere
# else you had an internal link '.. _some_link:'. Then you do
# `custom <some_link_>`_
# LOL. RST is confusing
if refid:
# Easy case: the refid is something Exhale is explicitly documenting
if refid in nodeByRefid:
link = "{0}_".format(nodeByRefid[refid].link_name)
else:
# It's going to get generated by Breathe down the line, we need
# to reference the page the directive will appear on.
parent_refid = ""
for key in nodeByRefid:
if len(key) > len(parent_refid) and key in refid:
parent_refid = key
parent = nodeByRefid[parent_refid]
parent_page = os.path.basename(parent.file_name.replace(".rst", ".html"))
link = "{page}#{refid}".format(page=parent_page, refid=refid)
param_stream.write(
"#. `{typeid} <{link}>`_".format(
typeid=typeid,
# Not necessarily an ExhaleNode link, should be a link by
# the time Breathe is finished?
link=link
)
)
close_please = False
else:
param_stream.write("#. ``{typeid}".format(typeid=typeid))
close_please = True
# The type is in there, but when parsed it may have given something like
# `class X` for the typeid (meaning nothing else to write). For others,
# the decl_n is the declared name of the template parameter. E.g. it
# was parsed as `typeid <- class` and `decl_n <- X`.
if decl_n:
param_stream.write(" ")
if not close_please:
param_stream.write("``")
param_stream.write("{decl_n}".format(decl_n=decl_n))
close_please = True
# When templates provide a default value, `def_n` is it. When parsed,
# if the `decl_n` and `def_n` are the same, `def_n` is explicitly set
# to be None.
if def_n:
param_stream.write(" ")
if not close_please:
param_stream.write("``")
param_stream.write("= {def_n}``".format(def_n=def_n))
close_please = True
if close_please:
param_stream.write("``")
param_stream.write("\n")
param_stream.write("\n")
param_value = param_stream.getvalue()
param_stream.close()
return param_value | def templateParametersStringAsRestList(self, nodeByRefid) | .. todo::
document this, create another method for creating this without the need for
generating links, to be used in making the node titles and labels | 5.566667 | 4.806118 | 1.158246 |
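The loop above emits one of two RST forms per template parameter: a link when a refid could be resolved, or inline literal text otherwise. A standalone sketch with hypothetical values:

typeid, link = "class T", "exhale_class_classFoo"
linked = "#. `{typeid} <{link}_>`_".format(typeid=typeid, link=link)
plain = "#. ``{typeid}``".format(typeid=typeid)
print(linked)  # #. `class T <exhale_class_classFoo_>`_
print(plain)   # #. ``class T``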
'''
.. todo:: long time from now: intersphinx should be possible here
'''
# lst should either be self.base_compounds or self.derived_compounds
if not lst:
return None
bod_stream = StringIO()
for prot, refid, string in lst:
bod_stream.write("- ")
# Include the prototype
if prot:
bod_stream.write("``{0}".format(prot))
please_close = True
else:
please_close = False
# Create the link, if possible
# TODO: how to do intersphinx links here?
if refid:
# TODO: why are these links not working????????????????????????????????
###########flake8breaks :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/ :/
# if please_close:
# bod_stream.write("`` ") # close prototype
# bod_stream.write("`{name} <{link}_>`_".format(
# # name=string.replace("<", ">").replace(">", "<"),
# name=string.replace("<", "").replace(">", ""),
# link=nodeByRefid[refid].link_name
# ))
if not please_close:
bod_stream.write("``")
else:
bod_stream.write(" ")
bod_stream.write("{string}`` (:ref:`{link}`)".format(
string=string,
link=nodeByRefid[refid].link_name
))
else:
if not please_close:
bod_stream.write("``")
else:
bod_stream.write(" ")
bod_stream.write("{0}``".format(string))
bod_stream.write("\n")
bod_value = bod_stream.getvalue()
bod_stream.close()
return bod_value | def baseOrDerivedListString(self, lst, nodeByRefid) | .. todo:: long time from now: intersphinx should be possible here | 3.991021 | 3.497262 | 1.141184 |
'''
Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to.
'''
if self.kind == "namespace":
lst.append(self)
for c in self.children:
c.findNestedNamespaces(lst) | def findNestedNamespaces(self, lst) | Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to. | 5.444863 | 1.654322 | 3.291297 |
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst) | def findNestedDirectories(self, lst) | Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to. | 5.677312 | 1.656278 | 3.427752 |
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst) | def findNestedClassLike(self, lst) | Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to. | 4.943155 | 1.524103 | 3.243321 |
'''
Recursive helper function for finding nested enums. If this node is a class or
struct it may have had an enum added to its child list. When this occurred, the
enum was removed from ``self.enums`` in the :class:`~exhale.graph.ExhaleRoot`
class and needs to be rediscovered by calling this method on all of its
children. If this node is an enum, it is because a parent class or struct
called this method, in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to
:func:`~exhale.graph.ExhaleRoot.generateNodeDocuments`.
:Parameters:
``lst`` (list)
The list each enum is to be appended to.
'''
if self.kind == "enum":
lst.append(self)
for c in self.children:
c.findNestedEnums(lst) | def findNestedEnums(self, lst) | Recursive helper function for finding nested enums. If this node is a class or
struct it may have had an enum added to its child list. When this occurred, the
enum was removed from ``self.enums`` in the :class:`~exhale.graph.ExhaleRoot`
class and needs to be rediscovered by calling this method on all of its
children. If this node is an enum, it is because a parent class or struct
called this method, in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to
:func:`~exhale.graph.ExhaleRoot.generateNodeDocuments`.
:Parameters:
``lst`` (list)
The list each enum is to be appended to. | 8.826309 | 1.317811 | 6.697707 |
'''
Recursive helper function for finding nested unions. If this node is a class or
struct it may have had a union added to its child list. When this occurred, the
union was removed from ``self.unions`` in the :class:`~exhale.graph.ExhaleRoot`
class and needs to be rediscovered by calling this method on all of its
children. If this node is a union, it is because a parent class or struct
called this method, in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to
:func:`~exhale.graph.ExhaleRoot.generateNodeDocuments`.
:Parameters:
``lst`` (list)
The list each union is to be appended to.
'''
if self.kind == "union":
lst.append(self)
for c in self.children:
c.findNestedUnions(lst) | def findNestedUnions(self, lst) | Recursive helper function for finding nested unions. If this node is a class or
struct it may have had a union added to its child list. When this occurred, the
union was removed from ``self.unions`` in the :class:`~exhale.graph.ExhaleRoot`
class and needs to be rediscovered by calling this method on all of its
children. If this node is a union, it is because a parent class or struct
called this method, in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to
:func:`~exhale.graph.ExhaleRoot.generateNodeDocuments`.
:Parameters:
``lst`` (list)
The list each union is to be appended to. | 8.975955 | 1.306997 | 6.867615 |
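All of the ``findNested*`` helpers above share the same accumulate-into-a-list recursion. A minimal sketch of the pattern (hypothetical ``Node`` class, not exhale's):

class Node:
    def __init__(self, kind, children=None):
        self.kind = kind
        self.children = children or []

    def find_nested(self, kind, lst):
        # Append self when the kind matches, then recurse into every child.
        if self.kind == kind:
            lst.append(self)
        for c in self.children:
            c.find_nested(kind, lst)

root = Node("namespace", [Node("class", [Node("enum")]), Node("namespace")])
enums = []
root.find_nested("enum", enums)
print(len(enums))  # 1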
'''
Debugging tool for printing hierarchies / ownership to the console. Recursively
calls children ``toConsole`` if this node is not a directory or a file, and
``printChildren == True``.
.. todo:: fmt_spec docs needed. keys are ``kind`` and values are color spec
:Parameters:
``level`` (int)
The indentation level to be used, should be greater than or equal to 0.
``printChildren`` (bool)
Whether or not the ``toConsole`` method for the children found in
``self.children`` should be called with ``level+1``. Default is True,
set to False for directories and files.
'''
indent = " " * level
utils.verbose_log("{indent}- [{kind}]: {name}".format(
indent=indent,
kind=utils._use_color(self.kind, fmt_spec[self.kind], sys.stderr),
name=self.name
))
# files are children of directories, the file section will print those children
if self.kind == "dir":
for c in self.children:
c.toConsole(level + 1, fmt_spec, printChildren=False)
elif printChildren:
if self.kind == "file":
next_indent = " " * (level + 1)
utils.verbose_log("{next_indent}[[[ location=\"{loc}\" ]]]".format(
next_indent=next_indent,
loc=self.location
))
for incl in self.includes:
utils.verbose_log("{next_indent}- #include <{incl}>".format(
next_indent=next_indent,
incl=incl
))
for ref, name in self.included_by:
utils.verbose_log("{next_indent}- included by: [{name}]".format(
next_indent=next_indent,
name=name
))
for n in self.namespaces_used:
n.toConsole(level + 1, fmt_spec, printChildren=False)
for c in self.children:
c.toConsole(level + 1, fmt_spec)
elif self.kind == "class" or self.kind == "struct":
relevant_children = []
for c in self.children:
if c.kind == "class" or c.kind == "struct" or \
c.kind == "enum" or c.kind == "union":
relevant_children.append(c)
for rc in sorted(relevant_children):
rc.toConsole(level + 1, fmt_spec)
elif self.kind != "union":
for c in self.children:
c.toConsole(level + 1, fmt_spec) | def toConsole(self, level, fmt_spec, printChildren=True) | Debugging tool for printing hierarchies / ownership to the console. Recursively
calls children ``toConsole`` if this node is not a directory or a file, and
``printChildren == True``.
.. todo:: fmt_spec docs needed. keys are ``kind`` and values are color spec
:Parameters:
``level`` (int)
The indentation level to be used, should be greater than or equal to 0.
``printChildren`` (bool)
Whether or not the ``toConsole`` method for the children found in
``self.children`` should be called with ``level+1``. Default is True,
set to False for directories and files. | 3.51556 | 2.130556 | 1.650067 |
'''
Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`~exhale.graph.ExhaleRoot.deepSortList` for more information on
when this is necessary.
'''
self.children.sort()
for c in self.children:
c.typeSort() | def typeSort(self) | Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`~exhale.graph.ExhaleRoot.deepSortList` for more information on
when this is necessary. | 7.761649 | 1.746968 | 4.442926 |
'''
Whether or not this node should be included in the class view hierarchy. Helper
method for :func:`~exhale.graph.ExhaleNode.toHierarchy`. Sets the member
variable ``self.in_class_hierarchy`` to True if appropriate.
:Return (bool):
True if this node should be included in the class view --- either it is a
node of kind ``struct``, ``class``, ``enum``, ``union``, or it is a
``namespace`` that one or more of its descendants was one of the previous
four kinds. Returns False otherwise.
'''
if self.kind == "namespace":
for c in self.children:
if c.inClassHierarchy():
return True
return False
else:
# flag that this node is already in the class view so we can find the
# missing top level nodes at the end
self.in_class_hierarchy = True
# Skip children whose names were requested to be explicitly ignored.
for exclude in configs._compiled_listing_exclude:
if exclude.match(self.name):
return False
return self.kind in {"struct", "class", "enum", "union"} | def inClassHierarchy(self) | Whether or not this node should be included in the class view hierarchy. Helper
method for :func:`~exhale.graph.ExhaleNode.toHierarchy`. Sets the member
variable ``self.in_class_hierarchy`` to True if appropriate.
:Return (bool):
True if this node should be included in the class view --- either it is a
node of kind ``struct``, ``class``, ``enum``, ``union``, or it is a
``namespace`` that one or more of its descendants was one of the previous
four kinds. Returns False otherwise. | 7.11984 | 2.619694 | 2.717814 |
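The exclusion check above relies on pre-compiled regular expressions (``_compiled_listing_exclude``). A standalone sketch with a hypothetical pattern list standing in for the real compiled configs:

import re

# Hypothetical exclusion patterns, standing in for configs._compiled_listing_exclude.
compiled_listing_exclude = [re.compile(r"detail::.*")]
name = "detail::impl_helper"
print(any(p.match(name) for p in compiled_listing_exclude))  # True -> excluded from the hierarchy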
'''
Whether or not this node should be included in the file view hierarchy. Helper
method for :func:`~exhale.graph.ExhaleNode.toHierarchy`. Sets the member
variable ``self.in_file_hierarchy`` to True if appropriate.
:Return (bool):
True if this node should be included in the file view --- either it is a
node of kind ``file``, or it is a ``dir`` that one or more of its
descendants was a ``file``. Returns False otherwise.
'''
if self.kind == "file":
# flag that this file is already in the directory view so that potential
# missing files can be found later.
self.in_file_hierarchy = True
return True
elif self.kind == "dir":
for c in self.children:
if c.inFileHierarchy():
return True
return False | def inFileHierarchy(self) | Whether or not this node should be included in the file view hierarchy. Helper
method for :func:`~exhale.graph.ExhaleNode.toHierarchy`. Sets the member
variable ``self.in_file_hierarchy`` to True if appropriate.
:Return (bool):
True if this node should be included in the file view --- either it is a
node of kind ``file``, or it is a ``dir`` that one or more of its
descendants was a ``file``. Returns False otherwise. | 6.461352 | 1.925473 | 3.355722 |
'''
The first method that should be called after creating an ExhaleRoot object. The
Breathe graph is parsed first, followed by the Doxygen xml documents. By the
end of this method, all of the ``self.<breathe_kind>``, ``self.all_compounds``,
and ``self.all_nodes`` lists as well as the ``self.node_by_refid`` dictionary
will be populated. Lastly, this method sorts all of the internal lists. The
order of execution is exactly
1. :func:`~exhale.graph.ExhaleRoot.discoverAllNodes`
2. :func:`~exhale.graph.ExhaleRoot.reparentAll`
3. Populate ``self.node_by_refid`` using ``self.all_nodes``.
4. :func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`
5. :func:`~exhale.graph.ExhaleRoot.filePostProcess`
6. :func:`~exhale.graph.ExhaleRoot.parseFunctionSignatures`.
7. :func:`~exhale.graph.ExhaleRoot.sortInternals`
'''
self.discoverAllNodes()
# now reparent everything we can
# NOTE: it's very important that this happens before `fileRefDiscovery`, since
# in that method we only want to consider direct descendants
self.reparentAll()
# now that we have all of the nodes, store them in a convenient manner for refid
# lookup when parsing the Doxygen xml files
for n in self.all_nodes:
self.node_by_refid[n.refid] = n
# find missing relationships using the Doxygen xml files
self.fileRefDiscovery()
self.filePostProcess()
# gather the function signatures
self.parseFunctionSignatures()
# sort all of the lists we just built
self.sortInternals() | def parse(self) | The first method that should be called after creating an ExhaleRoot object. The
Breathe graph is parsed first, followed by the Doxygen xml documents. By the
end of this method, all of the ``self.<breathe_kind>``, ``self.all_compounds``,
and ``self.all_nodes`` lists as well as the ``self.node_by_refid`` dictionary
will be populated. Lastly, this method sorts all of the internal lists. The
order of execution is exactly
1. :func:`~exhale.graph.ExhaleRoot.discoverAllNodes`
2. :func:`~exhale.graph.ExhaleRoot.reparentAll`
3. Populate ``self.node_by_refid`` using ``self.all_nodes``.
4. :func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`
5. :func:`~exhale.graph.ExhaleRoot.filePostProcess`
6. :func:`~exhale.graph.ExhaleRoot.parseFunctionSignatures`.
7. :func:`~exhale.graph.ExhaleRoot.sortInternals` | 4.883 | 1.885476 | 2.589798 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.discoverAllNodes`. If the node
is not in self.all_nodes yet, add it to both self.all_nodes as well as the
corresponding ``self.<breathe_kind>`` list.
:Parameters:
``node`` (ExhaleNode)
The node to begin tracking if not already present.
'''
if node not in self.all_nodes:
self.all_nodes.append(node)
self.node_by_refid[node.refid] = node
if node.kind == "class" or node.kind == "struct":
self.class_like.append(node)
elif node.kind == "namespace":
self.namespaces.append(node)
elif node.kind == "enum":
self.enums.append(node)
elif node.kind == "enumvalue":
self.enum_values.append(node)
elif node.kind == "define":
self.defines.append(node)
elif node.kind == "file":
self.files.append(node)
elif node.kind == "dir":
self.dirs.append(node)
elif node.kind == "function":
self.functions.append(node)
elif node.kind == "variable":
self.variables.append(node)
elif node.kind == "group":
self.groups.append(node)
elif node.kind == "typedef":
self.typedefs.append(node)
elif node.kind == "union":
self.unions.append(node) | def trackNodeIfUnseen(self, node) | Helper method for :func:`~exhale.graph.ExhaleRoot.discoverAllNodes`. If the node
is not in self.all_nodes yet, add it to both self.all_nodes as well as the
corresponding ``self.<breathe_kind>`` list.
:Parameters:
``node`` (ExhaleNode)
The node to begin tracking if not already present. | 2.312263 | 1.498292 | 1.543266 |
'''
Fixes some of the parental relationships lost in parsing the Breathe graph.
File relationships are recovered in
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. This method simply calls in
this order:
1. :func:`~exhale.graph.ExhaleRoot.reparentUnions`
2. :func:`~exhale.graph.ExhaleRoot.reparentClassLike`
3. :func:`~exhale.graph.ExhaleRoot.reparentDirectories`
4. :func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes`
5. :func:`~exhale.graph.ExhaleRoot.reparentNamespaces`
'''
self.reparentUnions()
self.reparentClassLike()
self.reparentDirectories()
self.renameToNamespaceScopes()
self.reparentNamespaces()
# make sure all children lists are unique (no duplicate children)
for node in self.all_nodes:
node.children = list(set(node.children)) | def reparentAll(self) | Fixes some of the parental relationships lost in parsing the Breathe graph.
File relationships are recovered in
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. This method simply calls in
this order:
1. :func:`~exhale.graph.ExhaleRoot.reparentUnions`
2. :func:`~exhale.graph.ExhaleRoot.reparentClassLike`
3. :func:`~exhale.graph.ExhaleRoot.reparentDirectories`
4. :func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes`
5. :func:`~exhale.graph.ExhaleRoot.reparentNamespaces` | 4.347672 | 1.51403 | 2.871589 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and
classes should have the unions defined in them in their own child lists
rather than floating around. Union nodes that are reparented (e.g. a union
defined in a class) will be removed from the list ``self.unions`` since the
Breathe directive for its parent (e.g. the class) will include the documentation
for the union. The consequence of this is that a union defined in a class will
**not** appear in the full api listing of Unions.
'''
# unions declared in a class will not link to the individual union page, so
# we will instead elect to remove these from the list of unions
removals = []
for u in self.unions:
parts = u.name.split("::")
if len(parts) >= 2:
# TODO: nested unions are not supported right now...
parent_name = "::".join(p for p in parts[:-1])
reparented = False
# see if the name matches any potential parents
for node in itertools.chain(self.class_like, self.namespaces):
if node.name == parent_name:
node.children.append(u)
u.parent = node
reparented = True
break
# if it was reparented, mark it for removal from self.unions
if reparented:
removals.append(u)
else:
# << verboseBuild
utils.verbose_log(
"The union {0} has '::' in its name, but no parent was found!".format(u.name),
utils.AnsiColors.BOLD_RED
)
# remove the unions from self.unions that were declared in class_like objects
for rm in removals:
self.unions.remove(rm) | def reparentUnions(self) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and
classes should have the unions defined in them in their own child lists
rather than floating around. Union nodes that are reparented (e.g. a union
defined in a class) will be removed from the list ``self.unions`` since the
Breathe directive for its parent (e.g. the class) will include the documentation
for the union. The consequence of this is that a union defined in a class will
**not** appear in the full api listing of Unions. | 6.903361 | 3.187922 | 2.165473 |
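The parent lookup above hinges on splitting the fully qualified name on ``::`` and rejoining everything but the last component. A standalone sketch:

name = "external::nested::my_union"
parts = name.split("::")
if len(parts) >= 2:
    parent_name = "::".join(parts[:-1])
    print(parent_name)  # external::nested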
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node; these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so
that the class view hierarchy is generated correctly.
'''
removals = []
for cl in self.class_like:
parts = cl.name.split("::")
if len(parts) > 1:
parent_name = "::".join(parts[:-1])
for parent_cl in self.class_like:
if parent_cl.name == parent_name:
parent_cl.children.append(cl)
cl.parent = parent_cl
removals.append(cl)
break
for rm in removals:
if rm in self.class_like:
self.class_like.remove(rm) | def reparentClassLike(self) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node; these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so that the class view
hierarchy is generated correctly. | 4.499157 | 1.540169 | 2.921211 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds
subdirectories as children to the relevant directory ExhaleNode. If a node in
``self.dirs`` is added as a child to a different directory node, it is removed
from the ``self.dirs`` list.
'''
dir_parts = []
dir_ranks = []
for d in self.dirs:
parts = d.name.split(os.sep)
for p in parts:
if p not in dir_parts:
dir_parts.append(p)
dir_ranks.append((len(parts), d))
traversal = sorted(dir_ranks)
removals = []
for rank, directory in reversed(traversal):
# rank one means top level directory
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_directory in reversed(traversal):
if p_rank == rank - 1:
if p_directory.name == os.path.dirname(directory.name):
p_directory.children.append(directory)
directory.parent = p_directory
if directory not in removals:
removals.append(directory)
break
for rm in removals:
self.dirs.remove(rm) | def reparentDirectories(self) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds
subdirectories as children to the relevant directory ExhaleNode. If a node in
``self.dirs`` is added as a child to a different directory node, it is removed
from the ``self.dirs`` list. | 3.893753 | 2.354201 | 1.653959 |
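The rank used above is just the path depth, so iterating the sorted list in reverse visits the deepest directories first, and each looks for a parent exactly one level up. A standalone sketch:

import os

paths = ["src", os.path.join("src", "util"), os.path.join("src", "util", "io")]
ranks = sorted((len(p.split(os.sep)), p) for p in paths)
for rank, path in reversed(ranks):
    if rank < 2:
        break  # rank one means a top-level directory; nothing to reparent
    print(rank, path, "-> parent:", os.path.dirname(path))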
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Some compounds in
Breathe such as functions and variables do not have the namespace name they are
declared in before the name of the actual compound. This method prepends the
appropriate (nested) namespace name before the name of any child that does not
already have it.
For example, the variable ``MAX_DEPTH`` declared in namespace ``external`` would
have its ExhaleNode's ``name`` attribute changed from ``MAX_DEPTH`` to
``external::MAX_DEPTH``.
'''
for n in self.namespaces:
namespace_name = "{0}::".format(n.name)
for child in n.children:
if namespace_name not in child.name:
child.name = "{0}{1}".format(namespace_name, child.name) | def renameToNamespaceScopes(self) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Some compounds in
Breathe such as functions and variables do not have the namespace name they are
declared in before the name of the actual compound. This method prepends the
appropriate (nested) namespace name before the name of any child that does not
already have it.
For example, the variable ``MAX_DEPTH`` declared in namespace ``external`` would
have its ExhaleNode's ``name`` attribute changed from ``MAX_DEPTH`` to
``external::MAX_DEPTH``. | 6.746808 | 1.48365 | 4.54744 |
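A compact sketch of the renaming rule with standalone values (no real nodes involved); note the substring test also leaves already-qualified names alone:

namespace_name = "external::"
for child_name in ["MAX_DEPTH", "external::already_scoped"]:
    if namespace_name not in child_name:
        child_name = "{0}{1}".format(namespace_name, child_name)
    print(child_name)
# external::MAX_DEPTH
# external::already_scoped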
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds nested
namespaces as children to the relevant namespace ExhaleNode. If a node in
``self.namespaces`` is added as a child to a different namespace node, it is
removed from the ``self.namespaces`` list. Because these are removed from
``self.namespaces``, it is important that
:func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` is called before this
method.
'''
namespace_parts = []
namespace_ranks = []
for n in self.namespaces:
parts = n.name.split("::")
for p in parts:
if p not in namespace_parts:
namespace_parts.append(p)
namespace_ranks.append((len(parts), n))
traversal = sorted(namespace_ranks)
removals = []
for rank, namespace in reversed(traversal):
# rank one means top level namespace
if rank < 2:
continue
# otherwise, this is nested
for p_rank, p_namespace in reversed(traversal):
if p_rank == rank - 1:
if p_namespace.name == "::".join(namespace.name.split("::")[:-1]):
p_namespace.children.append(namespace)
namespace.parent = p_namespace
if namespace not in removals:
removals.append(namespace)
continue
removals = []
for nspace in self.namespaces:
if nspace.parent and nspace.parent.kind == "namespace" and nspace not in removals:
removals.append(nspace)
for rm in removals:
self.namespaces.remove(rm) | def reparentNamespaces(self) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds nested
namespaces as children to the relevant namespace ExhaleNode. If a node in
``self.namespaces`` is added as a child to a different namespace node, it is
removed from the ``self.namespaces`` list. Because these are removed from
``self.namespaces``, it is important that
:func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` is called before this
method. | 3.699821 | 2.159757 | 1.713073 |
'''
The real name of this method should be ``reparentFiles``, but to avoid confusion
about the stage at which this must happen, it is called this instead. After the
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery` method has been called, each
file will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
'''
# directories are already reparented, traverse the children and get a flattened
# list of all directories. previously, all directories should have had their
# names adjusted to remove a potentially leading path separator
nodes_remaining = [d for d in self.dirs]
all_directories = []
while len(nodes_remaining) > 0:
d = nodes_remaining.pop()
all_directories.append(d)
for child in d.children:
if child.kind == "dir":
nodes_remaining.append(child)
all_directories.sort()
for f in self.files:
if not f.location:
sys.stderr.write(utils.critical(
"Cannot reparent file [{0}] because it's location was not discovered.\n".format(
f.name
)
))
continue
elif os.sep not in f.location:
# top-level file, cannot be parented to a directory
utils.verbose_log(
"### File [{0}] with location [{1}] was identified as being at the top level".format(
f.name, f.location
),
utils.AnsiColors.BOLD_YELLOW
)
continue
dirname = os.path.dirname(f.location)
found = False
for d in all_directories:
if dirname == d.name:
d.children.append(f)
f.parent = d
found = True
break
if not found:
sys.stderr.write(utils.critical(
"Could not find directory parent of file [{0}] with location [{1}].\n".format(
f.name, f.location
)
)) | def filePostProcess(self) | The real name of this method should be ``reparentFiles``, but to avoid confusion
about the stage at which this must happen, it is called this instead. After the
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery` method has been called, each
file will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete. | 5.435958 | 2.961779 | 1.835369 |
# Keys: string refid of either namespace or file nodes
# Values: list of function objects that should be defined there
parent_to_func = {}
for func in self.functions:
# Case 1: it is a function inside a namespace, the function information
# is in the namespace's XML file.
if func.parent:
parent_refid = None
if func.parent.kind == "namespace":
parent_refid = func.parent.refid
else:
raise RuntimeError(textwrap.dedent('''
Function [{0}] with refid=[{1}] had a parent of kind '{2}':
Parent name=[{3}], refid=[{4}].
Functions may only have namespace parents. Please report this
issue online, Exhale has a parsing error.
'''.format(func.name, func.refid, func.parent.kind, func.parent.name, func.parent.refid)))
# Case 2: top-level function, its information is in the file node's XML.
elif func.def_in_file:
parent_refid = func.def_in_file.refid
else:
utils.verbose_log(utils.critical(
"Cannot parse function [{0}] signature, refid=[{2}], no parent/def_in_file found!".format(
func.name, func.refid
)
))
# If we found a suitable parent refid, gather in parent_to_func.
if parent_refid:
if parent_refid not in parent_to_func:
parent_to_func[parent_refid] = []
parent_to_func[parent_refid].append(func)
# Now we have a mapping of all defining elements to where the function
# signatures _should_ live.
# TODO: setwise comparison / report when children vs parent_to_func[refid] differ?
for refid in parent_to_func:
parent = self.node_by_refid[refid]
parent_contents = utils.nodeCompoundXMLContents(parent)
if not parent_contents:
continue ############flake8efphase: TODO: error, log?
try:
parent_soup = BeautifulSoup(parent_contents, "lxml-xml")
except:
continue
cdef = parent_soup.doxygen.compounddef
func_section = None
for section in cdef.find_all("sectiondef", recursive=False):
if "kind" in section.attrs and section.attrs["kind"] == "func":
func_section = section
break
if not func_section:
continue############flake8efphase: TODO: error, log?
functions = parent_to_func[refid]
for memberdef in func_section.find_all("memberdef", recursive=False):
if "kind" not in memberdef.attrs or memberdef.attrs["kind"] != "function":
continue
func_refid = memberdef.attrs["id"]
func = None
for candidate in functions:
if candidate.refid == func_refid:
func = candidate
break
if not func:
continue ############flake8efphase: TODO: error, log?
functions.remove(func)
# At last, we can actually parse the function signature
# 1. The function return type.
func.return_type = utils.sanitize(
memberdef.find("type", recursive=False).text
)
# 2. The function parameter list.
parameters = []
for param in memberdef.find_all("param", recursive=False):
parameters.append(param.type.text)
func.parameters = utils.sanitize_all(parameters)
# 3. The template parameter list.
templateparamlist = memberdef.templateparamlist
if templateparamlist:
template = []
for param in templateparamlist.find_all("param", recursive=False):
template.append(param.type.text)
func.template = utils.sanitize_all(template) | def parseFunctionSignatures(self) | Search file and namespace node XML contents for function signatures. | 3.912431 | 3.828655 | 1.021881 |
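To make the XML traversal concrete, here is a minimal standalone sketch of pulling a return type and parameter list out of a Doxygen-style ``memberdef`` (requires ``beautifulsoup4`` and ``lxml``; the XML snippet is hand-written for illustration, not real Doxygen output):

from bs4 import BeautifulSoup

xml = """<doxygen><compounddef>
  <sectiondef kind="func">
    <memberdef kind="function" id="some_refid">
      <type>int</type>
      <param><type>int</type></param>
      <param><type>double</type></param>
    </memberdef>
  </sectiondef>
</compounddef></doxygen>"""

cdef = BeautifulSoup(xml, "lxml-xml").doxygen.compounddef
for section in cdef.find_all("sectiondef", recursive=False):
    if section.attrs.get("kind") == "func":
        for memberdef in section.find_all("memberdef", recursive=False):
            return_type = memberdef.find("type", recursive=False).text
            parameters = [p.type.text for p in memberdef.find_all("param", recursive=False)]
            print(return_type, parameters)  # int ['int', 'double']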
'''
Sort all internal lists (``class_like``, ``namespaces``, ``variables``, etc)
mostly how doxygen would, alphabetical but also hierarchical (e.g. structs
appear before classes in listings). Some internal lists are just sorted, and
some are deep sorted (:func:`~exhale.graph.ExhaleRoot.deepSortList`).
'''
# some of the lists only need to be sorted, some of them need to be sorted and
# have each node sort its children
# leaf-like lists: no child sort
self.defines.sort()
self.enums.sort()
self.enum_values.sort()
self.functions.sort()
self.groups.sort()
self.typedefs.sort()
self.variables.sort()
# hierarchical lists: sort children
self.deepSortList(self.class_like)
self.deepSortList(self.namespaces)
self.deepSortList(self.unions)
self.deepSortList(self.files)
self.deepSortList(self.dirs) | def sortInternals(self) | Sort all internal lists (``class_like``, ``namespaces``, ``variables``, etc)
mostly how doxygen would, alphabetical but also hierarchical (e.g. structs
appear before classes in listings). Some internal lists are just sorted, and
some are deep sorted (:func:`~exhale.graph.ExhaleRoot.deepSortList`). | 7.157285 | 2.520052 | 2.840134 |
'''
This method creates the root library api file that will include all of the
different hierarchy views and full api listing. If ``self.root_directory`` is
not an existing directory, it is created first. Afterward, the root API file is
created and its title is written, as well as the value of
``configs.afterTitleDescription``.
'''
try:
if not os.path.isdir(self.root_directory):
os.mkdir(self.root_directory)
except:
utils.fancyError(
"Cannot create the directory: {0}".format(self.root_directory)
)
try:
with codecs.open(self.full_root_file_path, "w", "utf-8") as generated_index:
# Add the metadata if they requested it
if configs.pageLevelConfigMeta:
generated_index.write("{0}\n\n".format(configs.pageLevelConfigMeta))
generated_index.write(textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=configs.rootFileTitle,
heading_mark=utils.heading_mark(
configs.rootFileTitle,
configs.SECTION_HEADING_CHAR
)
)))
if configs.afterTitleDescription:
generated_index.write("\n{0}\n\n".format(configs.afterTitleDescription))
except:
utils.fancyError(
"Unable to create the root api file / header: {0}".format(self.full_root_file_path)
) | def generateAPIRootHeader(self) | This method creates the root library api file that will include all of the
different hierarchy views and full api listing. If ``self.root_directory`` is
not an existing directory, it is created first. Afterward, the root API file is
created and its title is written, as well as the value of
``configs.afterTitleDescription``. | 4.840727 | 2.514243 | 1.925322 |
'''
Creates all of the reStructuredText documents related to types parsed by
Doxygen. This includes all leaf-like documents (``class``, ``struct``,
``enum``, ``typedef``, ``union``, ``variable``, and ``define``), as well as
namespace, file, and directory pages.
During the reparenting phase of the parsing process, nested items were added as
a child to their actual parent. For classes, structs, enums, and unions, if
it was reparented to a ``namespace`` it will *remain* in its respective
``self.<breathe_kind>`` list. However, if it was an internally declared child
of a class or struct (nested classes, structs, enums, and unions), this node
will be removed from its ``self.<breathe_kind>`` list to avoid duplication in
the class hierarchy generation.
When generating the full API, though, we will want to include all of these and
therefore must call :func:`~exhale.graph.ExhaleRoot.generateSingleNodeRST` with
all of the nested items. For nested classes and structs, this is done by just
calling ``node.findNestedClassLike`` for every node in ``self.class_like``. The
resulting list then has all of ``self.class_like``, as well as any nested
classes and structs found. With ``enum`` and ``union``, these would have been
reparented to a **class** or **struct** if it was removed from the relevant
``self.<breathe_kind>`` list. This means we must make sure that we generate the
single node RST documents for everything by finding the nested enums and unions
from ``self.class_like``, as well as everything in ``self.enums`` and
``self.unions``.
'''
# initialize all of the nodes first
for node in self.all_nodes:
self.initializeNodeFilenameAndLink(node)
self.adjustFunctionTitles()
# now that all potential ``node.link_name`` members are initialized, generate
# the leaf-like documents
for node in self.all_nodes:
if node.kind in utils.LEAF_LIKE_KINDS:
self.generateSingleNodeRST(node)
# generate the remaining parent-like documents
self.generateNamespaceNodeDocuments()
self.generateFileNodeDocuments()
self.generateDirectoryNodeDocuments() | def generateNodeDocuments(self) | Creates all of the reStructuredText documents related to types parsed by
Doxygen. This includes all leaf-like documents (``class``, ``struct``,
``enum``, ``typedef``, ``union``, ``variable``, and ``define``), as well as
namespace, file, and directory pages.
During the reparenting phase of the parsing process, nested items were added as
a child to their actual parent. For classes, structs, enums, and unions, if
it was reparented to a ``namespace`` it will *remain* in its respective
``self.<breathe_kind>`` list. However, if it was an internally declared child
of a class or struct (nested classes, structs, enums, and unions), this node
will be removed from its ``self.<breathe_kind>`` list to avoid duplication in
the class hierarchy generation.
When generating the full API, though, we will want to include all of these and
therefore must call :func:`~exhale.graph.ExhaleRoot.generateSingleNodeRST` with
all of the nested items. For nested classes and structs, this is done by just
calling ``node.findNestedClassLike`` for every node in ``self.class_like``. The
resulting list then has all of ``self.class_like``, as well as any nested
classes and structs found. With ``enum`` and ``union``, these would have been
reparented to a **class** or **struct** if it was removed from the relevant
``self.<breathe_kind>`` list. Meaning we must make sure that we genererate the
single node RST documents for everything by finding the nested enums and unions
from ``self.class_like``, as well as everything in ``self.enums`` and
``self.unions``. | 8.248937 | 1.564186 | 5.27363 |
'''
Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace.
'''
# go through all of the top level namespaces
for n in self.namespaces:
# find any nested namespaces
nested_namespaces = []
for child in n.children:
child.findNestedNamespaces(nested_namespaces)
# generate the children first
for nested in reversed(sorted(nested_namespaces)):
self.generateSingleNamespace(nested)
# generate this top level namespace
self.generateSingleNamespace(n) | def generateNamespaceNodeDocuments(self) | Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace. | 6.451226 | 2.397125 | 2.691235 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.generateNamespaceNodeDocuments`.
Writes the reStructuredText file for the given namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node to create the reStructuredText document for.
'''
try:
with codecs.open(nspace.file_name, "w", "utf-8") as gen_file:
# Add the metadata if they requested it
if configs.pageLevelConfigMeta:
gen_file.write("{0}\n\n".format(configs.pageLevelConfigMeta))
nspace.title = "{0} {1}".format(utils.qualifyKind(nspace.kind), nspace.name)
# generate a link label for every generated file
gen_file.write(textwrap.dedent('''
.. _{link}:
{heading}
{heading_mark}
'''.format(
link=nspace.link_name,
heading=nspace.title,
heading_mark=utils.heading_mark(nspace.title, configs.SECTION_HEADING_CHAR)
)))
brief, detailed = parse.getBriefAndDetailedRST(self, nspace)
if brief:
gen_file.write("{0}\n\n".format(brief))
# include the contents directive if requested
contents = utils.contentsDirectiveOrNone(nspace.kind)
if contents:
gen_file.write("{0}\n\n".format(contents))
if detailed:
gen_file.write("{0}\n\n".format(detailed))
# generate the headings and links for the children
children_string = self.generateNamespaceChildrenString(nspace)
gen_file.write(children_string)
except:
utils.fancyError(
"Critical error while generating the file for [{0}]".format(nspace.file_name)
) | def generateSingleNamespace(self, nspace) | Helper method for :func:`~exhale.graph.ExhaleRoot.generateNamespaceNodeDocuments`.
Writes the reStructuredText file for the given namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node to create the reStructuredText document for. | 4.293704 | 3.360738 | 1.277607 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.generateSingleNamespace`, and
:func:`~exhale.graph.ExhaleRoot.generateFileNodeDocuments`. Builds the
body text for the namespace node document that links to all of the child
namespaces, structs, classes, functions, typedefs, unions, and variables
associated with this namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node we are generating the body text for.
:Return (str):
The string to be written to the namespace node's reStructuredText document.
'''
# sort the children
nsp_namespaces = []
nsp_nested_class_like = []
nsp_enums = []
nsp_functions = []
nsp_typedefs = []
nsp_unions = []
nsp_variables = []
for child in nspace.children:
# Skip children whose names were requested to be explicitly ignored.
should_exclude = False
for exclude in configs._compiled_listing_exclude:
if exclude.match(child.name):
should_exclude = True
if should_exclude:
continue
if child.kind == "namespace":
nsp_namespaces.append(child)
elif child.kind == "struct" or child.kind == "class":
child.findNestedClassLike(nsp_nested_class_like)
child.findNestedEnums(nsp_enums)
child.findNestedUnions(nsp_unions)
elif child.kind == "enum":
nsp_enums.append(child)
elif child.kind == "function":
nsp_functions.append(child)
elif child.kind == "typedef":
nsp_typedefs.append(child)
elif child.kind == "union":
nsp_unions.append(child)
elif child.kind == "variable":
nsp_variables.append(child)
# generate their headings if they exist (no Defines...that's not a C++ thing...)
children_stream = StringIO()
self.generateSortedChildListString(children_stream, "Namespaces", nsp_namespaces)
self.generateSortedChildListString(children_stream, "Classes", nsp_nested_class_like)
self.generateSortedChildListString(children_stream, "Enums", nsp_enums)
self.generateSortedChildListString(children_stream, "Functions", nsp_functions)
self.generateSortedChildListString(children_stream, "Typedefs", nsp_typedefs)
self.generateSortedChildListString(children_stream, "Unions", nsp_unions)
self.generateSortedChildListString(children_stream, "Variables", nsp_variables)
# read out the buffer contents, close it and return the desired string
children_string = children_stream.getvalue()
children_stream.close()
return children_string | def generateNamespaceChildrenString(self, nspace) | Helper method for :func:`~exhale.graph.ExhaleRoot.generateSingleNamespace`, and
:func:`~exhale.graph.ExhaleRoot.generateFileNodeDocuments`. Builds the
body text for the namespace node document that links to all of the child
namespaces, structs, classes, functions, typedefs, unions, and variables
associated with this namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node we are generating the body text for.
:Return (str):
The string to be written to the namespace node's reStructuredText document. | 2.852701 | 2.012674 | 1.417369 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.generateNamespaceChildrenString`.
Used to build up a continuous string with all of the children separated out into
titled sections.
This generates a new titled section with ``sectionTitle`` and puts a link to
every node found in ``lst`` in this section. The newly created section is
appended to the existing ``stream`` buffer.
:Parameters:
``stream`` (StringIO)
The already-open StringIO to write the result to.
``sectionTitle`` (str)
The title of the section for this list of children.
``lst`` (list)
A list of ExhaleNode objects that are to be linked to from this section.
This method sorts ``lst`` in place.
'''
if lst:
lst.sort()
stream.write(textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=sectionTitle,
heading_mark=utils.heading_mark(
sectionTitle,
configs.SUB_SECTION_HEADING_CHAR
)
)))
for l in lst:
stream.write(textwrap.dedent('''
- :ref:`{link}`
'''.format(link=l.link_name))) | def generateSortedChildListString(self, stream, sectionTitle, lst) | Helper method for :func:`~exhale.graph.ExhaleRoot.generateNamespaceChildrenString`.
Used to build up a continuous string with all of the children separated out into
titled sections.
This generates a new titled section with ``sectionTitle`` and puts a link to
every node found in ``lst`` in this section. The newly created section is
appended to the existing ``stream`` buffer.
:Parameters:
``stream`` (StringIO)
The already-open StringIO to write the result to.
``sectionTitle`` (str)
The title of the section for this list of children.
``lst`` (list)
A list of ExhaleNode objects that are to be linked to from this section.
This method sorts ``lst`` in place. | 5.16213 | 1.782648 | 2.895766 |
'''
Generates all of the directory reStructuredText documents.
'''
all_dirs = []
for d in self.dirs:
d.findNestedDirectories(all_dirs)
for d in all_dirs:
self.generateDirectoryNodeRST(d) | def generateDirectoryNodeDocuments(self) | Generates all of the directory reStructuredText documents. | 6.524462 | 4.266243 | 1.529323 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.generateDirectoryNodeDocuments`.
Generates the reStructuredText documents for the given directory node.
Directory nodes will only link to files and subdirectories within it.
:Parameters:
``node`` (ExhaleNode)
The directory node to generate the reStructuredText document for.
'''
# find the relevant children: directories and files only
child_dirs = []
child_files = []
for c in node.children:
if c.kind == "dir":
child_dirs.append(c)
elif c.kind == "file":
child_files.append(c)
# generate the subdirectory section
if len(child_dirs) > 0:
heading = "Subdirectories"
child_dirs_string = textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=heading,
heading_mark=utils.heading_mark(
heading,
configs.SUB_SECTION_HEADING_CHAR
)
))
for child_dir in sorted(child_dirs):
child_dirs_string = "{}- :ref:`{}`\n".format(child_dirs_string, child_dir.link_name)
else:
child_dirs_string = ""
# generate the files section
if len(child_files) > 0:
heading = "Files"
child_files_string = textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=heading,
heading_mark=utils.heading_mark(
heading,
configs.SUB_SECTION_HEADING_CHAR
)
))
for child_file in sorted(child_files):
child_files_string = "{}- :ref:`{}`\n".format(child_files_string, child_file.link_name)
else:
child_files_string = ""
if node.parent and node.parent.kind == "dir":
parent_directory = textwrap.dedent('''
|exhale_lsh| :ref:`Parent directory <{parent_link}>` (``{parent_name}``)
.. |exhale_lsh| unicode:: U+021B0 .. UPWARDS ARROW WITH TIP LEFTWARDS
'''.format(
parent_link=node.parent.link_name, parent_name=node.parent.name
))
else:
parent_directory = ""
# generate the file for this directory
try:
#flake8fail get rid of {} in this method
with codecs.open(node.file_name, "w", "utf-8") as gen_file:
# Add the metadata if they requested it
if configs.pageLevelConfigMeta:
gen_file.write("{0}\n\n".format(configs.pageLevelConfigMeta))
# generate a link label for every generated file
link_declaration = ".. _{0}:\n\n".format(node.link_name)
header = textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=node.title,
heading_mark=utils.heading_mark(
node.title,
configs.SECTION_HEADING_CHAR
)
))
path = "\n*Directory path:* ``{path}``\n".format(path=node.name)
# write it all out
gen_file.write("{0}{1}{2}{3}{4}\n{5}\n\n".format(
link_declaration, header, parent_directory, path, child_dirs_string, child_files_string)
)
except:
utils.fancyError(
"Critical error while generating the file for [{0}]".format(node.file_name)
) | def generateDirectoryNodeRST(self, node) | Helper method for :func:`~exhale.graph.ExhaleRoot.generateDirectoryNodeDocuments`.
Generates the reStructuredText documents for the given directory node.
Directory nodes will only link to files and subdirectories within it.
:Parameters:
``node`` (ExhaleNode)
The directory node to generate the reStructuredText document for. | 2.906052 | 2.533778 | 1.146924 |
'''
When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``.
'''
for node in self.all_nodes:
node.file_name = os.path.basename(node.file_name)
if node.kind == "file":
node.program_file = os.path.basename(node.program_file) | def gerrymanderNodeFilenames(self) | When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``. | 8.208669 | 1.656057 | 4.956754 |
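The adjustment is just a basename strip; a one-line standalone sketch (the path is hypothetical):

import os

file_name = os.path.join("api", "classfoo.rst")  # relative to conf.py while generating
print(os.path.basename(file_name))               # classfoo.rst -- usable from within api/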
'''
Wrapper method to create the view hierarchies. Currently it just calls
:func:`~exhale.graph.ExhaleRoot.generateClassView` and
:func:`~exhale.graph.ExhaleRoot.generateDirectoryView` --- if you want to implement
additional hierarchies, implement the additionaly hierarchy method and call it
from here. Then make sure to ``include`` it in
:func:`~exhale.graph.ExhaleRoot.generateAPIRootBody`.
'''
# gather the class hierarchy data and write it out
class_view_data = self.generateClassView()
self.writeOutHierarchy(True, class_view_data)
# gather the file hierarchy data and write it out
file_view_data = self.generateDirectoryView()
self.writeOutHierarchy(False, file_view_data) | def generateViewHierarchies(self) | Wrapper method to create the view hierarchies. Currently it just calls
:func:`~exhale.graph.ExhaleRoot.generateClassView` and
:func:`~exhale.graph.ExhaleRoot.generateDirectoryView` --- if you want to implement
additional hierarchies, implement the additionaly hierarchy method and call it
from here. Then make sure to ``include`` it in
:func:`~exhale.graph.ExhaleRoot.generateAPIRootBody`. | 5.176383 | 1.709306 | 3.028354 |
'''
Generates the class view hierarchy, writing it to ``self.class_hierarchy_file``.
'''
class_view_stream = StringIO()
for n in self.namespaces:
n.toHierarchy(True, 0, class_view_stream)
# Add everything that was not nested in a namespace.
missing = []
# class-like objects (structs and classes)
for cl in sorted(self.class_like):
if not cl.in_class_hierarchy:
missing.append(cl)
# enums
for e in sorted(self.enums):
if not e.in_class_hierarchy:
missing.append(e)
# unions
for u in sorted(self.unions):
if not u.in_class_hierarchy:
missing.append(u)
if len(missing) > 0:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(True, 0, class_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last namespace will not correctly have a lastChild
class_view_stream.close()
class_view_stream = StringIO()
last_nspace_index = len(self.namespaces) - 1
for idx in range(last_nspace_index + 1):
nspace = self.namespaces[idx]
nspace.toHierarchy(True, 0, class_view_stream, idx == last_nspace_index)
# extract the value from the stream and close it down
class_view_string = class_view_stream.getvalue()
class_view_stream.close()
return class_view_string | def generateClassView(self) | Generates the class view hierarchy, writing it to ``self.class_hierarchy_file``. | 3.543362 | 3.21169 | 1.10327 |
'''
Generates the file view hierarchy, writing it to ``self.file_hierarchy_file``.
'''
file_view_stream = StringIO()
for d in self.dirs:
d.toHierarchy(False, 0, file_view_stream)
# add potential missing files (not sure if this is possible though)
missing = []
for f in sorted(self.files):
if not f.in_file_hierarchy:
missing.append(f)
found_missing = len(missing) > 0
if found_missing:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toHierarchy(False, 0, file_view_stream, idx == last_missing_child)
idx += 1
elif configs.createTreeView:
# need to restart since there were no missing children found, otherwise the
# last directory will not correctly have a lastChild
file_view_stream.close()
file_view_stream = StringIO()
last_dir_index = len(self.dirs) - 1
for idx in range(last_dir_index + 1):
curr_d = self.dirs[idx]
curr_d.toHierarchy(False, 0, file_view_stream, idx == last_dir_index)
# extract the value from the stream and close it down
file_view_string = file_view_stream.getvalue()
file_view_stream.close()
return file_view_string | def generateDirectoryView(self) | Generates the file view hierarchy, writing it to ``self.file_hierarchy_file``. | 4.106856 | 3.568967 | 1.150713 |
'''
Helper function for :func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI`.
Writes a subsection to ``openFile``, emitting a ``toctree`` for the ``file_name``
of each ExhaleNode in ``sorted(lst)`` when ``len(lst) > 0``. Otherwise, nothing
is written to the file.
:Parameters:
``subsectionTitle`` (str)
The title of this subsection, e.g. ``"Namespaces"`` or ``"Files"``.
``lst`` (list)
The list of ExhaleNodes to be enumerated in this subsection.
``openFile`` (File)
The **already open** file object to write to directly. No safety checks
are performed, make sure this is a real file object that has not been
closed already.
'''
if len(lst) > 0:
openFile.write(textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=subsectionTitle,
heading_mark=utils.heading_mark(
subsectionTitle,
configs.SUB_SUB_SECTION_HEADING_CHAR
)
)))
for l in sorted(lst):
openFile.write(textwrap.dedent('''
.. toctree::
:maxdepth: {depth}
{file}
'''.format(
depth=configs.fullToctreeMaxDepth,
file=l.file_name
))) | def enumerateAll(self, subsectionTitle, lst, openFile) | Helper function for :func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI`.
Writes a subsection to ``openFile``, emitting a ``toctree`` for the ``file_name``
of each ExhaleNode in ``sorted(lst)`` when ``len(lst) > 0``. Otherwise, nothing
is written to the file.
:Parameters:
``subsectionTitle`` (str)
The title of this subsection, e.g. ``"Namespaces"`` or ``"Files"``.
``lst`` (list)
The list of ExhaleNodes to be enumerated in this subsection.
``openFile`` (File)
The **already open** file object to write to directly. No safety checks
are performed, make sure this is a real file object that has not been
closed already. | 4.634601 | 1.776973 | 2.608144 |
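For orientation, the text written for a non-empty ``lst`` would look roughly like the sketch below; the heading mark character, maxdepth value, and file names all depend on ``configs`` and the nodes, so treat them as placeholders.

```python
# Roughly what enumerateAll("Namespaces", lst, openFile) emits, assuming a
# '-' sub-sub-section heading char and a fullToctreeMaxDepth of 5.
expected = """
Namespaces
----------

.. toctree::
   :maxdepth: 5

   namespace_foo

.. toctree::
   :maxdepth: 5

   namespace_bar
"""
```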
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
fmt_spec = {
"class": utils.AnsiColors.BOLD_MAGENTA,
"struct": utils.AnsiColors.BOLD_CYAN,
"define": utils.AnsiColors.BOLD_YELLOW,
"enum": utils.AnsiColors.BOLD_MAGENTA,
"enumvalue": utils.AnsiColors.BOLD_RED, # red means unused in framework
"function": utils.AnsiColors.BOLD_CYAN,
"file": utils.AnsiColors.BOLD_YELLOW,
"dir": utils.AnsiColors.BOLD_MAGENTA,
"group": utils.AnsiColors.BOLD_RED, # red means unused in framework
"namespace": utils.AnsiColors.BOLD_CYAN,
"typedef": utils.AnsiColors.BOLD_YELLOW,
"union": utils.AnsiColors.BOLD_MAGENTA,
"variable": utils.AnsiColors.BOLD_CYAN
}
self.consoleFormat(
"{0} and {1}".format(
utils._use_color("Classes", fmt_spec["class"], sys.stderr),
utils._use_color("Structs", fmt_spec["struct"], sys.stderr),
),
self.class_like,
fmt_spec
)
self.consoleFormat(
utils._use_color("Defines", fmt_spec["define"], sys.stderr),
self.defines,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enums", fmt_spec["enum"], sys.stderr),
self.enums,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enum Values (unused)", fmt_spec["enumvalue"], sys.stderr),
self.enum_values,
fmt_spec
)
self.consoleFormat(
utils._use_color("Functions", fmt_spec["function"], sys.stderr),
self.functions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Files", fmt_spec["file"], sys.stderr),
self.files,
fmt_spec
)
self.consoleFormat(
utils._use_color("Directories", fmt_spec["dir"], sys.stderr),
self.dirs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Groups (unused)", fmt_spec["group"], sys.stderr),
self.groups,
fmt_spec
)
self.consoleFormat(
utils._use_color("Namespaces", fmt_spec["namespace"], sys.stderr),
self.namespaces,
fmt_spec
)
self.consoleFormat(
utils._use_color("Typedefs", fmt_spec["typedef"], sys.stderr),
self.typedefs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Unions", fmt_spec["union"], sys.stderr),
self.unions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Variables", fmt_spec["variable"], sys.stderr),
self.variables,
fmt_spec
) | def toConsole(self) | Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;) | 1.86018 | 1.640301 | 1.134048 |
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`~exhale.graph.ExhaleNode.toConsole` with ``0``
as the level for every ExhaleNode in ``lst``.
**Parameters**
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console.
'''
if not configs.verboseBuild:
return
utils.verbose_log(textwrap.dedent('''
###########################################################
## {0}
###########################################################'''.format(sectionTitle)))
for l in lst:
l.toConsole(0, fmt_spec) | def consoleFormat(self, sectionTitle, lst, fmt_spec) | Helper method for :func:`~exhale.graph.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`~exhale.graph.ExhaleNode.toConsole` with ``0``
as the level for every ExhaleNode in ``lst``.
**Parameters**
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console. | 5.10704 | 2.123314 | 2.405221 |
'''
Generates a ``.. contents::`` directive string according to the rules outlined in
the :ref:`using_contents_directives` section.
**Parameters**
``kind`` (str)
The ``kind`` of the compound (one of :data:`~exhale.utils.AVAILABLE_KINDS`).
**Return**
``str`` or ``None``
If this ``kind`` should have a ``.. contents::`` directive, it returns the
string that can be written to file. Otherwise, ``None`` is returned.
'''
if configs.contentsDirectives and kind in configs.kindsWithContentsDirectives:
ret = "\n.. contents:: {contentsTitle}".format(
contentsTitle=configs.contentsTitle
)
if configs.contentsSpecifiers:
specs = "\n".join(configs.contentsSpecifiers)
ret = "{directive}\n{specs}".format(
directive=ret,
specs=prefix(" ", specs)
)
return "{full_directive}\n\n".format(full_directive=ret)
else:
return None | def contentsDirectiveOrNone(kind) | Generates a ``.. contents::`` directive string according to the rules outlined in
the :ref:`using_contents_directives` section.
**Parameters**
``kind`` (str)
The ``kind`` of the compound (one of :data:`~exhale.utils.AVAILABLE_KINDS`).
**Return**
``str`` or ``None``
If this ``kind`` should have a ``.. contents::`` directive, it returns the
string that can be written to file. Otherwise, ``None`` is returned. | 5.699548 | 2.439874 | 2.336001 |
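A hedged sketch of the return value, assuming ``contentsDirectives`` is enabled for ``"file"`` and ``contentsSpecifiers`` holds the defaults of ``[":local:", ":backlinks: none"]``:

```python
directive = contentsDirectiveOrNone("file")
# `directive` would then resemble (the function includes a leading newline
# and a trailing blank line):
#
#   .. contents:: Contents
#      :local:
#      :backlinks: none
```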
'''
Creates the "pickleable" dictionary that will be used with
:data:`~exhale.configs.customSpecificationsMapping` supplied to ``exhale_args`` in
your ``conf.py``.
**Parameters**
``func`` (types.FunctionType)
A callable function that takes as input a string from
:data:`~exhale.utils.AVAILABLE_KINDS` and returns a ``list`` of strings.
The empty list ``[]`` indicates to use the Breathe defaults.
**Return**
``dict``
A dictionary where the keys are every value in
:data:`~exhale.utils.AVAILABLE_KINDS`, and the values are the ``list``
returns of the input ``func``.
.. note::
To help ensure the dictionary has everything it needs for the rest of Exhale to
function, a "secret" key-value pair is inserted into the returned dictionary.
'''
# Make sure they gave us a function
if not isinstance(func, types.FunctionType):
raise ValueError(
"The input to exhale.util.makeCustomSpecificationsMapping was *NOT* a function: {0}".format(
type(func)
)
)
# Stamp the return to ensure exhale created this function.
ret = {configs._closure_map_sanity_check: configs._closure_map_sanity_check}
try:
# Because we cannot pickle a fully-fledged function object, we are going to go
# through every kind and store its return value.
for kind in AVAILABLE_KINDS:
specs = func(kind)
bad = type(specs) is not list
for s in specs:
if not isinstance(s, six.string_types):
bad = True
break
if bad:
raise RuntimeError(textwrap.dedent('''
The specifications function did not return a valid list for input
`{kind}`
1. Make sure that every entry in the returned list is a string.
2. If you want to use the breathe defaults, you must return the
empty list `[]`.
'''.format(kind=kind)))
ret[kind] = specs
except Exception as e:
raise RuntimeError("Unable to create custom specifications:\n{0}".format(e))
# Everything went according to plan, send it back to `conf.py` :)
return ret | def makeCustomSpecificationsMapping(func) | Creates the "pickleable" dictionary that will be used with
:data:`~exhale.configs.customSpecificationsMapping` supplied to ``exhale_args`` in
your ``conf.py``.
**Parameters**
``func`` (types.FunctionType)
A callable function that takes as input a string from
:data:`~exhale.utils.AVAILABLE_KINDS` and returns a ``list`` of strings.
The empty list ``[]`` indicates to use the Breathe defaults.
**Return**
``dict``
A dictionary where the keys are every value in
:data:`~exhale.utils.AVAILABLE_KINDS`, and the values are the ``list``
returns of the input ``func``.
.. note::
To help ensure the dictionary has everything it needs for the rest of Exhale to
function, a "secret" key-value pair is inserted into the returned dictionary. | 5.990117 | 2.869396 | 2.087588 |
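In practice this is called from ``conf.py``; a sketch close to the documented usage, with illustrative specifier choices:

```python
# conf.py -- the specifier lists here are examples, not requirements.
from exhale import utils

def specificationsForKind(kind):
    # Show all members for classes and structs; Breathe defaults otherwise.
    if kind in ("class", "struct"):
        return [":members:", ":protected-members:", ":undoc-members:"]
    return []

exhale_args = {
    # ... the other required exhale arguments ...
    "customSpecificationsMapping": utils.makeCustomSpecificationsMapping(
        specificationsForKind
    ),
}
```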
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | def sanitize(name) | Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"&lt;" -> "<"`` are
performed, along with space removal such as ``"< " -> "<"``. Breathe is
particularly sensitive with respect to whitespace. | 2.341747 | 2.350867 | 0.996121 |
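A minimal illustration of the combined replacements; the inputs mimic how doxygen XML escapes template signatures:

```python
# Entities are unescaped first, then the stray spaces around brackets and
# ampersands are stripped so breathe can match the signature.
assert sanitize("std::vector&lt; int &gt;") == "std::vector<int>"
assert sanitize("std::map&lt; int, int &amp; &gt;") == "std::map<int, int&>"
```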
'''
Given an input location and language specification, acquire the Pygments lexer to
use for this file.
1. If :data:`configs.lexerMapping <exhale.configs.lexerMapping>` has been specified,
then :data:`configs._compiled_lexer_mapping <exhale.configs._compiled_lexer_mapping>`
will be queried first using the ``location`` parameter.
2. If no matching was found, then the appropriate lexer defined in
:data:`LANG_TO_LEX <exhale.utils.LANG_TO_LEX>` is used.
3. If no matching language is found, ``"none"`` is returned (indicating to Pygments
that no syntax highlighting should occur).
'''
if configs._compiled_lexer_mapping:
for regex in configs._compiled_lexer_mapping:
if regex.match(location):
return configs._compiled_lexer_mapping[regex]
if language in LANG_TO_LEX:
return LANG_TO_LEX[language]
return "none" | def doxygenLanguageToPygmentsLexer(location, language) | Given an input location and language specification, acquire the Pygments lexer to
use for this file.
1. If :data:`configs.lexerMapping <exhale.configs.lexerMapping>` has been specified,
then :data:`configs._compiled_lexer_mapping <exhale.configs._compiled_lexer_mapping>`
will be queried first using the ``location`` parameter.
2. If no matching was found, then the appropriate lexer defined in
:data:`LANG_TO_LEX <exhale.utils.LANG_TO_LEX>` is used.
3. If no matching language is found, ``"none"`` is returned (indicating to Pygments
that no syntax highlighting should occur). | 4.264505 | 1.336225 | 3.191458 |
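A hedged illustration, assuming no ``lexerMapping`` regex matches and that ``LANG_TO_LEX`` maps doxygen's ``"C++"`` to Pygments' ``"cpp"``:

```python
# Falls through to the LANG_TO_LEX table when no custom regex matches.
doxygenLanguageToPygmentsLexer("include/foo.hpp", "C++")    # -> "cpp"
# Unrecognized languages disable syntax highlighting entirely.
doxygenLanguageToPygmentsLexer("data/blob.bin", "Unknown")  # -> "none"
```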
'''
Based on :data:`~exhale.configs.alwaysColorize`, returns the colorized or
non-colorized output when ``output_stream`` is not a TTY (e.g. redirecting
to a file).
**Parameters**
``msg`` (str)
The message that is going to be printed by the caller of this method.
``ansi_fmt`` (str)
The ANSI color format to use when coloring is supposed to happen.
``output_stream`` (file)
Assumed to be either ``sys.stdout`` or ``sys.stderr``.
**Return**
``str``
The message ``msg`` in color, or not, depending on both
:data:`~exhale.configs.alwaysColorize` and whether or not the
``output_stream`` is a TTY.
'''
if configs._on_rtd or (not configs.alwaysColorize and not output_stream.isatty()):
log = msg
else:
log = colorize(msg, ansi_fmt)
return log | def _use_color(msg, ansi_fmt, output_stream) | Based on :data:`~exhale.configs.alwaysColorize`, returns the colorized or
non-colorized output when ``output_stream`` is not a TTY (e.g. redirecting
to a file).
**Parameters**
``msg`` (str)
The message that is going to be printed by the caller of this method.
``ansi_fmt`` (str)
The ANSI color format to use when coloring is supposed to happen.
``output_stream`` (file)
Assumed to be either ``sys.stdout`` or ``sys.stderr``.
**Return**
``str``
The message ``msg`` in color, or not, depending on both
:data:`~exhale.configs.alwaysColorize` and whether or not the
``output_stream`` is a TTY. | 4.303155 | 1.531517 | 2.809734 |
''' A simple enumeration of the colors to the console to help decide :) '''
for elem in cls.__dict__:
# ignore specials such as __class__ or __module__
if not elem.startswith("__"):
color_fmt = cls.__dict__[elem]
if isinstance(color_fmt, six.string_types) and color_fmt not in ("BOLD", "DIM", "UNDER", "INV"):
print("\033[{fmt}AnsiColors.{name}\033[0m".format(fmt=color_fmt, name=elem)) | def printAllColorsToConsole(cls) | A simple enumeration of the colors to the console to help decide :) | 6.047865 | 4.304467 | 1.405021 |
'''
Parses the ``node`` XML document and returns a reStructuredText formatted
string. Helper method for :func:`~exhale.parse.getBriefAndDetailedRST`.
.. todo:: actually document this
'''
if soupTag.para:
children = soupTag.findChildren(recursive=False)
for child in children:
walk(textRoot, child, 0, None, "\n")
contents = soupTag.get_text()
if not heading:
return contents
start = textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=heading,
heading_mark=utils.heading_mark(
heading,
configs.SUB_SECTION_HEADING_CHAR
)
))
return "{0}{1}".format(start, contents)
else:
return "" | def convertDescriptionToRST(textRoot, node, soupTag, heading) | Parses the ``node`` XML document and returns a reStructuredText formatted
string. Helper method for :func:`~exhale.parse.getBriefAndDetailedRST`.
.. todo:: actually document this | 6.559717 | 3.771461 | 1.739304 |
'''
Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this
'''
node_xml_contents = utils.nodeCompoundXMLContents(node)
if not node_xml_contents:
return "", ""
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except Exception:
utils.fancyError("Unable to parse [{0}] xml using BeautifulSoup".format(node.name))
try:
# In the file xml definitions, things such as enums or defines are listed inside
# of <sectiondef> tags, which may have some nested <briefdescription> or
# <detaileddescription> tags. So as long as we make sure not to search
# recursively, then the following will extract the file descriptions only
# process the brief description if provided
brief = node_soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
brief_desc = ""
if len(brief) == 1:
brief = brief[0]
# Empty descriptions will usually get parsed as a single newline, which we
# want to ignore ;)
if not brief.get_text().isspace():
brief_desc = convertDescriptionToRST(textRoot, node, brief, None)
# process the detailed description if provided
detailed = node_soup.doxygen.compounddef.find_all("detaileddescription", recursive=False)
detailed_desc = ""
if len(detailed) == 1:
detailed = detailed[0]
if not detailed.get_text().isspace():
detailed_desc = convertDescriptionToRST(textRoot, node, detailed, "Detailed Description")
return brief_desc, detailed_desc
except Exception:
utils.fancyError(
"Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file."
) | def getBriefAndDetailedRST(textRoot, node) | Given an input ``node``, return a tuple of strings where the first element of
the return is the ``brief`` description and the second is the ``detailed``
description.
.. todo:: actually document this | 5.296006 | 4.450863 | 1.189883 |
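The non-recursive lookup can be sanity-checked on a toy compound document; the XML below is illustrative rather than real doxygen output, and ``lxml`` must be installed for the ``lxml-xml`` parser:

```python
from bs4 import BeautifulSoup

xml = """<doxygen><compounddef>
  <briefdescription><para>A short summary.</para></briefdescription>
  <detaileddescription><para>The long form.</para></detaileddescription>
</compounddef></doxygen>"""
soup = BeautifulSoup(xml, "lxml-xml")
brief = soup.doxygen.compounddef.find_all("briefdescription", recursive=False)
print(brief[0].get_text().strip())  # -> A short summary.
```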
try:
path = self.endpoints[endpoint]
except KeyError:
msg = 'Unknown endpoint `{0}`'
raise ValueError(msg.format(endpoint))
absolute_url = urljoin(self.target, path)
return absolute_url | def _build_url(self, endpoint) | Builds the absolute URL using the target and desired endpoint. | 3.595913 | 2.973742 | 1.209222 |
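The joining itself is plain ``urljoin``; a sketch with a placeholder endpoint path (the real paths live in ``constants``):

```python
from urllib.parse import urljoin

target = 'http://localhost:6800'
path = 'addversion.json'  # placeholder; the actual value comes from constants
print(urljoin(target, path))  # -> http://localhost:6800/addversion.json
```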
url = self._build_url(constants.ADD_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
files = {
'egg': egg
}
json = self.client.post(url, data=data, files=files,
timeout=self.timeout)
return json['spiders'] | def add_version(self, project, version, egg) | Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint. | 3.658107 | 3.162836 | 1.156591 |
url = self._build_url(constants.CANCEL_ENDPOINT)
data = {
'project': project,
'job': job,
}
if signal is not None:
data['signal'] = signal
json = self.client.post(url, data=data, timeout=self.timeout)
return json['prevstate'] | def cancel(self, project, job, signal=None) | Cancels a job from a specific project. First class, maps to
Scrapyd's cancel job endpoint. | 3.206613 | 3.100116 | 1.034352 |
url = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
data = {
'project': project,
}
self.client.post(url, data=data, timeout=self.timeout)
return True | def delete_project(self, project) | Deletes all versions of a project. First class, maps to Scrapyd's
delete project endpoint. | 3.841215 | 3.2331 | 1.18809 |
url = self._build_url(constants.DELETE_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
self.client.post(url, data=data, timeout=self.timeout)
return True | def delete_version(self, project, version) | Deletes a specific version of a project. First class, maps to
Scrapyd's delete version endpoint. | 3.178209 | 2.889252 | 1.100011 |
all_jobs = self.list_jobs(project)
for state in constants.JOB_STATES:
job_ids = [job['id'] for job in all_jobs[state]]
if job_id in job_ids:
return state
return '' | def job_status(self, project, job_id) | Retrieves the 'status' of a specific job specified by its id. Derived,
utilises Scrapyd's list jobs endpoint to provide the answer. | 2.969967 | 3.044077 | 0.975654 |
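A sketch of polling with it; the identifiers are placeholders, and an empty string means the job id was not found in any state:

```python
import time

from scrapyd_api import ScrapydAPI

scrapyd = ScrapydAPI('http://localhost:6800')
state = scrapyd.job_status('project_name', 'some_job_id')
while state == 'running':
    time.sleep(5)  # poll until the job leaves the running state
    state = scrapyd.job_status('project_name', 'some_job_id')
print(state or 'job id not found')
```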