| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| string (17-31.2k chars) | int64 (1-1) | string (0-2.21M chars) | int64 (1-1) | int64 (4.29k-68.4k) | string (8-843 chars) | 212 classes | 9 classes |
@@ -575,7 +575,7 @@ class decimate(ElementOperation):
        The y_range as a tuple of min and max y-value. Auto-ranges
        if set to None.""")
- def _process(self, element, key=None):
+ def _apply(self, element, key=None):
if not isinstance(element, Dataset):
raise ValueError("Cannot downsample non-Dataset types.")
        if element.interface not in column_interfaces:
| y: 1 | oldf: |
"""
Collection of either extremely generic or simple ElementOperation
examples.
"""
import numpy as np
import param
from param import _is_number
from ..core import (ElementOperation, NdOverlay, Overlay, GridMatrix,
HoloMap, Dataset, Element, Collator)
from ..core.data import ArrayInterface, DictInterface
from ..core.util import find_minmax, group_sanitizer, label_sanitizer, pd
from ..element.chart import Histogram, Scatter
from ..element.raster import Raster, Image, RGB, QuadMesh
from ..element.path import Contours, Polygons
from ..element.util import categorical_aggregate2d
from ..streams import RangeXY
column_interfaces = [ArrayInterface, DictInterface]
if pd:
from ..core.data import PandasInterface
column_interfaces.append(PandasInterface)
def identity(x,k): return x
class operation(ElementOperation):
"""
The most generic operation that wraps any callable into an
    ElementOperation. The callable needs to accept a HoloViews
component and a key (that may be ignored) and must return a new
HoloViews component.
This class may be useful for turning a HoloViews method into an
    operation to define as a compositor operation. For instance, the
following definition:
operation.instance(op=lambda x, k: x.collapse(np.subtract))
    could be used to implement a collapse operation that subtracts the
data between Rasters in an Overlay.
"""
output_type = param.Parameter(None, doc="""
The output element type which may be None to disable type
checking.
May be used to declare useful information to other code in
        HoloViews, e.g. required for tab-completion support of operations
registered with compositors.""")
group = param.String(default='Operation', doc="""
The group assigned to the result after having applied the
operator.""")
op = param.Callable(default=identity, doc="""
The operation used to generate a new HoloViews object returned
by the operation. By default, the identity operation is
applied.""")
def _process(self, view, key=None):
retval = self.p.op(view, key)
if (self.p.output_type is not None):
assert isinstance(retval, self.p.output_type), \
"Return value does not match the declared output type."
return retval.relabel(group=self.p.group)
class factory(ElementOperation):
"""
Simple operation that constructs any element that accepts some
other element as input. For instance, RGB and HSV elements can be
created from overlays of Image elements.
"""
output_type = param.Parameter(RGB, doc="""
        The output type of the factory operation.
        By default, if three overlaid Image elements are supplied,
the corresponding RGB element will be returned. """)
def _process(self, view, key=None):
return self.p.output_type(view)
class chain(ElementOperation):
"""
Defining an ElementOperation chain is an easy way to define a new
ElementOperation from a series of existing ones. The argument is a
list of ElementOperation (or ElementOperation instances) that are
called in sequence to generate the returned element.
chain(operations=[gradient, threshold.instance(level=2)])
This operation can accept an Image instance and would first
compute the gradient before thresholding the result at a level of
2.0.
Instances are only required when arguments need to be passed to
individual operations so the resulting object is a function over a
single argument.
"""
output_type = param.Parameter(Image, doc="""
The output type of the chain operation. Must be supplied if
the chain is to be used as a channel operation.""")
group = param.String(default='Chain', doc="""
The group assigned to the result after having applied the chain.""")
operations = param.List(default=[], class_=ElementOperation, doc="""
A list of ElementOperations (or ElementOperation instances)
        that are applied on the input from left to right.""")
def _process(self, view, key=None):
processed = view
for operation in self.p.operations:
processed = operation.process_element(processed, key,
input_ranges=self.p.input_ranges)
return processed.clone(group=self.p.group)
class transform(ElementOperation):
"""
Generic ElementOperation to transform an input Image or RGBA
element into an output Image. The transformation is defined by
the supplied callable that accepts the data of the input Image
(typically a numpy array) and returns the transformed data of the
output Image.
This operator is extremely versatile; for instance, you could
    implement an alternative to the explicit threshold operator with:
operator=lambda x: np.clip(x, 0, 0.5)
Alternatively, you can implement a transform computing the 2D
autocorrelation using the scipy library with:
operator=lambda x: scipy.signal.correlate2d(x, x)
"""
output_type = Image
group = param.String(default='Transform', doc="""
The group assigned to the result after applying the
transform.""")
operator = param.Callable(doc="""
Function of one argument that transforms the data in the input
Image to the data in the output Image. By default, acts as
the identity function such that the output matches the input.""")
def _process(self, matrix, key=None):
processed = (matrix.data if not self.p.operator
else self.p.operator(matrix.data))
return Image(processed, matrix.bounds, group=self.p.group)
class image_overlay(ElementOperation):
"""
    Operation to build an overlay of images to a specification from a
subset of the required elements.
This is useful for reordering the elements of an overlay,
duplicating layers of an overlay or creating blank image elements
in the appropriate positions.
For instance, image_overlay may build a three layered input
suitable for the RGB factory operation even if supplied with one
or two of the required channels (creating blank channels for the
missing elements).
Note that if there is any ambiguity regarding the match, the
strongest match will be used. In the case of a tie in match
strength, the first layer in the input is used. One successful
match is always required.
"""
output_type = Overlay
spec = param.String(doc="""
Specification of the output Overlay structure. For instance:
Image.R * Image.G * Image.B
Will ensure an overlay of this structure is created even if
(for instance) only (Image.R * Image.B) is supplied.
Elements in the input overlay that match are placed in the
appropriate positions and unavailable specification elements
are created with the specified fill group.""")
fill = param.Number(default=0)
default_range = param.Tuple(default=(0,1), doc="""
The default range that will be set on the value_dimension of
any automatically created blank image elements.""")
group = param.String(default='Transform', doc="""
The group assigned to the resulting overlay.""")
@classmethod
def _match(cls, el, spec):
"Return the strength of the match (None if no match)"
spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
if not isinstance(el, Image) or spec_dict['type'] != 'Image':
raise NotImplementedError("Only Image currently supported")
sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}
strength = 1
for key in ['group', 'label']:
attr_value = sanitizers[key](getattr(el, key))
if key in spec_dict:
if spec_dict[key] != attr_value: return None
strength += 1
return strength
def _match_overlay(self, raster, overlay_spec):
"""
Given a raster or input overlay, generate a list of matched
elements (None if no match) and corresponding tuple of match
strength values.
"""
ordering = [None]*len(overlay_spec) # Elements to overlay
strengths = [0]*len(overlay_spec) # Match strengths
elements = raster.values() if isinstance(raster, Overlay) else [raster]
for el in elements:
for pos in range(len(overlay_spec)):
strength = self._match(el, overlay_spec[pos])
if strength is None: continue # No match
elif (strength <= strengths[pos]): continue # Weaker match
else: # Stronger match
ordering[pos] = el
strengths[pos] = strength
return ordering, strengths
def _process(self, raster, key=None):
specs = tuple(el.strip() for el in self.p.spec.split('*'))
ordering, strengths = self._match_overlay(raster, specs)
if all(el is None for el in ordering):
raise Exception("The image_overlay operation requires at least one match")
completed = []
strongest = ordering[np.argmax(strengths)]
for el, spec in zip(ordering, specs):
if el is None:
spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
el = Image(np.ones(strongest.data.shape) * self.p.fill,
group=spec_dict.get('group','Image'),
label=spec_dict.get('label',''))
el.vdims[0].range = self.p.default_range
completed.append(el)
return np.prod(completed)
class threshold(ElementOperation):
"""
Threshold a given Image whereby all values higher than a given
level map to the specified high value and all values lower than
that level map to the specified low value.
"""
output_type = Image
level = param.Number(default=0.5, doc="""
The value at which the threshold is applied. Values lower than
the threshold map to the 'low' value and values above map to
the 'high' value.""")
high = param.Number(default=1.0, doc="""
The value given to elements greater than (or equal to) the
threshold.""")
low = param.Number(default=0.0, doc="""
The value given to elements below the threshold.""")
group = param.String(default='Threshold', doc="""
The group assigned to the thresholded output.""")
def _process(self, matrix, key=None):
if not isinstance(matrix, Image):
raise TypeError("The threshold operation requires a Image as input.")
arr = matrix.data
high = np.ones(arr.shape) * self.p.high
low = np.ones(arr.shape) * self.p.low
thresholded = np.where(arr > self.p.level, high, low)
return matrix.clone(thresholded, group=self.p.group)
class gradient(ElementOperation):
"""
Compute the gradient plot of the supplied Image.
    If the Image value dimension is cyclic, the smallest step is taken
    with respect to the cyclic range.
"""
output_type = Image
group = param.String(default='Gradient', doc="""
The group assigned to the output gradient matrix.""")
def _process(self, matrix, key=None):
if len(matrix.vdims) != 1:
raise ValueError("Input matrix to gradient operation must "
"have single value dimension.")
matrix_dim = matrix.vdims[0]
data = matrix.data
r, c = data.shape
if matrix_dim.cyclic and (None in matrix_dim.range):
raise Exception("Cyclic range must be specified to compute "
"the gradient of cyclic quantities")
cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)
if cyclic_range is not None:
# shift values such that wrapping works ok
data = data - matrix_dim.range[0]
dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]
dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]
if cyclic_range is not None: # Wrap into the specified range
# Convert negative differences to an equivalent positive value
dx = dx % cyclic_range
dy = dy % cyclic_range
#
# Prefer small jumps
dx_negatives = dx - cyclic_range
dy_negatives = dy - cyclic_range
dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)
dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)
return Image(np.sqrt(dx * dx + dy * dy), matrix.bounds, group=self.p.group)
class convolve(ElementOperation):
"""
Apply a convolution to an overlay using the top layer as the
kernel for convolving the bottom layer. Both Image elements in
the input overlay should have a single value dimension.
"""
output_type = Image
group = param.String(default='Convolution', doc="""
The group assigned to the convolved output.""")
kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc="""
A 2-dimensional slice of the kernel layer to use in the
convolution in lbrt (left, bottom, right, top) format. By
default, no slicing is applied.""")
def _process(self, overlay, key=None):
if len(overlay) != 2:
raise Exception("Overlay must contain at least to items.")
[target, kernel] = overlay.get(0), overlay.get(1)
if len(target.vdims) != 1:
raise Exception("Convolution requires inputs with single value dimensions.")
xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])
yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])
k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data
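        # Convolve via the frequency domain: multiply the FFTs of target and
        # kernel, invert, then re-centre the result and normalise by the
        # kernel sum so total intensity is preserved.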
fft1 = np.fft.fft2(target.data)
fft2 = np.fft.fft2(k, s= target.data.shape)
convolved_raw = np.fft.ifft2(fft1 * fft2).real
k_rows, k_cols = k.shape
rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)
convolved = rolled / float(k.sum())
return Image(convolved, bounds=target.bounds, group=self.p.group)
class contours(ElementOperation):
"""
Given a Image with a single channel, annotate it with contour
lines for a given set of contour levels.
The return is an NdOverlay with a Contours layer for each given
level, overlaid on top of the input Image.
"""
output_type = Overlay
levels = param.NumericTuple(default=(0.5,), doc="""
A list of scalar values used to specify the contour levels.""")
group = param.String(default='Level', doc="""
The group assigned to the output contours.""")
filled = param.Boolean(default=False, doc="""
Whether to generate filled contours""")
overlaid = param.Boolean(default=True, doc="""
Whether to overlay the contour on the supplied Element.""")
def _process(self, element, key=None):
try:
from matplotlib import pyplot as plt
except ImportError:
raise ImportError("contours operation requires matplotlib.")
figure_handle = plt.figure()
extent = element.range(0) + element.range(1)[::-1]
if self.p.filled:
contour_fn = plt.contourf
contour_type = Polygons
else:
contour_fn = plt.contour
contour_type = Contours
if type(element) is Raster:
data = [np.flipud(element.data)]
elif isinstance(element, Raster):
data = [element.data]
elif isinstance(element, QuadMesh):
data = (element.dimension_values(0, False),
element.dimension_values(1, False),
element.data[2])
contour_set = contour_fn(*data, extent=extent,
levels=self.p.levels)
contours = NdOverlay(None, kdims=['Levels'])
for level, cset in zip(self.p.levels, contour_set.collections):
paths = []
for path in cset.get_paths():
paths.extend(np.split(path.vertices, np.where(path.codes==1)[0][1:]))
contours[level] = contour_type(paths, level=level, group=self.p.group,
label=element.label, kdims=element.kdims,
vdims=element.vdims)
plt.close(figure_handle)
if self.p.overlaid:
contours = element * contours
return contours
class histogram(ElementOperation):
"""
Returns a Histogram of the input element data, binned into
num_bins over the bin_range (if specified) along the specified
dimension.
"""
bin_range = param.NumericTuple(default=None, length=2, doc="""
Specifies the range within which to compute the bins.""")
dimension = param.String(default=None, doc="""
Along which dimension of the Element to compute the histogram.""")
individually = param.Boolean(default=True, doc="""
Specifies whether the histogram will be rescaled for each Element in a UniformNdMapping.""")
log = param.Boolean(default=False, doc="""
Whether to use base 10 logarithmic samples for the bin edges.""")
mean_weighted = param.Boolean(default=False, doc="""
Whether the weighted frequencies are averaged.""")
normed = param.Boolean(default=True, doc="""
Whether the histogram frequencies are normalized.""")
nonzero = param.Boolean(default=False, doc="""
Whether to use only nonzero values when computing the histogram""")
num_bins = param.Integer(default=20, doc="""
        Number of bins in the histogram.""")
weight_dimension = param.String(default=None, doc="""
Name of the dimension the weighting should be drawn from""")
style_prefix = param.String(default=None, allow_None=None, doc="""
Used for setting a common style for histograms in a HoloMap or AdjointLayout.""")
def _process(self, view, key=None):
if self.p.dimension:
selected_dim = self.p.dimension
else:
selected_dim = [d.name for d in view.vdims + view.kdims][0]
data = np.array(view.dimension_values(selected_dim))
if self.p.nonzero:
mask = data > 0
data = data[mask]
if self.p.weight_dimension:
weights = np.array(view.dimension_values(self.p.weight_dimension))
if self.p.nonzero:
weights = weights[mask]
else:
weights = None
hist_range = find_minmax((np.nanmin(data), np.nanmax(data)), (0, -float('inf')))\
if self.p.bin_range is None else self.p.bin_range
# Avoids range issues including zero bin range and empty bins
if hist_range == (0, 0):
hist_range = (0, 1)
data = data[np.invert(np.isnan(data))]
if self.p.log:
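            # Log-spaced bins cannot start at zero; clamp the lower edge to
            # the smallest positive data value when necessary.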
bin_min = max([abs(hist_range[0]), data[data>0].min()])
edges = np.logspace(np.log10(bin_min), np.log10(hist_range[1]),
self.p.num_bins+1)
else:
edges = np.linspace(hist_range[0], hist_range[1], self.p.num_bins + 1)
normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed
try:
hist, edges = np.histogram(data[np.isfinite(data)], normed=normed,
range=hist_range, weights=weights, bins=edges)
if not normed and self.p.weight_dimension and self.p.mean_weighted:
hist_mean, _ = np.histogram(data[np.isfinite(data)], normed=normed,
range=hist_range, bins=self.p.num_bins)
hist /= hist_mean
except:
hist = np.zeros(self.p.num_bins)
hist[np.isnan(hist)] = 0
params = {}
if self.p.weight_dimension:
params['vdims'] = [view.get_dimension(self.p.weight_dimension)]
if view.group != view.__class__.__name__:
params['group'] = view.group
return Histogram(hist, edges, kdims=[view.get_dimension(selected_dim)],
label=view.label, **params)
class decimate(ElementOperation):
"""
Decimates any column based Element to a specified number of random
rows if the current view defined by the x_range and y_range
contains more than max_samples. By default the operation returns a
DynamicMap with a RangeXY stream allowing dynamic downsampling.
"""
dynamic = param.Boolean(default=True, doc="""
Enables dynamic processing by default.""")
max_samples = param.Integer(default=5000, doc="""
Maximum number of samples to display at the same time.""")
random_seed = param.Integer(default=42, doc="""
Seed used to initialize randomization.""")
streams = param.List(default=[RangeXY], doc="""
List of streams that are applied if dynamic=True, allowing
for dynamic interaction with the plot.""")
x_range = param.NumericTuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
y_range = param.NumericTuple(default=None, length=2, doc="""
        The y_range as a tuple of min and max y-value. Auto-ranges
if set to None.""")
def _process(self, element, key=None):
if not isinstance(element, Dataset):
raise ValueError("Cannot downsample non-Dataset types.")
if element.interface not in column_interfaces:
            element = element.clone(datatype=['dataframe', 'dictionary'])
xstart, xend = self.p.x_range if self.p.x_range else element.range(0)
ystart, yend = self.p.y_range if self.p.y_range else element.range(1)
# Slice element to current ranges
xdim, ydim = element.dimensions(label=True)[0:2]
sliced = element.select(**{xdim: (xstart, xend),
ydim: (ystart, yend)})
if len(sliced) > self.p.max_samples:
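            # Use a seeded RNG so the downsampled selection is stable
            # across successive redraws of the same view.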
prng = np.random.RandomState(self.p.random_seed)
length = len(sliced)
if element.interface is PandasInterface:
data = sliced.data.sample(self.p.max_samples,
random_state=prng)
else:
inds = prng.choice(length, self.p.max_samples, False)
if isinstance(element.interface, DictInterface):
data = {k: v[inds] for k, v in sliced.data.items()}
else:
data = sliced.data[inds, :]
sliced = element.clone(data)
return sliced
class interpolate_curve(ElementOperation):
"""
Resamples a Curve using the defined interpolation method, e.g.
to represent changes in y-values as steps.
"""
interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',
'steps-post', 'linear'],
default='steps-mid', doc="""
Controls the transition point of the step along the x-axis.""")
@classmethod
def pts_to_prestep(cls, x, y):
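        # Duplicate each x and shift y forward one sample so that the
        # vertical jump happens at the start (pre) of each interval.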
steps = np.zeros((2, 2 * len(x) - 1))
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 0:-2:2]
steps[1:, 0::2] = y
steps[1:, 1::2] = steps[1:, 2::2]
return steps
@classmethod
def pts_to_midstep(cls, x, y):
steps = np.zeros((2, 2 * len(x)))
x = np.asanyarray(x)
steps[0, 1:-1:2] = steps[0, 2::2] = (x[:-1] + x[1:]) / 2
steps[0, 0], steps[0, -1] = x[0], x[-1]
steps[1:, 0::2] = y
steps[1:, 1::2] = steps[1:, 0::2]
return steps
@classmethod
def pts_to_poststep(cls, x, y):
steps = np.zeros((2, 2 * len(x) - 1))
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 2::2]
steps[1:, 0::2] = y
steps[1:, 1::2] = steps[1:, 0:-2:2]
return steps
def _process(self, element, key=None):
INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,
'steps-mid': self.pts_to_midstep,
'steps-post': self.pts_to_poststep}
if self.p.interpolation not in INTERPOLATE_FUNCS:
return element
x, y = element.dimension_values(0), element.dimension_values(1)
array = INTERPOLATE_FUNCS[self.p.interpolation](x, y)
dvals = tuple(element.dimension_values(d) for d in element.dimensions()[2:])
return element.clone((array[0, :], array[1, :])+dvals)
#==================#
# Other operations #
#==================#
class collapse(ElementOperation):
"""
Given an overlay of Element types, collapse into single Element
object using supplied function. Collapsing aggregates over the
key dimensions of each object applying the supplied fn to each group.
This is an example of an ElementOperation that does not involve
any Raster types.
"""
fn = param.Callable(default=np.mean, doc="""
The function that is used to collapse the curve y-values for
each x-value.""")
def _process(self, overlay, key=None):
if isinstance(overlay, NdOverlay):
collapse_map = HoloMap(overlay)
else:
collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})
return collapse_map.collapse(function=self.p.fn)
class gridmatrix(param.ParameterizedFunction):
"""
The gridmatrix operation takes an Element or HoloMap
of Elements as input and creates a GridMatrix object,
which plots each dimension in the Element against
each other dimension. This provides a very useful
overview of high-dimensional data and is inspired
by pandas and seaborn scatter_matrix implementations.
"""
chart_type = param.Parameter(default=Scatter, doc="""
The Element type used to display bivariate distributions
of the data.""")
diagonal_type = param.Parameter(default=None, doc="""
The Element type along the diagonal, may be a Histogram or any
other plot type which can visualize a univariate distribution.
This parameter overrides diagonal_operation.""")
diagonal_operation = param.Parameter(default=histogram, doc="""
The operation applied along the diagonal, may be a histogram-operation
or any other function which returns a viewable element.""")
overlay_dims = param.List(default=[], doc="""
If a HoloMap is supplied this will allow overlaying one or
        more of its key dimensions.""")
def __call__(self, data, **params):
p = param.ParamOverrides(self, params)
if isinstance(data, (HoloMap, NdOverlay)):
ranges = {d.name: data.range(d) for d in data.dimensions()}
data = data.clone({k: GridMatrix(self._process(p, v, ranges))
for k, v in data.items()})
data = Collator(data, merge_type=type(data))()
if p.overlay_dims:
data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))
return data
elif isinstance(data, Element):
data = self._process(p, data)
return GridMatrix(data)
def _process(self, p, element, ranges={}):
# Creates a unified Dataset.data attribute
# to draw the data from
if isinstance(element.data, np.ndarray):
if 'dataframe' in Dataset.datatype:
el_data = element.table('dataframe')
else:
el_data = element.table('dictionary')
else:
el_data = element.data
# Get dimensions to plot against each other
dims = [d for d in element.dimensions()
if _is_number(element.range(d)[0])]
permuted_dims = [(d1, d2) for d1 in dims
for d2 in dims[::-1]]
# Convert Histogram type to operation to avoid one case in the if below.
if p.diagonal_type is Histogram:
p.diagonal_type = None
p.diagonal_operation = histogram
data = {}
for d1, d2 in permuted_dims:
if d1 == d2:
if p.diagonal_type is not None:
values = element.dimension_values(d1)
el = p.diagonal_type(values, vdims=[d1])
elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):
bin_range = ranges.get(d1.name, element.range(d1))
opts = dict(axiswise=True, framewise=True)
el = p.diagonal_operation(element,
dimension=d1.name,
bin_range=bin_range)(norm=opts)
else:
el = p.diagonal_operation(element, dimension=d1.name)
else:
el = p.chart_type(el_data, kdims=[d1],
vdims=[d2], datatype=['dataframe', 'dictionary'])
data[(d1.name, d2.name)] = el
return data
| idx: 1 | id: 16,675 | proj: holoviz-holoviews | lang: py |
msg: Not sure I like the name ``_apply``. Even though ``_process`` is supposed to process elements already, how about ``_process_element`` which processes elements, *excluding* Overlays/NdOverlays.
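A minimal sketch of the rename the reviewer suggests; the method name is the reviewer's proposal, and the body simply mirrors the patch above:

```python
class decimate(ElementOperation):
    # Handles individual elements only, *excluding* Overlays/NdOverlays,
    # hence the proposed name _process_element rather than _apply/_process.
    def _process_element(self, element, key=None):
        if not isinstance(element, Dataset):
            raise ValueError("Cannot downsample non-Dataset types.")
        ...
```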
@@ -131,6 +131,9 @@ const (
// MachineNameTagKey is the key for machine name.
MachineNameTagKey = "MachineName"
+
+ // NodeRoleTagValue describes the value for the node role.
+ NodeRoleTagValue = "node"
)
// ClusterTagKey generates the key for resources associated with a cluster.
| y: 1 | oldf: |
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// Tags defines a map of tags.
type Tags map[string]string
// Equals returns true if the tags are equal.
func (t Tags) Equals(other Tags) bool {
return reflect.DeepEqual(t, other)
}
// HasOwned returns true if the tags contain a tag that marks the resource as owned by the cluster from the perspective of this management tooling.
func (t Tags) HasOwned(cluster string) bool {
value, ok := t[ClusterTagKey(cluster)]
return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
}
// HasAWSCloudProviderOwned returns true if the tags contain a tag that marks the resource as owned by the cluster from the perspective of the in-tree cloud provider.
func (t Tags) HasAWSCloudProviderOwned(cluster string) bool {
value, ok := t[ClusterAWSCloudProviderTagKey(cluster)]
return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
}
// GetRole returns the Cluster API role for the tagged resource.
func (t Tags) GetRole() string {
return t[NameAWSClusterAPIRole]
}
// Difference returns the difference between this map of tags and the other map of tags.
// Items are considered equal if both key and value are equal.
func (t Tags) Difference(other Tags) Tags {
res := make(Tags, len(t))
for key, value := range t {
if otherValue, ok := other[key]; ok && value == otherValue {
continue
}
res[key] = value
}
return res
}
// Merge merges in tags from other. If a tag already exists, it is replaced by the tag in other.
func (t Tags) Merge(other Tags) {
for k, v := range other {
t[k] = v
}
}
// ResourceLifecycle configures the lifecycle of a resource.
type ResourceLifecycle string
const (
// ResourceLifecycleOwned is the value we use when tagging resources to indicate
// that the resource is considered owned and managed by the cluster,
// and in particular that the lifecycle is tied to the lifecycle of the cluster.
ResourceLifecycleOwned = ResourceLifecycle("owned")
// ResourceLifecycleShared is the value we use when tagging resources to indicate
// that the resource is shared between multiple clusters, and should not be destroyed
// if the cluster is destroyed.
ResourceLifecycleShared = ResourceLifecycle("shared")
// NameKubernetesAWSCloudProviderPrefix is the tag name used by the cloud provider to logically
	// separate independent cluster resources, i.e. logically independent
	// clusters running in the same AZ. We use it to identify which
	// resources we expect to be permissive about state changes.
// The tag key = NameKubernetesAWSCloudProviderPrefix + clusterID
// The tag value is an ownership value.
NameKubernetesAWSCloudProviderPrefix = "kubernetes.io/cluster/"
// NameAWSProviderPrefix is the tag prefix we use to differentiate
// cluster-api-provider-aws owned components from other tooling that
// uses NameKubernetesClusterPrefix.
NameAWSProviderPrefix = "sigs.k8s.io/cluster-api-provider-aws/"
// NameAWSProviderOwned is the tag name we use to differentiate
// cluster-api-provider-aws owned components from other tooling that
// uses NameKubernetesClusterPrefix.
NameAWSProviderOwned = NameAWSProviderPrefix + "cluster/"
// NameAWSClusterAPIRole is the tag name we use to mark roles for resources
// dedicated to this cluster api provider implementation.
NameAWSClusterAPIRole = NameAWSProviderPrefix + "role"
// NameAWSSubnetAssociation is the tag name we use to mark association for resources
// dedicated to this cluster api provider implementation.
NameAWSSubnetAssociation = NameAWSProviderPrefix + "association"
// SecondarySubnetTagValue is the secondary subnet tag constant value.
SecondarySubnetTagValue = "secondary"
// APIServerRoleTagValue describes the value for the apiserver role.
APIServerRoleTagValue = "apiserver"
// BastionRoleTagValue describes the value for the bastion role.
BastionRoleTagValue = "bastion"
// CommonRoleTagValue describes the value for the common role.
CommonRoleTagValue = "common"
// PublicRoleTagValue describes the value for the public role.
PublicRoleTagValue = "public"
// PrivateRoleTagValue describes the value for the private role.
PrivateRoleTagValue = "private"
// MachineNameTagKey is the key for machine name.
MachineNameTagKey = "MachineName"
)
// ClusterTagKey generates the key for resources associated with a cluster.
func ClusterTagKey(name string) string {
return fmt.Sprintf("%s%s", NameAWSProviderOwned, name)
}
// ClusterAWSCloudProviderTagKey generates the key for resources associated with a cluster's AWS cloud provider.
func ClusterAWSCloudProviderTagKey(name string) string {
return fmt.Sprintf("%s%s", NameKubernetesAWSCloudProviderPrefix, name)
}
// BuildParams is used to build tags around an aws resource.
type BuildParams struct {
// Lifecycle determines the resource lifecycle.
Lifecycle ResourceLifecycle
// ClusterName is the cluster associated with the resource.
ClusterName string
// ResourceID is the unique identifier of the resource to be tagged.
ResourceID string
// Name is the name of the resource, it's applied as the tag "Name" on AWS.
// +optional
Name *string
// Role is the role associated to the resource.
// +optional
Role *string
// Any additional tags to be added to the resource.
// +optional
Additional Tags
}
// WithMachineName tags the namespaced machine name
// The machine name will be tagged with key "MachineName".
func (b BuildParams) WithMachineName(m *clusterv1.Machine) BuildParams {
machineNamespacedName := types.NamespacedName{Namespace: m.Namespace, Name: m.Name}
b.Additional[MachineNameTagKey] = machineNamespacedName.String()
return b
}
// WithCloudProvider tags the cluster ownership for a resource.
func (b BuildParams) WithCloudProvider(name string) BuildParams {
b.Additional[ClusterAWSCloudProviderTagKey(name)] = string(ResourceLifecycleOwned)
return b
}
// Build builds tags including the cluster tag and returns them in map form.
func Build(params BuildParams) Tags {
tags := make(Tags)
for k, v := range params.Additional {
tags[k] = v
}
if params.ClusterName != "" {
tags[ClusterTagKey(params.ClusterName)] = string(params.Lifecycle)
}
if params.Role != nil {
tags[NameAWSClusterAPIRole] = *params.Role
}
if params.Name != nil {
tags["Name"] = *params.Name
}
return tags
}
| idx: 1 | id: 21,456 | proj: kubernetes-sigs-cluster-api-provider-aws | lang: go |
msg: Let's use this constant when ASG is created as well (currently hardcoded).
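A hedged sketch of the reviewer's request, assuming an ASG tag-building call shaped like the Build helper above; the package alias, scope helper and call site are assumptions, not the provider's actual code:

```go
// Hypothetical ASG tag construction: use the new NodeRoleTagValue
// constant instead of the hardcoded "node" string.
tags := infrav1.Build(infrav1.BuildParams{
	ClusterName: scope.Name(), // assumed scope helper
	Lifecycle:   infrav1.ResourceLifecycleOwned,
	Role:        aws.String(infrav1.NodeRoleTagValue), // was: aws.String("node")
})
```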
@@ -152,6 +152,10 @@ CREATE_PACKAGE_HASHES = textwrap.dedent(f"""\
FROM named_packages
""")
+# All GROUP BY statements are supposed to be:
+# - in the order from most unique values to least unique
+# - integers rather than strings
+
OBJECT_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
        eventname,
| y: 1 | oldf: |
"""
Lambda function that runs Athena queries over CloudTrail logs and .quilt/named_packages/
and creates summaries of object and package access events.
"""
from datetime import datetime, timedelta, timezone
import os
import textwrap
import time
import boto3
ATHENA_DATABASE = os.environ['ATHENA_DATABASE']
# Bucket where CloudTrail logs are located.
CLOUDTRAIL_BUCKET = os.environ['CLOUDTRAIL_BUCKET']
# Bucket where query results will be stored.
QUERY_RESULT_BUCKET = os.environ['QUERY_RESULT_BUCKET']
# Directory where the summary files will be stored.
ACCESS_COUNTS_OUTPUT_DIR = os.environ['ACCESS_COUNTS_OUTPUT_DIR']
# A temporary directory where Athena query results will be written.
QUERY_TEMP_DIR = 'AthenaQueryResults'
# Pre-processed CloudTrail logs, persistent across different runs of the lambda.
OBJECT_ACCESS_LOG_DIR = 'ObjectAccessLog'
# Timestamp for the dir above.
LAST_UPDATE_KEY = f'{OBJECT_ACCESS_LOG_DIR}.last_updated_ts.txt'
# Athena does not allow us to write more than 100 partitions at once.
MAX_OPEN_PARTITIONS = 100
def sql_escape(s):
return s.replace("'", "''")
DROP_CLOUDTRAIL = """DROP TABLE IF EXISTS cloudtrail"""
DROP_OBJECT_ACCESS_LOG = """DROP TABLE IF EXISTS object_access_log"""
DROP_PACKAGE_HASHES = """DROP TABLE IF EXISTS package_hashes"""
CREATE_CLOUDTRAIL = textwrap.dedent(f"""\
CREATE EXTERNAL TABLE cloudtrail (
eventVersion STRING,
userIdentity STRUCT<
type: STRING,
principalId: STRING,
arn: STRING,
accountId: STRING,
invokedBy: STRING,
accessKeyId: STRING,
userName: STRING,
sessionContext: STRUCT<
attributes: STRUCT<
mfaAuthenticated: STRING,
creationDate: STRING>,
sessionIssuer: STRUCT<
type: STRING,
principalId: STRING,
arn: STRING,
accountId: STRING,
userName: STRING>>>,
eventTime STRING,
eventSource STRING,
eventName STRING,
awsRegion STRING,
sourceIpAddress STRING,
userAgent STRING,
errorCode STRING,
errorMessage STRING,
requestParameters STRING,
responseElements STRING,
additionalEventData STRING,
requestId STRING,
eventId STRING,
resources ARRAY<STRUCT<
arn: STRING,
accountId: STRING,
type: STRING>>,
eventType STRING,
apiVersion STRING,
readOnly STRING,
recipientAccountId STRING,
serviceEventDetails STRING,
sharedEventID STRING,
vpcEndpointId STRING
)
PARTITIONED BY (account STRING, region STRING, year STRING, month STRING, day STRING)
ROW FORMAT SERDE 'com.amazon.emr.hive.serde.CloudTrailSerde'
STORED AS INPUTFORMAT 'com.amazon.emr.cloudtrail.CloudTrailInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION 's3://{sql_escape(CLOUDTRAIL_BUCKET)}/AWSLogs/'
TBLPROPERTIES ('classification'='cloudtrail')
""")
ADD_CLOUDTRAIL_PARTITION = textwrap.dedent(f"""\
ALTER TABLE cloudtrail
ADD PARTITION (account = '{{account}}', region = '{{region}}', year = '{{year:04d}}', month = '{{month:02d}}', day = '{{day:02d}}')
LOCATION 's3://{sql_escape(CLOUDTRAIL_BUCKET)}/AWSLogs/{{account}}/CloudTrail/{{region}}/{{year:04d}}/{{month:02d}}/{{day:02d}}/'
""")
CREATE_OBJECT_ACCESS_LOG = textwrap.dedent(f"""\
CREATE EXTERNAL TABLE object_access_log (
eventname STRING,
bucket STRING,
key STRING
)
PARTITIONED BY (date STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
LOCATION 's3://{sql_escape(QUERY_RESULT_BUCKET)}/{sql_escape(OBJECT_ACCESS_LOG_DIR)}/'
TBLPROPERTIES ('parquet.compression'='SNAPPY')
""")
REPAIR_OBJECT_ACCESS_LOG = textwrap.dedent("""
MSCK REPAIR TABLE object_access_log
""")
INSERT_INTO_OBJECT_ACCESS_LOG = textwrap.dedent(f"""\
INSERT INTO object_access_log
SELECT eventname, bucket, key, date_format(eventtime, '%Y-%m-%d') AS date
FROM (
SELECT
eventname,
from_iso8601_timestamp(eventtime) AS eventtime,
json_extract_scalar(requestparameters, '$.bucketName') AS bucket,
json_extract_scalar(requestparameters, '$.key') AS key
FROM cloudtrail
WHERE useragent != 'athena.amazonaws.com' AND useragent NOT LIKE '%quilt3-lambdas-es-indexer%'
)
-- Filter out non-S3 events, or S3 events like ListBucket that have no object
-- Select the correct time range
WHERE bucket IS NOT NULL AND key IS NOT NULL AND
eventtime >= from_unixtime({{start_ts:f}}) AND eventtime < from_unixtime({{end_ts:f}})
""")
CREATE_PACKAGE_HASHES = textwrap.dedent(f"""\
CREATE TABLE package_hashes
WITH (
format = 'Parquet',
parquet_compression = 'SNAPPY',
external_location = 's3://{sql_escape(QUERY_RESULT_BUCKET)}/{sql_escape(QUERY_TEMP_DIR)}/package_hashes/'
)
AS
SELECT DISTINCT
-- Parse a file path like `s3://BUCKET/.quilt/named_packages/USER_NAME/PACKAGE_NAME/VERSION`.
-- Only take package names and hashes, without versions, to avoid duplicates.
split_part("$path", '/', 3) AS bucket,
concat(split_part("$path", '/', 6), '/', split_part("$path", '/', 7)) AS name,
hash
FROM named_packages
""")
OBJECT_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
key,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log
GROUP BY eventname, bucket, key
""")
PACKAGE_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
package_hashes.bucket AS bucket,
name,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log JOIN package_hashes
ON object_access_log.bucket = package_hashes.bucket AND key = concat('.quilt/packages/', hash)
GROUP BY eventname, package_hashes.bucket, name
""")
PACKAGE_VERSION_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
package_hashes.bucket AS bucket,
name,
hash,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log JOIN package_hashes
ON object_access_log.bucket = package_hashes.bucket AND key = concat('.quilt/packages/', hash)
GROUP BY eventname, package_hashes.bucket, name, hash
""")
BUCKET_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log
GROUP BY eventname, bucket
""")
EXTS_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
ext,
CAST(histogram(date) AS JSON) AS counts
FROM (
SELECT
eventname,
bucket,
lower(CASE
WHEN cardinality(parts) > 2 THEN concat(element_at(parts, -2), '.', element_at(parts, -1))
WHEN cardinality(parts) = 2 THEN element_at(parts, -1)
ELSE ''
END
) AS ext,
date
FROM (
SELECT
eventname,
bucket,
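            -- Take the file name, drop its first character so leading-dot
            -- files (e.g. '.bashrc') are not mistaken for extensions, then
            -- split on '.' to recover the extension parts.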
split(substr(element_at(split(key, '/'), -1), 2), '.') AS parts,
date
FROM object_access_log
)
)
GROUP BY eventname, bucket, ext
""")
athena = boto3.client('athena')
s3 = boto3.client('s3')
def start_query(query_string):
output = 's3://%s/%s/' % (QUERY_RESULT_BUCKET, QUERY_TEMP_DIR)
response = athena.start_query_execution(
QueryString=query_string,
QueryExecutionContext=dict(Database=ATHENA_DATABASE),
ResultConfiguration=dict(OutputLocation=output)
)
print("Started query:", response)
execution_id = response['QueryExecutionId']
return execution_id
def query_finished(execution_id):
response = athena.get_query_execution(QueryExecutionId=execution_id)
print("Query status:", response)
state = response['QueryExecution']['Status']['State']
if state == 'RUNNING' or state == 'QUEUED':
return False
elif state == 'SUCCEEDED':
return True
elif state == 'FAILED':
raise Exception("Query failed! QueryExecutionId=%r" % execution_id)
elif state == 'CANCELLED':
raise Exception("Query cancelled! QueryExecutionId=%r" % execution_id)
else:
assert False, "Unexpected state: %s" % state
# Athena limitation for DDL queries.
MAX_CONCURRENT_QUERIES = 20
def run_multiple_queries(query_list):
results = [None] * len(query_list)
remaining_queries = list(enumerate(query_list))
remaining_queries.reverse() # Just to make unit tests more sane: we use pop() later, so keep the order the same.
pending_execution_ids = set()
while remaining_queries or pending_execution_ids:
# Remove completed queries. Make a copy of the set before iterating over it.
for execution_id in list(pending_execution_ids):
if query_finished(execution_id):
pending_execution_ids.remove(execution_id)
# Start new queries.
while remaining_queries and len(pending_execution_ids) < MAX_CONCURRENT_QUERIES:
idx, query = remaining_queries.pop()
execution_id = start_query(query)
results[idx] = execution_id
pending_execution_ids.add(execution_id)
time.sleep(5)
assert all(results)
return results
def delete_dir(bucket, prefix):
params = dict(
Bucket=bucket,
Prefix=prefix,
MaxKeys=1000, # The max we're allowed to delete at once.
)
paginator = s3.get_paginator('list_objects_v2')
for list_response in paginator.paginate(**params):
contents = list_response.get('Contents')
if not contents:
break
delete_response = s3.delete_objects(
            Bucket=bucket,
Delete=dict(
Objects=[dict(
Key=obj['Key']
) for obj in contents]
)
)
errors = delete_response.get('Errors')
if errors:
print(errors)
raise Exception(f"Failed to delete dir: bucket={bucket!r}, prefix={prefix!r}")
def now():
"""Only exists for unit testing, cause patching datetime.utcnow() is pretty much impossible."""
return datetime.now(timezone.utc)
def handler(event, context):
# End of the CloudTrail time range we're going to look at. Subtract 15min because events can be delayed by that much.
end_ts = now() - timedelta(minutes=15)
# Start of the CloudTrail time range: the end timestamp from the previous run, or a year ago if it's the first run.
try:
timestamp_str = s3.get_object(Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY)['Body'].read()
start_ts = datetime.fromtimestamp(float(timestamp_str), timezone.utc)
    except s3.exceptions.NoSuchKey:
start_ts = end_ts - timedelta(days=365)
# We start from scratch, so make sure we don't have any old data.
delete_dir(QUERY_RESULT_BUCKET, OBJECT_ACCESS_LOG_DIR)
# We can't write more than 100 days worth of data at a time due to Athena's partitioning limitations.
# Moreover, we don't want the lambda to time out, so just process 100 days and let the next invocation handle the rest.
end_ts = min(end_ts, start_ts + timedelta(days=MAX_OPEN_PARTITIONS-1))
# Delete the temporary directory where Athena query results are written to.
delete_dir(QUERY_RESULT_BUCKET, QUERY_TEMP_DIR)
# Create a CloudTrail table, but only with partitions for the last N days, to avoid scanning all of the data.
# A bucket can have data for multiple accounts and multiple regions, so those need to be handled first.
partition_queries = []
for account_response in s3.list_objects_v2(Bucket=CLOUDTRAIL_BUCKET, Prefix='AWSLogs/', Delimiter='/').get('CommonPrefixes') or []:
account = account_response['Prefix'].split('/')[1]
for region_response in s3.list_objects_v2(Bucket=CLOUDTRAIL_BUCKET, Prefix=f'AWSLogs/{account}/CloudTrail/', Delimiter='/').get('CommonPrefixes') or []:
region = region_response['Prefix'].split('/')[3]
date = start_ts.date()
while date <= end_ts.date():
query = ADD_CLOUDTRAIL_PARTITION.format(
account=sql_escape(account),
region=sql_escape(region),
year=date.year,
month=date.month,
day=date.day
)
partition_queries.append(query)
date += timedelta(days=1)
# Drop old Athena tables from previous runs.
# (They're in the DB owned by the stack, so safe to do.)
run_multiple_queries([DROP_CLOUDTRAIL, DROP_OBJECT_ACCESS_LOG, DROP_PACKAGE_HASHES])
# Create new Athena tables.
run_multiple_queries([CREATE_CLOUDTRAIL, CREATE_OBJECT_ACCESS_LOG, CREATE_PACKAGE_HASHES])
# Load object access log partitions, after the object access log table is created.
# Create CloudTrail partitions, after the CloudTrail table is created.
run_multiple_queries([REPAIR_OBJECT_ACCESS_LOG] + partition_queries)
# Delete the old timestamp: if the INSERT query or put_object fail, make sure we regenerate everything next time,
# instead of ending up with duplicate logs.
s3.delete_object(Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY)
# Scan CloudTrail and insert new data into "object_access_log".
insert_query = INSERT_INTO_OBJECT_ACCESS_LOG.format(start_ts=start_ts.timestamp(), end_ts=end_ts.timestamp())
run_multiple_queries([insert_query])
# Save the end timestamp.
s3.put_object(Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY, Body=str(end_ts.timestamp()), ContentType='text/plain')
queries = [
('Objects', OBJECT_ACCESS_COUNTS),
('Packages', PACKAGE_ACCESS_COUNTS),
('PackageVersions', PACKAGE_VERSION_ACCESS_COUNTS),
('Bucket', BUCKET_ACCESS_COUNTS),
('Exts', EXTS_ACCESS_COUNTS)
]
execution_ids = run_multiple_queries([query for _, query in queries])
for (filename, _), execution_id in zip(queries, execution_ids):
src_key = f'{QUERY_TEMP_DIR}/{execution_id}.csv'
dest_key = f'{ACCESS_COUNTS_OUTPUT_DIR}/{filename}.csv'
s3.copy(
CopySource=dict(
Bucket=QUERY_RESULT_BUCKET,
Key=src_key
),
Bucket=QUERY_RESULT_BUCKET,
Key=dest_key
)
| idx: 1 | id: 18,498 | proj: quiltdata-quilt | lang: py |
msg: Oh is `bucket` actually higher cardinality than `eventname`?
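For reference, a hypothetical rewrite of OBJECT_ACCESS_COUNTS under the convention the patch introduces, assuming the cardinality order key > bucket > eventname — the very ordering the reviewer is questioning:

```python
# Sketch only: GROUP BY positional ordinals, ordered from most unique
# (key) to least unique (eventname), per the new convention.
OBJECT_ACCESS_COUNTS = textwrap.dedent("""\
    SELECT
        eventname,
        bucket,
        key,
        CAST(histogram(date) AS JSON) AS counts
    FROM object_access_log
    GROUP BY 3, 2, 1
""")
```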
@@ -33,6 +33,7 @@ import sip
from PyQt5.QtCore import QUrl
# so it's available for :debug-pyeval
from PyQt5.QtWidgets import QApplication # pylint: disable=unused-import
+from PyQt5.QtWebEngineWidgets import QWebEngineProfile # pylint: disable=unused-import
from qutebrowser.browser import qutescheme
from qutebrowser.utils import log, objreg, usertypes, message, debug, utils
| y: 1 | oldf: |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Misc. utility commands exposed to the user."""
import functools
import os
import signal
import traceback
try:
import hunter
except ImportError:
hunter = None
import sip
from PyQt5.QtCore import QUrl
# so it's available for :debug-pyeval
from PyQt5.QtWidgets import QApplication # pylint: disable=unused-import
from qutebrowser.browser import qutescheme
from qutebrowser.utils import log, objreg, usertypes, message, debug, utils
from qutebrowser.commands import cmdutils, runners, cmdexc
from qutebrowser.config import config, configdata
from qutebrowser.misc import consolewidget
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
def later(ms: int, command, win_id):
"""Execute a command after some time.
Args:
ms: How many milliseconds to wait.
command: The command to run, with optional args.
"""
if ms < 0:
raise cmdexc.CommandError("I can't run something in the past!")
commandrunner = runners.CommandRunner(win_id)
app = objreg.get('app')
timer = usertypes.Timer(name='later', parent=app)
try:
timer.setSingleShot(True)
try:
timer.setInterval(ms)
except OverflowError:
raise cmdexc.CommandError("Numeric argument is too large for "
"internal int representation.")
timer.timeout.connect(
functools.partial(commandrunner.run_safely, command))
timer.timeout.connect(timer.deleteLater)
timer.start()
except:
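        # Clean up the partially-configured timer on any failure, then re-raise.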
timer.deleteLater()
raise
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
def repeat(times: int, command, win_id):
"""Repeat a given command.
Args:
times: How many times to repeat.
command: The command to run, with optional args.
"""
if times < 0:
raise cmdexc.CommandError("A negative count doesn't make sense.")
commandrunner = runners.CommandRunner(win_id)
for _ in range(times):
commandrunner.run_safely(command)
@cmdutils.register(maxsplit=1, hide=True, no_cmd_split=True,
no_replace_variables=True)
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('count', count=True)
def run_with_count(count_arg: int, command, win_id, count=1):
"""Run a command with the given count.
If run_with_count itself is run with a count, it multiplies count_arg.
Args:
count_arg: The count to pass to the command.
command: The command to run, with optional args.
count: The count that run_with_count itself received.
"""
runners.CommandRunner(win_id).run(command, count_arg * count)
@cmdutils.register(hide=True)
def message_error(text):
"""Show an error message in the statusbar.
Args:
text: The text to show.
"""
message.error(text)
@cmdutils.register(hide=True)
@cmdutils.argument('count', count=True)
def message_info(text, count=1):
"""Show an info message in the statusbar.
Args:
text: The text to show.
count: How many times to show the message
"""
for _ in range(count):
message.info(text)
@cmdutils.register(hide=True)
def message_warning(text):
"""Show a warning message in the statusbar.
Args:
text: The text to show.
"""
message.warning(text)
@cmdutils.register(hide=True)
def clear_messages():
"""Clear all message notifications."""
message.global_bridge.clear_messages.emit()
@cmdutils.register(debug=True)
@cmdutils.argument('typ', choices=['exception', 'segfault'])
def debug_crash(typ='exception'):
"""Crash for debugging purposes.
Args:
typ: either 'exception' or 'segfault'.
"""
if typ == 'segfault':
os.kill(os.getpid(), signal.SIGSEGV)
raise Exception("Segfault failed (wat.)")
else:
raise Exception("Forced crash")
@cmdutils.register(debug=True)
def debug_all_objects():
"""Print a list of all objects to the debug log."""
s = debug.get_all_objects()
log.misc.debug(s)
@cmdutils.register(debug=True)
def debug_cache_stats():
"""Print LRU cache stats."""
prefix_info = configdata.is_valid_prefix.cache_info()
# pylint: disable=protected-access
render_stylesheet_info = config._render_stylesheet.cache_info()
try:
from PyQt5.QtWebKit import QWebHistoryInterface
interface = QWebHistoryInterface.defaultInterface()
history_info = interface.historyContains.cache_info()
except ImportError:
history_info = None
log.misc.debug('is_valid_prefix: {}'.format(prefix_info))
log.misc.debug('_render_stylesheet: {}'.format(render_stylesheet_info))
log.misc.debug('history: {}'.format(history_info))
@cmdutils.register(debug=True)
def debug_console():
"""Show the debugging console."""
try:
con_widget = objreg.get('debug-console')
except KeyError:
log.misc.debug('initializing debug console')
con_widget = consolewidget.ConsoleWidget()
objreg.register('debug-console', con_widget)
if con_widget.isVisible():
log.misc.debug('hiding debug console')
con_widget.hide()
else:
log.misc.debug('showing debug console')
con_widget.show()
@cmdutils.register(debug=True, maxsplit=0, no_cmd_split=True)
def debug_trace(expr=""):
"""Trace executed code via hunter.
Args:
expr: What to trace, passed to hunter.
"""
if hunter is None:
raise cmdexc.CommandError("You need to install 'hunter' to use this "
"command!")
try:
eval('hunter.trace({})'.format(expr))
except Exception as e:
raise cmdexc.CommandError("{}: {}".format(e.__class__.__name__, e))
@cmdutils.register(maxsplit=0, debug=True, no_cmd_split=True)
def debug_pyeval(s, quiet=False):
"""Evaluate a python string and display the results as a web page.
Args:
s: The string to evaluate.
quiet: Don't show the output in a new tab.
"""
try:
r = eval(s)
out = repr(r)
except Exception:
out = traceback.format_exc()
qutescheme.pyeval_output = out
if quiet:
log.misc.debug("pyeval output: {}".format(out))
else:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.openurl(QUrl('qute://pyeval'), newtab=True)
@cmdutils.register(debug=True)
def debug_set_fake_clipboard(s=None):
"""Put data into the fake clipboard and enable logging, used for tests.
Args:
s: The text to put into the fake clipboard, or unset to enable logging.
"""
if s is None:
utils.log_clipboard = True
else:
utils.fake_clipboard = s
@cmdutils.register(hide=True)
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('count', count=True)
def repeat_command(win_id, count=None):
"""Repeat the last executed command.
Args:
count: Which count to pass the command.
"""
mode_manager = objreg.get('mode-manager', scope='window', window=win_id)
if mode_manager.mode not in runners.last_command:
raise cmdexc.CommandError("You didn't do anything yet.")
cmd = runners.last_command[mode_manager.mode]
commandrunner = runners.CommandRunner(win_id)
commandrunner.run(cmd[0], count if count is not None else cmd[1])
@cmdutils.register(debug=True, name='debug-log-capacity')
def log_capacity(capacity: int):
"""Change the number of log lines to be stored in RAM.
Args:
capacity: Number of lines for the log.
"""
if capacity < 0:
raise cmdexc.CommandError("Can't set a negative log capacity!")
else:
log.ram_handler.change_log_capacity(capacity)
@cmdutils.register(debug=True)
@cmdutils.argument('level', choices=sorted(
(level.lower() for level in log.LOG_LEVELS),
key=lambda e: log.LOG_LEVELS[e.upper()]))
def debug_log_level(level: str):
"""Change the log level for console logging.
Args:
level: The log level to set.
"""
log.change_console_formatter(log.LOG_LEVELS[level.upper()])
log.console_handler.setLevel(log.LOG_LEVELS[level.upper()])
@cmdutils.register(debug=True)
def debug_log_filter(filters: str):
"""Change the log filter for console logging.
Args:
filters: A comma separated list of logger names. Can also be "none" to
clear any existing filters.
"""
if log.console_filter is None:
raise cmdexc.CommandError("No log.console_filter. Not attached "
"to a console?")
if filters.strip().lower() == 'none':
log.console_filter.names = None
return
if not set(filters.split(',')).issubset(log.LOGGER_NAMES):
raise cmdexc.CommandError("filters: Invalid value {} - expected one "
"of: {}".format(filters,
', '.join(log.LOGGER_NAMES)))
log.console_filter.names = filters.split(',')
@cmdutils.register()
@cmdutils.argument('current_win_id', win_id=True)
def window_only(current_win_id):
"""Close all windows except for the current one."""
for win_id, window in objreg.window_registry.items():
# We could be in the middle of destroying a window here
if sip.isdeleted(window):
continue
if win_id != current_win_id:
window.close()
@cmdutils.register(hide=True)
def nop():
"""Do nothing."""
return
| idx: 1 | id: 18,487 | proj: qutebrowser-qutebrowser | lang: py |
msg: You can't rely on QtWebEngine being available - but why do you need to import this here at all?
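A minimal sketch of the optional-import guard the review implies, following the pattern this file already uses for hunter (whether the name is needed at module level at all is the reviewer's second question):

```python
try:
    from PyQt5.QtWebEngineWidgets import QWebEngineProfile
except ImportError:
    QWebEngineProfile = None  # QtWebEngine backend not available
```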
@@ -234,6 +234,12 @@ setup(
],
"packages": [
"NVDAObjects",
+ # As of py2exe 0.11.0.0 if the forcibly included package contains subpackages
+ # they need to be listed explicitly (py2exe issue 113).
+ "NVDAObjects.IAccessible",
+ "NVDAObjects.JAB",
+ "NVDAObjects.UIA",
+ "NVDAObjects.window",
"virtualBuffers",
"appModules",
"comInterfaces", | 1 | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import sys
import copy
import gettext
gettext.install("nvda")
from setuptools import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
# versionInfo names must be imported after Gettext
# Suppress E402 (module level import not at top of file)
from versionInfo import (
formatBuildVersionString,
name,
version,
publisher
) # noqa: E402
from versionInfo import *
from py2exe import distutils_buildexe
from py2exe.dllfinder import DllFinder
import wx
import importlib.machinery
# Explicitly put the nvda_dmp dir on the build path so the DMP library is included
sys.path.append(os.path.join("..", "include", "nvda_dmp"))
RT_MANIFEST = 24
manifest_template = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="%(uiAccess)s"
/>
</requestedPrivileges>
</security>
</trustInfo>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 7 -->
<supportedOS
Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"
/>
<!-- Windows 8 -->
<supportedOS
Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"
/>
<!-- Windows 8.1 -->
<supportedOS
Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"
/>
<!-- Windows 10 -->
<supportedOS
Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"
/>
</application>
</compatibility>
</assembly>
"""
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
orig_determine_dll_type = DllFinder.determine_dll_type
def determine_dll_type(self, imagename):
dll = os.path.basename(imagename).lower()
if dll.startswith("api-ms-win-") or dll in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
# These are definitely system dlls available on all systems and must be excluded.
# Including them can cause serious problems when a binary build is run on a different version of Windows.
return None
return orig_determine_dll_type(self, imagename)
DllFinder.determine_dll_type = determine_dll_type
class py2exe(distutils_buildexe.py2exe):
"""Overridden py2exe command to:
* Add a command line option --enable-uiAccess to enable uiAccess for the main executable and EOA proxy
* Add a manifest to the executables
"""
user_options = distutils_buildexe.py2exe.user_options + [
("enable-uiAccess", "u", "enable uiAccess for the main executable"),
]
def initialize_options(self):
super(py2exe, self).initialize_options()
self.enable_uiAccess = False
def run(self):
dist = self.distribution
if self.enable_uiAccess:
# Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
target = copy.deepcopy(dist.windows[0])
target["dest_base"] = "nvda_uiAccess"
target['uiAccess'] = True
dist.windows.insert(1, target)
# nvda_eoaProxy should have uiAccess.
target = dist.windows[3]
target['uiAccess'] = True
# Add a manifest resource to every target at runtime.
for target in dist.windows:
target["other_resources"] = [
(
RT_MANIFEST,
1,
(manifest_template % dict(uiAccess=target['uiAccess'])).encode("utf-8")
),
]
super(py2exe, self).run()
def getLocaleDataFiles():
wxDir=wx.__path__[0]
localeMoFiles=set()
for f in glob("locale/*/LC_MESSAGES"):
localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
lang=os.path.split(os.path.split(f)[0])[1]
if '_' in lang:
lang=lang.split('_')[0]
f=os.path.join('locale',lang,'lc_messages')
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
def getRecursiveDataFiles(dest,source,excludes=()):
rulesList=[]
rulesList.append((dest,
[f for f in glob("%s/*"%source) if not any(fnmatch.fnmatch(f,exclude) for exclude in excludes) and os.path.isfile(f)]))
[rulesList.extend(getRecursiveDataFiles(os.path.join(dest,dirName),os.path.join(source,dirName),excludes=excludes)) for dirName in os.listdir(source) if os.path.isdir(os.path.join(source,dirName)) and not dirName.startswith('.')]
return rulesList
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uiAccess": False,
"icon_resources":[(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description":"NVDA application",
"product_name":name,
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"uiAccess": False,
"icon_resources": [(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description": name,
"product_name":name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uiAccess": False,
"icon_resources": [(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description": "NVDA Ease of Access proxy",
"product_name":name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
console=[
{
"script": os.path.join("..", "include", "nvda_dmp", "nvda_dmp.py"),
"uiAccess": False,
"icon_resources": [(1, "images/nvda.ico")],
"other_resources": [], # Populated at runtime
"version":formatBuildVersionString(),
"description": "NVDA Diff-match-patch proxy",
"product_name": name,
"product_version": version,
"copyright": f"{copyright}, Bill Dengler",
"company_name": f"Bill Dengler, {publisher}",
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": [
"tkinter",
"serial.loopback_connection",
"serial.rfc2217",
"serial.serialcli",
"serial.serialjava",
"serial.serialposix",
"serial.socket_connection",
# netbios (from pywin32) is optionally used by Python3's uuid module.
# This is not needed.
# We also need to exclude win32wnet explicitly.
"netbios",
"win32wnet",
# winxptheme is optionally used by wx.lib.agw.aui.
# We don't need this.
"winxptheme",
# numpy is an optional dependency of comtypes but we don't require it.
"numpy",
],
"packages": [
"NVDAObjects",
"virtualBuffers",
"appModules",
"comInterfaces",
"brailleDisplayDrivers",
"synthDrivers",
"visionEnhancementProviders",
],
"includes": [
"nvdaBuiltin",
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
"bisect",
# robotremoteserver (for system tests) depends on xmlrpc.server
"xmlrpc.server",
],
}},
data_files=[
(".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("libArm64/%s"%version, glob("libArm64/*.dll") + glob("libArm64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("fonts", glob("fonts/*.ttf")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", glob("../miscDeps/python/*.dll")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=tuple(
"*%s" % ext
for ext in importlib.machinery.SOURCE_SUFFIXES + importlib.machinery.BYTECODE_SUFFIXES
) + (
"*.exp",
"*.lib",
"*.pdb",
"__pycache__"
))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers",
excludes=tuple(
"*%s" % ext
for ext in importlib.machinery.SOURCE_SUFFIXES + importlib.machinery.BYTECODE_SUFFIXES
) + (
"__pycache__",
))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| 1 | 34,468 | How did you determine these - and how can we be confident that nothing is missing? | nvaccess-nvda | py |
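One way to answer "how can we be confident that nothing is missing" is to derive the subpackage list instead of hand-maintaining it. A sketch using setuptools; it assumes the NVDAObjects directory is a regular package tree with __init__.py files:

```python
# Enumerate subpackages programmatically so py2exe's list can't go stale.
from setuptools import find_packages

packages = ["NVDAObjects"] + [
    "NVDAObjects." + sub for sub in find_packages("NVDAObjects")
]
print(packages)  # e.g. ['NVDAObjects', 'NVDAObjects.IAccessible', ...]
```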
@@ -0,0 +1,3 @@
+<h2>Workshops</h2>
+<h2>Videos</h2>
+<h2>Books</h2> | 1 | 1 | 6,861 | What is this page going to do that is different than the products index we already have? | thoughtbot-upcase | rb |
|
@@ -87,7 +87,10 @@ module Selenium
return unless File.exist?(manifest_path)
manifest = JSON.parse(File.read(manifest_path))
- [manifest['name'].delete(' '), manifest['version']].join('@')
+ id = if manifest.key?('application') && manifest['application'].key?('gecko')
+ manifest['application']['gecko']['id']
+ end
+ id || [manifest['name'].delete(' '), manifest['version']].join('@')
end
end # Extension
end # Firefox | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Firefox
#
# @api private
#
class Extension
NAMESPACE = 'http://www.mozilla.org/2004/em-rdf#'.freeze
def initialize(path)
unless File.exist?(path)
raise Error::WebDriverError, "could not find extension at #{path.inspect}"
end
@path = path
@should_reap_root = false
end
def write_to(extensions_dir)
root_dir = create_root
ext_path = File.join extensions_dir, read_id(root_dir)
FileUtils.rm_rf ext_path
FileUtils.mkdir_p File.dirname(ext_path), mode: 0o700
FileUtils.cp_r root_dir, ext_path
FileReaper.reap(root_dir) if @should_reap_root
end
private
def create_root
if File.directory? @path
@path
else
unless Zipper::EXTENSIONS.include? File.extname(@path)
raise Error::WebDriverError, "expected #{Zipper::EXTENSIONS.join(' or ')}, got #{@path.inspect}"
end
@should_reap_root = true
Zipper.unzip(@path)
end
end
def read_id(directory)
read_id_from_install_rdf(directory) || read_id_from_manifest_json(directory)
end
def read_id_from_install_rdf(directory)
rdf_path = File.join(directory, 'install.rdf')
return unless File.exist?(rdf_path)
doc = REXML::Document.new(File.read(rdf_path))
namespace = doc.root.namespaces.key(NAMESPACE)
if namespace
id_node = REXML::XPath.first(doc, "//#{namespace}:id")
return id_node.text if id_node
attr_node = REXML::XPath.first(doc, "//@#{namespace}:id")
return attr_node.value if attr_node
end
raise Error::WebDriverError, "cannot locate extension id in #{rdf_path}"
end
def read_id_from_manifest_json(directory)
manifest_path = File.join(directory, 'manifest.json')
return unless File.exist?(manifest_path)
manifest = JSON.parse(File.read(manifest_path))
[manifest['name'].delete(' '), manifest['version']].join('@')
end
end # Extension
end # Firefox
end # WebDriver
end # Selenium
| 1 | 15,944 | Couldn't you just write this as an if/else or a guard clause like on line 87? Just seems a bit weird doing this conditional assignment for essentially an if/else. | SeleniumHQ-selenium | rb |
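The guard-clause shape the reviewer suggests, sketched in Python for brevity (the Ruby original is structurally identical); the manifest dict mirrors the WebExtension manifest.json fields used in the diff:

```python
# Guard clause: return the explicit id early, fall through to the fallback.
def read_id_from_manifest(manifest):
    gecko = manifest.get("application", {}).get("gecko", {})
    if "id" in gecko:
        return gecko["id"]
    return "{}@{}".format(manifest["name"].replace(" ", ""),
                          manifest["version"])


assert read_id_from_manifest({"name": "My Ext", "version": "1.0"}) == "MyExt@1.0"
```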
@@ -3,9 +3,10 @@ module Travis
class Script
module Addons
class Deploy
- VERSIONED_RUNTIMES = [:jdk, :node, :perl, :php, :python, :ruby, :scala, :node]
+ VERSIONED_RUNTIMES = [:jdk, :node, :perl, :php, :python, :ruby, :scala, :node, :go]
USE_RUBY = '1.9.3'
- attr_accessor :script, :config, :allow_failure
+ HASH_CONFIG_KEYS = [:s3_options]
+ attr_accessor :script, :hash_config, :config, :allow_failure
def initialize(script, config)
@silent = false | 1 | module Travis
module Build
class Script
module Addons
class Deploy
VERSIONED_RUNTIMES = [:jdk, :node, :perl, :php, :python, :ruby, :scala, :node]
USE_RUBY = '1.9.3'
attr_accessor :script, :config, :allow_failure
def initialize(script, config)
@silent = false
@script = script
if config.is_a?(Array)
@configs = config
@config = {}
else
@configs = [config]
@config = config
end
end
def deploy
if @configs.length > 1
@configs.each do |config|
Deploy.new(script, config).deploy
end
else
@allow_failure = config.delete(:allow_failure)
script.cmd("git fetch --tags") if on[:tags]
script.if(want) do
script.run_stage(:before_deploy)
run
script.run_stage(:after_deploy)
end
end
end
private
def on
@on ||= begin
on = config.delete(:on) || config.delete(true) || config.delete(:true) || {}
on = { branch: on.to_str } if on.respond_to? :to_str
on[:ruby] ||= on[:rvm] if on.include? :rvm
on[:node] ||= on[:node_js] if on.include? :node_js
on
end
end
def want
conditions = [ want_push(on), want_repo(on), want_branch(on), want_runtime(on), want_condition(on), want_tags(on) ]
conditions.flatten.compact.map { |c| "(#{c})" }.join(" && ")
end
def want_push(on)
'$TRAVIS_PULL_REQUEST = false'
end
def want_repo(on)
"$TRAVIS_REPO_SLUG = \"#{on[:repo]}\"" if on[:repo]
end
def want_branch(on)
return if on[:all_branches]
branches = Array(on[:branch] || default_branches)
branches.map { |b| "$TRAVIS_BRANCH = #{b}" }.join(' || ')
end
def want_tags(on)
'$(git describe --tags --exact-match 2>/dev/null)' if on[:tags]
end
def want_condition(on)
on[:condition]
end
def want_runtime(on)
VERSIONED_RUNTIMES.map do |runtime|
next unless on.include? runtime
"$TRAVIS_#{runtime.to_s.upcase}_VERSION = \"#{on[runtime]}\""
end
end
def run
script.fold('dpl.0') { install }
cmd(run_command, echo: false, assert: false)
end
def install(edge = config[:edge])
command = "gem install dpl"
command << " --pre" if edge
cmd(command, echo: false, assert: !allow_failure)
end
def run_command(assert = !allow_failure)
return "dpl #{options} --fold" unless assert
run_command(false) + "; " + die("failed to deploy")
end
def die(message)
'if [ $? -ne 0 ]; then echo %p; travis_terminate 2; fi' % message
end
def default_branches
default_branches = config.values.grep(Hash).map(&:keys).flatten(1).uniq.compact
default_branches.any? ? default_branches : 'master'
end
def option(key, value)
case value
when Array then value.map { |v| option(key, v) }
when Hash then option(key, value[script.data.branch.to_sym])
when true then "--#{key}"
when nil, false then nil
else "--%s=%p" % [key, value]
end
end
def cmd(cmd, *args)
script.cmd("rvm #{USE_RUBY} do ruby -S #{cmd}", *args)
end
def options
config.flat_map { |k,v| option(k,v) }.compact.join(" ")
end
end
end
end
end
end
| 1 | 11,223 | Is this intended to be here? Seems it belongs to a different PR. | travis-ci-travis-build | rb |
@@ -20,7 +20,7 @@ namespace Xunit.ConsoleClient
{
try
{
- SetConsoleForegroundColor(ConsoleColor.White);
+ Console.ForegroundColor = ConsoleColor.White;
#if !NETCORE
var netVersion = Environment.Version;
#else | 1 | using System;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace Xunit.ConsoleClient
{
public class Program
{
volatile static bool cancel;
static bool failed;
static readonly ConcurrentDictionary<string, ExecutionSummary> completionMessages = new ConcurrentDictionary<string, ExecutionSummary>();
[STAThread]
public static int Main(string[] args)
{
try
{
SetConsoleForegroundColor(ConsoleColor.White);
#if !NETCORE
var netVersion = Environment.Version;
#else
var netVersion = "Core";
#endif
Console.WriteLine("xUnit.net console test runner ({0}-bit .NET {1})", IntPtr.Size * 8, netVersion);
Console.WriteLine("Copyright (C) 2014 Outercurve Foundation.");
Console.WriteLine();
SetConsoleForegroundColor(ConsoleColor.Gray);
if (args.Length == 0 || args[0] == "-?")
{
PrintUsage();
return 1;
}
#if !NETCORE
AppDomain.CurrentDomain.UnhandledException += OnUnhandledException;
#endif
Console.CancelKeyPress += (sender, e) =>
{
if (!cancel)
{
Console.WriteLine("Canceling... (Press Ctrl+C again to terminate)");
cancel = true;
e.Cancel = true;
}
};
var defaultDirectory = Directory.GetCurrentDirectory();
if (!defaultDirectory.EndsWith(new String(new[] { Path.DirectorySeparatorChar })))
{
defaultDirectory += Path.DirectorySeparatorChar;
}
var commandLine = CommandLine.Parse(args);
var failCount = RunProject(defaultDirectory, commandLine.Project, commandLine.TeamCity, commandLine.AppVeyor, commandLine.ShowProgress,
commandLine.ParallelizeAssemblies, commandLine.ParallelizeTestCollections,
commandLine.MaxParallelThreads);
if (commandLine.Wait)
{
Console.WriteLine();
Console.Write("Press enter key to continue...");
Console.ReadLine();
Console.WriteLine();
}
return failCount;
}
catch (ArgumentException ex)
{
Console.WriteLine("error: {0}", ex.Message);
return 1;
}
catch (BadImageFormatException ex)
{
Console.WriteLine("{0}", ex.Message);
return 1;
}
finally
{
ResetConsoleColor();
}
}
#if !NETCORE
static void OnUnhandledException(object sender, UnhandledExceptionEventArgs e)
{
var ex = e.ExceptionObject as Exception;
if (ex != null)
Console.WriteLine(ex.ToString());
else
Console.WriteLine("Error of unknown type thrown in application domain");
Environment.Exit(1);
}
#endif
static void PrintUsage()
{
#if !NETCORE
var executableName = Path.GetFileNameWithoutExtension(Assembly.GetExecutingAssembly().GetLocalCodeBase());
#else
var executableName = "xunit.console.netcore";
#endif
Console.WriteLine("usage: {0} <assemblyFile> [configFile] [assemblyFile [configFile]...] [options]", executableName);
Console.WriteLine();
Console.WriteLine("Note: Configuration files must end in .config");
Console.WriteLine();
Console.WriteLine("Valid options:");
Console.WriteLine(" -parallel option : set parallelization based on option");
Console.WriteLine(" : none - turn off all parallelization");
Console.WriteLine(" : collections - only parallelize collections");
Console.WriteLine(" : assemblies - only parallelize assemblies");
Console.WriteLine(" : all - parallelize assemblies & collections");
Console.WriteLine(" -maxthreads count : maximum thread count for collection parallelization");
Console.WriteLine(" : 0 - run with unbounded thread count");
Console.WriteLine(" : >0 - limit task thread pool size to 'count'");
Console.WriteLine(" -noshadow : do not shadow copy assemblies");
#if !NETCORE
Console.WriteLine(" -teamcity : forces TeamCity mode (normally auto-detected)");
Console.WriteLine(" -appveyor : forces AppVeyor CI mode (normally auto-detected)");
#endif
Console.WriteLine(" -showprogress : display the names of tests as they start and finish");
Console.WriteLine(" -wait : wait for input after completion");
Console.WriteLine(" -trait \"name=value\" : only run tests with matching name/value traits");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
Console.WriteLine(" -notrait \"name=value\" : do not run tests with matching name/value traits");
Console.WriteLine(" : if specified more than once, acts as an AND operation");
Console.WriteLine(" -method \"name\" : run a given test method (should be fully specified;");
Console.WriteLine(" : i.e., 'MyNamespace.MyClass.MyTestMethod')");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
Console.WriteLine(" -class \"name\" : run all methods in a given test class (should be fully");
Console.WriteLine(" : specified; i.e., 'MyNamespace.MyClass')");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
TransformFactory.AvailableTransforms.ForEach(
transform => Console.WriteLine(" {0} : {1}",
String.Format("-{0} <filename>", transform.CommandLine).PadRight(22).Substring(0, 22),
transform.Description)
);
}
static int RunProject(string defaultDirectory, XunitProject project, bool teamcity, bool appVeyor, bool showProgress, bool? parallelizeAssemblies, bool? parallelizeTestCollections, int? maxThreadCount)
{
XElement assembliesElement = null;
var xmlTransformers = TransformFactory.GetXmlTransformers(project);
var needsXml = xmlTransformers.Count > 0;
var consoleLock = new object();
if (!parallelizeAssemblies.HasValue)
parallelizeAssemblies = project.All(assembly => assembly.Configuration.ParallelizeAssembly ?? false);
if (needsXml)
assembliesElement = new XElement("assemblies");
var originalWorkingFolder = Directory.GetCurrentDirectory();
using (AssemblyHelper.SubscribeResolve())
{
var clockTime = Stopwatch.StartNew();
if (parallelizeAssemblies.GetValueOrDefault())
{
var tasks = project.Assemblies.Select(assembly => Task.Run(() => ExecuteAssembly(consoleLock, defaultDirectory, assembly, needsXml, teamcity, appVeyor, showProgress, parallelizeTestCollections, maxThreadCount, project.Filters)));
var results = Task.WhenAll(tasks).GetAwaiter().GetResult();
foreach (var assemblyElement in results.Where(result => result != null))
assembliesElement.Add(assemblyElement);
}
else
{
foreach (var assembly in project.Assemblies)
{
var assemblyElement = ExecuteAssembly(consoleLock, defaultDirectory, assembly, needsXml, teamcity, appVeyor, showProgress, parallelizeTestCollections, maxThreadCount, project.Filters);
if (assemblyElement != null)
assembliesElement.Add(assemblyElement);
}
}
clockTime.Stop();
if (completionMessages.Count > 0)
{
SetConsoleForegroundColor(ConsoleColor.White);
Console.WriteLine();
Console.WriteLine("=== TEST EXECUTION SUMMARY ===");
SetConsoleForegroundColor(ConsoleColor.Gray);
var totalTestsRun = completionMessages.Values.Sum(summary => summary.Total);
var totalTestsFailed = completionMessages.Values.Sum(summary => summary.Failed);
var totalTestsSkipped = completionMessages.Values.Sum(summary => summary.Skipped);
var totalTime = completionMessages.Values.Sum(summary => summary.Time).ToString("0.000s");
var totalErrors = completionMessages.Values.Sum(summary => summary.Errors);
var longestAssemblyName = completionMessages.Keys.Max(key => key.Length);
var longestTotal = totalTestsRun.ToString().Length;
var longestFailed = totalTestsFailed.ToString().Length;
var longestSkipped = totalTestsSkipped.ToString().Length;
var longestTime = totalTime.Length;
var longestErrors = totalErrors.ToString().Length;
foreach (var message in completionMessages.OrderBy(m => m.Key))
Console.WriteLine(" {0} Total: {1}, Errors: {2}, Failed: {3}, Skipped: {4}, Time: {5}",
message.Key.PadRight(longestAssemblyName),
message.Value.Total.ToString().PadLeft(longestTotal),
message.Value.Errors.ToString().PadLeft(longestErrors),
message.Value.Failed.ToString().PadLeft(longestFailed),
message.Value.Skipped.ToString().PadLeft(longestSkipped),
message.Value.Time.ToString("0.000s").PadLeft(longestTime));
if (completionMessages.Count > 1)
Console.WriteLine(" {0} {1} {2} {3} {4} {5}" + Environment.NewLine +
" {6} {7} {8} {9} {10} {11} ({12})",
" ".PadRight(longestAssemblyName),
"-".PadRight(longestTotal, '-'),
"-".PadRight(longestErrors, '-'),
"-".PadRight(longestFailed, '-'),
"-".PadRight(longestSkipped, '-'),
"-".PadRight(longestTime, '-'),
"GRAND TOTAL:".PadLeft(longestAssemblyName),
totalTestsRun,
totalErrors,
totalTestsFailed,
totalTestsSkipped,
totalTime,
clockTime.Elapsed.TotalSeconds.ToString("0.000s"));
}
}
Directory.SetCurrentDirectory(originalWorkingFolder);
xmlTransformers.ForEach(transformer => transformer(assembliesElement));
return failed ? 1 : completionMessages.Values.Sum(summary => summary.Failed);
}
static XmlTestExecutionVisitor CreateVisitor(object consoleLock, string defaultDirectory, XElement assemblyElement, bool teamCity, bool appVeyor, bool showProgress)
{
#if !NETCORE
if (teamCity)
return new TeamCityVisitor(assemblyElement, () => cancel);
else if (appVeyor)
return new AppVeyorVisitor(consoleLock, defaultDirectory, assemblyElement, () => cancel, completionMessages);
#endif
return new StandardOutputVisitor(consoleLock, defaultDirectory, assemblyElement, () => cancel, completionMessages, showProgress);
}
static XElement ExecuteAssembly(object consoleLock, string defaultDirectory, XunitProjectAssembly assembly, bool needsXml, bool teamCity, bool appVeyor, bool showProgress, bool? parallelizeTestCollections, int? maxThreadCount, XunitFilters filters)
{
if (cancel)
return null;
var assemblyElement = needsXml ? new XElement("assembly") : null;
try
{
if (!ValidateFileExists(consoleLock, assembly.AssemblyFilename) || !ValidateFileExists(consoleLock, assembly.ConfigFilename))
return null;
// Turn off pre-enumeration of theories, since there is no theory selection UI in this runner
assembly.Configuration.PreEnumerateTheories = false;
var discoveryOptions = TestFrameworkOptions.ForDiscovery(assembly.Configuration);
var executionOptions = TestFrameworkOptions.ForExecution(assembly.Configuration);
if (maxThreadCount.HasValue)
executionOptions.SetMaxParallelThreads(maxThreadCount.GetValueOrDefault());
if (parallelizeTestCollections.HasValue)
executionOptions.SetDisableParallelization(!parallelizeTestCollections.GetValueOrDefault());
lock (consoleLock)
{
if (assembly.Configuration.DiagnosticMessages ?? false)
Console.WriteLine("Discovering: {0} (method display = {1}, parallel test collections = {2}, max threads = {3})",
Path.GetFileNameWithoutExtension(assembly.AssemblyFilename),
discoveryOptions.GetMethodDisplay(),
!executionOptions.GetDisableParallelization(),
executionOptions.GetMaxParallelThreads());
else
Console.WriteLine("Discovering: {0}", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
}
using (var controller = new XunitFrontController(AppDomainSupport.Denied, assembly.AssemblyFilename, assembly.ConfigFilename, assembly.Configuration.ShadowCopyOrDefault))
using (var discoveryVisitor = new TestDiscoveryVisitor())
{
controller.Find(includeSourceInformation: false, messageSink: discoveryVisitor, discoveryOptions: discoveryOptions);
discoveryVisitor.Finished.WaitOne();
lock (consoleLock)
{
Console.WriteLine("Discovered: {0}", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
}
var resultsVisitor = CreateVisitor(consoleLock, defaultDirectory, assemblyElement, teamCity, appVeyor, showProgress);
var filteredTestCases = discoveryVisitor.TestCases.Where(filters.Filter).ToList();
if (filteredTestCases.Count == 0)
{
lock (consoleLock)
{
SetConsoleForegroundColor(ConsoleColor.DarkYellow);
Console.WriteLine("Info: {0} has no tests to run", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
SetConsoleForegroundColor(ConsoleColor.Gray);
}
}
else
{
controller.RunTests(filteredTestCases, resultsVisitor, executionOptions);
resultsVisitor.Finished.WaitOne();
}
}
}
catch (Exception ex)
{
Console.WriteLine("{0}: {1}", ex.GetType().FullName, ex.Message);
failed = true;
}
return assemblyElement;
}
static bool ValidateFileExists(object consoleLock, string fileName)
{
if (String.IsNullOrWhiteSpace(fileName) || File.Exists(fileName))
return true;
lock (consoleLock)
{
SetConsoleForegroundColor(ConsoleColor.Red);
Console.WriteLine("File not found: {0}", fileName);
SetConsoleForegroundColor(ConsoleColor.Gray);
}
return false;
}
public static void SetConsoleForegroundColor(ConsoleColor value)
{
try
{
Console.ForegroundColor = value;
}
catch (NotSupportedException)
{
Debug.WriteLine("Ignoring NotSupportedException from Console PAL");
}
}
public static void ResetConsoleColor()
{
try
{
Console.ResetColor();
}
catch (NotSupportedException)
{
Debug.WriteLine("Ignoring NotSupportedException from Console PAL");
}
}
}
}
| 1 | 9,822 | These changes, while harmless, were no longer needed with newer versions of System.Console thanks to @ianhays | dotnet-buildtools | .cs |
@@ -58,7 +58,11 @@ logger = logging.getLogger('luigi-interface')
try:
import boto3
- client = boto3.client('ecs')
+ try:
+ import botocore.exceptions
+ client = boto3.client('ecs')
+ except botocore.exceptions.NoRegionError:
+ logger.warning('Your AWS config is missing Region information, ECSTask requires a working config.')
except ImportError:
logger.warning('boto3 is not installed. ECSTasks require boto3')
| 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Outlier Bio, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EC2 Container Service wrapper for Luigi
From the AWS website:
Amazon EC2 Container Service (ECS) is a highly scalable, high performance
container management service that supports Docker containers and allows you
to easily run applications on a managed cluster of Amazon EC2 instances.
To use ECS, you create a taskDefinition_ JSON that defines the `docker run`_
command for one or more containers in a task or service, and then submit this
JSON to the API to run the task.
This `boto3-powered`_ wrapper allows you to create Luigi Tasks to submit ECS
``taskDefinition`` s. You can either pass a dict (mapping directly to the
``taskDefinition`` JSON) OR an Amazon Resource Name (arn) for a previously
registered ``taskDefinition``.
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- A running ECS cluster (see `ECS Get Started`_)
Written and maintained by Jake Feala (@jfeala) for Outlier Bio (@outlierbio)
.. _`docker run`: https://docs.docker.com/reference/commandline/run
.. _taskDefinition: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html
.. _`boto3-powered`: https://boto3.readthedocs.io
.. _awscli: https://aws.amazon.com/cli
.. _`ECS Get Started`: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_GetStarted.html
"""
import time
import logging
import luigi
logger = logging.getLogger('luigi-interface')
try:
import boto3
client = boto3.client('ecs')
except ImportError:
logger.warning('boto3 is not installed. ECSTasks require boto3')
POLL_TIME = 2
def _get_task_statuses(task_ids):
"""
Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
"""
response = client.describe_tasks(tasks=task_ids)
# Error checking
if response['failures'] != []:
raise Exception('There were some failures:\n{0}'.format(
response['failures']))
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Task status request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return [t['lastStatus'] for t in response['tasks']]
def _track_tasks(task_ids):
"""Poll task status until STOPPED"""
while True:
statuses = _get_task_statuses(task_ids)
if all([status == 'STOPPED' for status in statuses]):
logger.info('ECS tasks {0} STOPPED'.format(','.join(task_ids)))
break
time.sleep(POLL_TIME)
logger.debug('ECS task status for tasks {0}: {1}'.format(
','.join(task_ids), statuses))
class ECSTask(luigi.Task):
"""
Base class for an Amazon EC2 Container Service Task
Amazon ECS requires you to register "tasks", which are JSON descriptions
for how to issue the ``docker run`` command. This Luigi Task can either
run a pre-registered ECS taskDefinition, OR register the task on the fly
from a Python dict.
:param task_def_arn: pre-registered task definition ARN (Amazon Resource
Name), of the form::
arn:aws:ecs:<region>:<user_id>:task-definition/<family>:<tag>
:param task_def: dict describing task in taskDefinition JSON format, for
example::
task_def = {
'family': 'hello-world',
'volumes': [],
'containerDefinitions': [
{
'memory': 1,
'essential': True,
'name': 'hello-world',
'image': 'ubuntu',
'command': ['/bin/echo', 'hello world']
}
]
}
"""
task_def_arn = luigi.Parameter(default=None)
task_def = luigi.Parameter(default=None)
@property
def ecs_task_ids(self):
"""Expose the ECS task ID"""
if hasattr(self, '_task_ids'):
return self._task_ids
@property
def command(self):
"""
Command passed to the containers
Override to return list of dicts with keys 'name' and 'command',
describing the container names and commands to pass to the container.
Directly corresponds to the `overrides` parameter of runTask API. For
example::
[
{
'name': 'myContainer',
'command': ['/bin/sleep', '60']
}
]
"""
pass
def run(self):
if (not self.task_def and not self.task_def_arn) or \
(self.task_def and self.task_def_arn):
raise ValueError(('Either (but not both) a task_def (dict) or'
'task_def_arn (string) must be assigned'))
if not self.task_def_arn:
# Register the task and get assigned taskDefinition ID (arn)
response = client.register_task_definition(**self.task_def)
self.task_def_arn = response['taskDefinition']['taskDefinitionArn']
# Submit the task to AWS ECS and get assigned task ID
# (list containing 1 string)
if self.command:
overrides = {'containerOverrides': self.command}
else:
overrides = {}
response = client.run_task(taskDefinition=self.task_def_arn,
overrides=overrides)
self._task_ids = [task['taskArn'] for task in response['tasks']]
# Wait on task completion
_track_tasks(self._task_ids)
| 1 | 15,688 | Umm... this isn't relevant to this PR | spotify-luigi | py |
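An alternative to the patch's import-time try/except is to build the boto3 client lazily, so a missing region only surfaces when an ECS task actually runs. A sketch under that assumption, not luigi's actual structure:

```python
# Lazy client creation keeps module import free of AWS-config side effects.
_client = None


def ecs_client():
    global _client
    if _client is None:
        import boto3  # deferred: importing this module never needs AWS config
        _client = boto3.client("ecs")
    return _client
```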
@@ -24,6 +24,7 @@ namespace Xunit
Netcoreapp = 0x1000,
Uap = 0x2000,
UapAot = 0x4000,
- NetcoreCoreRT = 0x8000
+ NetcoreCoreRT = 0x8000,
+ All = ~0
}
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
namespace Xunit
{
[Flags]
public enum TargetFrameworkMonikers
{
Net45 = 0x1,
Net451 = 0x2,
Net452 = 0x4,
Net46 = 0x8,
Net461 = 0x10,
Net462 = 0x20,
Net463 = 0x40,
Netcore50 = 0x80,
Netcore50aot = 0x100,
Netcoreapp1_0 = 0x200,
Netcoreapp1_1 = 0x400,
NetFramework = 0x800,
Netcoreapp = 0x1000,
Uap = 0x2000,
UapAot = 0x4000,
NetcoreCoreRT = 0x8000
}
}
| 1 | 12,716 | While All makes some sense here, it doesn't make a lot of sense in the SkipOnFramework context. I wonder if we really need to expose anything more here. You can just blindly use 0. | dotnet-buildtools | .cs |
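For comparison, `All = ~0` sets every bit, so a bitwise test against it matches any flag. The same idea with Python's `enum.IntFlag`, spelled as an explicit union of the known members (values illustrative):

```python
import enum


class Monikers(enum.IntFlag):
    NET45 = 0x1
    NETCOREAPP = 0x1000
    UAPAOT = 0x4000
    ALL = 0x1 | 0x1000 | 0x4000  # union of defined members; ~0 would also
                                 # cover bits added later


assert Monikers.NETCOREAPP & Monikers.ALL
```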
@@ -0,0 +1,7 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package main provides the bee binary viper-cobra
+// command definitions.
+package main | 1 | 1 | 13,681 | Actually, this is not correct. Package `cmd/bee` doe snot have any notion of commands or even libraries used for them. It is as simple as possible, just calling the function from `cmd/bee/cmd` which actually implements commands. | ethersphere-bee | go |
|
@@ -7,15 +7,11 @@ const Router = Ember.Router.extend({
});
Router.map(function() {
- this.route('admin', {
- resetNamespace: true
- }, function() {
+ this.route('admin', function() {
this.route('address');
this.route('loaddb');
this.route('lookup', { path: '/' });
- this.route('users', {
- resetNamespace: true
- }, function() {
+ this.route('users', function() {
this.route('edit', { path: '/edit/:user_id' });
});
this.route('roles'); | 1 | import Ember from 'ember';
import config from './config/environment';
const Router = Ember.Router.extend({
location: config.locationType,
rootURL: config.rootURL
});
Router.map(function() {
this.route('admin', {
resetNamespace: true
}, function() {
this.route('address');
this.route('loaddb');
this.route('lookup', { path: '/' });
this.route('users', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:user_id' });
});
this.route('roles');
this.route('query');
this.route('workflow');
});
this.route('appointments', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:appointment_id' });
this.route('search');
this.route('today');
this.route('missed');
});
this.route('finishgauth', { path: '/finishgauth/:s1/:s2/:k/:t/:i/:p' });
this.route('index', { path: '/' });
this.route('imaging', {
resetNamespace: true
}, function() {
this.route('completed');
this.route('edit', { path: '/edit/:imaging_id' });
});
this.route('inventory', {
resetNamespace: true
}, function() {
this.route('barcode', { path: '/barcode/:inventory_id' });
this.route('edit', { path: '/edit/:inventory_id' });
this.route('batch', { path: '/batch/:inventory-batch_id' });
this.route('listing');
this.route('reports');
this.route('request', { path: '/request/:inv-request_id' });
this.route('search', { path: '/search/:search_text' });
});
this.route('invoices', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:invoice_id' });
this.route('search', { path: '/search/:search_text' });
});
this.route('labs', {
resetNamespace: true
}, function() {
this.route('completed');
this.route('edit', { path: '/edit/:lab_id' });
});
this.route('login');
this.route('medication', {
resetNamespace: true
}, function() {
this.route('completed');
this.route('edit', { path: '/edit/:medication_id' });
this.route('return', { path: '/return/:inv-request_id' });
this.route('search', { path: '/search/:search_text' });
});
this.route('patients', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:patient_id' });
this.route('reports');
this.route('admitted');
this.route('search', { path: '/search/:search_text' });
});
this.route('pricing', {
resetNamespace: true
}, function() {
this.route('imaging');
this.route('lab');
this.route('procedure');
this.route('ward');
this.route('edit', { path: '/edit/:pricing_id' });
this.route('search', { path: '/search/:search_text' });
this.route('profiles');
});
this.route('print', {
resetNamespace: true
}, function() {
this.route('invoice', { path: '/invoice/:invoice_id' });
});
this.route('visits', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:visit_id' });
this.route('procedures', {
resetNamespace: true
}, function() {
this.route('edit', { path: '/edit/:procedure_id' });
});
});
});
export default Router;
| 1 | 13,237 | This change is causing tests to fail because `users` sits at `/app/users`, not `/app/admin/users` | HospitalRun-hospitalrun-frontend | js |
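What `resetNamespace` changes, modelled as a toy lookup (illustrative only): the nested route's name, and therefore where its modules resolve in the app tree, drops the parent segment:

```python
def route_module_path(parents, name, reset_namespace=False):
    segments = [name] if reset_namespace else parents + [name]
    return "app/" + "/".join(segments)


assert route_module_path(["admin"], "users", reset_namespace=True) == "app/users"
assert route_module_path(["admin"], "users") == "app/admin/users"
```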
@@ -150,4 +150,19 @@ return [
'secure' => false,
+ /*
+ |--------------------------------------------------------------------------
+ | Same-Site Cookies
+ |--------------------------------------------------------------------------
+ |
+ | This option determines how your cookies behave when cross-site requests
+ | take place, and can be used to mitigate CSRF attacks. By default, we
+ | do not enable this as other CSRF protection services are in place.
+ |
+ | Supported: "lax", "strict"
+ |
+ */
+
+ 'same_site' => null,
+
]; | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Default Session Driver
|--------------------------------------------------------------------------
|
| This option controls the default session "driver" that will be used on
| requests. By default, we will use the lightweight native driver but
| you may specify any of the other wonderful drivers provided here.
|
| Supported: "file", "cookie", "database", "apc",
| "memcached", "redis", "array"
|
*/
'driver' => 'file',
/*
|--------------------------------------------------------------------------
| Session Lifetime
|--------------------------------------------------------------------------
|
| Here you may specify the number of minutes that you wish the session
| to be allowed to remain idle for it is expired. If you want them
| to immediately expire when the browser closes, set it to zero.
|
*/
'lifetime' => 120,
'expire_on_close' => false,
/*
|--------------------------------------------------------------------------
| Session Encryption
|--------------------------------------------------------------------------
|
| This option allows you to easily specify that all of your session data
| should be encrypted before it is stored. All encryption will be run
| automatically by Laravel and you can use the Session like normal.
|
*/
'encrypt' => false,
/*
|--------------------------------------------------------------------------
| Session File Location
|--------------------------------------------------------------------------
|
| When using the native session driver, we need a location where session
| files may be stored. A default has been set for you but a different
| location may be specified. This is only needed for file sessions.
|
*/
'files' => storage_path('framework/sessions'),
/*
|--------------------------------------------------------------------------
| Session Database Connection
|--------------------------------------------------------------------------
|
| When using the "database" session driver, you may specify the database
| connection that should be used to manage your sessions. This should
| correspond to a connection in your "database" configuration file.
|
*/
'connection' => null,
/*
|--------------------------------------------------------------------------
| Session Database Table
|--------------------------------------------------------------------------
|
| When using the "database" session driver, you may specify the table we
| should use to manage the sessions. Of course, a sensible default is
| provided for you; however, you are free to change this as needed.
|
*/
'table' => 'sessions',
/*
|--------------------------------------------------------------------------
| Session Sweeping Lottery
|--------------------------------------------------------------------------
|
| Some session drivers must manually sweep their storage location to get
| rid of old sessions from storage. Here are the chances that it will
| happen on a given request. By default, the odds are 2 out of 100.
|
*/
'lottery' => [2, 100],
/*
|--------------------------------------------------------------------------
| Session Cookie Name
|--------------------------------------------------------------------------
|
| Here you may change the name of the cookie used to identify a session
| instance by ID. The name specified here will get used every time a
| new session cookie is created by the framework for every driver.
|
*/
'cookie' => 'october_session',
/*
|--------------------------------------------------------------------------
| Session Cookie Path
|--------------------------------------------------------------------------
|
| The session cookie path determines the path for which the cookie will
| be regarded as available. Typically, this will be the root path of
| your application but you are free to change this when necessary.
|
*/
'path' => '/',
/*
|--------------------------------------------------------------------------
| Session Cookie Domain
|--------------------------------------------------------------------------
|
| Here you may change the domain of the cookie used to identify a session
| in your application. This will determine which domains the cookie is
| available to in your application. A sensible default has been set.
|
*/
'domain' => null,
/*
|--------------------------------------------------------------------------
| HTTPS Only Cookies
|--------------------------------------------------------------------------
|
| By setting this option to true, session cookies will only be sent back
| to the server if the browser has a HTTPS connection. This will keep
| the cookie from being sent to you if it can not be done securely.
|
*/
'secure' => false,
];
| 1 | 13,517 | Could you provide more information on what exactly each of those three options does? | octobercms-october | php |
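For reference, what the supported values mean in practice; a sketch with Python's stdlib cookie handling (Laravel's semantics are analogous; the `samesite` attribute needs Python 3.8+):

```python
from http.cookies import SimpleCookie

cookie = SimpleCookie()
cookie["october_session"] = "abc123"
cookie["october_session"]["samesite"] = "Lax"  # sent on top-level navigations
# "Strict": never attached to cross-site requests; null/omitted: browser default
print(cookie.output())  # Set-Cookie: october_session=abc123; SameSite=Lax
```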
@@ -1,10 +1,12 @@
using System.Collections.Generic;
+using System;
namespace ScenarioMeasurement
{
public class LinuxTraceSession : ITraceSession
{
- public string TraceFilePath { get { return perfCollect?.TraceFilePath; } }
+ public string TraceFilePath {
+ get { return perfCollect?.TraceFilePath;} }
private PerfCollect perfCollect;
private Dictionary<TraceSessionManager.KernelKeyword, PerfCollect.KernelKeyword> kernelKeywords;
private Dictionary<TraceSessionManager.ClrKeyword, PerfCollect.ClrKeyword> clrKeywords; | 1 | using System.Collections.Generic;
namespace ScenarioMeasurement
{
public class LinuxTraceSession : ITraceSession
{
public string TraceFilePath { get { return perfCollect?.TraceFilePath; } }
private PerfCollect perfCollect;
private Dictionary<TraceSessionManager.KernelKeyword, PerfCollect.KernelKeyword> kernelKeywords;
private Dictionary<TraceSessionManager.ClrKeyword, PerfCollect.ClrKeyword> clrKeywords;
public LinuxTraceSession(string sessionName, string traceName, string traceDirectory, Logger logger)
{
perfCollect = new PerfCollect(traceName, traceDirectory, logger);
InitLinuxKeywordMaps();
}
public void EnableProviders(IParser parser)
{
// Enable both providers and start the session
parser.EnableKernelProvider(this);
parser.EnableUserProviders(this);
perfCollect.Start();
}
public void Dispose()
{
perfCollect.Stop();
}
public void EnableKernelProvider(params TraceSessionManager.KernelKeyword[] keywords)
{
foreach (var keyword in keywords)
{
perfCollect.AddKernelKeyword(kernelKeywords[keyword]);
}
}
public void EnableUserProvider(params TraceSessionManager.ClrKeyword[] keywords)
{
foreach (var keyword in keywords)
{
perfCollect.AddClrKeyword(clrKeywords[keyword]);
}
}
private void InitLinuxKeywordMaps()
{
// initialize linux kernel keyword map
kernelKeywords = new Dictionary<TraceSessionManager.KernelKeyword, PerfCollect.KernelKeyword>();
kernelKeywords[TraceSessionManager.KernelKeyword.Process] = PerfCollect.KernelKeyword.ProcessLifetime;
kernelKeywords[TraceSessionManager.KernelKeyword.Thread] = PerfCollect.KernelKeyword.Thread;
kernelKeywords[TraceSessionManager.KernelKeyword.ContextSwitch] = PerfCollect.KernelKeyword.ContextSwitch;
// initialize linux clr keyword map
clrKeywords = new Dictionary<TraceSessionManager.ClrKeyword, PerfCollect.ClrKeyword>();
clrKeywords[TraceSessionManager.ClrKeyword.Startup] = PerfCollect.ClrKeyword.DotNETRuntimePrivate_StartupKeyword;
}
public void EnableUserProvider(string provider)
{
}
}
}
| 1 | 11,197 | Why this line break? | dotnet-performance | .cs |
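The shape the reviewer is implicitly asking for: keep a trivial accessor on one line. A Python analogue of the one-line C# property (the class skeleton is illustrative):

```python
class LinuxTraceSession:
    def __init__(self, perf_collect=None):
        self._perf_collect = perf_collect

    @property
    def trace_file_path(self):
        # one-line null-safe accessor, mirroring `perfCollect?.TraceFilePath`
        return self._perf_collect.trace_file_path if self._perf_collect else None
```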
@@ -27,6 +27,7 @@ import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensionsinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensionsinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
webhooktesting "github.com/jetstack/cert-manager/cmd/webhook/app/testing"
"github.com/jetstack/cert-manager/pkg/api"
apitesting "github.com/jetstack/cert-manager/pkg/api/testing"
"github.com/jetstack/cert-manager/test/internal/apiserver"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
type StopFunc func()
func RunControlPlane(t *testing.T, ctx context.Context) (*rest.Config, StopFunc) {
env, stopControlPlane := apiserver.RunBareControlPlane(t)
config := env.Config
webhookOpts, stopWebhook := webhooktesting.StartWebhookServer(t, ctx, []string{"--api-server-host=" + config.Host})
crdsDir := apitesting.CRDDirectory(t)
crds := readCustomResourcesAtPath(t, crdsDir)
for _, crd := range crds {
t.Logf("Found CRD with name %q", crd.Name)
}
patchCRDConversion(crds, webhookOpts.URL, webhookOpts.CAPEM)
patchCRDServed(crds)
if _, err := envtest.InstallCRDs(config, envtest.CRDInstallOptions{
CRDs: crdsToRuntimeObjects(crds),
}); err != nil {
t.Fatal(err)
}
cl, err := client.New(config, client.Options{Scheme: api.Scheme})
if err != nil {
t.Fatal(err)
}
// installing the validating webhooks, not using WebhookInstallOptions as it patches the CA to be its own
err = cl.Create(ctx, getValidatingWebhookConfig(webhookOpts.URL, webhookOpts.CAPEM))
if err != nil {
t.Fatal(err)
}
// installing the mutating webhooks, not using WebhookInstallOptions as it patches the CA to be its own
err = cl.Create(ctx, getMutatingWebhookConfig(webhookOpts.URL, webhookOpts.CAPEM))
if err != nil {
t.Fatal(err)
}
return config, func() {
defer stopWebhook()
stopControlPlane()
}
}
var (
internalScheme = runtime.NewScheme()
)
func init() {
utilruntime.Must(metav1.AddMetaToScheme(internalScheme))
apiextensionsinstall.Install(internalScheme)
}
func patchCRDConversion(crds []*v1.CustomResourceDefinition, url string, caPEM []byte) {
for _, crd := range crds {
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Served = true
}
if crd.Spec.Conversion == nil {
continue
}
if crd.Spec.Conversion.Webhook == nil {
continue
}
if crd.Spec.Conversion.Webhook.ClientConfig == nil {
continue
}
if crd.Spec.Conversion.Webhook.ClientConfig.Service == nil {
continue
}
path := ""
if crd.Spec.Conversion.Webhook.ClientConfig.Service.Path != nil {
path = *crd.Spec.Conversion.Webhook.ClientConfig.Service.Path
}
url := fmt.Sprintf("%s%s", url, path)
crd.Spec.Conversion.Webhook.ClientConfig.URL = &url
crd.Spec.Conversion.Webhook.ClientConfig.CABundle = caPEM
crd.Spec.Conversion.Webhook.ClientConfig.Service = nil
}
}
func readCustomResourcesAtPath(t *testing.T, path string) []*v1.CustomResourceDefinition {
serializer := jsonserializer.NewSerializerWithOptions(jsonserializer.DefaultMetaFactory, internalScheme, internalScheme, jsonserializer.SerializerOptions{
Yaml: true,
})
converter := runtime.UnsafeObjectConvertor(internalScheme)
codec := versioning.NewCodec(serializer, serializer, converter, internalScheme, internalScheme, internalScheme, runtime.InternalGroupVersioner, runtime.InternalGroupVersioner, internalScheme.Name())
var crds []*v1.CustomResourceDefinition
if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if filepath.Ext(path) != ".yaml" {
return nil
}
crd, err := readCRDsAtPath(codec, converter, path)
if err != nil {
return err
}
crds = append(crds, crd...)
return nil
}); err != nil {
t.Fatal(err)
}
return crds
}
func readCRDsAtPath(codec runtime.Codec, converter runtime.ObjectConvertor, path string) ([]*v1.CustomResourceDefinition, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var crds []*v1.CustomResourceDefinition
for _, d := range strings.Split(string(data), "\n---\n") {
// skip empty YAML documents
if strings.TrimSpace(d) == "" {
continue
}
internalCRD := &apiextensions.CustomResourceDefinition{}
if _, _, err := codec.Decode([]byte(d), nil, internalCRD); err != nil {
return nil, err
}
out := &v1.CustomResourceDefinition{}
if err := converter.Convert(internalCRD, out, nil); err != nil {
return nil, err
}
crds = append(crds, out)
}
return crds, nil
}
func crdsToRuntimeObjects(in []*v1.CustomResourceDefinition) []client.Object {
out := make([]client.Object, len(in))
for i, crd := range in {
out[i] = client.Object(crd)
}
return out
}
func getValidatingWebhookConfig(url string, caPEM []byte) client.Object {
failurePolicy := admissionregistrationv1.Fail
sideEffects := admissionregistrationv1.SideEffectClassNone
validateURL := fmt.Sprintf("%s/validate", url)
webhook := admissionregistrationv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: "cert-manager-webhook",
},
Webhooks: []admissionregistrationv1.ValidatingWebhook{
{
Name: "webhook.cert-manager.io",
ClientConfig: admissionregistrationv1.WebhookClientConfig{
URL: &validateURL,
CABundle: caPEM,
},
Rules: []admissionregistrationv1.RuleWithOperations{
{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{"cert-manager.io", "acme.cert-manager.io"},
APIVersions: []string{"*"},
Resources: []string{"*/*"},
},
},
},
FailurePolicy: &failurePolicy,
SideEffects: &sideEffects,
AdmissionReviewVersions: []string{"v1"},
},
},
}
return &webhook
}
func getMutatingWebhookConfig(url string, caPEM []byte) client.Object {
failurePolicy := admissionregistrationv1.Fail
sideEffects := admissionregistrationv1.SideEffectClassNone
validateURL := fmt.Sprintf("%s/mutate", url)
webhook := admissionregistrationv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: "cert-manager-webhook",
},
Webhooks: []admissionregistrationv1.MutatingWebhook{
{
Name: "webhook.cert-manager.io",
ClientConfig: admissionregistrationv1.WebhookClientConfig{
URL: &validateURL,
CABundle: caPEM,
},
Rules: []admissionregistrationv1.RuleWithOperations{
{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{"cert-manager.io", "acme.cert-manager.io"},
APIVersions: []string{"*"},
Resources: []string{"*/*"},
},
},
},
FailurePolicy: &failurePolicy,
SideEffects: &sideEffects,
AdmissionReviewVersions: []string{"v1"},
},
},
}
return &webhook
}
// patchCRDServed ensures that even the API versions which are not served are
// available in the integration tests.
// This workaround allows the conversion tests and the ctl convert tests to run.
// TODO: Remove this workaround in cert-manager 1.7 when all the legacy API
// versions will finally be removed.
func patchCRDServed(crds []*v1.CustomResourceDefinition) {
for _, crd := range crds {
for i := range crd.Spec.Versions {
crd.Spec.Versions[i].Served = true
}
}
}
| 1 | 29,603 | We're importing this twice with different aliases. | jetstack-cert-manager | go |
@@ -98,6 +98,7 @@ if __name__ == "__main__":
"grpcio-tools==1.32.0",
"isort>=4.3.21,<5",
"mock==3.0.5",
+ "pandera<0.8.0",
"protobuf==3.13.0", # without this, pip will install the most up-to-date protobuf
"pylint==2.6.0",
"pytest-cov==2.10.1", | 1 | from typing import Dict
from setuptools import find_packages, setup # type: ignore
def long_description() -> str:
return """
## Dagster
Dagster is a data orchestrator for machine learning, analytics, and ETL.
Dagster lets you define pipelines in terms of the data flow between reusable, logical components,
then test locally and run anywhere. With a unified view of pipelines and the assets they produce,
Dagster can schedule and orchestrate Pandas, Spark, SQL, or anything else that Python can invoke.
Dagster is designed for data platform engineers, data engineers, and full-stack data scientists.
Building a data platform with Dagster makes your stakeholders more independent and your systems
more robust. Developing data pipelines with Dagster makes testing easier and deploying faster.
""".strip()
def get_version() -> str:
version: Dict[str, str] = {}
with open("dagster/version.py") as fp:
exec(fp.read(), version) # pylint: disable=W0122
return version["__version__"]
if __name__ == "__main__":
setup(
name="dagster",
version=get_version(),
author="Elementl",
author_email="[email protected]",
license="Apache-2.0",
description="A data orchestrator for machine learning, analytics, and ETL.",
long_description=long_description(),
long_description_content_type="text/markdown",
url="https://github.com/dagster-io/dagster",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["dagster_tests"]),
package_data={
"dagster": [
"dagster/core/storage/event_log/sqlite/alembic/*",
"dagster/core/storage/runs/sqlite/alembic/*",
"dagster/core/storage/schedules/sqlite/alembic/*",
"dagster/generate/new_project/*",
"dagster/grpc/protos/*",
]
},
include_package_data=True,
install_requires=[
# cli
"click>=5.0",
"coloredlogs>=6.1, <=14.0",
# https://github.com/dagster-io/dagster/issues/4167
"Jinja2<3.0",
"PyYAML>=5.1",
# core (not explicitly expressed atm)
# alembic 1.6.3 broke our migrations: https://github.com/sqlalchemy/alembic/issues/848
# alembic 1.7.0 is a breaking change
"alembic>=1.2.1,!=1.6.3,<1.7.0",
"croniter>=0.3.34",
"grpcio>=1.32.0", # ensure version we require is >= that with which we generated the grpc code (set in dev-requirements)
"grpcio-health-checking>=1.32.0",
"packaging>=20.9",
"pendulum",
"protobuf>=3.13.0", # ensure version we require is >= that with which we generated the proto code (set in dev-requirements)
"python-dateutil",
"pytz",
"rx>=1.6,<2", # https://github.com/dagster-io/dagster/issues/4089
"tabulate",
"tqdm",
"typing_compat",
"sqlalchemy>=1.0",
"toposort>=1.0",
"watchdog>=0.8.3",
'psutil >= 1.0; platform_system=="Windows"',
# https://github.com/mhammond/pywin32/issues/1439
'pywin32 != 226; platform_system=="Windows"',
"docstring-parser",
],
extras_require={
"docker": ["docker"],
"test": [
"astroid>=2.3.3,<2.5",
"black==20.8b1",
"coverage==5.3",
"docker",
"flake8>=3.7.8",
"freezegun>=0.3.15",
"grpcio-tools==1.32.0",
"isort>=4.3.21,<5",
"mock==3.0.5",
"protobuf==3.13.0", # without this, pip will install the most up-to-date protobuf
"pylint==2.6.0",
"pytest-cov==2.10.1",
"pytest-dependency==0.5.1",
"pytest-mock==3.3.1",
"pytest-rerunfailures==10.0",
"pytest-runner==5.2",
"pytest-xdist==2.1.0",
"pytest==6.1.1",
"responses==0.10.*",
"snapshottest==0.6.0",
"tox==3.14.2",
"tox-pip-version==0.0.7",
"tqdm==4.48.0", # pylint crash 48.1+
"yamllint",
],
},
entry_points={
"console_scripts": [
"dagster = dagster.cli:main",
"dagster-daemon = dagster.daemon.cli:main",
]
},
)
| 1 | 16,951 | We should avoid adding a pandera dependency to Dagster itself. | dagster-io-dagster | py |
@@ -163,9 +163,16 @@ public abstract class PostgrePrivilege implements DBAPrivilege, Comparable<Postg
public void setPermission(PostgrePrivilegeType privilegeType, boolean permit) {
for (ObjectPermission permission : permissions) {
- if (permission.privilegeType == privilegeType) {
+ if (permission.privilegeType != privilegeType) {
if (permit) {
permission.permissions |= GRANTED;
+ ObjectPermission[] tempPermission = new ObjectPermission[this.permissions.length+1];
+ for(int i = 0; i < this.permissions.length; i++) {
+ tempPermission[i] = this.permissions[i];
+ }
+ tempPermission[this.permissions.length] = new ObjectPermission(privilegeType, permission.getGrantor(), permission.permissions);
+ this.permissions = tempPermission;
+
} else {
permission.permissions = 0;
} | 1 | /*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.postgresql.model;
import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.model.DBPDataSource;
import org.jkiss.dbeaver.model.access.DBAPrivilege;
import org.jkiss.dbeaver.model.access.DBAPrivilegeGrant;
import org.jkiss.dbeaver.model.access.DBARole;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSObject;
import java.util.List;
/**
* PostgrePrivilege
*/
public abstract class PostgrePrivilege implements DBAPrivilege, Comparable<PostgrePrivilege> {
public static final short NONE = 0;
public static final short GRANTED = 1;
public static final short WITH_GRANT_OPTION = 2;
public static final short WITH_HIERARCHY = 4;
public class ObjectPermission implements DBAPrivilegeGrant {
@NotNull
private PostgrePrivilegeType privilegeType;
@NotNull
private String grantor;
private short permissions;
public ObjectPermission(@NotNull PostgrePrivilegeType privilegeType, @NotNull String grantor, short permissions) {
this.privilegeType = privilegeType;
this.grantor = grantor;
this.permissions = permissions;
}
@Override
public DBARole getSubject(DBRProgressMonitor monitor) throws DBException {
return owner instanceof DBARole ? (DBARole) owner : (DBARole) getTargetObject(monitor);
}
@Override
public DBSObject getObject(DBRProgressMonitor monitor) throws DBException {
return owner instanceof DBARole ? getTargetObject(monitor) : owner;
}
@Override
public DBAPrivilege[] getPrivileges() {
return new DBAPrivilege[] { PostgrePrivilege.this };
}
@NotNull
public PostgrePrivilegeType getPrivilegeType() {
return privilegeType;
}
@Override
public boolean isGranted() {
return (permissions & GRANTED) == GRANTED;
}
@NotNull
public String getGrantor() {
return grantor;
}
public short getPermissions() {
return permissions;
}
@Override
public String toString() {
return privilegeType.toString();
}
}
protected final PostgrePrivilegeOwner owner;
private ObjectPermission[] permissions;
public PostgrePrivilege(PostgrePrivilegeOwner owner, List<PostgrePrivilegeGrant> grants) {
this.owner = owner;
this.permissions = new ObjectPermission[grants.size()];
for (int i = 0 ; i < grants.size(); i++) {
final PostgrePrivilegeGrant privilege = grants.get(i);
short permission = GRANTED;
if (privilege.isGrantable()) permission |= WITH_GRANT_OPTION;
if (privilege.isWithHierarchy()) permission |= WITH_HIERARCHY;
this.permissions[i] = new ObjectPermission(privilege.getPrivilegeType(), privilege.getGrantor(), permission);
}
}
public DBAPrivilegeGrant[] getGrants() {
return permissions;
}
@Override
public boolean isPersisted() {
return true;
}
@Nullable
@Override
public String getDescription() {
return null;
}
@Nullable
@Override
public PostgrePrivilegeOwner getParentObject() {
return owner;
}
@NotNull
@Override
public PostgreDataSource getDataSource() {
return owner.getDataSource();
}
public PostgrePrivilegeOwner getOwner() {
return owner;
}
public abstract PostgreObject getTargetObject(DBRProgressMonitor monitor) throws DBException;
public ObjectPermission[] getPermissions() {
return permissions;
}
public PostgrePrivilegeType[] getPrivileges() {
PostgrePrivilegeType[] ppt = new PostgrePrivilegeType[permissions.length];
for (int i = 0; i < permissions.length; i++) {
ppt[i] = permissions[i].getPrivilegeType();
}
return ppt;
}
public short getPermission(PostgrePrivilegeType privilegeType) {
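// NOTE: a stored ALL grant satisfies any requested privilege type, hence the extra check below.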
for (ObjectPermission permission : permissions) {
if (permission.privilegeType == privilegeType || permission.privilegeType == PostgrePrivilegeType.ALL) {
return permission.permissions;
}
}
return NONE;
}
public void setPermission(PostgrePrivilegeType privilegeType, boolean permit) {
for (ObjectPermission permission : permissions) {
if (permission.privilegeType == privilegeType) {
if (permit) {
permission.permissions |= GRANTED;
} else {
permission.permissions = 0;
}
}
}
}
// Properties for permissions viewer
/*
@Property(viewable = true, editable = true, updatable = true, order = 100, name = "SELECT")
public boolean hasPermissionSelect() {
return getPermission(PostgrePrivilegeType.SELECT) != 0;
}
@Property(viewable = true, order = 101, name = "INSERT")
public boolean hasPermissionInsert() {
return getPermission(PostgrePrivilegeType.INSERT) != 0;
}
@Property(viewable = true, order = 102, name = "UPDATE")
public boolean hasPermissionUpdate() {
return getPermission(PostgrePrivilegeType.UPDATE) != 0;
}
@Property(viewable = true, order = 103, name = "DELETE")
public boolean hasPermissionDelete() {
return getPermission(PostgrePrivilegeType.DELETE) != 0;
}
@Property(viewable = true, order = 104, name = "TRUNCATE")
public boolean hasPermissionTruncate() {
return getPermission(PostgrePrivilegeType.TRUNCATE) != 0;
}
@Property(viewable = true, order = 105, name = "REFERENCES")
public boolean hasPermissionReferences() {
return getPermission(PostgrePrivilegeType.REFERENCES) != 0;
}
@Property(viewable = true, order = 106, name = "TRIGGER")
public boolean hasPermissionTrigger() {
return getPermission(PostgrePrivilegeType.TRIGGER) != 0;
}
*/
/**
* Checks all privileges
*/
public boolean hasAllPrivileges(Object object) {
for (PostgrePrivilegeType pt : getDataSource().getSupportedPrivilegeTypes()) {
if (pt.isValid() && pt.supportsType(object.getClass()) && getPermission(pt) == 0) {
return false;
}
}
return true;
}
}
| 1 | 11,591 | Can be replaced with `org.jkiss.utils.ArrayUtils#add`. | dbeaver-dbeaver | java |
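A note on the reviewer's suggestion above: the patch's hand-written grow-and-copy loop is exactly what an append helper such as org.jkiss.utils.ArrayUtils#add encapsulates. Below is a minimal sketch of the idea in plain Java; the helper name ArrayAppendSketch is hypothetical and the exact signature of the DBeaver utility is assumed, not verified.

import java.util.Arrays;

final class ArrayAppendSketch {
    // Grow the array by one slot and append the element - the same effect as
    // the manual for-loop in the patch, expressed as a single call.
    static <T> T[] add(T[] array, T element) {
        T[] grown = Arrays.copyOf(array, array.length + 1); // allocate +1 slot, copy old elements
        grown[array.length] = element;                      // append the new one
        return grown;
    }
}

With such a helper, the grow-and-copy block in setPermission() would collapse to a single assignment along the lines of this.permissions = ArrayUtils.add(this.permissions, new ObjectPermission(...)).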
@@ -653,7 +653,12 @@ public class ZkController implements Closeable {
customThreadPool.submit(() -> Collections.singleton(overseer).parallelStream().forEach(IOUtils::closeQuietly));
try {
- customThreadPool.submit(() -> electionContexts.values().parallelStream().forEach(IOUtils::closeQuietly));
+ customThreadPool.submit(() -> {
+ Collection<ElectionContext> values = electionContexts.values();
+ synchronized (electionContexts) {
+ values.forEach(IOUtils::closeQuietly);
+ }
+ });
} finally {
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.lang.invoke.MethodHandles;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.URLEncoder;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import com.google.common.base.Strings;
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.cloud.overseer.SliceMutator;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.*;
import org.apache.solr.common.cloud.Replica.Type;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.ObjectReleaseTracker;
import org.apache.solr.common.util.SolrNamedThreadFactory;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.common.util.URLUtil;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CloseHook;
import org.apache.solr.core.CloudConfig;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrCoreInitializationException;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.component.HttpShardHandler;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.servlet.SolrDispatchFilter;
import org.apache.solr.update.UpdateLog;
import org.apache.solr.util.RTimer;
import org.apache.solr.util.RefCounted;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.SessionExpiredException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
/**
* Handle ZooKeeper interactions.
* <p>
* notes: loads everything on init, creates what's not there - further updates
* are prompted with Watches.
* <p>
* TODO: exceptions during close on attempts to update cloud state
*/
public class ZkController implements Closeable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
static final int WAIT_DOWN_STATES_TIMEOUT_SECONDS = 60;
private final boolean SKIP_AUTO_RECOVERY = Boolean.getBoolean("solrcloud.skip.autorecovery");
private final ZkDistributedQueue overseerJobQueue;
private final OverseerTaskQueue overseerCollectionQueue;
private final OverseerTaskQueue overseerConfigSetQueue;
private final DistributedMap overseerRunningMap;
private final DistributedMap overseerCompletedMap;
private final DistributedMap overseerFailureMap;
private final DistributedMap asyncIdsMap;
public final static String COLLECTION_PARAM_PREFIX = "collection.";
public final static String CONFIGNAME_PROP = "configName";
static class ContextKey {
private String collection;
private String coreNodeName;
public ContextKey(String collection, String coreNodeName) {
this.collection = collection;
this.coreNodeName = coreNodeName;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result
+ ((collection == null) ? 0 : collection.hashCode());
result = prime * result
+ ((coreNodeName == null) ? 0 : coreNodeName.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
ContextKey other = (ContextKey) obj;
if (collection == null) {
if (other.collection != null) return false;
} else if (!collection.equals(other.collection)) return false;
if (coreNodeName == null) {
if (other.coreNodeName != null) return false;
} else if (!coreNodeName.equals(other.coreNodeName)) return false;
return true;
}
}
private final Map<ContextKey, ElectionContext> electionContexts = Collections.synchronizedMap(new HashMap<>());
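// NOTE: Collections.synchronizedMap only synchronizes individual calls; its
// collection views (values(), keySet(), entrySet()) must be iterated inside a
// synchronized (electionContexts) block to be safe.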
private final SolrZkClient zkClient;
public final ZkStateReader zkStateReader;
private SolrCloudManager cloudManager;
private CloudSolrClient cloudSolrClient;
private final String zkServerAddress; // example: 127.0.0.1:54062/solr
private final int localHostPort; // example: 54065
private final String hostName; // example: 127.0.0.1
private final String nodeName; // example: 127.0.0.1:54065_solr
private String baseURL; // example: http://127.0.0.1:54065/solr
private final CloudConfig cloudConfig;
private final NodesSysPropsCacher sysPropsCacher;
private LeaderElector overseerElector;
private Map<String, ReplicateFromLeader> replicateFromLeaders = new ConcurrentHashMap<>();
private final Map<String, ZkCollectionTerms> collectionToTerms = new HashMap<>();
// for now, this can be null in tests, in which case recovery will be inactive, and other features
// may accept defaults or use mocks rather than pulling things from a CoreContainer
private CoreContainer cc;
protected volatile Overseer overseer;
private int leaderVoteWait;
private int leaderConflictResolveWait;
private boolean genericCoreNodeNames;
private int clientTimeout;
private volatile boolean isClosed;
private final ConcurrentHashMap<String, Throwable> replicasMetTragicEvent = new ConcurrentHashMap<>();
@Deprecated
// keeps track of replicas that have been asked to recover by leaders running on this node
private final Map<String, String> replicasInLeaderInitiatedRecovery = new HashMap<String, String>();
// This is an expert and unsupported development mode that does not create
// an Overseer or register a /live node. This lets you monitor the cluster
// and interact with zookeeper via the Solr admin UI on a node outside the cluster,
// and thus one that will not be killed or stopped when testing. See developer cloud-scripts.
private boolean zkRunOnly = Boolean.getBoolean("zkRunOnly"); // expert
// keeps track of a list of objects that need to know a new ZooKeeper session was created after expiration occurred
// ref is held as a HashSet since we clone the set before notifying to avoid synchronizing too long
private HashSet<OnReconnect> reconnectListeners = new HashSet<OnReconnect>();
private class RegisterCoreAsync implements Callable<Object> {
CoreDescriptor descriptor;
boolean recoverReloadedCores;
boolean afterExpiration;
RegisterCoreAsync(CoreDescriptor descriptor, boolean recoverReloadedCores, boolean afterExpiration) {
this.descriptor = descriptor;
this.recoverReloadedCores = recoverReloadedCores;
this.afterExpiration = afterExpiration;
}
public Object call() throws Exception {
if (log.isInfoEnabled()) {
log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration);
}
register(descriptor.getName(), descriptor, recoverReloadedCores, afterExpiration, false);
return descriptor;
}
}
// notifies registered listeners after the ZK reconnect in the background
private static class OnReconnectNotifyAsync implements Callable<Object> {
private final OnReconnect listener;
OnReconnectNotifyAsync(OnReconnect listener) {
this.listener = listener;
}
@Override
public Object call() throws Exception {
listener.command();
return null;
}
}
/**
* @param cc Core container associated with this controller. cannot be null.
* @param zkServerAddress where to connect to the zk server
* @param zkClientConnectTimeout timeout in ms
* @param cloudConfig configuration for this controller. TODO: possibly redundant with CoreContainer
* @param descriptorsSupplier a supplier of the current core descriptors. used to know which cores to re-register on reconnect
*/
@SuppressWarnings({"unchecked"})
public ZkController(final CoreContainer cc, String zkServerAddress, int zkClientConnectTimeout, CloudConfig cloudConfig, final Supplier<List<CoreDescriptor>> descriptorsSupplier)
throws InterruptedException, TimeoutException, IOException {
if (cc == null) throw new IllegalArgumentException("CoreContainer cannot be null.");
this.cc = cc;
this.cloudConfig = cloudConfig;
this.genericCoreNodeNames = cloudConfig.getGenericCoreNodeNames();
// be forgiving and strip off any leading/trailing slashes
// this allows us to support users specifying hostContext="/" in
// solr.xml to indicate the root context, instead of hostContext=""
// which means the default of "solr"
String localHostContext = trimLeadingAndTrailingSlashes(cloudConfig.getSolrHostContext());
this.zkServerAddress = zkServerAddress;
this.localHostPort = cloudConfig.getSolrHostPort();
this.hostName = normalizeHostName(cloudConfig.getHost());
this.nodeName = generateNodeName(this.hostName, Integer.toString(this.localHostPort), localHostContext);
MDCLoggingContext.setNode(nodeName);
this.leaderVoteWait = cloudConfig.getLeaderVoteWait();
this.leaderConflictResolveWait = cloudConfig.getLeaderConflictResolveWait();
this.clientTimeout = cloudConfig.getZkClientTimeout();
DefaultConnectionStrategy strat = new DefaultConnectionStrategy();
String zkACLProviderClass = cloudConfig.getZkACLProviderClass();
ZkACLProvider zkACLProvider = null;
if (zkACLProviderClass != null && zkACLProviderClass.trim().length() > 0) {
zkACLProvider = cc.getResourceLoader().newInstance(zkACLProviderClass, ZkACLProvider.class);
} else {
zkACLProvider = new DefaultZkACLProvider();
}
String zkCredentialsProviderClass = cloudConfig.getZkCredentialsProviderClass();
if (zkCredentialsProviderClass != null && zkCredentialsProviderClass.trim().length() > 0) {
strat.setZkCredentialsToAddAutomatically(cc.getResourceLoader().newInstance(zkCredentialsProviderClass, ZkCredentialsProvider.class));
} else {
strat.setZkCredentialsToAddAutomatically(new DefaultZkCredentialsProvider());
}
addOnReconnectListener(getConfigDirListener());
zkClient = new SolrZkClient(zkServerAddress, clientTimeout, zkClientConnectTimeout, strat,
// on reconnect, reload cloud info
new OnReconnect() {
@Override
public void command() throws SessionExpiredException {
log.info("ZooKeeper session re-connected ... refreshing core states after session expiration.");
clearZkCollectionTerms();
try {
// recreate our watchers first so that they exist even on any problems below
zkStateReader.createClusterStateWatchersAndUpdate();
// this is troublesome - we don't want to kill anything the old
// leader accepted
// though I guess sync will likely get those updates back? But
// only if it is involved in the sync, and it certainly may not be
// ExecutorUtil.shutdownAndAwaitTermination(cc.getCmdDistribExecutor());
// we need to create all of our lost watches
// seems we don't need to do this again...
// Overseer.createClientNodes(zkClient, getNodeName());
// start the overseer first as the following code may need its processing
if (!zkRunOnly) {
ElectionContext context = new OverseerElectionContext(zkClient,
overseer, getNodeName());
ElectionContext prevContext = overseerElector.getContext();
if (prevContext != null) {
prevContext.cancelElection();
prevContext.close();
}
overseerElector.setup(context);
overseerElector.joinElection(context, true);
}
cc.cancelCoreRecoveries();
try {
registerAllCoresAsDown(descriptorsSupplier, false);
} catch (SessionExpiredException e) {
// zk has to reconnect and this will all be tried again
throw e;
} catch (Exception e) {
// this is really best effort - in case of races or failure cases where we now need to be the leader, if anything fails,
// just continue
log.warn("Exception while trying to register all cores as DOWN", e);
}
// we have to register as live first to pick up docs in the buffer
createEphemeralLiveNode();
List<CoreDescriptor> descriptors = descriptorsSupplier.get();
// re register all descriptors
ExecutorService executorService = (cc != null) ? cc.getCoreZkRegisterExecutorService() : null;
if (descriptors != null) {
for (CoreDescriptor descriptor : descriptors) {
// TODO: we need to think carefully about what happens when it
// was
// a leader that was expired - as well as what to do about
// leaders/overseers
// with connection loss
try {
// unload solrcores that have been 'failed over'
throwErrorIfReplicaReplaced(descriptor);
if (executorService != null) {
executorService.submit(new RegisterCoreAsync(descriptor, true, true));
} else {
register(descriptor.getName(), descriptor, true, true, false);
}
} catch (Exception e) {
SolrException.log(log, "Error registering SolrCore", e);
}
}
}
// notify any other objects that need to know when the session was re-connected
HashSet<OnReconnect> clonedListeners;
synchronized (reconnectListeners) {
clonedListeners = (HashSet<OnReconnect>)reconnectListeners.clone();
}
// the OnReconnect operation can be expensive per listener, so do that async in the background
for (OnReconnect listener : clonedListeners) {
try {
if (executorService != null) {
executorService.submit(new OnReconnectNotifyAsync(listener));
} else {
listener.command();
}
} catch (Exception exc) {
// not much we can do here other than warn in the log
log.warn("Error when notifying OnReconnect listener {} after session re-connected.", listener, exc);
}
}
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
throw new ZooKeeperException(
SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (SessionExpiredException e) {
throw e;
} catch (Exception e) {
SolrException.log(log, "", e);
throw new ZooKeeperException(
SolrException.ErrorCode.SERVER_ERROR, "", e);
}
}
}, new BeforeReconnect() {
@Override
public void command() {
try {
ZkController.this.overseer.close();
} catch (Exception e) {
log.error("Error trying to stop any Overseer threads", e);
}
closeOutstandingElections(descriptorsSupplier);
markAllAsNotLeader(descriptorsSupplier);
}
}, zkACLProvider, new ConnectionManager.IsClosed() {
@Override
public boolean isClosed() {
return cc.isShutDown();
}});
// Refuse to start if ZK has a non-empty /clusterstate.json
checkNoOldClusterstate(zkClient);
this.overseerRunningMap = Overseer.getRunningMap(zkClient);
this.overseerCompletedMap = Overseer.getCompletedMap(zkClient);
this.overseerFailureMap = Overseer.getFailureMap(zkClient);
this.asyncIdsMap = Overseer.getAsyncIdsMap(zkClient);
zkStateReader = new ZkStateReader(zkClient, () -> {
if (cc != null) cc.securityNodeChanged();
});
init();
this.overseerJobQueue = overseer.getStateUpdateQueue();
this.overseerCollectionQueue = overseer.getCollectionQueue(zkClient);
this.overseerConfigSetQueue = overseer.getConfigSetQueue(zkClient);
this.sysPropsCacher = new NodesSysPropsCacher(getSolrCloudManager().getNodeStateProvider(),
getNodeName(), zkStateReader);
assert ObjectReleaseTracker.track(this);
}
/**
* <p>Verifies if /clusterstate.json exists in ZooKeeper, and if it does and is not empty, refuses to start and outputs
* a helpful message regarding collection migration.</p>
*
* <p>If /clusterstate.json exists and is empty, it is removed.</p>
*/
private void checkNoOldClusterstate(final SolrZkClient zkClient) throws InterruptedException {
try {
if (!zkClient.exists(ZkStateReader.UNSUPPORTED_CLUSTER_STATE, true)) {
return;
}
final byte[] data = zkClient.getData(ZkStateReader.UNSUPPORTED_CLUSTER_STATE, null, null, true);
if (Arrays.equals("{}".getBytes(StandardCharsets.UTF_8), data)) {
// Empty json. This log will only occur once.
log.warn("{} no longer supported starting with Solr 9. Found empty file on Zookeeper, deleting it.", ZkStateReader.UNSUPPORTED_CLUSTER_STATE);
zkClient.delete(ZkStateReader.UNSUPPORTED_CLUSTER_STATE, -1, true);
} else {
// /clusterstate.json not empty: refuse to start but do not automatically delete. A bit of a pain but user shouldn't
// have older collections at this stage anyway.
String message = ZkStateReader.UNSUPPORTED_CLUSTER_STATE + " no longer supported starting with Solr 9. "
+ "It is present and not empty. Cannot start Solr. Please first migrate collections to stateFormat=2 using an "
+ "older version of Solr or if you don't care about the data then delete the file from "
+ "Zookeeper using a command line tool, for example: bin/solr zk rm /clusterstate.json -z host:port";
log.error(message);
throw new SolrException(SolrException.ErrorCode.INVALID_STATE, message);
}
} catch (KeeperException e) {
// Convert checked exception to one acceptable by the caller (see also init() further down)
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
}
public int getLeaderVoteWait() {
return leaderVoteWait;
}
public int getLeaderConflictResolveWait() {
return leaderConflictResolveWait;
}
private void registerAllCoresAsDown(
final Supplier<List<CoreDescriptor>> registerOnReconnect, boolean updateLastPublished) throws SessionExpiredException {
List<CoreDescriptor> descriptors = registerOnReconnect.get();
if (isClosed) return;
if (descriptors != null) {
// before registering as live, make sure everyone is in a
// down state
publishNodeAsDown(getNodeName());
for (CoreDescriptor descriptor : descriptors) {
// if it looks like we are going to be the leader, we don't
// want to wait for the following stuff
CloudDescriptor cloudDesc = descriptor.getCloudDescriptor();
String collection = cloudDesc.getCollectionName();
String slice = cloudDesc.getShardId();
try {
int children = zkStateReader
.getZkClient()
.getChildren(
ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection
+ "/leader_elect/" + slice + "/election", null, true).size();
if (children == 0) {
log.debug("looks like we are going to be the leader for collection {} shard {}", collection, slice);
continue;
}
} catch (NoNodeException e) {
log.debug("looks like we are going to be the leader for collection {} shard {}", collection, slice);
continue;
} catch (InterruptedException e2) {
Thread.currentThread().interrupt();
} catch (SessionExpiredException e) {
// zk has to reconnect
throw e;
} catch (KeeperException e) {
log.warn("", e);
Thread.currentThread().interrupt();
}
final String coreZkNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
try {
log.debug("calling waitForLeaderToSeeDownState for coreZkNodeName={} collection={} shard={}", new Object[]{coreZkNodeName, collection, slice});
waitForLeaderToSeeDownState(descriptor, coreZkNodeName);
} catch (Exception e) {
log.warn("There was a problem while making a best effort to ensure the leader has seen us as down, this is not unexpected as Zookeeper has just reconnected after a session expiration", e);
if (isClosed) {
return;
}
}
}
}
}
public NodesSysPropsCacher getSysPropsCacher() {
return sysPropsCacher;
}
private void closeOutstandingElections(final Supplier<List<CoreDescriptor>> registerOnReconnect) {
List<CoreDescriptor> descriptors = registerOnReconnect.get();
if (descriptors != null) {
for (CoreDescriptor descriptor : descriptors) {
closeExistingElectionContext(descriptor);
}
}
}
private ContextKey closeExistingElectionContext(CoreDescriptor cd) {
// look for old context - if we find it, cancel it
String collection = cd.getCloudDescriptor().getCollectionName();
final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
ContextKey contextKey = new ContextKey(collection, coreNodeName);
ElectionContext prevContext = electionContexts.get(contextKey);
if (prevContext != null) {
prevContext.close();
electionContexts.remove(contextKey);
}
return contextKey;
}
private void markAllAsNotLeader(final Supplier<List<CoreDescriptor>> registerOnReconnect) {
List<CoreDescriptor> descriptors = registerOnReconnect.get();
if (descriptors != null) {
for (CoreDescriptor descriptor : descriptors) {
descriptor.getCloudDescriptor().setLeader(false);
descriptor.getCloudDescriptor().setHasRegistered(false);
}
}
}
public void preClose() {
this.isClosed = true;
try {
this.removeEphemeralLiveNode();
} catch (AlreadyClosedException | SessionExpiredException | KeeperException.ConnectionLossException e) {
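// ignored: if ZK is already closed or the session is gone, the ephemeral live node is (presumably) going away on its own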
} catch (Exception e) {
log.warn("Error removing live node. Continuing to close CoreContainer", e);
}
try {
if (getZkClient().getConnectionManager().isConnected()) {
log.info("Publish this node as DOWN...");
publishNodeAsDown(getNodeName());
}
} catch (Exception e) {
log.warn("Error publishing nodes as down. Continuing to close CoreContainer", e);
}
ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("preCloseThreadPool"));
try {
synchronized (collectionToTerms) {
customThreadPool.submit(() -> collectionToTerms.values().parallelStream().forEach(ZkCollectionTerms::close));
}
customThreadPool.submit(() -> replicateFromLeaders.values().parallelStream().forEach(ReplicateFromLeader::stopReplication));
} finally {
ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
}
}
/**
* Closes the underlying ZooKeeper client.
*/
public void close() {
if (!this.isClosed)
preClose();
ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("closeThreadPool"));
customThreadPool.submit(() -> Collections.singleton(overseerElector.getContext()).parallelStream().forEach(IOUtils::closeQuietly));
customThreadPool.submit(() -> Collections.singleton(overseer).parallelStream().forEach(IOUtils::closeQuietly));
try {
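// NOTE: electionContexts.values() is a view of a synchronizedMap; iterating it
// (especially with parallelStream()) without synchronizing on the map can throw
// ConcurrentModificationException if the map is mutated concurrently.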
customThreadPool.submit(() -> electionContexts.values().parallelStream().forEach(IOUtils::closeQuietly));
} finally {
sysPropsCacher.close();
customThreadPool.submit(() -> Collections.singleton(cloudSolrClient).parallelStream().forEach(IOUtils::closeQuietly));
customThreadPool.submit(() -> Collections.singleton(cloudManager).parallelStream().forEach(IOUtils::closeQuietly));
try {
try {
zkStateReader.close();
} catch (Exception e) {
log.error("Error closing zkStateReader", e);
}
} finally {
try {
zkClient.close();
} catch (Exception e) {
log.error("Error closing zkClient", e);
} finally {
// just in case the OverseerElectionContext managed to start another Overseer
IOUtils.closeQuietly(overseer);
ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
}
}
}
assert ObjectReleaseTracker.release(this);
}
/**
* Best effort to give up the leadership of a shard in a core after hitting a tragic exception
* @param cd The current core descriptor
* @param tragicException The tragic exception from the {@code IndexWriter}
*/
public void giveupLeadership(CoreDescriptor cd, Throwable tragicException) {
assert tragicException != null;
assert cd != null;
DocCollection dc = getClusterState().getCollectionOrNull(cd.getCollectionName());
if (dc == null) return;
Slice shard = dc.getSlice(cd.getCloudDescriptor().getShardId());
if (shard == null) return;
// if this replica is not a leader, it will be put in recovery state by the leader
if (shard.getReplica(cd.getCloudDescriptor().getCoreNodeName()) != shard.getLeader()) return;
int numActiveReplicas = shard.getReplicas(
rep -> rep.getState() == Replica.State.ACTIVE
&& rep.getType() != Type.PULL
&& getClusterState().getLiveNodes().contains(rep.getNodeName())
).size();
// the leader can at least still serve searches; only give up leadership if other replicas can take over
if (numActiveReplicas >= 2) {
String key = cd.getCollectionName() + ":" + cd.getCloudDescriptor().getCoreNodeName();
//TODO better handling the case when delete replica was failed
if (replicasMetTragicEvent.putIfAbsent(key, tragicException) == null) {
log.warn("Leader {} met tragic exception, give up its leadership", key, tragicException);
try {
// by using Overseer to remove and add replica back, we can do the task in an async/robust manner
Map<String,Object> props = new HashMap<>();
props.put(Overseer.QUEUE_OPERATION, "deletereplica");
props.put(COLLECTION_PROP, cd.getCollectionName());
props.put(SHARD_ID_PROP, shard.getName());
props.put(REPLICA_PROP, cd.getCloudDescriptor().getCoreNodeName());
getOverseerCollectionQueue().offer(Utils.toJSON(new ZkNodeProps(props)));
props.clear();
props.put(Overseer.QUEUE_OPERATION, "addreplica");
props.put(COLLECTION_PROP, cd.getCollectionName());
props.put(SHARD_ID_PROP, shard.getName());
props.put(ZkStateReader.REPLICA_TYPE, cd.getCloudDescriptor().getReplicaType().name().toUpperCase(Locale.ROOT));
props.put(CoreAdminParams.NODE, getNodeName());
getOverseerCollectionQueue().offer(Utils.toJSON(new ZkNodeProps(props)));
} catch (Exception e) {
// Exceptions are not bubbled up. giveupLeadership is best effort, and is only called after some other
// unrecoverable error has happened
log.error("Met exception on give up leadership for {}", key, e);
replicasMetTragicEvent.remove(key);
SolrZkClient.checkInterrupted(e);
}
}
}
}
/**
* Returns true if config file exists
*/
public boolean configFileExists(String collection, String fileName)
throws KeeperException, InterruptedException {
Stat stat = zkClient.exists(ZkConfigManager.CONFIGS_ZKNODE + "/" + collection + "/" + fileName, null, true);
return stat != null;
}
/**
* @return information about the cluster from ZooKeeper
*/
public ClusterState getClusterState() {
return zkStateReader.getClusterState();
}
public SolrCloudManager getSolrCloudManager() {
if (cloudManager != null) {
return cloudManager;
}
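// NOTE: double-checked locking - re-tested under the monitor below so only one
// thread builds the client; cloudManager is not volatile, so strictly the
// unsynchronized fast path above is not guaranteed by the Java memory model.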
synchronized(this) {
if (cloudManager != null) {
return cloudManager;
}
cloudSolrClient = new CloudSolrClient.Builder(new ZkClientClusterStateProvider(zkStateReader))
.withSocketTimeout(30000).withConnectionTimeout(15000)
.withHttpClient(cc.getUpdateShardHandler().getDefaultHttpClient())
.build();
cloudManager = new SolrClientCloudManager(
new ZkDistributedQueueFactory(zkClient),
cloudSolrClient,
cc.getObjectCache());
cloudManager.getClusterStateProvider().connect();
}
return cloudManager;
}
/**
* Returns config file data (in bytes)
*/
public byte[] getConfigFileData(String zkConfigName, String fileName)
throws KeeperException, InterruptedException {
String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + zkConfigName + "/" + fileName;
byte[] bytes = zkClient.getData(zkPath, null, null, true);
if (bytes == null) {
log.error("Config file contains no data:{}", zkPath);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"Config file contains no data:" + zkPath);
}
return bytes;
}
// normalize host removing any url scheme.
// input can be null, host, or url_prefix://host
private String normalizeHostName(String host) {
if (host == null || host.length() == 0) {
String hostaddress;
try {
hostaddress = InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
hostaddress = "127.0.0.1"; // cannot resolve system hostname, fall through
}
// Re-resolve the IP for "127.0.0.1"; in the other case we trust the hosts
// file is right.
if ("127.0.0.1".equals(hostaddress)) {
Enumeration<NetworkInterface> netInterfaces = null;
try {
netInterfaces = NetworkInterface.getNetworkInterfaces();
while (netInterfaces.hasMoreElements()) {
NetworkInterface ni = netInterfaces.nextElement();
Enumeration<InetAddress> ips = ni.getInetAddresses();
while (ips.hasMoreElements()) {
InetAddress ip = ips.nextElement();
if (ip.isSiteLocalAddress()) {
hostaddress = ip.getHostAddress();
}
}
}
} catch (Exception e) {
SolrException.log(log,
"Error while looking for a better host name than 127.0.0.1", e);
}
}
host = hostaddress;
} else {
if (URLUtil.hasScheme(host)) {
host = URLUtil.removeScheme(host);
}
}
return host;
}
public String getHostName() {
return hostName;
}
public int getHostPort() {
return localHostPort;
}
public SolrZkClient getZkClient() {
return zkClient;
}
/**
* @return zookeeper server address
*/
public String getZkServerAddress() {
return zkServerAddress;
}
boolean isClosed() {
return isClosed;
}
/**
* Create the zknodes necessary for a cluster to operate
*
* @param zkClient a SolrZkClient
* @throws KeeperException if there is a Zookeeper error
* @throws InterruptedException on interrupt
*/
public static void createClusterZkNodes(SolrZkClient zkClient)
throws KeeperException, InterruptedException, IOException {
ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
cmdExecutor.ensureExists(ZkStateReader.LIVE_NODES_ZKNODE, zkClient);
cmdExecutor.ensureExists(ZkStateReader.COLLECTIONS_ZKNODE, zkClient);
cmdExecutor.ensureExists(ZkStateReader.ALIASES, zkClient);
byte[] emptyJson = "{}".getBytes(StandardCharsets.UTF_8);
cmdExecutor.ensureExists(ZkStateReader.SOLR_SECURITY_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
bootstrapDefaultConfigSet(zkClient);
}
private static void bootstrapDefaultConfigSet(SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException {
if (!zkClient.exists("/configs/_default", true)) {
String configDirPath = getDefaultConfigDirPath();
if (configDirPath == null) {
log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset {} {}"
, "intended to be the default. Current 'solr.default.confdir' value:"
, System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
} else {
ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandler.DEFAULT_CONFIGSET_NAME);
}
}
}
/**
* Gets the absolute filesystem path of the _default configset to bootstrap from.
* First tries the sysprop "solr.default.confdir". If not found, tries to find
* the _default dir relative to the sysprop "solr.install.dir".
* Returns null if not found anywhere.
*
* @lucene.internal
* @see SolrDispatchFilter#SOLR_DEFAULT_CONFDIR_ATTRIBUTE
*/
public static String getDefaultConfigDirPath() {
String configDirPath = null;
String serverSubPath = "solr" + File.separator +
"configsets" + File.separator + "_default" +
File.separator + "conf";
String subPath = File.separator + "server" + File.separator + serverSubPath;
if (System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE) != null && new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).exists()) {
configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).getAbsolutePath();
} else if (System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) != null &&
new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).exists()) {
configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).getAbsolutePath();
}
return configDirPath;
}
private void init() {
try {
createClusterZkNodes(zkClient);
zkStateReader.createClusterStateWatchersAndUpdate();
this.baseURL = zkStateReader.getBaseUrlForNodeName(this.nodeName);
checkForExistingEphemeralNode();
registerLiveNodesListener();
// start the overseer first as the following code may need its processing
if (!zkRunOnly) {
overseerElector = new LeaderElector(zkClient);
this.overseer = new Overseer((HttpShardHandler) cc.getShardHandlerFactory().getShardHandler(), cc.getUpdateShardHandler(),
CommonParams.CORES_HANDLER_PATH, zkStateReader, this, cloudConfig);
ElectionContext context = new OverseerElectionContext(zkClient,
overseer, getNodeName());
overseerElector.setup(context);
overseerElector.joinElection(context, false);
}
Stat stat = zkClient.exists(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
if (stat != null && stat.getNumChildren() > 0) {
publishAndWaitForDownStates();
}
// Do this last to signal we're up.
createEphemeralLiveNode();
} catch (IOException e) {
log.error("", e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Can't create ZooKeeperController", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
}
}
private void checkForExistingEphemeralNode() throws KeeperException, InterruptedException {
if (zkRunOnly) {
return;
}
String nodeName = getNodeName();
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
if (!zkClient.exists(nodePath, true)) {
return;
}
final CountDownLatch deletedLatch = new CountDownLatch(1);
Stat stat = zkClient.exists(nodePath, event -> {
if (Watcher.Event.EventType.None.equals(event.getType())) {
return;
}
if (Watcher.Event.EventType.NodeDeleted.equals(event.getType())) {
deletedLatch.countDown();
}
}, true);
if (stat == null) {
// znode suddenly disappeared but that's okay
return;
}
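// The stale node is an ephemeral from a previous session; ZooKeeper deletes it
// once that session expires, so waiting roughly two session timeouts should
// give it ample time to disappear before we give up.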
boolean deleted = deletedLatch.await(zkClient.getSolrZooKeeper().getSessionTimeout() * 2, TimeUnit.MILLISECONDS);
if (!deleted) {
throw new SolrException(ErrorCode.SERVER_ERROR, "A previous ephemeral live node still exists. " +
"Solr cannot continue. Please ensure that no other Solr process using the same port is running already.");
}
}
private void registerLiveNodesListener() {
// this listener is used for generating nodeLost events, so we check only if
// some nodes went missing compared to the last state
LiveNodesListener listener = (oldNodes, newNodes) -> {
oldNodes.removeAll(newNodes);
if (oldNodes.isEmpty()) { // only added nodes
return false;
}
if (isClosed) {
return true;
}
// if this node is in the top three then attempt to create nodeLost message
int i = 0;
for (String n : newNodes) {
if (n.equals(getNodeName())) {
break;
}
if (i > 2) {
return false; // this node is not in the top three
}
i++;
}
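// NOTE: no nodeLost message is actually created past this point; as written,
// every path except the isClosed check above returns false (keep listening).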
return false;
};
zkStateReader.registerLiveNodesListener(listener);
}
public void publishAndWaitForDownStates() throws KeeperException,
InterruptedException {
publishAndWaitForDownStates(WAIT_DOWN_STATES_TIMEOUT_SECONDS);
}
public void publishAndWaitForDownStates(int timeoutSeconds) throws KeeperException,
InterruptedException {
publishNodeAsDown(getNodeName());
Set<String> collectionsWithLocalReplica = ConcurrentHashMap.newKeySet();
for (CoreDescriptor descriptor : cc.getCoreDescriptors()) {
collectionsWithLocalReplica.add(descriptor.getCloudDescriptor().getCollectionName());
}
CountDownLatch latch = new CountDownLatch(collectionsWithLocalReplica.size());
for (String collectionWithLocalReplica : collectionsWithLocalReplica) {
zkStateReader.registerDocCollectionWatcher(collectionWithLocalReplica, (collectionState) -> {
if (collectionState == null) return false;
boolean foundStates = true;
for (CoreDescriptor coreDescriptor : cc.getCoreDescriptors()) {
if (coreDescriptor.getCloudDescriptor().getCollectionName().equals(collectionWithLocalReplica)) {
Replica replica = collectionState.getReplica(coreDescriptor.getCloudDescriptor().getCoreNodeName());
if (replica == null || replica.getState() != Replica.State.DOWN) {
foundStates = false;
}
}
}
if (foundStates && collectionsWithLocalReplica.remove(collectionWithLocalReplica)) {
latch.countDown();
}
return foundStates;
});
}
boolean allPublishedDown = latch.await(timeoutSeconds, TimeUnit.SECONDS);
if (!allPublishedDown) {
log.warn("Timed out waiting to see all nodes published as DOWN in our cluster state.");
}
}
/**
* Validates if the chroot exists in zk (or if it is successfully created).
* Optionally, if create is set to true this method will create the path in
* case it doesn't exist
*
* @return true if the path exists or is created false if the path doesn't
* exist and 'create' = false
*/
public static boolean checkChrootPath(String zkHost, boolean create)
throws KeeperException, InterruptedException {
if (!SolrZkClient.containsChroot(zkHost)) {
return true;
}
log.trace("zkHost includes chroot");
String chrootPath = zkHost.substring(zkHost.indexOf("/"), zkHost.length());
SolrZkClient tmpClient = new SolrZkClient(zkHost.substring(0,
zkHost.indexOf("/")), 60000, 30000, null, null, null);
boolean exists = tmpClient.exists(chrootPath, true);
if (!exists && create) {
tmpClient.makePath(chrootPath, false, true);
exists = true;
}
tmpClient.close();
return exists;
}
public boolean isConnected() {
return zkClient.isConnected();
}
private void createEphemeralLiveNode() throws KeeperException,
InterruptedException {
if (zkRunOnly) {
return;
}
String nodeName = getNodeName();
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
log.info("Register node as live in ZooKeeper:{}", nodePath);
List<Op> ops = new ArrayList<>(2);
ops.add(Op.create(nodePath, null, zkClient.getZkACLProvider().getACLsToAdd(nodePath), CreateMode.EPHEMERAL));
zkClient.multi(ops, true);
}
public void removeEphemeralLiveNode() throws KeeperException, InterruptedException {
if (zkRunOnly) {
return;
}
String nodeName = getNodeName();
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
log.info("Remove node as live in ZooKeeper:{}", nodePath);
List<Op> ops = new ArrayList<>(2);
ops.add(Op.delete(nodePath, -1));
try {
zkClient.multi(ops, true);
} catch (NoNodeException e) {
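// ignored: the live node is already gone, which is the state we wanted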
}
}
public String getNodeName() {
return nodeName;
}
/**
* Returns true if the path exists
*/
public boolean pathExists(String path) throws KeeperException,
InterruptedException {
return zkClient.exists(path, true);
}
/**
* Register shard with ZooKeeper.
*
* @return the shardId for the SolrCore
*/
public String register(String coreName, final CoreDescriptor desc, boolean skipRecovery) throws Exception {
return register(coreName, desc, false, false, skipRecovery);
}
/**
* Register shard with ZooKeeper.
*
* @return the shardId for the SolrCore
*/
public String register(String coreName, final CoreDescriptor desc, boolean recoverReloadedCores,
boolean afterExpiration, boolean skipRecovery) throws Exception {
MDCLoggingContext.setCoreDescriptor(cc, desc);
try {
// pre register has published our down state
final String baseUrl = getBaseUrl();
final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
final String collection = cloudDesc.getCollectionName();
final String shardId = cloudDesc.getShardId();
final String coreZkNodeName = cloudDesc.getCoreNodeName();
assert coreZkNodeName != null : "we should have a coreNodeName by now";
// check replica's existence in clusterstate first
try {
zkStateReader.waitForState(collection, 100, TimeUnit.MILLISECONDS,
(collectionState) -> getReplicaOrNull(collectionState, shardId, coreZkNodeName) != null);
} catch (TimeoutException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error registering SolrCore, timeout waiting for replica present in clusterstate");
}
Replica replica = getReplicaOrNull(zkStateReader.getClusterState().getCollectionOrNull(collection), shardId, coreZkNodeName);
if (replica == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error registering SolrCore, replica is removed from clusterstate");
}
if (replica.getType() != Type.PULL) {
getCollectionTerms(collection).register(cloudDesc.getShardId(), coreZkNodeName);
}
ZkShardTerms shardTerms = getShardTerms(collection, cloudDesc.getShardId());
log.debug("Register replica - core:{} address:{} collection:{} shard:{}",
coreName, baseUrl, collection, shardId);
try {
// If we're a preferred leader, insert ourselves at the head of the queue
boolean joinAtHead = replica.getBool(SliceMutator.PREFERRED_LEADER_PROP, false);
if (replica.getType() != Type.PULL) {
joinElection(desc, afterExpiration, joinAtHead);
} else if (replica.getType() == Type.PULL) {
if (joinAtHead) {
log.warn("Replica {} was designated as preferred leader but it's type is {}, It won't join election", coreZkNodeName, Type.PULL);
}
log.debug("Replica {} skipping election because it's type is {}", coreZkNodeName, Type.PULL);
startReplicationFromLeader(coreName, false);
}
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (KeeperException | IOException e) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
// in this case, we want to wait for the leader as long as the leader might
// wait for a vote, at least - but also long enough that a large cluster has
// time to get its act together
String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);
String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
log.debug("We are {} and leader is {}", ourUrl, leaderUrl);
boolean isLeader = leaderUrl.equals(ourUrl);
assert !(isLeader && replica.getType() == Type.PULL) : "Pull replica became leader!";
try (SolrCore core = cc.getCore(desc.getName())) {
// recover from local transaction log and wait for it to complete before
// going active
// TODO: should this be moved to another thread? To recoveryStrat?
// TODO: should this actually be done earlier, before (or as part of)
// leader election perhaps?
if (core == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is no longer available to register");
}
UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
boolean isTlogReplicaAndNotLeader = replica.getType() == Replica.Type.TLOG && !isLeader;
if (isTlogReplicaAndNotLeader) {
String commitVersion = ReplicateFromLeader.getCommitVersion(core);
if (commitVersion != null) {
ulog.copyOverOldUpdates(Long.parseLong(commitVersion));
}
}
// we will call register again after zk expiration and on reload
if (!afterExpiration && !core.isReloaded() && ulog != null && !isTlogReplicaAndNotLeader) {
// disable recovery in case shard is in construction state (for shard splits)
Slice slice = getClusterState().getCollection(collection).getSlice(shardId);
if (slice.getState() != Slice.State.CONSTRUCTION || !isLeader) {
Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler().getUpdateLog().recoverFromLog();
if (recoveryFuture != null) {
log.info("Replaying tlog for {} during startup... NOTE: This can take a while.", ourUrl);
recoveryFuture.get(); // NOTE: this could potentially block for
// minutes or more!
// TODO: publish as recovering in the meantime?
// TODO: in the future we could do peersync in parallel with recoverFromLog
} else {
if (log.isDebugEnabled()) {
log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
}
}
}
}
boolean didRecovery
= checkRecovery(recoverReloadedCores, isLeader, skipRecovery, collection, coreZkNodeName, shardId, core, cc, afterExpiration);
if (!didRecovery) {
if (isTlogReplicaAndNotLeader) {
startReplicationFromLeader(coreName, true);
}
publish(desc, Replica.State.ACTIVE);
}
if (replica.getType() != Type.PULL) {
// the watcher is added to a set so multiple calls of this method will leave only one watcher
shardTerms.addListener(new RecoveringCoreTermWatcher(core.getCoreDescriptor(), getCoreContainer()));
}
core.getCoreDescriptor().getCloudDescriptor().setHasRegistered(true);
} catch (Exception e) {
unregister(coreName, desc, false);
throw e;
}
// make sure we have an updated cluster state right away
zkStateReader.forceUpdateCollection(collection);
// the watcher is added to a set so multiple calls of this method will leave only one watcher
zkStateReader.registerDocCollectionWatcher(cloudDesc.getCollectionName(),
new UnloadCoreOnDeletedWatcher(coreZkNodeName, shardId, desc.getName()));
return shardId;
} finally {
MDCLoggingContext.clear();
}
}
private Replica getReplicaOrNull(DocCollection docCollection, String shard, String coreNodeName) {
if (docCollection == null) return null;
Slice slice = docCollection.getSlice(shard);
if (slice == null) return null;
Replica replica = slice.getReplica(coreNodeName);
if (replica == null) return null;
if (!getNodeName().equals(replica.getNodeName())) return null;
return replica;
}
public void startReplicationFromLeader(String coreName, boolean switchTransactionLog) throws InterruptedException {
log.info("{} starting background replication from leader", coreName);
ReplicateFromLeader replicateFromLeader = new ReplicateFromLeader(cc, coreName);
synchronized (replicateFromLeader) { // synchronize to prevent any stop before we finish the start
if (replicateFromLeaders.putIfAbsent(coreName, replicateFromLeader) == null) {
replicateFromLeader.startReplication(switchTransactionLog);
} else {
log.warn("A replicate from leader instance already exists for core {}", coreName);
}
}
}
public void stopReplicationFromLeader(String coreName) {
log.info("{} stopping background replication from leader", coreName);
ReplicateFromLeader replicateFromLeader = replicateFromLeaders.remove(coreName);
if (replicateFromLeader != null) {
synchronized (replicateFromLeader) {
replicateFromLeader.stopReplication();
}
}
}
// timeoutms is the timeout for the first call to get the leader - there is then
// a longer wait to make sure that leader matches our local state
private String getLeader(final CloudDescriptor cloudDesc, int timeoutms) {
String collection = cloudDesc.getCollectionName();
String shardId = cloudDesc.getShardId();
// rather than look in the cluster state file, we go straight to the zknodes
// here, because on cluster restart there could be stale leader info in the
// cluster state node that won't be updated for a moment
String leaderUrl;
try {
leaderUrl = getLeaderProps(collection, cloudDesc.getShardId(), timeoutms)
.getCoreUrl();
// now wait until our currently cloud state contains the latest leader
String clusterStateLeaderUrl = zkStateReader.getLeaderUrl(collection,
shardId, timeoutms * 2); // since we found it in zk, we are willing to
// wait a while to find it in state
int tries = 0;
final long msInSec = 1000L;
int maxTries = (int) Math.floor(leaderConflictResolveWait / msInSec);
while (!leaderUrl.equals(clusterStateLeaderUrl)) {
if (cc.isShutDown()) throw new AlreadyClosedException();
if (tries > maxTries) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"There is conflicting information about the leader of shard: "
+ cloudDesc.getShardId() + " our state says:"
+ clusterStateLeaderUrl + " but zookeeper says:" + leaderUrl);
}
tries++;
if (tries % 30 == 0) {
String warnMsg = String.format(Locale.ENGLISH, "Still seeing conflicting information about the leader "
+ "of shard %s for collection %s after %d seconds; our state says %s, but ZooKeeper says %s",
cloudDesc.getShardId(), collection, tries, clusterStateLeaderUrl, leaderUrl);
log.warn(warnMsg);
}
Thread.sleep(msInSec);
clusterStateLeaderUrl = zkStateReader.getLeaderUrl(collection, shardId,
timeoutms);
leaderUrl = getLeaderProps(collection, cloudDesc.getShardId(), timeoutms)
.getCoreUrl();
}
} catch (AlreadyClosedException e) {
throw e;
} catch (Exception e) {
log.error("Error getting leader from zk", e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Error getting leader from zk for shard " + shardId, e);
}
return leaderUrl;
}
/**
* Get leader props directly from zk nodes.
* @throws SessionExpiredException on zk session expiration.
*/
public ZkCoreNodeProps getLeaderProps(final String collection,
final String slice, int timeoutms) throws InterruptedException, SessionExpiredException {
return getLeaderProps(collection, slice, timeoutms, true);
}
/**
* Get leader props directly from zk nodes.
*
* @return leader props
* @throws SessionExpiredException on zk session expiration.
*/
public ZkCoreNodeProps getLeaderProps(final String collection,
final String slice, int timeoutms, boolean failImmediatelyOnExpiration) throws InterruptedException, SessionExpiredException {
int iterCount = timeoutms / 1000;
Exception exp = null;
while (iterCount-- > 0) {
try {
byte[] data = zkClient.getData(
ZkStateReader.getShardLeadersPath(collection, slice), null, null,
true);
ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(
ZkNodeProps.load(data));
return leaderProps;
} catch (InterruptedException e) {
throw e;
} catch (SessionExpiredException e) {
if (failImmediatelyOnExpiration) {
throw e;
}
exp = e;
Thread.sleep(1000);
} catch (Exception e) {
exp = e;
Thread.sleep(1000);
}
if (cc.isShutDown()) {
throw new AlreadyClosedException();
}
}
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Could not get leader props", exp);
}
private void joinElection(CoreDescriptor cd, boolean afterExpiration, boolean joinAtHead)
throws InterruptedException, KeeperException, IOException {
// look for old context - if we find it, cancel it
String collection = cd.getCloudDescriptor().getCollectionName();
final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
ContextKey contextKey = new ContextKey(collection, coreNodeName);
ElectionContext prevContext = electionContexts.get(contextKey);
if (prevContext != null) {
prevContext.cancelElection();
}
String shardId = cd.getCloudDescriptor().getShardId();
Map<String, Object> props = new HashMap<>();
// we only put a subset of props into the leader node
props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
ZkNodeProps ourProps = new ZkNodeProps(props);
LeaderElector leaderElector = new LeaderElector(zkClient, contextKey, electionContexts);
ElectionContext context = new ShardLeaderElectionContext(leaderElector, shardId,
collection, coreNodeName, ourProps, this, cc);
leaderElector.setup(context);
electionContexts.put(contextKey, context);
leaderElector.joinElection(context, false, joinAtHead);
}
/**
* Returns whether or not a recovery was started
*/
private boolean checkRecovery(boolean recoverReloadedCores, final boolean isLeader, boolean skipRecovery,
final String collection, String coreZkNodeName, String shardId,
SolrCore core, CoreContainer cc, boolean afterExpiration) {
if (SKIP_AUTO_RECOVERY) {
log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
return false;
}
boolean doRecovery = true;
if (!isLeader) {
if (skipRecovery || (!afterExpiration && core.isReloaded() && !recoverReloadedCores)) {
doRecovery = false;
}
if (doRecovery) {
if (log.isInfoEnabled()) {
log.info("Core needs to recover:{}", core.getName());
}
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
return true;
}
ZkShardTerms zkShardTerms = getShardTerms(collection, shardId);
if (zkShardTerms.registered(coreZkNodeName) && !zkShardTerms.canBecomeLeader(coreZkNodeName)) {
if (log.isInfoEnabled()) {
log.info("Leader's term larger than core {}; starting recovery process", core.getName());
}
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
return true;
}
} else {
log.info("I am the leader, no recovery necessary");
}
return false;
}
public String getBaseUrl() {
return baseURL;
}
public void publish(final CoreDescriptor cd, final Replica.State state) throws Exception {
publish(cd, state, true, false);
}
/**
* Publish core state to overseer.
*/
public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState, boolean forcePublish) throws Exception {
if (!forcePublish) {
try (SolrCore core = cc.getCore(cd.getName())) {
if (core == null || core.isClosed()) {
return;
}
}
}
MDCLoggingContext.setCoreDescriptor(cc, cd);
try {
String collection = cd.getCloudDescriptor().getCollectionName();
log.debug("publishing state={}", state);
// System.out.println(Thread.currentThread().getStackTrace()[3]);
Integer numShards = cd.getCloudDescriptor().getNumShards();
if (numShards == null) { // XXX sys prop hack
log.debug("numShards not found on descriptor - reading it from system property");
numShards = Integer.getInteger(ZkStateReader.NUM_SHARDS_PROP);
}
assert collection != null && collection.length() > 0;
String shardId = cd.getCloudDescriptor().getShardId();
String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
Map<String,Object> props = new HashMap<>();
props.put(Overseer.QUEUE_OPERATION, "state");
props.put(ZkStateReader.STATE_PROP, state.toString());
props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
props.put(ZkStateReader.ROLES_PROP, cd.getCloudDescriptor().getRoles());
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
props.put(ZkStateReader.SHARD_ID_PROP, cd.getCloudDescriptor().getShardId());
props.put(ZkStateReader.COLLECTION_PROP, collection);
props.put(ZkStateReader.REPLICA_TYPE, cd.getCloudDescriptor().getReplicaType().toString());
props.put(ZkStateReader.FORCE_SET_STATE_PROP, "false");
if (numShards != null) {
props.put(ZkStateReader.NUM_SHARDS_PROP, numShards.toString());
}
if (coreNodeName != null) {
props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
}
try (SolrCore core = cc.getCore(cd.getName())) {
if (core != null && state == Replica.State.ACTIVE) {
ensureRegisteredSearcher(core);
}
        if (core != null && core.getDirectoryFactory().isSharedStorage()) {
          props.put(ZkStateReader.SHARED_STORAGE_PROP, "true");
          props.put("dataDir", core.getDataDir());
          UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
          if (ulog != null) {
            props.put("ulogDir", ulog.getLogDir());
          }
        }
} catch (SolrCoreInitializationException ex) {
// The core had failed to initialize (in a previous request, not this one), hence nothing to do here.
if (log.isInfoEnabled()) {
log.info("The core '{}' had failed to initialize before.", cd.getName());
}
}
// pull replicas are excluded because their terms are not considered
if (state == Replica.State.RECOVERING && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
        // the state is used by clients; a replica's state can change from RECOVERING to DOWN without needing to finish recovery,
        // so by calling this we will know whether a replica actually finished recovery or not
getShardTerms(collection, shardId).startRecovering(coreNodeName);
}
if (state == Replica.State.ACTIVE && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
getShardTerms(collection, shardId).doneRecovering(coreNodeName);
}
ZkNodeProps m = new ZkNodeProps(props);
if (updateLastState) {
cd.getCloudDescriptor().setLastPublished(state);
}
overseerJobQueue.offer(Utils.toJSON(m));
} finally {
MDCLoggingContext.clear();
}
}
public ZkShardTerms getShardTerms(String collection, String shardId) {
return getCollectionTerms(collection).getShard(shardId);
}
private ZkCollectionTerms getCollectionTerms(String collection) {
synchronized (collectionToTerms) {
if (!collectionToTerms.containsKey(collection)) collectionToTerms.put(collection, new ZkCollectionTerms(collection, zkClient));
return collectionToTerms.get(collection);
}
}
public void clearZkCollectionTerms() {
synchronized (collectionToTerms) {
collectionToTerms.values().forEach(ZkCollectionTerms::close);
collectionToTerms.clear();
}
}
public void unregister(String coreName, CoreDescriptor cd) throws Exception {
unregister(coreName, cd, true);
}
public void unregister(String coreName, CoreDescriptor cd, boolean removeCoreFromZk) throws Exception {
final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
final String collection = cd.getCloudDescriptor().getCollectionName();
getCollectionTerms(collection).remove(cd.getCloudDescriptor().getShardId(), cd);
replicasMetTragicEvent.remove(collection+":"+coreNodeName);
if (Strings.isNullOrEmpty(collection)) {
log.error("No collection was specified.");
assert false : "No collection was specified [" + collection + "]";
return;
}
final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collection);
Replica replica = (docCollection == null) ? null : docCollection.getReplica(coreNodeName);
if (replica == null || replica.getType() != Type.PULL) {
ElectionContext context = electionContexts.remove(new ContextKey(collection, coreNodeName));
if (context != null) {
context.cancelElection();
}
}
CloudDescriptor cloudDescriptor = cd.getCloudDescriptor();
if (removeCoreFromZk) {
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
OverseerAction.DELETECORE.toLower(), ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.NODE_NAME_PROP, getNodeName(),
ZkStateReader.COLLECTION_PROP, cloudDescriptor.getCollectionName(),
ZkStateReader.BASE_URL_PROP, getBaseUrl(),
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
overseerJobQueue.offer(Utils.toJSON(m));
}
}
public void createCollection(String collection) throws Exception {
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
CollectionParams.CollectionAction.CREATE.toLower(), ZkStateReader.NODE_NAME_PROP, getNodeName(),
ZkStateReader.COLLECTION_PROP, collection);
overseerJobQueue.offer(Utils.toJSON(m));
}
public ZkStateReader getZkStateReader() {
return zkStateReader;
}
private void doGetShardIdAndNodeNameProcess(CoreDescriptor cd) {
final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
if (coreNodeName != null) {
waitForShardId(cd);
} else {
// if no explicit coreNodeName, we want to match by base url and core name
waitForCoreNodeName(cd);
waitForShardId(cd);
}
}
private void waitForCoreNodeName(CoreDescriptor descriptor) {
int retryCount = 320;
log.debug("look for our core node name");
while (retryCount-- > 0) {
final DocCollection docCollection = zkStateReader.getClusterState()
.getCollectionOrNull(descriptor.getCloudDescriptor().getCollectionName());
if (docCollection != null && docCollection.getSlicesMap() != null) {
final Map<String, Slice> slicesMap = docCollection.getSlicesMap();
for (Slice slice : slicesMap.values()) {
for (Replica replica : slice.getReplicas()) {
// TODO: for really large clusters, we could 'index' on this
String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
String msgNodeName = getNodeName();
String msgCore = descriptor.getName();
if (msgNodeName.equals(nodeName) && core.equals(msgCore)) {
descriptor.getCloudDescriptor()
.setCoreNodeName(replica.getName());
getCoreContainer().getCoresLocator().persist(getCoreContainer(), descriptor);
return;
}
}
}
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
private void waitForShardId(CoreDescriptor cd) {
if (log.isDebugEnabled()) {
log.debug("waiting to find shard id in clusterstate for {}", cd.getName());
}
int retryCount = 320;
while (retryCount-- > 0) {
final String shardId = zkStateReader.getClusterState().getShardId(cd.getCollectionName(), getNodeName(), cd.getName());
if (shardId != null) {
cd.getCloudDescriptor().setShardId(shardId);
return;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
throw new SolrException(ErrorCode.SERVER_ERROR,
"Could not get shard id for core: " + cd.getName());
}
public String getCoreNodeName(CoreDescriptor descriptor) {
String coreNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
if (coreNodeName == null && !genericCoreNodeNames) {
// it's the default
return getNodeName() + "_" + descriptor.getName();
}
return coreNodeName;
}
public void preRegister(CoreDescriptor cd, boolean publishState) {
String coreNodeName = getCoreNodeName(cd);
// before becoming available, make sure we are not live and active
// this also gets us our assigned shard id if it was not specified
try {
checkStateInZk(cd);
CloudDescriptor cloudDesc = cd.getCloudDescriptor();
// make sure the node name is set on the descriptor
if (cloudDesc.getCoreNodeName() == null) {
cloudDesc.setCoreNodeName(coreNodeName);
}
// publishState == false on startup
if (publishState || isPublishAsDownOnStartup(cloudDesc)) {
publish(cd, Replica.State.DOWN, false, true);
}
String collectionName = cd.getCloudDescriptor().getCollectionName();
DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
if (log.isDebugEnabled()) {
log.debug(collection == null ?
"Collection {} not visible yet, but flagging it so a watch is registered when it becomes visible" :
"Registering watch for collection {}",
collectionName);
}
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (NotInClusterStateException e) {
// make the stack trace less verbose
throw e;
} catch (Exception e) {
log.error("", e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
doGetShardIdAndNodeNameProcess(cd);
}
/**
   * On startup, the node has already published all of its replicas as DOWN,
   * so publishing the replica as down again can be skipped.
   * @return whether the replica should be published as down on startup
*/
private boolean isPublishAsDownOnStartup(CloudDescriptor cloudDesc) {
Replica replica = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
.getSlice(cloudDesc.getShardId())
.getReplica(cloudDesc.getCoreNodeName());
return !replica.getNodeName().equals(getNodeName());
}
private void checkStateInZk(CoreDescriptor cd) throws InterruptedException, NotInClusterStateException {
CloudDescriptor cloudDesc = cd.getCloudDescriptor();
String nodeName = cloudDesc.getCoreNodeName();
if (nodeName == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "No coreNodeName for " + cd);
}
final String coreNodeName = nodeName;
if (cloudDesc.getShardId() == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "No shard id for " + cd);
}
AtomicReference<String> errorMessage = new AtomicReference<>();
AtomicReference<DocCollection> collectionState = new AtomicReference<>();
try {
zkStateReader.waitForState(cd.getCollectionName(), 10, TimeUnit.SECONDS, (c) -> {
collectionState.set(c);
if (c == null)
return false;
Slice slice = c.getSlice(cloudDesc.getShardId());
if (slice == null) {
errorMessage.set("Invalid shard: " + cloudDesc.getShardId());
return false;
}
Replica replica = slice.getReplica(coreNodeName);
if (replica == null) {
errorMessage.set("coreNodeName " + coreNodeName + " does not exist in shard " + cloudDesc.getShardId() +
", ignore the exception if the replica was deleted");
return false;
}
return true;
});
} catch (TimeoutException e) {
String error = errorMessage.get();
if (error == null)
error = "coreNodeName " + coreNodeName + " does not exist in shard " + cloudDesc.getShardId() +
", ignore the exception if the replica was deleted";
throw new NotInClusterStateException(ErrorCode.SERVER_ERROR, error);
}
}
private ZkCoreNodeProps waitForLeaderToSeeDownState(
CoreDescriptor descriptor, final String coreZkNodeName) throws SessionExpiredException {
// try not to wait too long here - if we are waiting too long, we should probably
// move along and join the election
CloudDescriptor cloudDesc = descriptor.getCloudDescriptor();
String collection = cloudDesc.getCollectionName();
String shard = cloudDesc.getShardId();
ZkCoreNodeProps leaderProps = null;
int retries = 2;
for (int i = 0; i < retries; i++) {
try {
if (isClosed) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"We have been closed");
}
// go straight to zk, not the cloud state - we want current info
leaderProps = getLeaderProps(collection, shard, 5000);
break;
} catch (SessionExpiredException e) {
throw e;
} catch (Exception e) {
log.info("Did not find the leader in Zookeeper", e);
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
if (i == retries - 1) {
throw new SolrException(ErrorCode.SERVER_ERROR, "There was a problem finding the leader in zk");
}
}
}
String leaderBaseUrl = leaderProps.getBaseUrl();
String leaderCoreName = leaderProps.getCoreName();
String myCoreNodeName = cloudDesc.getCoreNodeName();
String myCoreName = descriptor.getName();
String ourUrl = ZkCoreNodeProps.getCoreUrl(getBaseUrl(), myCoreName);
boolean isLeader = leaderProps.getCoreUrl().equals(ourUrl);
if (!isLeader && !SKIP_AUTO_RECOVERY) {
if (!getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
log.debug("Term of replica {} is already less than leader, so not waiting for leader to see down state."
, myCoreNodeName);
} else {
if (log.isInfoEnabled()) {
          log.info("replica={} is making a best effort attempt to wait for leader={} to see its DOWN state.", myCoreNodeName, leaderProps.getCoreUrl());
}
try (HttpSolrClient client = new Builder(leaderBaseUrl)
.withConnectionTimeout(8000) // short timeouts, we may be in a storm and this is best effort and maybe we should be the leader now
.withSocketTimeout(30000)
.build()) {
WaitForState prepCmd = new WaitForState();
prepCmd.setCoreName(leaderCoreName);
prepCmd.setNodeName(getNodeName());
prepCmd.setCoreNodeName(coreZkNodeName);
prepCmd.setState(Replica.State.DOWN);
        // let's give it another chance, but without taking too long
retries = 3;
for (int i = 0; i < retries; i++) {
if (isClosed) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"We have been closed");
}
try {
client.request(prepCmd);
break;
} catch (Exception e) {
// if the core container is shutdown, don't wait
if (cc.isShutDown()) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Core container is shutdown.");
}
Throwable rootCause = SolrException.getRootCause(e);
if (rootCause instanceof IOException) {
// if there was a communication error talking to the leader, see if the leader is even alive
if (!zkStateReader.getClusterState().liveNodesContain(leaderProps.getNodeName())) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Node " + leaderProps.getNodeName() + " hosting leader for " +
shard + " in " + collection + " is not live!");
}
}
SolrException.log(log,
"There was a problem making a request to the leader", e);
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
if (i == retries - 1) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"There was a problem making a request to the leader");
}
}
}
} catch (IOException e) {
SolrException.log(log, "Error closing HttpSolrClient", e);
}
}
}
return leaderProps;
}
public static void linkConfSet(SolrZkClient zkClient, String collection, String confSetName) throws KeeperException, InterruptedException {
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
log.debug("Load collection config from:{}", path);
byte[] data;
try {
data = zkClient.getData(path, null, null, true);
} catch (NoNodeException e) {
// if there is no node, we will try and create it
      // first try to make it, in case we are pre-configuring
ZkNodeProps props = new ZkNodeProps(CONFIGNAME_PROP, confSetName);
try {
zkClient.makePath(path, Utils.toJSON(props),
CreateMode.PERSISTENT, null, true);
} catch (KeeperException e2) {
// it's okay if the node already exists
if (e2.code() != KeeperException.Code.NODEEXISTS) {
throw e;
}
        // if creating fails, fall back to setting the data
// TODO: we should consider using version
zkClient.setData(path, Utils.toJSON(props), true);
}
return;
}
// we found existing data, let's update it
ZkNodeProps props = null;
if (data != null) {
props = ZkNodeProps.load(data);
Map<String, Object> newProps = new HashMap<>(props.getProperties());
newProps.put(CONFIGNAME_PROP, confSetName);
props = new ZkNodeProps(newProps);
} else {
props = new ZkNodeProps(CONFIGNAME_PROP, confSetName);
}
// TODO: we should consider using version
zkClient.setData(path, Utils.toJSON(props), true);
}
/**
* If in SolrCloud mode, upload config sets for each SolrCore in solr.xml.
*/
public static void bootstrapConf(SolrZkClient zkClient, CoreContainer cc) throws IOException {
ZkConfigManager configManager = new ZkConfigManager(zkClient);
//List<String> allCoreNames = cfg.getAllCoreNames();
List<CoreDescriptor> cds = cc.getCoresLocator().discover(cc);
if (log.isInfoEnabled()) {
log.info("bootstrapping config for {} cores into ZooKeeper using solr.xml from {}", cds.size(), cc.getSolrHome());
}
for (CoreDescriptor cd : cds) {
String coreName = cd.getName();
String confName = cd.getCollectionName();
if (StringUtils.isEmpty(confName))
confName = coreName;
Path udir = cd.getInstanceDir().resolve("conf");
log.info("Uploading directory {} with name {} for solrCore {}", udir, confName, coreName);
configManager.uploadConfigDir(udir, confName);
}
}
public ZkDistributedQueue getOverseerJobQueue() {
return overseerJobQueue;
}
public OverseerTaskQueue getOverseerCollectionQueue() {
return overseerCollectionQueue;
}
public OverseerTaskQueue getOverseerConfigSetQueue() {
return overseerConfigSetQueue;
}
public DistributedMap getOverseerRunningMap() {
return overseerRunningMap;
}
public DistributedMap getOverseerCompletedMap() {
return overseerCompletedMap;
}
public DistributedMap getOverseerFailureMap() {
return overseerFailureMap;
}
/**
* When an operation needs to be performed in an asynchronous mode, the asyncId needs
   * to be claimed by calling this method to make sure it's not a duplicate (hasn't been
   * claimed by another request). If this method returns true, the asyncId in the parameter
   * has been reserved for the operation, meaning that no other thread/operation can claim
   * it. If, for whatever reason, the operation is not scheduled, the asyncId needs to be
   * cleared using {@link #clearAsyncId(String)}.
* If this method returns false, no reservation has been made, and this asyncId can't
* be used, since it's being used by another operation (currently or in the past)
* @param asyncId A string representing the asyncId of an operation. Can't be null.
* @return True if the reservation succeeds.
* False if this ID is already in use.
*/
public boolean claimAsyncId(String asyncId) throws KeeperException {
try {
return asyncIdsMap.putIfAbsent(asyncId, new byte[0]);
} catch (InterruptedException e) {
log.error("Could not claim asyncId={}", asyncId, e);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
/**
* Clears an asyncId previously claimed by calling {@link #claimAsyncId(String)}
* @param asyncId A string representing the asyncId of an operation. Can't be null.
* @return True if the asyncId existed and was cleared.
* False if the asyncId didn't exist before.
*/
public boolean clearAsyncId(String asyncId) throws KeeperException {
try {
return asyncIdsMap.remove(asyncId);
} catch (InterruptedException e) {
log.error("Could not release asyncId={}", asyncId, e);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
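
  // Illustrative usage sketch (not part of the original class): the claim/clear
  // protocol described in the javadoc above. "scheduleAsyncOperation" is a
  // hypothetical stand-in for whatever actually enqueues the work.
  private void exampleClaimAsyncIdUsage(String asyncId) throws KeeperException {
    if (!claimAsyncId(asyncId)) {
      // the id is, or once was, in use by another operation and must not be reused
      throw new SolrException(ErrorCode.BAD_REQUEST, "asyncId " + asyncId + " is already in use");
    }
    boolean scheduled = false;
    try {
      scheduled = scheduleAsyncOperation(asyncId); // hypothetical scheduling call
    } finally {
      if (!scheduled) {
        // the operation was never scheduled, so release the id for future use
        clearAsyncId(asyncId);
      }
    }
  }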
public int getClientTimeout() {
return clientTimeout;
}
public Overseer getOverseer() {
return overseer;
}
public LeaderElector getOverseerElector() {
return overseerElector;
}
/**
* Returns the nodeName that should be used based on the specified properties.
*
* @param hostName - must not be null or the empty string
* @param hostPort - must consist only of digits, must not be null or the empty string
   * @param hostContext - should not begin or end with a slash (leading/trailing slashes will be ignored), must not be null, may be the empty string to denote the root context
* @lucene.experimental
* @see ZkStateReader#getBaseUrlForNodeName
*/
static String generateNodeName(final String hostName,
final String hostPort,
final String hostContext) {
try {
return hostName + ':' + hostPort + '_' +
URLEncoder.encode(trimLeadingAndTrailingSlashes(hostContext), "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new Error("JVM Does not seem to support UTF-8", e);
}
}
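
  // Worked example (illustrative): generateNodeName("node1", "8983", "/solr/")
  // yields "node1:8983_solr"; the context is trimmed of leading/trailing slashes
  // and URL-encoded before being appended, so an empty context yields "node1:8983_".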
/**
   * Utility method for trimming leading and/or trailing slashes from
* its input. May return the empty string. May return null if and only
* if the input is null.
*/
public static String trimLeadingAndTrailingSlashes(final String in) {
if (null == in) return in;
String out = in;
if (out.startsWith("/")) {
out = out.substring(1);
}
if (out.endsWith("/")) {
out = out.substring(0, out.length() - 1);
}
return out;
}
public void rejoinOverseerElection(String electionNode, boolean joinAtHead) {
try {
if (electionNode != null) {
// Check whether we came to this node by mistake
        if (overseerElector.getContext() != null && overseerElector.getContext().leaderSeqPath != null
            && !overseerElector.getContext().leaderSeqPath.endsWith(electionNode)) {
          log.warn("Asked to rejoin with wrong election node: {}, current node is {}", electionNode, overseerElector.getContext().leaderSeqPath);
          // however, delete it. This is possible when the last attempt at deleting the election node failed.
if (electionNode.startsWith(getNodeName())) {
try {
zkClient.delete(Overseer.OVERSEER_ELECT + LeaderElector.ELECTION_NODE + "/" + electionNode, -1, true);
} catch (NoNodeException e) {
//no problem
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
              log.warn("Old election node exists, could not be removed", e);
}
}
} else { // We're in the right place, now attempt to rejoin
overseerElector.retryElection(new OverseerElectionContext(zkClient,
overseer, getNodeName()), joinAtHead);
return;
}
} else {
overseerElector.retryElection(overseerElector.getContext(), joinAtHead);
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to rejoin election", e);
}
}
public void rejoinShardLeaderElection(SolrParams params) {
String collectionName = params.get(COLLECTION_PROP);
String shardId = params.get(SHARD_ID_PROP);
String coreNodeName = params.get(CORE_NODE_NAME_PROP);
String coreName = params.get(CORE_NAME_PROP);
String electionNode = params.get(ELECTION_NODE_PROP);
String baseUrl = params.get(BASE_URL_PROP);
try {
MDCLoggingContext.setCoreDescriptor(cc, cc.getCoreDescriptor(coreName));
log.info("Rejoin the shard leader election.");
ContextKey contextKey = new ContextKey(collectionName, coreNodeName);
ElectionContext prevContext = electionContexts.get(contextKey);
if (prevContext != null) prevContext.cancelElection();
ZkNodeProps zkProps = new ZkNodeProps(BASE_URL_PROP, baseUrl, CORE_NAME_PROP, coreName, NODE_NAME_PROP, getNodeName(), CORE_NODE_NAME_PROP, coreNodeName);
LeaderElector elect = ((ShardLeaderElectionContextBase) prevContext).getLeaderElector();
ShardLeaderElectionContext context = new ShardLeaderElectionContext(elect, shardId, collectionName,
coreNodeName, zkProps, this, getCoreContainer());
context.leaderSeqPath = context.electionPath + LeaderElector.ELECTION_NODE + "/" + electionNode;
elect.setup(context);
electionContexts.put(contextKey, context);
elect.retryElection(context, params.getBool(REJOIN_AT_HEAD_PROP, false));
try (SolrCore core = cc.getCore(coreName)) {
Replica.Type replicaType = core.getCoreDescriptor().getCloudDescriptor().getReplicaType();
if (replicaType == Type.TLOG) {
String leaderUrl = getLeader(core.getCoreDescriptor().getCloudDescriptor(), cloudConfig.getLeaderVoteWait());
String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
if (!leaderUrl.equals(ourUrl)) {
// restart the replication thread to ensure the replication is running in each new replica
// especially if previous role is "leader" (i.e., no replication thread)
stopReplicationFromLeader(coreName);
startReplicationFromLeader(coreName, false);
}
}
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to rejoin election", e);
} finally {
MDCLoggingContext.clear();
}
}
public void checkOverseerDesignate() {
try {
byte[] data = zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true);
if (data == null) return;
@SuppressWarnings({"rawtypes"})
Map roles = (Map) Utils.fromJSON(data);
if (roles == null) return;
@SuppressWarnings({"rawtypes"})
List nodeList = (List) roles.get("overseer");
if (nodeList == null) return;
if (nodeList.contains(getNodeName())) {
ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.ADDROLE.toString().toLowerCase(Locale.ROOT),
"node", getNodeName(),
"role", "overseer");
log.info("Going to add role {} ", props);
getOverseerCollectionQueue().offer(Utils.toJSON(props));
}
} catch (NoNodeException nne) {
return;
} catch (Exception e) {
log.warn("could not read the overseer designate ", e);
}
}
public CoreContainer getCoreContainer() {
return cc;
}
public void throwErrorIfReplicaReplaced(CoreDescriptor desc) {
ClusterState clusterState = getZkStateReader().getClusterState();
if (clusterState != null) {
DocCollection collection = clusterState.getCollectionOrNull(desc
.getCloudDescriptor().getCollectionName());
if (collection != null) {
CloudUtil.checkSharedFSFailoverReplaced(cc, desc);
}
}
}
/**
* Add a listener to be notified once there is a new session created after a ZooKeeper session expiration occurs;
* in most cases, listeners will be components that have watchers that need to be re-created.
*/
public void addOnReconnectListener(OnReconnect listener) {
if (listener != null) {
synchronized (reconnectListeners) {
reconnectListeners.add(listener);
log.debug("Added new OnReconnect listener {}", listener);
}
}
}
/**
   * Removes a previously registered OnReconnect listener, such as when a core is removed or reloaded.
*/
public void removeOnReconnectListener(OnReconnect listener) {
if (listener != null) {
boolean wasRemoved;
synchronized (reconnectListeners) {
wasRemoved = reconnectListeners.remove(listener);
}
if (wasRemoved) {
log.debug("Removed OnReconnect listener {}", listener);
} else {
log.warn("Was asked to remove OnReconnect listener {}, but remove operation " +
"did not find it in the list of registered listeners."
, listener);
}
}
}
@SuppressWarnings({"unchecked"})
Set<OnReconnect> getCurrentOnReconnectListeners() {
HashSet<OnReconnect> clonedListeners;
synchronized (reconnectListeners) {
clonedListeners = (HashSet<OnReconnect>)reconnectListeners.clone();
}
return clonedListeners;
}
/**
* Persists a config file to ZooKeeper using optimistic concurrency.
*
   * @return the znode version of the persisted resource
*/
public static int persistConfigResourceToZooKeeper(ZkSolrResourceLoader zkLoader, int znodeVersion,
String resourceName, byte[] content,
boolean createIfNotExists) {
int latestVersion = znodeVersion;
final ZkController zkController = zkLoader.getZkController();
final SolrZkClient zkClient = zkController.getZkClient();
final String resourceLocation = zkLoader.getConfigSetZkPath() + "/" + resourceName;
String errMsg = "Failed to persist resource at {0} - old {1}";
try {
try {
Stat stat = zkClient.setData(resourceLocation, content, znodeVersion, true);
        latestVersion = stat.getVersion(); // if the set succeeded, it will always have incremented the version by one
log.info("Persisted config data to node {} ", resourceLocation);
touchConfDir(zkLoader);
} catch (NoNodeException e) {
if (createIfNotExists) {
try {
zkClient.create(resourceLocation, content, CreateMode.PERSISTENT, true);
            latestVersion = 0; // just created, so the version must be zero
touchConfDir(zkLoader);
} catch (KeeperException.NodeExistsException nee) {
try {
Stat stat = zkClient.exists(resourceLocation, null, true);
if (log.isDebugEnabled()) {
              log.debug("failed to set data; version in zk is {} and expected version is {}", stat.getVersion(), znodeVersion);
}
} catch (Exception e1) {
            log.warn("could not get stat", e1);
}
if (log.isInfoEnabled()) {
log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion));
}
throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
}
}
}
} catch (KeeperException.BadVersionException bve) {
int v = -1;
try {
Stat stat = zkClient.exists(resourceLocation, null, true);
v = stat.getVersion();
} catch (Exception e) {
log.error("Exception during ZooKeeper node checking ", e);
}
if (log.isInfoEnabled()) {
        log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", current zkVersion is " + v);
}
throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
} catch (ResourceModifiedInZkException e) {
throw e;
} catch (Exception e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt(); // Restore the interrupted status
}
final String msg = "Error persisting resource at " + resourceLocation;
log.error(msg, e);
throw new SolrException(ErrorCode.SERVER_ERROR, msg, e);
}
return latestVersion;
}
public static void touchConfDir(ZkSolrResourceLoader zkLoader) {
SolrZkClient zkClient = zkLoader.getZkController().getZkClient();
try {
zkClient.setData(zkLoader.getConfigSetZkPath(), new byte[]{0}, true);
} catch (Exception e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt(); // Restore the interrupted status
}
final String msg = "Error 'touching' conf location " + zkLoader.getConfigSetZkPath();
log.error(msg, e);
throw new SolrException(ErrorCode.SERVER_ERROR, msg, e);
}
}
public static class ResourceModifiedInZkException extends SolrException {
public ResourceModifiedInZkException(ErrorCode code, String msg) {
super(code, msg);
}
}
private void unregisterConfListener(String confDir, Runnable listener) {
synchronized (confDirectoryListeners) {
final Set<Runnable> listeners = confDirectoryListeners.get(confDir);
if (listeners == null) {
log.warn("{} has no more registered listeners, but a live one attempted to unregister!", confDir);
return;
}
if (listeners.remove(listener)) {
log.debug("removed listener for config directory [{}]", confDir);
}
if (listeners.isEmpty()) {
// no more listeners for this confDir, remove it from the map
log.debug("No more listeners for config directory [{}]", confDir);
confDirectoryListeners.remove(confDir);
}
}
}
/**
* This will give a callback to the listener whenever a child is modified in the
* conf directory. It is the responsibility of the listener to check if the individual
* item of interest has been modified. When the last core which was interested in
* this conf directory is gone the listeners will be removed automatically.
*/
public void registerConfListenerForCore(final String confDir, SolrCore core, final Runnable listener) {
if (listener == null) {
throw new NullPointerException("listener cannot be null");
}
synchronized (confDirectoryListeners) {
final Set<Runnable> confDirListeners = getConfDirListeners(confDir);
confDirListeners.add(listener);
core.addCloseHook(new CloseHook() {
@Override
public void preClose(SolrCore core) {
unregisterConfListener(confDir, listener);
}
@Override
public void postClose(SolrCore core) {
}
});
}
}
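
  // Illustrative sketch (not part of the original class): registering a listener for a
  // core's config set directory. The listener fires for any change under the directory,
  // so it must itself decide whether the resource it cares about was modified; the
  // version comparison mentioned below is a hypothetical example of doing that.
  private void exampleRegisterConfListener(SolrCore core) {
    String confDir = ((ZkSolrResourceLoader) core.getResourceLoader()).getConfigSetZkPath();
    registerConfListenerForCore(confDir, core, () -> {
      log.info("conf directory {} changed; re-checking resources of interest", confDir);
      // hypothetical: compare the stored znode version of e.g. solrconfig.xml and reload if newer
    });
  }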
  // this method must be called while holding the confDirectoryListeners lock
private Set<Runnable> getConfDirListeners(final String confDir) {
assert Thread.holdsLock(confDirectoryListeners) : "confDirListeners lock not held by thread";
Set<Runnable> confDirListeners = confDirectoryListeners.get(confDir);
if (confDirListeners == null) {
      log.debug("watch zkdir {}", confDir);
confDirListeners = new HashSet<>();
confDirectoryListeners.put(confDir, confDirListeners);
setConfWatcher(confDir, new WatcherImpl(confDir), null);
}
return confDirListeners;
}
private final Map<String, Set<Runnable>> confDirectoryListeners = new HashMap<>();
private class WatcherImpl implements Watcher {
private final String zkDir;
private WatcherImpl(String dir) {
this.zkDir = dir;
}
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (Event.EventType.None.equals(event.getType())) {
return;
}
Stat stat = null;
try {
stat = zkClient.exists(zkDir, null, true);
} catch (KeeperException e) {
        // ignore, it is not a big deal
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
boolean resetWatcher = false;
try {
resetWatcher = fireEventListeners(zkDir);
} finally {
if (Event.EventType.None.equals(event.getType())) {
log.debug("A node got unwatched for {}", zkDir);
} else {
if (resetWatcher) setConfWatcher(zkDir, this, stat);
else log.debug("A node got unwatched for {}", zkDir);
}
}
}
}
private boolean fireEventListeners(String zkDir) {
if (isClosed || cc.isShutDown()) {
return false;
}
synchronized (confDirectoryListeners) {
// if this is not among directories to be watched then don't set the watcher anymore
if (!confDirectoryListeners.containsKey(zkDir)) {
log.debug("Watcher on {} is removed ", zkDir);
return false;
}
final Set<Runnable> listeners = confDirectoryListeners.get(zkDir);
if (listeners != null && !listeners.isEmpty()) {
final Set<Runnable> listenersCopy = new HashSet<>(listeners);
// run these in a separate thread because this can be long running
new Thread(() -> {
log.debug("Running listeners for {}", zkDir);
for (final Runnable listener : listenersCopy) {
try {
listener.run();
} catch (Exception e) {
log.warn("listener throws error", e);
}
}
}).start();
}
}
return true;
}
private void setConfWatcher(String zkDir, Watcher watcher, Stat stat) {
try {
Stat newStat = zkClient.exists(zkDir, watcher, true);
      if (stat != null && newStat != null && newStat.getVersion() > stat.getVersion()) {
        // a race condition where we missed a fired event,
        // so fire the event listeners
fireEventListeners(zkDir);
}
} catch (KeeperException e) {
      log.error("failed to set watcher for conf dir {}", zkDir, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
      log.error("failed to set watcher for conf dir {}", zkDir, e);
}
}
public OnReconnect getConfigDirListener() {
return () -> {
synchronized (confDirectoryListeners) {
for (String s : confDirectoryListeners.keySet()) {
setConfWatcher(s, new WatcherImpl(s), null);
fireEventListeners(s);
}
}
};
}
/** @lucene.internal */
class UnloadCoreOnDeletedWatcher implements DocCollectionWatcher {
String coreNodeName;
String shard;
String coreName;
public UnloadCoreOnDeletedWatcher(String coreNodeName, String shard, String coreName) {
this.coreNodeName = coreNodeName;
this.shard = shard;
this.coreName = coreName;
}
@Override
// synchronized due to SOLR-11535
public synchronized boolean onStateChanged(DocCollection collectionState) {
if (getCoreContainer().getCoreDescriptor(coreName) == null) return true;
boolean replicaRemoved = getReplicaOrNull(collectionState, shard, coreNodeName) == null;
if (replicaRemoved) {
try {
          log.info("Replica {} removed from clusterstate, removing it.", coreName);
getCoreContainer().unload(coreName, true, true, true);
} catch (SolrException e) {
if (!e.getMessage().contains("Cannot unload non-existent core")) {
// no need to log if the core was already unloaded
log.warn("Failed to unregister core:{}", coreName, e);
}
} catch (Exception e) {
log.warn("Failed to unregister core:{}", coreName, e);
}
}
return replicaRemoved;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UnloadCoreOnDeletedWatcher that = (UnloadCoreOnDeletedWatcher) o;
return Objects.equals(coreNodeName, that.coreNodeName) &&
Objects.equals(shard, that.shard) &&
Objects.equals(coreName, that.coreName);
}
@Override
public int hashCode() {
return Objects.hash(coreNodeName, shard, coreName);
}
}
/**
   * Thrown during the pre-register process if the replica is not present in the clusterstate
*/
public static class NotInClusterStateException extends SolrException {
public NotInClusterStateException(ErrorCode code, String msg) {
super(code, msg);
}
}
public boolean checkIfCoreNodeNameAlreadyExists(CoreDescriptor dcore) {
DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(dcore.getCollectionName());
if (collection != null) {
Collection<Slice> slices = collection.getSlices();
for (Slice slice : slices) {
        Replica r = slice.getReplica(dcore.getCloudDescriptor().getCoreNodeName());
if (r != null) {
return true;
}
}
}
return false;
}
/**
* Best effort to set DOWN state for all replicas on node.
*
* @param nodeName to operate on
*/
public void publishNodeAsDown(String nodeName) {
log.info("Publish node={} as DOWN", nodeName);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
ZkStateReader.NODE_NAME_PROP, nodeName);
try {
overseer.getStateUpdateQueue().offer(Utils.toJSON(m));
} catch (AlreadyClosedException e) {
log.info("Not publishing node as DOWN because a resource required to do so is already closed.");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.debug("Publish node as down was interrupted.");
} catch (KeeperException e) {
log.warn("Could not publish node as down: ", e);
}
}
/**
* Ensures that a searcher is registered for the given core and if not, waits until one is registered
*/
private static void ensureRegisteredSearcher(SolrCore core) throws InterruptedException {
if (!core.getSolrConfig().useColdSearcher) {
RefCounted<SolrIndexSearcher> registeredSearcher = core.getRegisteredSearcher();
if (registeredSearcher != null) {
if (log.isDebugEnabled()) {
log.debug("Found a registered searcher: {} for core: {}", registeredSearcher.get(), core);
}
registeredSearcher.decref();
} else {
@SuppressWarnings({"rawtypes"})
Future[] waitSearcher = new Future[1];
if (log.isInfoEnabled()) {
log.info("No registered searcher found for core: {}, waiting until a searcher is registered before publishing as active", core.getName());
}
final RTimer timer = new RTimer();
RefCounted<SolrIndexSearcher> searcher = null;
try {
searcher = core.getSearcher(false, true, waitSearcher, true);
boolean success = true;
if (waitSearcher[0] != null) {
if (log.isDebugEnabled()) {
log.debug("Waiting for first searcher of core {}, id: {} to be registered", core.getName(), core);
}
try {
waitSearcher[0].get();
} catch (ExecutionException e) {
log.warn("Wait for a searcher to be registered for core {}, id: {} failed due to: {}", core.getName(), core, e, e);
success = false;
}
}
if (success) {
if (searcher == null) {
// should never happen
if (log.isDebugEnabled()) {
log.debug("Did not find a searcher even after the future callback for core: {}, id: {}!!!", core.getName(), core);
}
} else {
if (log.isInfoEnabled()) {
log.info("Found a registered searcher: {}, took: {} ms for core: {}, id: {}", searcher.get(), timer.getTime(), core.getName(), core);
}
}
}
} finally {
if (searcher != null) {
searcher.decref();
}
}
}
RefCounted<SolrIndexSearcher> newestSearcher = core.getNewestSearcher(false);
if (newestSearcher != null) {
if (log.isDebugEnabled()) {
log.debug("Found newest searcher: {} for core: {}, id: {}", newestSearcher.get(), core.getName(), core);
}
newestSearcher.decref();
}
}
}
}
 | 1 | 37,868 | While we're here, this line (and a few others) should be `customThreadPool.submit(() -> IOUtils.closeQuietly(overseer));` I have no idea why we're creating a collection and a stream for a single object. | apache-lucene-solr | java |
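The review message above refers to a close() pattern elsewhere in the file that this excerpt does not show. As a hedged sketch, the criticized line presumably resembles the "before" form below, inferred from the comment's wording about "a collection and a stream for a single object"; the "after" form is the reviewer's suggestion with its missing closing parenthesis restored:

    // before (as criticized, an assumption): a single object wrapped in a collection and a stream just to close it
    customThreadPool.submit(() -> Collections.singleton(overseer).parallelStream().forEach(IOUtils::closeQuietly));
    // after (the reviewer's suggestion): close the single object directly
    customThreadPool.submit(() -> IOUtils.closeQuietly(overseer));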
@@ -335,6 +335,18 @@ func ValidateACMEChallengeSolverDNS01(p *cmacme.ACMEChallengeSolverDNS01, fldPat
if len(p.AzureDNS.TenantID) == 0 {
el = append(el, field.Required(fldPath.Child("azureDNS", "tenantID"), ""))
}
+ if len(p.AzureDNS.ManagedIdentityClientID) > 0 {
+      el = append(el, field.Forbidden(fldPath.Child("azureDNS", "managedIdentityClientID"), "managed identity cannot be used at the same time as clientID, tenantID or clientSecret"))
+ }
+ if len(p.AzureDNS.ManagedIdentityResourceID) > 0 {
+      el = append(el, field.Forbidden(fldPath.Child("azureDNS", "managedIdentityResourceID"), "managed identity cannot be used at the same time as clientID, tenantID or clientSecret"))
+ }
+ } else {
+ // using managed identity
+ if len(p.AzureDNS.ManagedIdentityClientID) > 0 && len(p.AzureDNS.ManagedIdentityResourceID) > 0 {
+      el = append(el, field.Forbidden(fldPath.Child("azureDNS"), "managedIdentityClientID and managedIdentityResourceID cannot both be specified"))
+ }
+
}
// SubscriptionID must always be defined
if len(p.AzureDNS.SubscriptionID) == 0 { | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"crypto/x509"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/jetstack/cert-manager/pkg/internal/api/validation"
cmacme "github.com/jetstack/cert-manager/pkg/internal/apis/acme"
"github.com/jetstack/cert-manager/pkg/internal/apis/certmanager"
"github.com/jetstack/cert-manager/pkg/internal/apis/certmanager/validation/util"
cmmeta "github.com/jetstack/cert-manager/pkg/internal/apis/meta"
)
// Validation functions for cert-manager Issuer types.
func ValidateIssuer(a *admissionv1.AdmissionRequest, obj runtime.Object) (field.ErrorList, validation.WarningList) {
iss := obj.(*certmanager.Issuer)
allErrs, warnings := ValidateIssuerSpec(&iss.Spec, field.NewPath("spec"))
warnings = append(warnings, validateAPIVersion(a.RequestKind)...)
return allErrs, warnings
}
func ValidateUpdateIssuer(a *admissionv1.AdmissionRequest, oldObj, obj runtime.Object) (field.ErrorList, validation.WarningList) {
iss := obj.(*certmanager.Issuer)
allErrs, warnings := ValidateIssuerSpec(&iss.Spec, field.NewPath("spec"))
// Admission request should never be nil
warnings = append(warnings, validateAPIVersion(a.RequestKind)...)
return allErrs, warnings
}
func ValidateIssuerSpec(iss *certmanager.IssuerSpec, fldPath *field.Path) (field.ErrorList, validation.WarningList) {
return ValidateIssuerConfig(&iss.IssuerConfig, fldPath)
}
func ValidateIssuerConfig(iss *certmanager.IssuerConfig, fldPath *field.Path) (field.ErrorList, validation.WarningList) {
var warnings validation.WarningList
numConfigs := 0
el := field.ErrorList{}
if iss.ACME != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("acme"), "may not specify more than one issuer type"))
} else {
numConfigs++
e, w := ValidateACMEIssuerConfig(iss.ACME, fldPath.Child("acme"))
el, warnings = append(el, e...), append(warnings, w...)
}
}
if iss.CA != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("ca"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateCAIssuerConfig(iss.CA, fldPath.Child("ca"))...)
}
}
if iss.SelfSigned != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("selfSigned"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateSelfSignedIssuerConfig(iss.SelfSigned, fldPath.Child("selfSigned"))...)
}
}
if iss.Vault != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("vault"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateVaultIssuerConfig(iss.Vault, fldPath.Child("vault"))...)
}
}
if iss.Venafi != nil {
if numConfigs > 0 {
el = append(el, field.Forbidden(fldPath.Child("venafi"), "may not specify more than one issuer type"))
} else {
numConfigs++
el = append(el, ValidateVenafiIssuerConfig(iss.Venafi, fldPath.Child("venafi"))...)
}
}
if numConfigs == 0 {
el = append(el, field.Required(fldPath, "at least one issuer must be configured"))
}
return el, warnings
}
func ValidateACMEIssuerConfig(iss *cmacme.ACMEIssuer, fldPath *field.Path) (field.ErrorList, validation.WarningList) {
var warnings validation.WarningList
el := field.ErrorList{}
if len(iss.PrivateKey.Name) == 0 {
el = append(el, field.Required(fldPath.Child("privateKeySecretRef", "name"), "private key secret name is a required field"))
}
if len(iss.Server) == 0 {
el = append(el, field.Required(fldPath.Child("server"), "acme server URL is a required field"))
}
if eab := iss.ExternalAccountBinding; eab != nil {
eabFldPath := fldPath.Child("externalAccountBinding")
if len(eab.KeyID) == 0 {
el = append(el, field.Required(eabFldPath.Child("keyID"), "the keyID field is required when using externalAccountBinding"))
}
el = append(el, ValidateSecretKeySelector(&eab.Key, eabFldPath.Child("keySecretRef"))...)
if len(eab.KeyAlgorithm) != 0 {
warnings = append(warnings, deprecatedACMEEABKeyAlgorithmField)
}
}
for i, sol := range iss.Solvers {
el = append(el, ValidateACMEIssuerChallengeSolverConfig(&sol, fldPath.Child("solvers").Index(i))...)
}
return el, warnings
}
func ValidateACMEIssuerChallengeSolverConfig(sol *cmacme.ACMEChallengeSolver, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
numProviders := 0
if sol.HTTP01 != nil {
numProviders++
el = append(el, ValidateACMEIssuerChallengeSolverHTTP01Config(sol.HTTP01, fldPath.Child("http01"))...)
}
if sol.DNS01 != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath, "may not specify more than one solver type in a single solver"))
} else {
numProviders++
el = append(el, ValidateACMEChallengeSolverDNS01(sol.DNS01, fldPath.Child("dns01"))...)
}
}
if numProviders == 0 {
el = append(el, field.Required(fldPath, "no solver type configured"))
}
return el
}
func ValidateACMEIssuerChallengeSolverHTTP01Config(http01 *cmacme.ACMEChallengeSolverHTTP01, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
numDefined := 0
if http01.Ingress != nil {
numDefined++
el = append(el, ValidateACMEIssuerChallengeSolverHTTP01IngressConfig(http01.Ingress, fldPath.Child("ingress"))...)
}
if http01.GatewayHTTPRoute != nil {
numDefined++
el = append(el, ValidateACMEIssuerChallengeSolverHTTP01GatewayConfig(http01.GatewayHTTPRoute, fldPath.Child("gateway"))...)
}
if numDefined == 0 {
el = append(el, field.Required(fldPath, "no HTTP01 solver type configured"))
}
if numDefined > 1 {
el = append(el, field.Required(fldPath, "only 1 HTTP01 solver type may be configured"))
}
return el
}
func ValidateACMEIssuerChallengeSolverHTTP01IngressConfig(ingress *cmacme.ACMEChallengeSolverHTTP01Ingress, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if ingress.Class != nil && len(ingress.Name) > 0 {
el = append(el, field.Forbidden(fldPath, "only one of 'name' or 'class' should be specified"))
}
switch ingress.ServiceType {
case "", corev1.ServiceTypeClusterIP, corev1.ServiceTypeNodePort:
default:
el = append(el, field.Invalid(fldPath.Child("serviceType"), ingress.ServiceType, `must be empty, "ClusterIP" or "NodePort"`))
}
return el
}
func ValidateACMEIssuerChallengeSolverHTTP01GatewayConfig(gateway *cmacme.ACMEChallengeSolverHTTP01GatewayHTTPRoute, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(gateway.Labels) == 0 {
el = append(el, field.Required(fldPath.Child("labels"), `labels must be set`))
}
switch gateway.ServiceType {
case "", corev1.ServiceTypeClusterIP, corev1.ServiceTypeNodePort:
default:
el = append(el, field.Invalid(fldPath.Child("serviceType"), gateway.ServiceType, `must be empty, "ClusterIP" or "NodePort"`))
}
return el
}
func ValidateCAIssuerConfig(iss *certmanager.CAIssuer, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(iss.SecretName) == 0 {
el = append(el, field.Required(fldPath.Child("secretName"), ""))
}
for i, ocspURL := range iss.OCSPServers {
if ocspURL == "" {
el = append(el, field.Invalid(fldPath.Child("ocspServer").Index(i), ocspURL, "must be a valid URL, e.g., http://ocsp.int-x3.letsencrypt.org"))
}
}
return el
}
func ValidateSelfSignedIssuerConfig(iss *certmanager.SelfSignedIssuer, fldPath *field.Path) field.ErrorList {
return nil
}
func ValidateVaultIssuerConfig(iss *certmanager.VaultIssuer, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if len(iss.Server) == 0 {
el = append(el, field.Required(fldPath.Child("server"), ""))
}
if len(iss.Path) == 0 {
el = append(el, field.Required(fldPath.Child("path"), ""))
}
// check if caBundle is valid
certs := iss.CABundle
if len(certs) > 0 {
caCertPool := x509.NewCertPool()
ok := caCertPool.AppendCertsFromPEM(certs)
if !ok {
el = append(el, field.Invalid(fldPath.Child("caBundle"), "", "Specified CA bundle is invalid"))
}
}
return el
// TODO: add validation for Vault authentication types
}
func ValidateVenafiTPP(tpp *certmanager.VenafiTPP, fldPath *field.Path) (el field.ErrorList) {
if tpp.URL == "" {
el = append(el, field.Required(fldPath.Child("url"), ""))
}
return el
}
func ValidateVenafiCloud(c *certmanager.VenafiCloud, fldPath *field.Path) (el field.ErrorList) {
return el
}
func ValidateVenafiIssuerConfig(iss *certmanager.VenafiIssuer, fldPath *field.Path) (el field.ErrorList) {
if iss.Zone == "" {
el = append(el, field.Required(fldPath.Child("zone"), ""))
}
unionCount := 0
if iss.TPP != nil {
unionCount++
el = append(el, ValidateVenafiTPP(iss.TPP, fldPath.Child("tpp"))...)
}
if iss.Cloud != nil {
unionCount++
el = append(el, ValidateVenafiCloud(iss.Cloud, fldPath.Child("cloud"))...)
}
if unionCount == 0 {
el = append(el, field.Required(fldPath, "please supply one of: tpp, cloud"))
}
if unionCount > 1 {
el = append(el, field.Forbidden(fldPath, "please supply one of: tpp, cloud"))
}
return el
}
// This list must be kept in sync with pkg/issuer/acme/dns/rfc2136/rfc2136.go
var supportedTSIGAlgorithms = []string{
"HMACMD5",
"HMACSHA1",
"HMACSHA256",
"HMACSHA512",
}
func ValidateACMEChallengeSolverDNS01(p *cmacme.ACMEChallengeSolverDNS01, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
// allow empty values for now, until we have a MutatingWebhook to apply
// default values to fields.
if len(p.CNAMEStrategy) > 0 {
switch p.CNAMEStrategy {
case cmacme.NoneStrategy:
case cmacme.FollowStrategy:
default:
el = append(el, field.Invalid(fldPath.Child("cnameStrategy"), p.CNAMEStrategy, fmt.Sprintf("must be one of %q or %q", cmacme.NoneStrategy, cmacme.FollowStrategy)))
}
}
numProviders := 0
if p.Akamai != nil {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.Akamai.AccessToken, fldPath.Child("akamai", "accessToken"))...)
el = append(el, ValidateSecretKeySelector(&p.Akamai.ClientSecret, fldPath.Child("akamai", "clientSecret"))...)
el = append(el, ValidateSecretKeySelector(&p.Akamai.ClientToken, fldPath.Child("akamai", "clientToken"))...)
if len(p.Akamai.ServiceConsumerDomain) == 0 {
el = append(el, field.Required(fldPath.Child("akamai", "serviceConsumerDomain"), ""))
}
}
if p.AzureDNS != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("azureDNS"), "may not specify more than one provider type"))
} else {
numProviders++
			// if ClientID or ClientSecret or TenantID are defined then all of ClientID, ClientSecret and TenantID must be defined.
			// We check the fields separately so that each missing field gets its own error.
if len(p.AzureDNS.ClientID) > 0 || len(p.AzureDNS.TenantID) > 0 || p.AzureDNS.ClientSecret != nil {
if len(p.AzureDNS.ClientID) == 0 {
el = append(el, field.Required(fldPath.Child("azureDNS", "clientID"), ""))
}
if p.AzureDNS.ClientSecret == nil {
el = append(el, field.Required(fldPath.Child("azureDNS", "clientSecretSecretRef"), ""))
} else {
el = append(el, ValidateSecretKeySelector(p.AzureDNS.ClientSecret, fldPath.Child("azureDNS", "clientSecretSecretRef"))...)
}
if len(p.AzureDNS.TenantID) == 0 {
el = append(el, field.Required(fldPath.Child("azureDNS", "tenantID"), ""))
}
}
// SubscriptionID must always be defined
if len(p.AzureDNS.SubscriptionID) == 0 {
el = append(el, field.Required(fldPath.Child("azureDNS", "subscriptionID"), ""))
}
// ResourceGroupName must always be defined
if len(p.AzureDNS.ResourceGroupName) == 0 {
el = append(el, field.Required(fldPath.Child("azureDNS", "resourceGroupName"), ""))
}
switch p.AzureDNS.Environment {
case "", cmacme.AzurePublicCloud, cmacme.AzureChinaCloud, cmacme.AzureGermanCloud, cmacme.AzureUSGovernmentCloud:
default:
el = append(el, field.Invalid(fldPath.Child("azureDNS", "environment"), p.AzureDNS.Environment,
fmt.Sprintf("must be either empty or one of %s, %s, %s or %s", cmacme.AzurePublicCloud, cmacme.AzureChinaCloud, cmacme.AzureGermanCloud, cmacme.AzureUSGovernmentCloud)))
}
}
}
if p.CloudDNS != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("cloudDNS"), "may not specify more than one provider type"))
} else {
numProviders++
// if service account is not nil we validate the entire secret key
// selector
if p.CloudDNS.ServiceAccount != nil {
el = append(el, ValidateSecretKeySelector(p.CloudDNS.ServiceAccount, fldPath.Child("cloudDNS", "serviceAccountSecretRef"))...)
}
if len(p.CloudDNS.Project) == 0 {
el = append(el, field.Required(fldPath.Child("cloudDNS", "project"), ""))
}
}
}
if p.Cloudflare != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("cloudflare"), "may not specify more than one provider type"))
} else {
numProviders++
if p.Cloudflare.APIKey != nil {
el = append(el, ValidateSecretKeySelector(p.Cloudflare.APIKey, fldPath.Child("cloudflare", "apiKeySecretRef"))...)
}
if p.Cloudflare.APIToken != nil {
el = append(el, ValidateSecretKeySelector(p.Cloudflare.APIToken, fldPath.Child("cloudflare", "apiTokenSecretRef"))...)
}
if p.Cloudflare.APIKey != nil && p.Cloudflare.APIToken != nil {
el = append(el, field.Forbidden(fldPath.Child("cloudflare"), "apiKeySecretRef and apiTokenSecretRef cannot both be specified"))
}
if p.Cloudflare.APIKey == nil && p.Cloudflare.APIToken == nil {
el = append(el, field.Required(fldPath.Child("cloudflare"), "apiKeySecretRef or apiTokenSecretRef is required"))
}
if len(p.Cloudflare.Email) == 0 && p.Cloudflare.APIKey != nil {
el = append(el, field.Required(fldPath.Child("cloudflare", "email"), ""))
}
}
}
if p.Route53 != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("route53"), "may not specify more than one provider type"))
} else {
numProviders++
// region is the only required field for route53 as ambient credentials can be used instead
if len(p.Route53.Region) == 0 {
el = append(el, field.Required(fldPath.Child("route53", "region"), ""))
}
}
}
if p.AcmeDNS != nil {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.AcmeDNS.AccountSecret, fldPath.Child("acmeDNS", "accountSecretRef"))...)
if len(p.AcmeDNS.Host) == 0 {
el = append(el, field.Required(fldPath.Child("acmeDNS", "host"), ""))
}
}
if p.DigitalOcean != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("digitalocean"), "may not specify more than one provider type"))
} else {
numProviders++
el = append(el, ValidateSecretKeySelector(&p.DigitalOcean.Token, fldPath.Child("digitalocean", "tokenSecretRef"))...)
}
}
if p.RFC2136 != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("rfc2136"), "may not specify more than one provider type"))
} else {
numProviders++
// Nameserver is the only required field for RFC2136
if len(p.RFC2136.Nameserver) == 0 {
el = append(el, field.Required(fldPath.Child("rfc2136", "nameserver"), ""))
} else {
if _, err := util.ValidNameserver(p.RFC2136.Nameserver); err != nil {
el = append(el, field.Invalid(fldPath.Child("rfc2136", "nameserver"), p.RFC2136.Nameserver, "nameserver must be set in the form host:port where host is an IPv4 address, an enclosed IPv6 address or a hostname and port is an optional port number."))
}
}
if len(p.RFC2136.TSIGAlgorithm) > 0 {
present := false
for _, b := range supportedTSIGAlgorithms {
if b == strings.ToUpper(p.RFC2136.TSIGAlgorithm) {
present = true
}
}
if !present {
el = append(el, field.NotSupported(fldPath.Child("rfc2136", "tsigAlgorithm"), p.RFC2136.TSIGAlgorithm, supportedTSIGAlgorithms))
}
}
if len(p.RFC2136.TSIGKeyName) > 0 {
el = append(el, ValidateSecretKeySelector(&p.RFC2136.TSIGSecret, fldPath.Child("rfc2136", "tsigSecretSecretRef"))...)
}
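// conversely, if the TSIG secret reference itself is fully specified, a TSIG key name must be provided as well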
if len(ValidateSecretKeySelector(&p.RFC2136.TSIGSecret, fldPath.Child("rfc2136", "tsigSecretSecretRef"))) == 0 {
if len(p.RFC2136.TSIGKeyName) <= 0 {
el = append(el, field.Required(fldPath.Child("rfc2136", "tsigKeyName"), ""))
}
}
}
}
if p.Webhook != nil {
if numProviders > 0 {
el = append(el, field.Forbidden(fldPath.Child("webhook"), "may not specify more than one provider type"))
} else {
numProviders++
if len(p.Webhook.SolverName) == 0 {
el = append(el, field.Required(fldPath.Child("webhook", "solverName"), "solver name must be specified"))
}
}
}
if numProviders == 0 {
el = append(el, field.Required(fldPath, "no DNS01 provider configured"))
}
return el
}
func ValidateSecretKeySelector(sks *cmmeta.SecretKeySelector, fldPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if sks.Name == "" {
el = append(el, field.Required(fldPath.Child("name"), "secret name is required"))
}
if sks.Key == "" {
el = append(el, field.Required(fldPath.Child("key"), "secret key is required"))
}
return el
}
| 1 | 29,054 | Should these restrictions also be reflected in the API docs? | jetstack-cert-manager | go |
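On the review question above: cert-manager's API reference is generated from doc comments on the API structs, so the "exactly one of tpp, cloud" rule enforced by ValidateVenafiIssuerConfig could also be stated there. A hedged sketch follows; the field names are inferred from the validation code, and the exact wording and placement are assumptions, not the project's actual docs.

// VenafiIssuer configures an issuer backed by the Venafi platform.
type VenafiIssuer struct {
	// Zone is the Venafi policy zone to use; required.
	Zone string `json:"zone"`

	// TPP configures a Venafi Trust Protection Platform instance.
	// Exactly one of tpp or cloud must be specified.
	TPP *VenafiTPP `json:"tpp,omitempty"`

	// Cloud configures Venafi Cloud.
	// Exactly one of tpp or cloud must be specified.
	Cloud *VenafiCloud `json:"cloud,omitempty"`
}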
@@ -209,6 +209,13 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
+ // restore original request before invoking error handler chain (issue #3717)
+ origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
+ r.Method = origReq.Method
+ r.RemoteAddr = origReq.RemoteAddr
+ r.RequestURI = origReq.RequestURI
+ cloneURL(origReq.URL, r.URL)
+
// prepare the error log
logger := errLog
if s.Logs != nil { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/lucas-clemente/quic-go/http3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Server describes an HTTP server.
type Server struct {
// Socket addresses to which to bind listeners. Accepts
// [network addresses](/docs/conventions#network-addresses)
// that may include port ranges. Listener addresses must
// be unique; they cannot be repeated across all defined
// servers.
Listen []string `json:"listen,omitempty"`
// A list of listener wrapper modules, which can modify the behavior
// of the base listener. They are applied in the given order.
ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`
// How long to allow a read from a client's upload. Setting this
// to a short, non-zero value can mitigate slowloris attacks, but
// may also affect legitimately slow clients.
ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`
// ReadHeaderTimeout is like ReadTimeout but for request headers.
ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`
// WriteTimeout is how long to allow a write to a client. Note
// that setting this to a small value when serving large files
// may negatively affect legitimately slow clients.
WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
// IdleTimeout is the maximum time to wait for the next request
// when keep-alives are enabled. If zero, ReadTimeout is used.
// If both are zero, there is no timeout.
IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`
// MaxHeaderBytes is the maximum size to parse from a client's
// HTTP request headers.
MaxHeaderBytes int `json:"max_header_bytes,omitempty"`
// Routes describes how this server will handle requests.
// Routes are executed sequentially. First a route's matchers
// are evaluated, then its grouping. If it matches and has
// not been mutually-excluded by its grouping, then its
// handlers are executed sequentially. The sequence of invoked
// handlers comprises a compiled middleware chain that flows
// from each matching route and its handlers to the next.
Routes RouteList `json:"routes,omitempty"`
// Errors is how this server will handle errors returned from any
// of the handlers in the primary routes. If the primary handler
// chain returns an error, the error along with its recommended
// status code are bubbled back up to the HTTP server which
// executes a separate error route, specified using this property.
// The error routes work exactly like the normal routes.
Errors *HTTPErrorConfig `json:"errors,omitempty"`
// How to handle TLS connections. At least one policy is
// required to enable HTTPS on this server if automatic
// HTTPS is disabled or does not apply.
TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`
// AutoHTTPS configures or disables automatic HTTPS within this server.
// HTTPS is enabled automatically and by default when qualifying names
// are present in a Host matcher and/or when the server is listening
// only on the HTTPS port.
AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`
// If true, will require that a request's Host header match
// the value of the ServerName sent by the client's TLS
// ClientHello; often a necessary safeguard when using TLS
// client authentication.
StrictSNIHost *bool `json:"strict_sni_host,omitempty"`
// Enables access logging and configures how access logs are handled
// in this server. To minimally enable access logs, simply set this
// to a non-null, empty struct.
Logs *ServerLogConfig `json:"logs,omitempty"`
// Enable experimental HTTP/3 support. Note that HTTP/3 is not a
// finished standard and has extremely limited client support.
// This field is not subject to compatibility promises.
ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`
// Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
// which will serve HTTP/2 over plaintext TCP connections if
// a client supports it. Because this is not implemented by the
// Go standard library, using H2C is incompatible with most
// of the other options for this server. Do not enable this
// only to achieve maximum client compatibility. In practice,
// very few clients implement H2C, and even fewer require it.
// This setting applies only to unencrypted HTTP listeners.
// ⚠️ Experimental feature; subject to change or removal.
AllowH2C bool `json:"allow_h2c,omitempty"`
name string
primaryHandlerChain Handler
errorHandlerChain Handler
listenerWrappers []caddy.ListenerWrapper
tlsApp *caddytls.TLS
logger *zap.Logger
accessLogger *zap.Logger
errorLogger *zap.Logger
h3server *http3.Server
}
// ServeHTTP is the entry point for all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "Caddy")
if s.h3server != nil {
err := s.h3server.SetQuicHeaders(w.Header())
if err != nil {
s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
}
}
repl := caddy.NewReplacer()
r = PrepareRequest(r, repl, w, s)
// encode the request for logging purposes before
// it enters any handler chain; this is necessary
// to capture the original request in case it gets
// modified during handling
loggableReq := zap.Object("request", LoggableHTTPRequest{r})
errLog := s.errorLogger.With(loggableReq)
var duration time.Duration
if s.shouldLogRequest(r) {
wrec := NewResponseRecorder(w, nil, nil)
w = wrec
// capture the original version of the request
accLog := s.accessLogger.With(loggableReq)
defer func() {
repl.Set("http.response.status", wrec.Status())
repl.Set("http.response.size", wrec.Size())
repl.Set("http.response.duration", duration)
logger := accLog
if s.Logs != nil {
logger = s.Logs.wrapLogger(logger, r.Host)
}
log := logger.Info
if wrec.Status() >= 400 {
log = logger.Error
}
log("handled request",
zap.String("common_log", repl.ReplaceAll(commonLogFormat, commonLogEmptyValue)),
zap.Duration("duration", duration),
zap.Int("size", wrec.Size()),
zap.Int("status", wrec.Status()),
zap.Object("resp_headers", LoggableHTTPHeader(wrec.Header())),
)
}()
}
start := time.Now()
// guarantee ACME HTTP challenges; handle them
// separately from any user-defined handlers
if s.tlsApp.HandleHTTPChallenge(w, r) {
duration = time.Since(start)
return
}
// execute the primary handler chain
err := s.primaryHandlerChain.ServeHTTP(w, r)
duration = time.Since(start)
// if no errors, we're done!
if err == nil {
return
}
// prepare the error log
logger := errLog
if s.Logs != nil {
logger = s.Logs.wrapLogger(logger, r.Host)
}
logger = logger.With(zap.Duration("duration", duration))
// get the values that will be used to log the error
errStatus, errMsg, errFields := errLogValues(err)
// add HTTP error information to request context
r = s.Errors.WithError(r, err)
if s.Errors != nil && len(s.Errors.Routes) > 0 {
// execute user-defined error handling route
err2 := s.errorHandlerChain.ServeHTTP(w, r)
if err2 == nil {
// user's error route handled the error response
// successfully, so now just log the error
if errStatus >= 500 {
logger.Error(errMsg, errFields...)
}
} else {
// well... this is awkward
errFields = append([]zapcore.Field{
zap.String("error", err2.Error()),
zap.Namespace("first_error"),
zap.String("msg", errMsg),
}, errFields...)
logger.Error("error handling handler error", errFields...)
}
} else {
if errStatus >= 500 {
logger.Error(errMsg, errFields...)
}
w.WriteHeader(errStatus)
}
}
// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
return s.enforcementHandler(w, r, stack)
})
}
// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
// enforce strict host matching, which ensures that the SNI
// value (if any), matches the Host header; essential for
// servers that rely on TLS ClientAuth sharing a listener
// with servers that do not; if not enforced, client could
// bypass by sending benign SNI then restricted Host header
if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
hostname, _, err := net.SplitHostPort(r.Host)
if err != nil {
hostname = r.Host // OK; probably lacked port
}
if !strings.EqualFold(r.TLS.ServerName, hostname) {
err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
r.TLS.ServerName, hostname)
r.Close = true
return Error(http.StatusForbidden, err)
}
}
return next.ServeHTTP(w, r)
}
// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
for _, lnAddr := range s.Listen {
laddrs, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
continue
}
if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
return true
}
}
return false
}
// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported)
func (s *Server) hasListenerAddress(fullAddr string) bool {
laddrs, err := caddy.ParseNetworkAddress(fullAddr)
if err != nil {
return false
}
if laddrs.PortRangeSize() != 1 {
return false // TODO: support port ranges
}
for _, lnAddr := range s.Listen {
thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
continue
}
if thisAddrs.Network != laddrs.Network {
continue
}
// Apparently, Linux requires all bound ports to be distinct
// *regardless of host interface* even if the addresses are
// in fact different; binding "192.168.0.1:9000" and then
// ":9000" will fail for ":9000" because "address is already
// in use" even though it's not, and the same bindings work
// fine on macOS. I also found on Linux that listening on
// "[::]:9000" would fail with a similar error, except with
// the address "0.0.0.0:9000", as if deliberately ignoring
// that I specified the IPv6 interface explicitly. This seems
// to be a major bug in the Linux network stack and I don't
// know why it hasn't been fixed yet, so for now we have to
// special-case ourselves around Linux like a doting parent.
// The second issue seems very similar to a discussion here:
// https://github.com/nodejs/node/issues/9390
//
// This is very easy to reproduce by creating an HTTP server
// that listens to both addresses or just one with a host
// interface; or for a more confusing reproduction, try
// listening on "127.0.0.1:80" and ":443" and you'll see
// the error, if you take away the GOOS condition below.
//
// So, an address is equivalent if the port is in the port
// range, and if not on Linux, the host is the same... sigh.
if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
(laddrs.StartPort <= thisAddrs.EndPort) &&
(laddrs.StartPort >= thisAddrs.StartPort) {
return true
}
}
return false
}
func (s *Server) hasTLSClientAuth() bool {
for _, cp := range s.TLSConnPolicies {
if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() {
return true
}
}
return false
}
// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers.
type HTTPErrorConfig struct {
// The routes to evaluate after the primary handler
// chain returns an error. In an error route, extra
// placeholders are available:
//
// Placeholder | Description
// ------------|---------------
// `{http.error.status_code}` | The recommended HTTP status code
// `{http.error.status_text}` | The status text associated with the recommended status code
// `{http.error.message}` | The error message
// `{http.error.trace}` | The origin of the error
// `{http.error.id}` | An identifier for this occurrence of the error
Routes RouteList `json:"routes,omitempty"`
}
// WithError makes a shallow copy of r to add the error to its
// context, and sets placeholders on the request's replacer
// related to err. It returns the modified request which has
// the error information in its context and replacer. It
// overwrites any existing error values that are stored.
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
// add the raw error value to the request context
// so it can be accessed by error handlers
c := context.WithValue(r.Context(), ErrorCtxKey, err)
r = r.WithContext(c)
// add error values to the replacer
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
repl.Set("http.error", err)
if handlerErr, ok := err.(HandlerError); ok {
repl.Set("http.error.status_code", handlerErr.StatusCode)
repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
repl.Set("http.error.trace", handlerErr.Trace)
repl.Set("http.error.id", handlerErr.ID)
}
return r
}
// shouldLogRequest returns true if this request should be logged.
func (s *Server) shouldLogRequest(r *http.Request) bool {
if s.accessLogger == nil || s.Logs == nil {
// logging is disabled
return false
}
for _, dh := range s.Logs.SkipHosts {
// logging for this particular host is disabled
if r.Host == dh {
return false
}
}
if _, ok := s.Logs.LoggerNames[r.Host]; ok {
// this host is mapped to a particular logger name
return true
}
if s.Logs.SkipUnmappedHosts {
// this host is not mapped and thus must not be logged
return false
}
return true
}
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
// The default logger name for all logs emitted by this server for
// hostnames that are not in the LoggerNames (logger_names) map.
DefaultLoggerName string `json:"default_logger_name,omitempty"`
// LoggerNames maps request hostnames to a custom logger name.
// For example, a mapping of "example.com" to "example" would
// cause access logs from requests with a Host of example.com
// to be emitted by a logger named "http.log.access.example".
LoggerNames map[string]string `json:"logger_names,omitempty"`
// By default, all requests to this server will be logged if
// access logging is enabled. This field lists the request
// hosts for which access logging should be disabled.
SkipHosts []string `json:"skip_hosts,omitempty"`
// If true, requests to any host not appearing in the
// LoggerNames (logger_names) map will not be logged.
SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`
}
// wrapLogger wraps logger in a logger named according to user preferences for the given host.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger {
if loggerName := slc.getLoggerName(host); loggerName != "" {
return logger.Named(loggerName)
}
return logger
}
func (slc ServerLogConfig) getLoggerName(host string) string {
tryHost := func(key string) (string, bool) {
// first try exact match
if loggerName, ok := slc.LoggerNames[key]; ok {
return loggerName, ok
}
// strip port and try again (i.e. Host header of "example.com:1234" should
// match "example.com" if there is no "example.com:1234" in the map)
hostOnly, _, err := net.SplitHostPort(key)
if err != nil {
return "", false
}
loggerName, ok := slc.LoggerNames[hostOnly]
return loggerName, ok
}
// try the exact hostname first
if loggerName, ok := tryHost(host); ok {
return loggerName
}
// try matching wildcard domains if other non-specific loggers exist
labels := strings.Split(host, ".")
for i := range labels {
if labels[i] == "" {
continue
}
labels[i] = "*"
wildcardHost := strings.Join(labels, ".")
if loggerName, ok := tryHost(wildcardHost); ok {
return loggerName
}
}
return slc.DefaultLoggerName
}
// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
// be nil, but the handlers will lose response placeholders and access to the server.
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
// set up the context for the request
ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
ctx = context.WithValue(ctx, ServerCtxKey, s)
ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
var url2 url.URL // avoid letting this escape to the heap
ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
r = r.WithContext(ctx)
// once the pointer to the request won't change
// anymore, finish setting up the replacer
addHTTPVarsToReplacer(repl, r, w)
return r
}
// errLogValues inspects err and returns the status code
// to use, the error log message, and any extra fields.
// If err is a HandlerError, the returned values will
// have richer information.
func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
if handlerErr, ok := err.(HandlerError); ok {
status = handlerErr.StatusCode
if handlerErr.Err == nil {
msg = err.Error()
} else {
msg = handlerErr.Err.Error()
}
fields = []zapcore.Field{
zap.Int("status", handlerErr.StatusCode),
zap.String("err_id", handlerErr.ID),
zap.String("err_trace", handlerErr.Trace),
}
return
}
status = http.StatusInternalServerError
msg = err.Error()
return
}
// originalRequest returns a partial, shallow copy of
// req, including: req.Method, deep copy of req.URL
// (into the urlCopy parameter, which should be on the
// stack), req.RequestURI, and req.RemoteAddr. Notably,
// headers are not copied. This function is designed to
// be very fast and efficient, and useful primarily for
// read-only/logging purposes.
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
cloneURL(req.URL, urlCopy)
return http.Request{
Method: req.Method,
RemoteAddr: req.RemoteAddr,
RequestURI: req.RequestURI,
URL: urlCopy,
}
}
// cloneURL makes a copy of r.URL and returns a
// new value that doesn't reference the original.
func cloneURL(from, to *url.URL) {
*to = *from
if from.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *from.User
to.User = userInfo
}
}
const (
// commonLogFormat is the common log format. https://en.wikipedia.org/wiki/Common_Log_Format
commonLogFormat = `{http.request.remote.host} ` + commonLogEmptyValue + ` {http.auth.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}`
// commonLogEmptyValue is the common empty log value.
commonLogEmptyValue = "-"
)
// Context keys for HTTP request context values.
const (
// For referencing the server instance
ServerCtxKey caddy.CtxKey = "server"
// For the request's variable table
VarsCtxKey caddy.CtxKey = "vars"
// For a partial copy of the unmodified request that
// originally came into the server's entry handler
OriginalRequestCtxKey caddy.CtxKey = "original_request"
)
| 1 | 15,712 | What about the request headers if someone uses `request_header`? :thinking: | caddyserver-caddy | go |
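On the review question above: the patch restores the method, remote address, request URI, and URL, but not the headers, which handlers (e.g. a request_header directive) can mutate. A minimal sketch of the extra step that restoring headers would require, assuming header restoration is wanted at all; http.Header is a map, so a deep copy is needed (Go 1.13+ offers the equivalent Header.Clone).

package main

import (
	"fmt"
	"net/http"
)

// cloneHeader deep-copies an http.Header so later mutations of the live
// request cannot alias into the saved copy.
func cloneHeader(h http.Header) http.Header {
	out := make(http.Header, len(h))
	for k, vs := range h {
		out[k] = append([]string(nil), vs...)
	}
	return out
}

func main() {
	live := http.Header{"X-Demo": {"original"}}
	saved := cloneHeader(live)
	live.Set("X-Demo", "mutated-by-handler") // simulate a request_header rewrite
	fmt.Println(saved.Get("X-Demo"))         // still prints "original"
}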
@@ -54,7 +54,12 @@ func (r *ReconcileHiveConfig) deployExternalDNS(hLog log.FieldLogger, h *resourc
deployment := resourceread.ReadDeploymentV1OrDie(asset)
// Make AWS-specific changes to external-dns deployment
- deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--provider=aws")
+ args := deployment.Spec.Template.Spec.Containers[0].Args
+ args = append(args, "--provider=aws")
+ for _, domain := range instance.Spec.ManagedDomains {
+ args = append(args, fmt.Sprintf("--domain-filter=%s", domain))
+ }
+ deployment.Spec.Template.Spec.Containers[0].Args = args
if len(instance.Spec.ExternalDNS.Image) > 0 {
deployment.Spec.Template.Spec.Containers[0].Image = instance.Spec.ExternalDNS.Image | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hive
import (
"fmt"
log "github.com/sirupsen/logrus"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
"github.com/openshift/hive/pkg/operator/assets"
"github.com/openshift/hive/pkg/operator/util"
"github.com/openshift/hive/pkg/resource"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/kubernetes/scheme"
)
func (r *ReconcileHiveConfig) deployExternalDNS(hLog log.FieldLogger, h *resource.Helper, instance *hivev1.HiveConfig, recorder events.Recorder) error {
if instance.Spec.ExternalDNS == nil {
// TODO: Add code to remove if already deployed
hLog.Debug("external DNS is not configured in HiveConfig, it will not be deployed")
return nil
}
// For now, we only support AWS
if instance.Spec.ExternalDNS.AWS == nil {
return fmt.Errorf("only AWS supported, AWS-specific external DNS configuration must be specified")
}
if len(instance.Spec.ExternalDNS.AWS.Credentials.Name) == 0 {
return fmt.Errorf("a secret reference must be specified for AWS credentials")
}
asset := assets.MustAsset("config/external-dns/deployment.yaml")
hLog.Debug("reading external-dns deployment")
deployment := resourceread.ReadDeploymentV1OrDie(asset)
// Make AWS-specific changes to external-dns deployment
deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--provider=aws")
if len(instance.Spec.ExternalDNS.Image) > 0 {
deployment.Spec.Template.Spec.Containers[0].Image = instance.Spec.ExternalDNS.Image
}
authEnvVars := []corev1.EnvVar{
{
Name: "AWS_ACCESS_KEY_ID",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: instance.Spec.ExternalDNS.AWS.Credentials,
Key: "aws_access_key_id",
},
},
},
{
Name: "AWS_SECRET_ACCESS_KEY",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: instance.Spec.ExternalDNS.AWS.Credentials,
Key: "aws_secret_access_key",
},
},
},
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, authEnvVars...)
// Apply service account
err := util.ApplyAsset(h, "config/external-dns/service_account.yaml", hLog)
if err != nil {
hLog.WithError(err).Error("cannot apply asset external-dns service account")
return err
}
// Apply deployment
s := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme.Scheme, scheme.Scheme)
err = h.ApplyRuntimeObject(deployment, s)
if err != nil {
hLog.WithError(err).Error("error applying external-dns deployment")
return err
}
hLog.Info("external-dns deployment applied")
return nil
}
| 1 | 5,792 | Verify that external-dns allows setting this parameter multiple times (that it's not "last one wins"). | openshift-hive | go |
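On the review question above: whether external-dns accumulates repeated --domain-filter flags depends on its own flag parsing and should be confirmed against the external-dns documentation. The sketch below only demonstrates the repeatable-flag pattern being asked about (append semantics rather than last-one-wins), using Go's standard library.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// stringSlice is a flag.Value that appends each occurrence of the flag,
// so repeating --domain-filter accumulates values instead of overwriting.
type stringSlice []string

func (s *stringSlice) String() string     { return strings.Join(*s, ",") }
func (s *stringSlice) Set(v string) error { *s = append(*s, v); return nil }

func main() {
	var domains stringSlice
	fs := flag.NewFlagSet("external-dns-demo", flag.ExitOnError)
	fs.Var(&domains, "domain-filter", "limit to these domains (repeatable)")
	fs.Parse([]string{"--domain-filter=foo.example.com", "--domain-filter=bar.example.com"})
	fmt.Println(domains) // [foo.example.com bar.example.com]
}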
@@ -80,6 +80,7 @@ class Docker(base.Base):
- nofile:262144:262144
dns_servers:
- 8.8.8.8
+ etc_hosts: "{'host1.example.com': '10.3.1.5'}"
networks:
- name: foo
- name: bar | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import os
from molecule import logger
from molecule.driver import base
from molecule.util import sysexit_with_message
log = logger.get_logger(__name__)
class Docker(base.Base):
"""
The class responsible for managing `Docker`_ containers. `Docker`_ is
the default driver used in Molecule.
Molecule leverages Ansible's `docker_container`_ module, by mapping
variables from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.
.. _`docker_container`: https://docs.ansible.com/ansible/latest/docker_container_module.html
.. _`Docker Security Configuration`: https://docs.docker.com/engine/reference/run/#security-configuration
.. code-block:: yaml
driver:
name: docker
platforms:
- name: instance
hostname: instance
image: image_name:tag
dockerfile: Dockerfile.j2
pull: True|False
pre_build_image: True|False
registry:
url: registry.example.com
credentials:
username: $USERNAME
password: $PASSWORD
email: [email protected]
override_command: True|False
command: sleep infinity
pid_mode: host
privileged: True|False
security_opts:
- seccomp=unconfined
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
tmpfs:
- /tmp
- /run
capabilities:
- SYS_ADMIN
exposed_ports:
- 53/udp
- 53/tcp
published_ports:
- 0.0.0.0:8053:53/udp
- 0.0.0.0:8053:53/tcp
ulimits:
- nofile:262144:262144
dns_servers:
- 8.8.8.8
networks:
- name: foo
- name: bar
network_mode: host
purge_networks: true
docker_host: tcp://localhost:12376
env:
FOO: bar
restart_policy: on-failure
restart_retries: 1
buildargs:
http_proxy: http://proxy.example.com:8080/
If specifying the `CMD`_ directive in your ``Dockerfile.j2`` or consuming a
built image which declares a ``CMD`` directive, then you must set
``override_command: False``. Otherwise, Molecule takes care to honour the
value of the ``command`` key or uses the default of ``bash -c "while true;
do sleep 10000; done"`` to run the container until it is provisioned.
When attempting to utilize a container image with `systemd`_ as your init
system inside the container to simulate a real machine, make sure to set
the ``privileged``, ``volume_mounts``, ``command``, and ``environment``
values. An example using the ``centos:7`` image is below:
.. note:: Do note that running containers in privileged mode is considerably
less secure. For details, please reference `Docker Security
Configuration`_
.. code-block:: yaml
platforms:
- name: instance
image: centos:7
privileged: true
volume_mounts:
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
command: "/usr/sbin/init"
environment:
container: docker
.. code-block:: bash
$ pip install molecule[docker]
When pulling from a private registry, the username and password must be
exported as environment variables in the current shell. The only supported
variables are $USERNAME and $PASSWORD.
.. code-block:: bash
$ export USERNAME=foo
$ export PASSWORD=bar
Provide a list of files Molecule will preserve, relative to the scenario
ephemeral directory, after any ``destroy`` subcommand execution.
.. code-block:: yaml
driver:
name: docker
safe_files:
- foo
.. _`Docker`: https://www.docker.com
.. _`systemd`: https://www.freedesktop.org/wiki/Software/systemd/
.. _`CMD`: https://docs.docker.com/engine/reference/builder/#cmd
""" # noqa
def __init__(self, config):
super(Docker, self).__init__(config)
self._name = 'docker'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
return ('docker exec '
'-e COLUMNS={columns} '
'-e LINES={lines} '
'-e TERM=bash '
'-e TERM=xterm '
'-ti {instance} bash')
@property
def default_safe_files(self):
return [
os.path.join(self._config.scenario.ephemeral_directory,
'Dockerfile')
]
@property
def default_ssh_connection_options(self):
return []
def login_options(self, instance_name):
return {'instance': instance_name}
def ansible_connection_options(self, instance_name):
return {'ansible_connection': 'docker'}
def sanity_checks(self):
"""Implement Docker driver sanity checks."""
if self._config.state.sanity_checked:
return
log.info("Sanity checks: '{}'".format(self._name))
HAS_DOCKER_PY = None
try:
from ansible.module_utils.docker_common import HAS_DOCKER_PY
except ImportError:
# ansible 2.8+
try:
from ansible.module_utils.docker.common import HAS_DOCKER_PY
except ImportError:
pass
if not HAS_DOCKER_PY:
msg = ('Missing Docker driver dependency. Please '
"install via 'molecule[docker]' or refer to "
'your INSTALL.rst driver documentation file')
sysexit_with_message(msg)
try:
import docker
import requests
docker_client = docker.from_env()
docker_client.ping()
except requests.exceptions.ConnectionError:
msg = ('Unable to contact the Docker daemon. '
'Please refer to https://docs.docker.com/config/daemon/ '
'for managing the daemon')
sysexit_with_message(msg)
self._config.state.change_state('sanity_checked', True)
 | 1 | 9,380 | Maybe two host/IP pairs could be used in the example? | ansible-community-molecule | py
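Following that suggestion, a sketch of the docstring example with two entries (hostnames and addresses are placeholders), keeping the quoted-dict form used by the patch:

platforms:
  - name: instance
    etc_hosts: "{'host1.example.com': '10.3.1.5', 'host2.example.com': '10.3.1.6'}"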
@@ -139,7 +139,9 @@ namespace Datadog.Trace.Tests.Sampling
var limiter = new RateLimiter(maxTracesPerInterval: intervalLimit);
- var traceContext = new TraceContext(Tracer.Instance);
+ var tracerInstance = new Tracer();
+
+ var traceContext = new TraceContext(tracerInstance);
var barrier = new Barrier(parallelism + 1);
| 1 | using System;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Sampling;
using Xunit;
namespace Datadog.Trace.Tests.Sampling
{
[Collection(nameof(Datadog.Trace.Tests.Sampling))]
public class RateLimiterTests
{
private const int DefaultLimitPerSecond = 100;
private static readonly ThreadLocal<Random> Random = new ThreadLocal<Random>(() => new Random());
[Fact]
public void One_Is_Allowed()
{
var traceContext = new TraceContext(Tracer.Instance);
var spanContext = new SpanContext(null, traceContext, "Weeeee");
var span = new Span(spanContext, null);
var rateLimiter = new RateLimiter(maxTracesPerInterval: null);
var allowed = rateLimiter.Allowed(span);
Assert.True(allowed);
}
[Fact]
public void All_Traces_Disabled()
{
var rateLimiter = new RateLimiter(maxTracesPerInterval: 0);
var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500);
Assert.Equal(expected: 0, actual: allowedCount);
}
[Fact]
public void All_Traces_Allowed()
{
var rateLimiter = new RateLimiter(maxTracesPerInterval: -1);
var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500);
Assert.Equal(expected: 500, actual: allowedCount);
}
[Fact]
public void Only_100_Allowed_In_500_Burst_For_Default()
{
var rateLimiter = new RateLimiter(maxTracesPerInterval: null);
var allowedCount = AskTheRateLimiterABunchOfTimes(rateLimiter, 500);
Assert.Equal(expected: DefaultLimitPerSecond, actual: allowedCount);
}
[Fact]
public void Limits_Approximately_To_Defaults()
{
Run_Limit_Test(intervalLimit: null, numberPerBurst: 100, numberOfBursts: 18, millisecondsBetweenBursts: 247);
}
[Fact]
public void Limits_To_Custom_Amount_Per_Second()
{
Run_Limit_Test(intervalLimit: 500, numberPerBurst: 200, numberOfBursts: 18, millisecondsBetweenBursts: 247);
}
private static void Run_Limit_Test(int? intervalLimit, int numberPerBurst, int numberOfBursts, int millisecondsBetweenBursts)
{
var actualIntervalLimit = intervalLimit ?? DefaultLimitPerSecond;
var test = new RateLimitLoadTest()
{
NumberPerBurst = numberPerBurst,
TimeBetweenBursts = TimeSpan.FromMilliseconds(millisecondsBetweenBursts),
NumberOfBursts = numberOfBursts
};
var result = RunTest(intervalLimit, test);
var totalMilliseconds = result.TimeElapsed.TotalMilliseconds;
var expectedLimit = totalMilliseconds * actualIntervalLimit / 1_000;
var acceptableUpperVariance = (actualIntervalLimit * 1.0);
var acceptableLowerVariance = (actualIntervalLimit * 1.15); // Allow for increased tolerance on lower limit since the rolling window does not get dequeued as quickly as it can be queued
var upperLimit = expectedLimit + acceptableUpperVariance;
var lowerLimit = expectedLimit - acceptableLowerVariance;
Assert.True(
result.TotalAllowed >= lowerLimit && result.TotalAllowed <= upperLimit,
$"Expected between {lowerLimit} and {upperLimit}, received {result.TotalAllowed} out of {result.TotalAttempted} within {totalMilliseconds} milliseconds.");
// Rate should match for the last two intervals, which is a total of two seconds
var numberOfBurstsWithinTwoIntervals = 2_000 / millisecondsBetweenBursts;
var totalExpectedSent = numberOfBurstsWithinTwoIntervals * numberPerBurst;
var totalExpectedAllowed = 2 * actualIntervalLimit;
var expectedRate = totalExpectedAllowed / (float)totalExpectedSent;
var lowestRate = expectedRate - 0.40f;
if (lowestRate < 0)
{
lowestRate = expectedRate / 2;
}
var highestRate = expectedRate + 0.40f;
Assert.True(
result.ReportedRate >= lowestRate && result.ReportedRate <= highestRate,
$"Expected rate between {lowestRate} and {highestRate}, received {result.ReportedRate}.");
}
private static int AskTheRateLimiterABunchOfTimes(RateLimiter rateLimiter, int howManyTimes)
{
var traceContext = new TraceContext(Tracer.Instance);
var spanContext = new SpanContext(null, traceContext, "Weeeee");
var span = new Span(spanContext, null);
var remaining = howManyTimes;
var allowedCount = 0;
while (remaining-- > 0)
{
var allowed = rateLimiter.Allowed(span);
if (allowed)
{
allowedCount++;
}
}
return allowedCount;
}
private static RateLimitResult RunTest(int? intervalLimit, RateLimitLoadTest test)
{
var parallelism = test.NumberPerBurst;
if (parallelism > 10)
{
parallelism = 10;
}
var result = new RateLimitResult();
var limiter = new RateLimiter(maxTracesPerInterval: intervalLimit);
var traceContext = new TraceContext(Tracer.Instance);
var barrier = new Barrier(parallelism + 1);
var numberPerThread = test.NumberPerBurst / parallelism;
var workers = new Task[parallelism];
for (int i = 0; i < workers.Length; i++)
{
workers[i] = Task.Factory.StartNew(
() =>
{
var stopwatch = new Stopwatch();
for (var burst = 0; burst < test.NumberOfBursts; burst++)
{
// Wait for every worker to be ready for next burst
barrier.SignalAndWait();
stopwatch.Restart();
for (int j = 0; j < numberPerThread; j++)
{
var spanContext = new SpanContext(null, traceContext, "Weeeee");
var span = new Span(spanContext, null);
if (limiter.Allowed(span))
{
result.Allowed.Add(span.SpanId);
}
else
{
result.Denied.Add(span.SpanId);
}
}
var remainingTime = (test.TimeBetweenBursts - stopwatch.Elapsed).TotalMilliseconds;
if (remainingTime > 0)
{
Thread.Sleep((int)remainingTime);
}
}
},
TaskCreationOptions.LongRunning);
}
// Wait for all workers to be ready
barrier.SignalAndWait();
var sw = Stopwatch.StartNew();
// We do not need to synchronize with workers anymore
barrier.RemoveParticipant();
// Wait for workers to finish
Task.WaitAll(workers);
result.TimeElapsed = sw.Elapsed;
result.RateLimiter = limiter;
result.ReportedRate = limiter.GetEffectiveRate();
return result;
}
private class RateLimitLoadTest
{
public int NumberPerBurst { get; set; }
public TimeSpan TimeBetweenBursts { get; set; }
public int NumberOfBursts { get; set; }
}
private class RateLimitResult
{
public RateLimiter RateLimiter { get; set; }
public TimeSpan TimeElapsed { get; set; }
public ConcurrentBag<ulong> Allowed { get; } = new ConcurrentBag<ulong>();
public ConcurrentBag<ulong> Denied { get; } = new ConcurrentBag<ulong>();
public float ReportedRate { get; set; }
public int TotalAttempted => Allowed.Count + Denied.Count;
public int TotalAllowed => Allowed.Count;
}
}
}
| 1 | 18,299 | We're only testing `RateLimiter.Allowed(Span)`, so I think we can use a mock `ITraceContext` instead of a real `Tracer` or `TraceContext`. | DataDog-dd-trace-dotnet | .cs |
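A sketch of the suggestion above, assuming ITraceContext is the interface behind TraceContext, that SpanContext accepts it, and that the test assembly references Moq and can see these internal types:

using Moq;

// Hypothetical test helper: build a Span against a mocked ITraceContext so
// the rate limiter can be exercised without constructing a real Tracer.
private static Span CreateSpan()
{
    var traceContext = new Mock<ITraceContext>();
    var spanContext = new SpanContext(null, traceContext.Object, "Weeeee");
    return new Span(spanContext, null);
}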
@@ -89,7 +89,8 @@ int main(int argc, char *argv[]) {
lbann_data::Model *pb_model = pb.mutable_model();
auto model = build_model_from_prototext(argc, argv, pb_trainer, pb,
- comm.get(), opts, io_thread_pool, true);
+ comm.get(), opts, trainer->get_name(), io_thread_pool,
+ trainer->get_callbacks(), true);
if (opts->has_string("create_tarball")) {
return EXIT_SUCCESS; | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// lbann_proto.cpp - prototext application
////////////////////////////////////////////////////////////////////////////////
#include "lbann/lbann.hpp"
#include "lbann/proto/proto_common.hpp"
#include "lbann/utils/protobuf_utils.hpp"
#include "lbann/data_store/data_store_conduit.hpp"
#include <lbann.pb.h>
#include <model.pb.h>
#include <cstdlib>
using namespace lbann;
int main(int argc, char *argv[]) {
int random_seed = lbann_default_random_seed;
world_comm_ptr comm = initialize(argc, argv, random_seed);
const bool master = comm->am_world_master();
if (master) {
std::cout << "\n\n==============================================================\n"
<< "STARTING lbann with this command line:\n";
for (int j=0; j<argc; j++) {
std::cout << argv[j] << " ";
}
std::cout << std::endl << std::endl;
}
try {
// Initialize options db (this parses the command line)
options *opts = options::get();
opts->init(argc, argv);
if (opts->has_string("h") or opts->has_string("help") or argc == 1) {
print_help(*comm);
return EXIT_SUCCESS;
}
//this must be called after the call to opts->init();
if (!opts->get_bool("disable_signal_handler")) {
std::string file_base = (opts->get_bool("stack_trace_to_file") ?
"stack_trace" : "");
stack_trace::register_signal_handler(file_base);
}
//to activate, must specify --st_on on cmd line
stack_profiler::get()->activate(comm->get_rank_in_world());
// Load the prototexts specificed on the command line
auto pbs = protobuf_utils::load_prototext(master, argc, argv);
// Optionally over-ride some values in the prototext for each model
for(size_t i = 0; i < pbs.size(); i++) {
get_cmdline_overrides(*comm, *(pbs[i]));
}
lbann_data::LbannPB& pb = *(pbs[0]);
lbann_data::Trainer *pb_trainer = pb.mutable_trainer();
// Construct the trainer
std::unique_ptr<trainer> trainer = construct_trainer(comm.get(), pb_trainer, opts);
thread_pool& io_thread_pool = trainer->get_io_thread_pool();
lbann_data::Model *pb_model = pb.mutable_model();
auto model = build_model_from_prototext(argc, argv, pb_trainer, pb,
comm.get(), opts, io_thread_pool, true);
if (opts->has_string("create_tarball")) {
return EXIT_SUCCESS;
}
if (! opts->get_bool("exit_after_setup")) {
// Train model
trainer->train(model.get(), pb_model->num_epochs());
// Evaluate model on test set
trainer->evaluate(model.get(), execution_mode::testing);
//has no effect unless the option --st_on was given
stack_profiler::get()->print();
} else {
if (comm->am_world_master()) {
std::cout <<
"--------------------------------------------------------------------------------\n"
"ALERT: model has been setup; we are now exiting due to command\n"
" line option: --exit_after_setup\n"
"--------------------------------------------------------------------------------\n";
}
//has no effect unless the option --st_on was given
stack_profiler::get()->print();
}
} catch (exception& e) {
if (options::get()->get_bool("stack_trace_to_file")) {
std::ostringstream ss("stack_trace");
const auto& rank = get_rank_in_world();
if (rank >= 0) {
ss << "_rank" << rank;
}
ss << ".txt";
std::ofstream fs(ss.str());
e.print_report(fs);
}
El::ReportException(e);
return EXIT_FAILURE;
} catch (std::exception& e) {
El::ReportException(e);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| 1 | 15,193 | IMO, this is the wrong way to associate a model with a trainer... Why the string when you actually have a trainer object? | LLNL-lbann | cpp |
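A toy illustration of the reviewer's point (the names below are hypothetical, not LBANN's API): associating a model with a trainer via a copied name string can go stale, while holding the trainer itself cannot.

#include <iostream>
#include <string>

struct Trainer {
  std::string name;
};

struct Model {
  const Trainer* trainer = nullptr; // reference-style association
  std::string trainer_name;         // string-style association (the reviewer's concern)
};

int main() {
  Trainer t{"trainer0"};
  Model m;
  m.trainer = &t;
  m.trainer_name = t.name;
  t.name = "renamed"; // simulate a rename after the association is made
  std::cout << m.trainer->name << " vs " << m.trainer_name << '\n';
  // prints "renamed vs trainer0" -- the copied string is now stale
  return 0;
}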
@@ -217,7 +217,7 @@ void ledger_processor::state_block (rai::state_block const & block_a)
void ledger_processor::state_block_impl (rai::state_block const & block_a)
{
auto hash (block_a.hash ());
- auto existing (ledger.store.block_exists (transaction, hash));
+ auto existing (ledger.store.block_exists (transaction, block_a.type (), hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block before? (Unambiguous)
if (result.code == rai::process_result::progress)
{ | 1 | #include <rai/node/common.hpp>
#include <rai/node/stats.hpp>
#include <rai/secure/blockstore.hpp>
#include <rai/secure/ledger.hpp>
namespace
{
/**
* Roll back the visited block
*/
class rollback_visitor : public rai::block_visitor
{
public:
rollback_visitor (rai::transaction const & transaction_a, rai::ledger & ledger_a) :
transaction (transaction_a),
ledger (ledger_a)
{
}
virtual ~rollback_visitor () = default;
void send_block (rai::send_block const & block_a) override
{
auto hash (block_a.hash ());
rai::pending_info pending;
rai::pending_key key (block_a.hashables.destination, hash);
while (ledger.store.pending_get (transaction, key, pending))
{
ledger.rollback (transaction, ledger.latest (transaction, block_a.hashables.destination));
}
rai::account_info info;
auto error (ledger.store.account_get (transaction, pending.source, info));
assert (!error);
ledger.store.pending_del (transaction, key);
ledger.store.representation_add (transaction, ledger.representative (transaction, hash), pending.amount.number ());
ledger.change_latest (transaction, pending.source, block_a.hashables.previous, info.rep_block, ledger.balance (transaction, block_a.hashables.previous), info.block_count - 1);
ledger.store.block_del (transaction, hash);
ledger.store.frontier_del (transaction, hash);
ledger.store.frontier_put (transaction, block_a.hashables.previous, pending.source);
ledger.store.block_successor_clear (transaction, block_a.hashables.previous);
if (!(info.block_count % ledger.store.block_info_max))
{
ledger.store.block_info_del (transaction, hash);
}
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::send);
}
void receive_block (rai::receive_block const & block_a) override
{
auto hash (block_a.hash ());
auto representative (ledger.representative (transaction, block_a.hashables.previous));
auto amount (ledger.amount (transaction, block_a.hashables.source));
auto destination_account (ledger.account (transaction, hash));
auto source_account (ledger.account (transaction, block_a.hashables.source));
rai::account_info info;
auto error (ledger.store.account_get (transaction, destination_account, info));
assert (!error);
ledger.store.representation_add (transaction, ledger.representative (transaction, hash), 0 - amount);
ledger.change_latest (transaction, destination_account, block_a.hashables.previous, representative, ledger.balance (transaction, block_a.hashables.previous), info.block_count - 1);
ledger.store.block_del (transaction, hash);
ledger.store.pending_put (transaction, rai::pending_key (destination_account, block_a.hashables.source), { source_account, amount, rai::epoch::epoch_0 });
ledger.store.frontier_del (transaction, hash);
ledger.store.frontier_put (transaction, block_a.hashables.previous, destination_account);
ledger.store.block_successor_clear (transaction, block_a.hashables.previous);
if (!(info.block_count % ledger.store.block_info_max))
{
ledger.store.block_info_del (transaction, hash);
}
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::receive);
}
void open_block (rai::open_block const & block_a) override
{
auto hash (block_a.hash ());
auto amount (ledger.amount (transaction, block_a.hashables.source));
auto destination_account (ledger.account (transaction, hash));
auto source_account (ledger.account (transaction, block_a.hashables.source));
ledger.store.representation_add (transaction, ledger.representative (transaction, hash), 0 - amount);
ledger.change_latest (transaction, destination_account, 0, 0, 0, 0);
ledger.store.block_del (transaction, hash);
ledger.store.pending_put (transaction, rai::pending_key (destination_account, block_a.hashables.source), { source_account, amount, rai::epoch::epoch_0 });
ledger.store.frontier_del (transaction, hash);
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::open);
}
void change_block (rai::change_block const & block_a) override
{
auto hash (block_a.hash ());
auto representative (ledger.representative (transaction, block_a.hashables.previous));
auto account (ledger.account (transaction, block_a.hashables.previous));
rai::account_info info;
auto error (ledger.store.account_get (transaction, account, info));
assert (!error);
auto balance (ledger.balance (transaction, block_a.hashables.previous));
ledger.store.representation_add (transaction, representative, balance);
ledger.store.representation_add (transaction, hash, 0 - balance);
ledger.store.block_del (transaction, hash);
ledger.change_latest (transaction, account, block_a.hashables.previous, representative, info.balance, info.block_count - 1);
ledger.store.frontier_del (transaction, hash);
ledger.store.frontier_put (transaction, block_a.hashables.previous, account);
ledger.store.block_successor_clear (transaction, block_a.hashables.previous);
if (!(info.block_count % ledger.store.block_info_max))
{
ledger.store.block_info_del (transaction, hash);
}
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::change);
}
void state_block (rai::state_block const & block_a) override
{
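// A state block may act as a send, receive, open or change; undo whichever side effects it produced.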
auto hash (block_a.hash ());
rai::block_hash representative (0);
if (!block_a.hashables.previous.is_zero ())
{
representative = ledger.representative (transaction, block_a.hashables.previous);
}
auto balance (ledger.balance (transaction, block_a.hashables.previous));
auto is_send (block_a.hashables.balance < balance);
// Add in amount delta
ledger.store.representation_add (transaction, hash, 0 - block_a.hashables.balance.number ());
if (!representative.is_zero ())
{
// Move existing representation
ledger.store.representation_add (transaction, representative, balance);
}
rai::account_info info;
auto error (ledger.store.account_get (transaction, block_a.hashables.account, info));
if (is_send)
{
rai::pending_key key (block_a.hashables.link, hash);
while (!ledger.store.pending_exists (transaction, key))
{
ledger.rollback (transaction, ledger.latest (transaction, block_a.hashables.link));
}
ledger.store.pending_del (transaction, key);
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::send);
}
else if (!block_a.hashables.link.is_zero () && !ledger.is_epoch_link (block_a.hashables.link))
{
auto source_version (ledger.store.block_version (transaction, block_a.hashables.link));
rai::pending_info pending_info (ledger.account (transaction, block_a.hashables.link), block_a.hashables.balance.number () - balance, source_version);
ledger.store.pending_put (transaction, rai::pending_key (block_a.hashables.account, block_a.hashables.link), pending_info);
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::receive);
}
assert (!error);
auto previous_version (ledger.store.block_version (transaction, block_a.hashables.previous));
ledger.change_latest (transaction, block_a.hashables.account, block_a.hashables.previous, representative, balance, info.block_count - 1, false, previous_version);
auto previous (ledger.store.block_get (transaction, block_a.hashables.previous));
if (previous != nullptr)
{
ledger.store.block_successor_clear (transaction, block_a.hashables.previous);
if (previous->type () < rai::block_type::state)
{
ledger.store.frontier_put (transaction, block_a.hashables.previous, block_a.hashables.account);
}
}
else
{
ledger.stats.inc (rai::stat::type::rollback, rai::stat::detail::open);
}
ledger.store.block_del (transaction, hash);
}
rai::transaction const & transaction;
rai::ledger & ledger;
};
class ledger_processor : public rai::block_visitor
{
public:
ledger_processor (rai::ledger &, rai::transaction const &, bool = false);
virtual ~ledger_processor () = default;
void send_block (rai::send_block const &) override;
void receive_block (rai::receive_block const &) override;
void open_block (rai::open_block const &) override;
void change_block (rai::change_block const &) override;
void state_block (rai::state_block const &) override;
void state_block_impl (rai::state_block const &);
void epoch_block_impl (rai::state_block const &);
rai::ledger & ledger;
rai::transaction const & transaction;
bool valid_signature;
rai::process_return result;
};
void ledger_processor::state_block (rai::state_block const & block_a)
{
result.code = rai::process_result::progress;
auto is_epoch_block (false);
// Check if this is an epoch block
if (!ledger.epoch_link.is_zero () && ledger.is_epoch_link (block_a.hashables.link))
{
rai::amount prev_balance (0);
if (!block_a.hashables.previous.is_zero ())
{
result.code = ledger.store.block_exists (transaction, block_a.hashables.previous) ? rai::process_result::progress : rai::process_result::gap_previous;
if (result.code == rai::process_result::progress)
{
prev_balance = ledger.balance (transaction, block_a.hashables.previous);
}
}
if (block_a.hashables.balance == prev_balance)
{
is_epoch_block = true;
}
}
if (result.code == rai::process_result::progress)
{
if (is_epoch_block)
{
epoch_block_impl (block_a);
}
else
{
state_block_impl (block_a);
}
}
}
void ledger_processor::state_block_impl (rai::state_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block before? (Unambiguous)
if (result.code == rai::process_result::progress)
{
// Revalidate blocks with epoch links
if (!valid_signature || ledger.is_epoch_link (block_a.hashables.link))
{
result.code = validate_message (block_a.hashables.account, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is this block signed correctly (Unambiguous)
}
if (result.code == rai::process_result::progress)
{
result.code = block_a.hashables.account.is_zero () ? rai::process_result::opened_burn_account : rai::process_result::progress; // Is this for the burn account? (Unambiguous)
if (result.code == rai::process_result::progress)
{
rai::epoch epoch (rai::epoch::epoch_0);
rai::account_info info;
result.amount = block_a.hashables.balance;
auto is_send (false);
auto account_error (ledger.store.account_get (transaction, block_a.hashables.account, info));
if (!account_error)
{
epoch = info.epoch;
// Account already exists
					result.code = block_a.hashables.previous.is_zero () ? rai::process_result::fork : rai::process_result::progress; // Has this account already been opened? (Ambiguous)
if (result.code == rai::process_result::progress)
{
						result.code = ledger.store.block_exists (transaction, block_a.hashables.previous) ? rai::process_result::progress : rai::process_result::gap_previous; // Does the previous block exist in the ledger? (Unambiguous)
if (result.code == rai::process_result::progress)
{
is_send = block_a.hashables.balance < info.balance;
result.amount = is_send ? (info.balance.number () - result.amount.number ()) : (result.amount.number () - info.balance.number ());
							result.code = block_a.hashables.previous == info.head ? rai::process_result::progress : rai::process_result::fork; // Is the previous block the account's head block? (Ambiguous)
}
}
}
else
{
					// Account does not yet exist
					result.code = block_a.previous ().is_zero () ? rai::process_result::progress : rai::process_result::gap_previous; // Does the first block in an account yield 0 for previous() ? (Unambiguous)
					if (result.code == rai::process_result::progress)
					{
						result.code = !block_a.hashables.link.is_zero () ? rai::process_result::progress : rai::process_result::gap_source; // Is the first block receiving from a send ? (Unambiguous)
}
}
if (result.code == rai::process_result::progress)
{
if (!is_send)
{
if (!block_a.hashables.link.is_zero ())
{
result.code = ledger.store.block_exists (transaction, block_a.hashables.link) ? rai::process_result::progress : rai::process_result::gap_source; // Have we seen the source block already? (Harmless)
if (result.code == rai::process_result::progress)
{
rai::pending_key key (block_a.hashables.account, block_a.hashables.link);
rai::pending_info pending;
result.code = ledger.store.pending_get (transaction, key, pending) ? rai::process_result::unreceivable : rai::process_result::progress; // Has this source already been received (Malformed)
if (result.code == rai::process_result::progress)
{
result.code = result.amount == pending.amount ? rai::process_result::progress : rai::process_result::balance_mismatch;
epoch = std::max (epoch, pending.epoch);
}
}
}
else
{
// If there's no link, the balance must remain the same, only the representative can change
result.code = result.amount.is_zero () ? rai::process_result::progress : rai::process_result::balance_mismatch;
}
}
}
if (result.code == rai::process_result::progress)
{
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::state_block);
result.state_is_send = is_send;
ledger.store.block_put (transaction, hash, block_a, 0, epoch);
if (!info.rep_block.is_zero ())
{
// Move existing representation
ledger.store.representation_add (transaction, info.rep_block, 0 - info.balance.number ());
}
// Add in amount delta
ledger.store.representation_add (transaction, hash, block_a.hashables.balance.number ());
if (is_send)
{
rai::pending_key key (block_a.hashables.link, hash);
rai::pending_info info (block_a.hashables.account, result.amount.number (), epoch);
ledger.store.pending_put (transaction, key, info);
}
else if (!block_a.hashables.link.is_zero ())
{
ledger.store.pending_del (transaction, rai::pending_key (block_a.hashables.account, block_a.hashables.link));
}
ledger.change_latest (transaction, block_a.hashables.account, hash, hash, block_a.hashables.balance, info.block_count + 1, true, epoch);
if (!ledger.store.frontier_get (transaction, info.head).is_zero ())
{
ledger.store.frontier_del (transaction, info.head);
}
// Frontier table is unnecessary for state blocks and this also prevents old blocks from being inserted on top of state blocks
result.account = block_a.hashables.account;
}
}
}
}
}
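// A worked example of the amount computation above (figures assumed): with
// info.balance == 70 and hashables.balance == 50, is_send is true and
// result.amount becomes 20 (the amount owed to the link destination); with
// hashables.balance == 90 instead, is_send is false and result.amount is
// again 20, but then it must match the pending entry being received.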
void ledger_processor::epoch_block_impl (rai::state_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block before? (Unambiguous)
if (result.code == rai::process_result::progress)
{
result.code = validate_message (ledger.epoch_signer, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is this block signed correctly (Unambiguous)
if (result.code == rai::process_result::progress)
{
result.code = block_a.hashables.account.is_zero () ? rai::process_result::opened_burn_account : rai::process_result::progress; // Is this for the burn account? (Unambiguous)
if (result.code == rai::process_result::progress)
{
rai::account_info info;
auto account_error (ledger.store.account_get (transaction, block_a.hashables.account, info));
if (!account_error)
{
// Account already exists
					result.code = block_a.hashables.previous.is_zero () ? rai::process_result::fork : rai::process_result::progress; // Has this account already been opened? (Ambiguous)
if (result.code == rai::process_result::progress)
{
						result.code = ledger.store.block_exists (transaction, block_a.hashables.previous) ? rai::process_result::progress : rai::process_result::gap_previous; // Does the previous block exist in the ledger? (Unambiguous)
if (result.code == rai::process_result::progress)
{
							result.code = block_a.hashables.previous == info.head ? rai::process_result::progress : rai::process_result::fork; // Is the previous block the account's head block? (Ambiguous)
if (result.code == rai::process_result::progress)
{
auto last_rep_block (ledger.store.block_get (transaction, info.rep_block));
assert (last_rep_block != nullptr);
result.code = block_a.hashables.representative == last_rep_block->representative () ? rai::process_result::progress : rai::process_result::representative_mismatch;
}
}
}
}
else
{
result.code = block_a.hashables.representative.is_zero () ? rai::process_result::progress : rai::process_result::representative_mismatch;
}
if (result.code == rai::process_result::progress)
{
result.code = info.epoch == rai::epoch::epoch_0 ? rai::process_result::progress : rai::process_result::block_position;
if (result.code == rai::process_result::progress)
{
result.code = block_a.hashables.balance == info.balance ? rai::process_result::progress : rai::process_result::balance_mismatch;
if (result.code == rai::process_result::progress)
{
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::epoch_block);
result.account = block_a.hashables.account;
result.amount = 0;
ledger.store.block_put (transaction, hash, block_a, 0, rai::epoch::epoch_1);
ledger.change_latest (transaction, block_a.hashables.account, hash, hash, info.balance, info.block_count + 1, true, rai::epoch::epoch_1);
if (!ledger.store.frontier_get (transaction, info.head).is_zero ())
{
ledger.store.frontier_del (transaction, info.head);
}
}
}
}
}
}
}
}
void ledger_processor::change_block (rai::change_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block before? (Harmless)
if (result.code == rai::process_result::progress)
{
auto previous (ledger.store.block_get (transaction, block_a.hashables.previous));
result.code = previous != nullptr ? rai::process_result::progress : rai::process_result::gap_previous; // Have we seen the previous block already? (Harmless)
if (result.code == rai::process_result::progress)
{
result.code = block_a.valid_predecessor (*previous) ? rai::process_result::progress : rai::process_result::block_position;
if (result.code == rai::process_result::progress)
{
auto account (ledger.store.frontier_get (transaction, block_a.hashables.previous));
result.code = account.is_zero () ? rai::process_result::fork : rai::process_result::progress;
if (result.code == rai::process_result::progress)
{
rai::account_info info;
auto latest_error (ledger.store.account_get (transaction, account, info));
assert (!latest_error);
assert (info.head == block_a.hashables.previous);
result.code = validate_message (account, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is this block signed correctly (Malformed)
if (result.code == rai::process_result::progress)
{
ledger.store.block_put (transaction, hash, block_a);
auto balance (ledger.balance (transaction, block_a.hashables.previous));
ledger.store.representation_add (transaction, hash, balance);
ledger.store.representation_add (transaction, info.rep_block, 0 - balance);
ledger.change_latest (transaction, account, hash, hash, info.balance, info.block_count + 1);
ledger.store.frontier_del (transaction, block_a.hashables.previous);
ledger.store.frontier_put (transaction, hash, account);
result.account = account;
result.amount = 0;
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::change);
}
}
}
}
}
}
void ledger_processor::send_block (rai::send_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block before? (Harmless)
if (result.code == rai::process_result::progress)
{
auto previous (ledger.store.block_get (transaction, block_a.hashables.previous));
result.code = previous != nullptr ? rai::process_result::progress : rai::process_result::gap_previous; // Have we seen the previous block already? (Harmless)
if (result.code == rai::process_result::progress)
{
result.code = block_a.valid_predecessor (*previous) ? rai::process_result::progress : rai::process_result::block_position;
if (result.code == rai::process_result::progress)
{
auto account (ledger.store.frontier_get (transaction, block_a.hashables.previous));
result.code = account.is_zero () ? rai::process_result::fork : rai::process_result::progress;
if (result.code == rai::process_result::progress)
{
result.code = validate_message (account, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is this block signed correctly (Malformed)
if (result.code == rai::process_result::progress)
{
rai::account_info info;
auto latest_error (ledger.store.account_get (transaction, account, info));
assert (!latest_error);
assert (info.head == block_a.hashables.previous);
result.code = info.balance.number () >= block_a.hashables.balance.number () ? rai::process_result::progress : rai::process_result::negative_spend; // Is this trying to spend a negative amount (Malicious)
if (result.code == rai::process_result::progress)
{
auto amount (info.balance.number () - block_a.hashables.balance.number ());
ledger.store.representation_add (transaction, info.rep_block, 0 - amount);
ledger.store.block_put (transaction, hash, block_a);
ledger.change_latest (transaction, account, hash, info.rep_block, block_a.hashables.balance, info.block_count + 1);
ledger.store.pending_put (transaction, rai::pending_key (block_a.hashables.destination, hash), { account, amount, rai::epoch::epoch_0 });
ledger.store.frontier_del (transaction, block_a.hashables.previous);
ledger.store.frontier_put (transaction, hash, account);
result.account = account;
result.amount = amount;
result.pending_account = block_a.hashables.destination;
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::send);
}
}
}
}
}
}
}
void ledger_processor::receive_block (rai::receive_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block already? (Harmless)
if (result.code == rai::process_result::progress)
{
auto previous (ledger.store.block_get (transaction, block_a.hashables.previous));
result.code = previous != nullptr ? rai::process_result::progress : rai::process_result::gap_previous;
if (result.code == rai::process_result::progress)
{
result.code = block_a.valid_predecessor (*previous) ? rai::process_result::progress : rai::process_result::block_position;
if (result.code == rai::process_result::progress)
{
result.code = ledger.store.block_exists (transaction, block_a.hashables.source) ? rai::process_result::progress : rai::process_result::gap_source; // Have we seen the source block already? (Harmless)
if (result.code == rai::process_result::progress)
{
auto account (ledger.store.frontier_get (transaction, block_a.hashables.previous));
					result.code = account.is_zero () ? rai::process_result::gap_previous : rai::process_result::progress; // Have we seen the previous block? No entries for account at all (Harmless)
if (result.code == rai::process_result::progress)
{
result.code = rai::validate_message (account, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is the signature valid (Malformed)
if (result.code == rai::process_result::progress)
{
rai::account_info info;
ledger.store.account_get (transaction, account, info);
result.code = info.head == block_a.hashables.previous ? rai::process_result::progress : rai::process_result::gap_previous; // Block doesn't immediately follow latest block (Harmless)
if (result.code == rai::process_result::progress)
{
rai::pending_key key (account, block_a.hashables.source);
rai::pending_info pending;
result.code = ledger.store.pending_get (transaction, key, pending) ? rai::process_result::unreceivable : rai::process_result::progress; // Has this source already been received (Malformed)
if (result.code == rai::process_result::progress)
{
result.code = pending.epoch == rai::epoch::epoch_0 ? rai::process_result::progress : rai::process_result::unreceivable; // Are we receiving a state-only send? (Malformed)
if (result.code == rai::process_result::progress)
{
auto new_balance (info.balance.number () + pending.amount.number ());
rai::account_info source_info;
auto error (ledger.store.account_get (transaction, pending.source, source_info));
assert (!error);
ledger.store.pending_del (transaction, key);
ledger.store.block_put (transaction, hash, block_a);
ledger.change_latest (transaction, account, hash, info.rep_block, new_balance, info.block_count + 1);
ledger.store.representation_add (transaction, info.rep_block, pending.amount.number ());
ledger.store.frontier_del (transaction, block_a.hashables.previous);
ledger.store.frontier_put (transaction, hash, account);
result.account = account;
result.amount = pending.amount;
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::receive);
}
}
}
}
}
else
{
result.code = ledger.store.block_exists (transaction, block_a.hashables.previous) ? rai::process_result::fork : rai::process_result::gap_previous; // If we have the block but it's not the latest we have a signed fork (Malicious)
}
}
}
}
}
}
void ledger_processor::open_block (rai::open_block const & block_a)
{
auto hash (block_a.hash ());
auto existing (ledger.store.block_exists (transaction, hash));
result.code = existing ? rai::process_result::old : rai::process_result::progress; // Have we seen this block already? (Harmless)
if (result.code == rai::process_result::progress)
{
auto source_missing (!ledger.store.block_exists (transaction, block_a.hashables.source));
result.code = source_missing ? rai::process_result::gap_source : rai::process_result::progress; // Have we seen the source block? (Harmless)
if (result.code == rai::process_result::progress)
{
result.code = rai::validate_message (block_a.hashables.account, hash, block_a.signature) ? rai::process_result::bad_signature : rai::process_result::progress; // Is the signature valid (Malformed)
if (result.code == rai::process_result::progress)
{
rai::account_info info;
result.code = ledger.store.account_get (transaction, block_a.hashables.account, info) ? rai::process_result::progress : rai::process_result::fork; // Has this account already been opened? (Malicious)
if (result.code == rai::process_result::progress)
{
rai::pending_key key (block_a.hashables.account, block_a.hashables.source);
rai::pending_info pending;
result.code = ledger.store.pending_get (transaction, key, pending) ? rai::process_result::unreceivable : rai::process_result::progress; // Has this source already been received (Malformed)
if (result.code == rai::process_result::progress)
{
result.code = block_a.hashables.account == rai::burn_account ? rai::process_result::opened_burn_account : rai::process_result::progress; // Is it burning 0 account? (Malicious)
if (result.code == rai::process_result::progress)
{
result.code = pending.epoch == rai::epoch::epoch_0 ? rai::process_result::progress : rai::process_result::unreceivable; // Are we receiving a state-only send? (Malformed)
if (result.code == rai::process_result::progress)
{
rai::account_info source_info;
auto error (ledger.store.account_get (transaction, pending.source, source_info));
assert (!error);
ledger.store.pending_del (transaction, key);
ledger.store.block_put (transaction, hash, block_a);
ledger.change_latest (transaction, block_a.hashables.account, hash, hash, pending.amount.number (), info.block_count + 1);
ledger.store.representation_add (transaction, hash, pending.amount.number ());
ledger.store.frontier_put (transaction, hash, block_a.hashables.account);
result.account = block_a.hashables.account;
result.amount = pending.amount;
ledger.stats.inc (rai::stat::type::ledger, rai::stat::detail::open);
}
}
}
}
}
}
}
}
ledger_processor::ledger_processor (rai::ledger & ledger_a, rai::transaction const & transaction_a, bool valid_signature_a) :
ledger (ledger_a),
transaction (transaction_a),
valid_signature (valid_signature_a)
{
}
} // namespace
size_t rai::shared_ptr_block_hash::operator() (std::shared_ptr<rai::block> const & block_a) const
{
auto hash (block_a->hash ());
auto result (static_cast<size_t> (hash.qwords[0]));
return result;
}
bool rai::shared_ptr_block_hash::operator() (std::shared_ptr<rai::block> const & lhs, std::shared_ptr<rai::block> const & rhs) const
{
return lhs->hash () == rhs->hash ();
}
rai::ledger::ledger (rai::block_store & store_a, rai::stat & stat_a, rai::uint256_union const & epoch_link_a, rai::account const & epoch_signer_a) :
store (store_a),
stats (stat_a),
check_bootstrap_weights (true),
epoch_link (epoch_link_a),
epoch_signer (epoch_signer_a)
{
}
// Balance for account containing hash
rai::uint128_t rai::ledger::balance (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
rai::summation_visitor visitor (transaction_a, store);
return visitor.compute_balance (hash_a);
}
// Balance for an account by account number
rai::uint128_t rai::ledger::account_balance (rai::transaction const & transaction_a, rai::account const & account_a)
{
rai::uint128_t result (0);
rai::account_info info;
auto none (store.account_get (transaction_a, account_a, info));
if (!none)
{
result = info.balance.number ();
}
return result;
}
rai::uint128_t rai::ledger::account_pending (rai::transaction const & transaction_a, rai::account const & account_a)
{
rai::uint128_t result (0);
rai::account end (account_a.number () + 1);
for (auto i (store.pending_v0_begin (transaction_a, rai::pending_key (account_a, 0))), n (store.pending_v0_begin (transaction_a, rai::pending_key (end, 0))); i != n; ++i)
{
rai::pending_info info (i->second);
result += info.amount.number ();
}
for (auto i (store.pending_v1_begin (transaction_a, rai::pending_key (account_a, 0))), n (store.pending_v1_begin (transaction_a, rai::pending_key (end, 0))); i != n; ++i)
{
rai::pending_info info (i->second);
result += info.amount.number ();
}
return result;
}
rai::process_return rai::ledger::process (rai::transaction const & transaction_a, rai::block const & block_a, bool valid_signature)
{
ledger_processor processor (*this, transaction_a, valid_signature);
block_a.visit (processor);
return processor.result;
}
rai::block_hash rai::ledger::representative (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
auto result (representative_calculated (transaction_a, hash_a));
assert (result.is_zero () || store.block_exists (transaction_a, result));
return result;
}
rai::block_hash rai::ledger::representative_calculated (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
representative_visitor visitor (transaction_a, store);
visitor.compute (hash_a);
return visitor.result;
}
bool rai::ledger::block_exists (rai::block_hash const & hash_a)
{
auto transaction (store.tx_begin_read ());
auto result (store.block_exists (transaction, hash_a));
return result;
}
std::string rai::ledger::block_text (char const * hash_a)
{
return block_text (rai::block_hash (hash_a));
}
std::string rai::ledger::block_text (rai::block_hash const & hash_a)
{
std::string result;
auto transaction (store.tx_begin_read ());
auto block (store.block_get (transaction, hash_a));
if (block != nullptr)
{
block->serialize_json (result);
}
return result;
}
bool rai::ledger::is_send (rai::transaction const & transaction_a, rai::state_block const & block_a)
{
bool result (false);
rai::block_hash previous (block_a.hashables.previous);
if (!previous.is_zero ())
{
if (block_a.hashables.balance < balance (transaction_a, previous))
{
result = true;
}
}
return result;
}
rai::block_hash rai::ledger::block_destination (rai::transaction const & transaction_a, rai::block const & block_a)
{
rai::block_hash result (0);
rai::send_block const * send_block (dynamic_cast<rai::send_block const *> (&block_a));
rai::state_block const * state_block (dynamic_cast<rai::state_block const *> (&block_a));
if (send_block != nullptr)
{
result = send_block->hashables.destination;
}
else if (state_block != nullptr && is_send (transaction_a, *state_block))
{
result = state_block->hashables.link;
}
return result;
}
rai::block_hash rai::ledger::block_source (rai::transaction const & transaction_a, rai::block const & block_a)
{
/*
* block_source() requires that the previous block of the block
* passed in exist in the database. This is because it will try
* to check account balances to determine if it is a send block.
*/
assert (block_a.previous ().is_zero () || store.block_exists (transaction_a, block_a.previous ()));
// If block_a.source () is nonzero, then we have our source.
// However, universal blocks will always return zero.
rai::block_hash result (block_a.source ());
rai::state_block const * state_block (dynamic_cast<rai::state_block const *> (&block_a));
if (state_block != nullptr && !is_send (transaction_a, *state_block))
{
result = state_block->hashables.link;
}
return result;
}
// Vote weight of an account
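// While the ledger holds fewer than bootstrap_weight_max_blocks blocks,
// preconfigured bootstrap weights take precedence over stored
// representation; once the threshold is crossed, check_bootstrap_weights is
// cleared and the fallback is never consulted again.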
rai::uint128_t rai::ledger::weight (rai::transaction const & transaction_a, rai::account const & account_a)
{
if (check_bootstrap_weights.load ())
{
auto blocks = store.block_count (transaction_a);
if (blocks.sum () < bootstrap_weight_max_blocks)
{
auto weight = bootstrap_weights.find (account_a);
if (weight != bootstrap_weights.end ())
{
return weight->second;
}
}
else
{
check_bootstrap_weights = false;
}
}
return store.representation_get (transaction_a, account_a);
}
// Rollback blocks until `block_a' doesn't exist
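// A minimal usage sketch (the write-transaction helper name is an
// assumption):
//
//   auto transaction (ledger.store.tx_begin_write ());
//   if (ledger.store.block_exists (transaction, hash))
//   {
//       ledger.rollback (transaction, hash); // pops the owning account's head blocks until `hash` is gone
//   }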
void rai::ledger::rollback (rai::transaction const & transaction_a, rai::block_hash const & block_a)
{
assert (store.block_exists (transaction_a, block_a));
auto account_l (account (transaction_a, block_a));
rollback_visitor rollback (transaction_a, *this);
rai::account_info info;
while (store.block_exists (transaction_a, block_a))
{
auto latest_error (store.account_get (transaction_a, account_l, info));
assert (!latest_error);
auto block (store.block_get (transaction_a, info.head));
block->visit (rollback);
}
}
// Return account containing hash
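// Strategy: follow block successors forward until reaching a state block
// (which records its account directly), a chain head (resolved through the
// frontier table), or a block_info checkpoint written every
// store.block_info_max blocks.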
rai::account rai::ledger::account (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
rai::account result;
auto hash (hash_a);
rai::block_hash successor (1);
rai::block_info block_info;
auto block (store.block_get (transaction_a, hash));
assert (block);
while (!successor.is_zero () && block->type () != rai::block_type::state && store.block_info_get (transaction_a, successor, block_info))
{
successor = store.block_successor (transaction_a, hash);
if (!successor.is_zero ())
{
hash = successor;
block = store.block_get (transaction_a, hash);
}
}
if (block->type () == rai::block_type::state)
{
auto state_block (dynamic_cast<rai::state_block *> (block.get ()));
result = state_block->hashables.account;
}
else if (successor.is_zero ())
{
result = store.frontier_get (transaction_a, hash);
}
else
{
result = block_info.account;
}
assert (!result.is_zero ());
return result;
}
// Return amount decrease or increase for block
rai::uint128_t rai::ledger::amount (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
summation_visitor amount (transaction_a, store);
return amount.compute_amount (hash_a);
}
// Return latest block for account
rai::block_hash rai::ledger::latest (rai::transaction const & transaction_a, rai::account const & account_a)
{
rai::account_info info;
auto latest_error (store.account_get (transaction_a, account_a, info));
return latest_error ? 0 : info.head;
}
// Return latest root for account; account number if there are no blocks for this account.
rai::block_hash rai::ledger::latest_root (rai::transaction const & transaction_a, rai::account const & account_a)
{
rai::account_info info;
auto latest_error (store.account_get (transaction_a, account_a, info));
rai::block_hash result;
if (latest_error)
{
result = account_a;
}
else
{
result = info.head;
}
return result;
}
rai::checksum rai::ledger::checksum (rai::transaction const & transaction_a, rai::account const & begin_a, rai::account const & end_a)
{
rai::checksum result;
auto error (store.checksum_get (transaction_a, 0, 0, result));
assert (!error);
return result;
}
void rai::ledger::dump_account_chain (rai::account const & account_a)
{
auto transaction (store.tx_begin_read ());
auto hash (latest (transaction, account_a));
while (!hash.is_zero ())
{
auto block (store.block_get (transaction, hash));
assert (block != nullptr);
std::cerr << hash.to_string () << std::endl;
hash = block->previous ();
}
}
class block_fit_visitor : public rai::block_visitor
{
public:
block_fit_visitor (rai::ledger & ledger_a, rai::transaction const & transaction_a) :
ledger (ledger_a),
transaction (transaction_a),
result (false)
{
}
void send_block (rai::send_block const & block_a) override
{
result = ledger.store.block_exists (transaction, block_a.previous ());
}
void receive_block (rai::receive_block const & block_a) override
{
result = ledger.store.block_exists (transaction, block_a.previous ());
result &= ledger.store.block_exists (transaction, block_a.source ());
}
void open_block (rai::open_block const & block_a) override
{
result = ledger.store.block_exists (transaction, block_a.source ());
}
void change_block (rai::change_block const & block_a) override
{
result = ledger.store.block_exists (transaction, block_a.previous ());
}
void state_block (rai::state_block const & block_a) override
{
result = block_a.previous ().is_zero () || ledger.store.block_exists (transaction, block_a.previous ());
if (result && !ledger.is_send (transaction, block_a))
{
result &= ledger.store.block_exists (transaction, block_a.hashables.link) || block_a.hashables.link.is_zero () || ledger.is_epoch_link (block_a.hashables.link);
}
}
rai::ledger & ledger;
rai::transaction const & transaction;
bool result;
};
bool rai::ledger::could_fit (rai::transaction const & transaction_a, rai::block const & block_a)
{
block_fit_visitor visitor (*this, transaction_a);
block_a.visit (visitor);
return visitor.result;
}
bool rai::ledger::is_epoch_link (rai::uint256_union const & link_a)
{
return link_a == epoch_link;
}
void rai::ledger::checksum_update (rai::transaction const & transaction_a, rai::block_hash const & hash_a)
{
rai::checksum value;
auto error (store.checksum_get (transaction_a, 0, 0, value));
assert (!error);
value ^= hash_a;
store.checksum_put (transaction_a, 0, 0, value);
}
void rai::ledger::change_latest (rai::transaction const & transaction_a, rai::account const & account_a, rai::block_hash const & hash_a, rai::block_hash const & rep_block_a, rai::amount const & balance_a, uint64_t block_count_a, bool is_state, rai::epoch epoch_a)
{
rai::account_info info;
auto exists (!store.account_get (transaction_a, account_a, info));
if (exists)
{
checksum_update (transaction_a, info.head);
}
else
{
assert (store.block_get (transaction_a, hash_a)->previous ().is_zero ());
info.open_block = hash_a;
}
if (!hash_a.is_zero ())
{
info.head = hash_a;
info.rep_block = rep_block_a;
info.balance = balance_a;
info.modified = rai::seconds_since_epoch ();
info.block_count = block_count_a;
if (exists && info.epoch != epoch_a)
{
			// the epoch changed, so delete the entry stored under the old epoch, otherwise we'd end up with a duplicate
store.account_del (transaction_a, account_a);
}
info.epoch = epoch_a;
store.account_put (transaction_a, account_a, info);
if (!(block_count_a % store.block_info_max) && !is_state)
{
rai::block_info block_info;
block_info.account = account_a;
block_info.balance = balance_a;
store.block_info_put (transaction_a, hash_a, block_info);
}
checksum_update (transaction_a, hash_a);
}
else
{
store.account_del (transaction_a, account_a);
}
}
std::shared_ptr<rai::block> rai::ledger::successor (rai::transaction const & transaction_a, rai::uint256_union const & root_a)
{
rai::block_hash successor (0);
if (store.account_exists (transaction_a, root_a))
{
rai::account_info info;
auto error (store.account_get (transaction_a, root_a, info));
assert (!error);
successor = info.open_block;
}
else
{
successor = store.block_successor (transaction_a, root_a);
}
std::shared_ptr<rai::block> result;
if (!successor.is_zero ())
{
result = store.block_get (transaction_a, successor);
}
assert (successor.is_zero () || result != nullptr);
return result;
}
std::shared_ptr<rai::block> rai::ledger::forked_block (rai::transaction const & transaction_a, rai::block const & block_a)
{
assert (!store.block_exists (transaction_a, block_a.hash ()));
auto root (block_a.root ());
assert (store.block_exists (transaction_a, root) || store.account_exists (transaction_a, root));
auto result (store.block_get (transaction_a, store.block_successor (transaction_a, root)));
if (result == nullptr)
{
rai::account_info info;
auto error (store.account_get (transaction_a, root, info));
assert (!error);
result = store.block_get (transaction_a, info.open_block);
assert (result != nullptr);
}
return result;
}
| 1 | 14,603 | Wondering if we can directly put types here & for other similar ledger_processor items like rai::block_type::state | nanocurrency-nano-node | cpp |
@@ -296,17 +296,8 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
}
if len(removePacks) != 0 {
- bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted")
- bar.Start()
- for packID := range removePacks {
- h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
- err = repo.Backend().Remove(ctx, h)
- if err != nil {
- Warnf("unable to remove file %v from the repository\n", packID.Str())
- }
- bar.Report(restic.Stat{Blobs: 1})
- }
- bar.Done()
+ Verbosef("remove %d old packs\n", len(removePacks))
+ DeleteFiles(gopts, repo, removePacks, restic.DataFile)
}
Verbosef("done\n") | 1 | package main
import (
"fmt"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/index"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
)
var cmdPrune = &cobra.Command{
Use: "prune [flags]",
Short: "Remove unneeded data from the repository",
Long: `
The "prune" command checks the repository and removes data that is not
referenced and therefore not needed any more.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runPrune(globalOptions)
},
}
func init() {
cmdRoot.AddCommand(cmdPrune)
}
func shortenStatus(maxLength int, s string) string {
if len(s) <= maxLength {
return s
}
if maxLength < 3 {
return s[:maxLength]
}
return s[:maxLength-3] + "..."
}
// newProgressMax returns a progress that counts blobs.
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
if !show {
return nil
}
p := restic.NewProgress()
p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
status := fmt.Sprintf("[%s] %s %d / %d %s",
formatDuration(d),
formatPercent(s.Blobs, max),
s.Blobs, max, description)
if w := stdoutTerminalWidth(); w > 0 {
status = shortenStatus(w, status)
}
PrintProgress("%s", status)
}
p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\n")
}
return p
}
func runPrune(gopts GlobalOptions) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
// we do not need index updates while pruning!
repo.DisableAutoIndexUpdate()
return pruneRepository(gopts, repo)
}
func mixedBlobs(list []restic.Blob) bool {
var tree, data bool
for _, pb := range list {
switch pb.Type {
case restic.TreeBlob:
tree = true
case restic.DataBlob:
data = true
}
if tree && data {
return true
}
}
return false
}
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
ctx := gopts.ctx
err := repo.LoadIndex(ctx)
if err != nil {
return err
}
var stats struct {
blobs int
packs int
snapshots int
bytes int64
}
Verbosef("counting files in repo\n")
err = repo.List(ctx, restic.DataFile, func(restic.ID, int64) error {
stats.packs++
return nil
})
if err != nil {
return err
}
Verbosef("building new index for repo\n")
bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar)
if err != nil {
return err
}
for _, id := range invalidFiles {
Warnf("incomplete pack file (will be removed): %v\n", id)
}
blobs := 0
for _, pack := range idx.Packs {
stats.bytes += pack.Size
blobs += len(pack.Entries)
}
Verbosef("repository contains %v packs (%v blobs) with %v\n",
len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))
blobCount := make(map[restic.BlobHandle]int)
var duplicateBlobs uint64
var duplicateBytes uint64
// find duplicate blobs
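	// e.g. a blob present in three packs is counted once as a blob and twice
	// as a duplicate, contributing 2*entry.Length to duplicateBytes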
for _, p := range idx.Packs {
for _, entry := range p.Entries {
stats.blobs++
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
blobCount[h]++
if blobCount[h] > 1 {
duplicateBlobs++
duplicateBytes += uint64(entry.Length)
}
}
}
Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n",
stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
Verbosef("load all snapshots\n")
// find referenced blobs
snapshots, err := restic.LoadAllSnapshots(ctx, repo)
if err != nil {
return err
}
stats.snapshots = len(snapshots)
Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)
usedBlobs := restic.NewBlobSet()
bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots")
bar.Start()
for _, sn := range snapshots {
debug.Log("process snapshot %v", sn.ID())
err = restic.FindUsedBlobs(ctx, repo, *sn.Tree, usedBlobs)
if err != nil {
if repo.Backend().IsNotExist(err) {
return errors.Fatal("unable to load a tree from the repo: " + err.Error())
}
return err
}
debug.Log("processed snapshot %v", sn.ID())
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()
if len(usedBlobs) > stats.blobs {
return errors.Fatalf("number of used blobs is larger than number of available blobs!\n" +
"Please report this error (along with the output of the 'prune' run) at\n" +
"https://github.com/restic/restic/issues/new")
}
Verbosef("found %d of %d data blobs still in use, removing %d blobs\n",
len(usedBlobs), stats.blobs, stats.blobs-len(usedBlobs))
// find packs that need a rewrite
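	// a pack is rewritten if it mixes tree and data blobs, contains at least
	// one blob that is no longer used, or contains a blob that also lives in
	// another pack (a duplicate)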
rewritePacks := restic.NewIDSet()
for _, pack := range idx.Packs {
if mixedBlobs(pack.Entries) {
rewritePacks.Insert(pack.ID)
continue
}
for _, blob := range pack.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if !usedBlobs.Has(h) {
rewritePacks.Insert(pack.ID)
continue
}
if blobCount[h] > 1 {
rewritePacks.Insert(pack.ID)
}
}
}
removeBytes := duplicateBytes
// find packs that are unneeded
removePacks := restic.NewIDSet()
Verbosef("will remove %d invalid files\n", len(invalidFiles))
for _, id := range invalidFiles {
removePacks.Insert(id)
}
for packID, p := range idx.Packs {
hasActiveBlob := false
for _, blob := range p.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if usedBlobs.Has(h) {
hasActiveBlob = true
continue
}
removeBytes += uint64(blob.Length)
}
if hasActiveBlob {
continue
}
removePacks.Insert(packID)
if !rewritePacks.Has(packID) {
return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
}
rewritePacks.Delete(packID)
}
Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n",
len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes)))
var obsoletePacks restic.IDSet
if len(rewritePacks) != 0 {
bar = newProgressMax(!gopts.Quiet, uint64(len(rewritePacks)), "packs rewritten")
bar.Start()
obsoletePacks, err = repository.Repack(ctx, repo, rewritePacks, usedBlobs, bar)
if err != nil {
return err
}
bar.Done()
}
removePacks.Merge(obsoletePacks)
if err = rebuildIndex(ctx, repo, removePacks); err != nil {
return err
}
if len(removePacks) != 0 {
bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted")
bar.Start()
for packID := range removePacks {
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
err = repo.Backend().Remove(ctx, h)
if err != nil {
Warnf("unable to remove file %v from the repository\n", packID.Str())
}
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()
}
Verbosef("done\n")
return nil
}
| 1 | 13,489 | As `DeleteFiles` is not only used for prune this function deserves its own file. Maybe something like `delete_files.go` or `parallel.go`? | restic-restic | go |
@@ -35,7 +35,7 @@ return [
'digits' => ':attribute 必须是 :digits 位的数字。',
'digits_between' => ':attribute 必须是介于 :min 和 :max 位的数字。',
'dimensions' => ':attribute 图片尺寸不正确。',
- 'distinct' => ':attribute 已經存在。',
+ 'distinct' => ':attribute 已经存在。',
'email' => ':attribute 不是一个合法的邮箱。',
'exists' => ':attribute 不存在。',
'file' => ':attribute 必须是文件。', | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
    | the validator class. Some of these rules have multiple versions,
    | such as the size rules. Feel free to tweak each of these messages.
|
*/
'accepted' => ':attribute 必须接受。',
'active_url' => ':attribute 不是一个有效的网址。',
'after' => ':attribute 必须是一个在 :date 之后的日期。',
'alpha' => ':attribute 只能由字母组成。',
'alpha_dash' => ':attribute 只能由字母、数字和斜杠组成。',
'alpha_num' => ':attribute 只能由字母和数字组成。',
'array' => ':attribute 必须是一个数组。',
'before' => ':attribute 必须是一个在 :date 之前的日期。',
'between' => [
'numeric' => ':attribute 必须介于 :min - :max 之间。',
'file' => ':attribute 必须介于 :min - :max kb 之间。',
'string' => ':attribute 必须介于 :min - :max 个字符之间。',
'array' => ':attribute 必须只有 :min - :max 个单元。',
],
'boolean' => ':attribute 必须为布尔值。',
'confirmed' => ':attribute 两次输入不一致。',
'date' => ':attribute 不是一个有效的日期。',
'date_format' => ':attribute 的格式必须为 :format。',
'different' => ':attribute 和 :other 必须不同。',
'digits' => ':attribute 必须是 :digits 位的数字。',
'digits_between' => ':attribute 必须是介于 :min 和 :max 位的数字。',
'dimensions' => ':attribute 图片尺寸不正确。',
'distinct' => ':attribute 已經存在。',
'email' => ':attribute 不是一个合法的邮箱。',
'exists' => ':attribute 不存在。',
'file' => ':attribute 必须是文件。',
'filled' => ':attribute 不能为空。',
'image' => ':attribute 必须是图片。',
'in' => '已选的属性 :attribute 非法。',
'in_array' => ':attribute 没有在 :other 中。',
'integer' => ':attribute 必须是整数。',
'ip' => ':attribute 必须是有效的 IP 地址。',
'json' => ':attribute 必须是正确的 JSON 格式。',
'max' => [
'numeric' => ':attribute 不能大于 :max。',
'file' => ':attribute 不能大于 :max kb。',
'string' => ':attribute 不能大于 :max 个字符。',
'array' => ':attribute 最多只有 :max 个单元。',
],
'mimes' => ':attribute 必须是一个 :values 类型的文件。',
'min' => [
'numeric' => ':attribute 必须大于等于 :min。',
'file' => ':attribute 大小不能小于 :min kb。',
'string' => ':attribute 至少为 :min 个字符。',
'array' => ':attribute 至少有 :min 个单元。',
],
'not_in' => '已选的属性 :attribute 非法。',
'numeric' => ':attribute 必须是一个数字。',
'present' => ':attribute 必须存在。',
'regex' => ':attribute 格式不正确。',
'required' => ':attribute 不能为空。',
'required_if' => '当 :other 为 :value 时 :attribute 不能为空。',
'required_unless' => '当 :other 不为 :value 时 :attribute 不能为空。',
'required_with' => '当 :values 存在时 :attribute 不能为空。',
'required_with_all' => '当 :values 存在时 :attribute 不能为空。',
'required_without' => '当 :values 不存在时 :attribute 不能为空。',
'required_without_all' => '当 :values 都不存在时 :attribute 不能为空。',
'same' => ':attribute 和 :other 必须相同。',
'size' => [
'numeric' => ':attribute 大小必须为 :size。',
'file' => ':attribute 大小必须为 :size kb。',
'string' => ':attribute 必须是 :size 个字符。',
'array' => ':attribute 必须为 :size 个单元。',
],
'string' => ':attribute 必须是一个字符串。',
'timezone' => ':attribute 必须是一个合法的时区值。',
'unique' => ':attribute 已经存在。',
'url' => ':attribute 格式不正确。',
/*
|--------------------------------------------------------------------------
| Custom Validation Language Lines
|--------------------------------------------------------------------------
|
| Here you may specify custom validation messages for attributes using the
| convention 'attribute.rule' to name the lines. This makes it quick to
| specify a specific custom language line for a given attribute rule.
|
*/
'custom' => [
'attribute-name' => [
'rule-name' => 'custom-message',
],
],
/*
|--------------------------------------------------------------------------
| Custom Validation Attributes
|--------------------------------------------------------------------------
|
| The following language lines are used to swap attribute place-holders
| with something more reader friendly such as E-Mail Address instead
| of 'email'. This simply helps us make messages a little cleaner.
|
*/
'attributes' => [
'name' => '名称',
'username' => '用户名',
'email' => '邮箱',
'first_name' => '名',
'last_name' => '姓',
'password' => '密码',
'password_confirmation' => '确认密码',
'city' => '城市',
'country' => '国家',
'address' => '地址',
'phone' => '电话',
'mobile' => '手机',
'age' => '年龄',
'sex' => '性别',
'gender' => '性别',
'day' => '天',
'month' => '月',
'year' => '年',
'hour' => '时',
'minute' => '分',
'second' => '秒',
'title' => '标题',
'content' => '内容',
'description' => '描述',
'excerpt' => '摘要',
'date' => '日期',
'time' => '时间',
'available' => '可用的',
'size' => '大小',
],
];
| 1 | 6,671 | It should be simplified character. `` -> `` | Laravel-Lang-lang | php |
@@ -33,5 +33,9 @@ namespace Nethermind.Trie
void VisitLeaf(TrieNode node, TrieVisitContext trieVisitContext, byte[] value = null);
void VisitCode(Keccak codeHash, TrieVisitContext trieVisitContext);
+
+ bool VisitAccounts => true;
+
+ int ParallelLevels => -1;
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Core.Crypto;
namespace Nethermind.Trie
{
public interface ITreeVisitor
{
bool ShouldVisit(Keccak nextNode);
void VisitTree(Keccak rootHash, TrieVisitContext trieVisitContext);
void VisitMissingNode(Keccak nodeHash, TrieVisitContext trieVisitContext);
void VisitBranch(TrieNode node, TrieVisitContext trieVisitContext);
void VisitExtension(TrieNode node, TrieVisitContext trieVisitContext);
void VisitLeaf(TrieNode node, TrieVisitContext trieVisitContext, byte[] value = null);
void VisitCode(Keccak codeHash, TrieVisitContext trieVisitContext);
}
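    // A hedged sketch of a minimal visitor written against the interface
    // above; every callback is a no-op apart from counting visited nodes:
    //
    // public class CountingVisitor : ITreeVisitor
    // {
    //     public int Nodes;
    //
    //     public bool ShouldVisit(Keccak nextNode) => true;
    //     public void VisitTree(Keccak rootHash, TrieVisitContext trieVisitContext) { }
    //     public void VisitMissingNode(Keccak nodeHash, TrieVisitContext trieVisitContext) { }
    //     public void VisitBranch(TrieNode node, TrieVisitContext trieVisitContext) => Nodes++;
    //     public void VisitExtension(TrieNode node, TrieVisitContext trieVisitContext) => Nodes++;
    //     public void VisitLeaf(TrieNode node, TrieVisitContext trieVisitContext, byte[] value = null) => Nodes++;
    //     public void VisitCode(Keccak codeHash, TrieVisitContext trieVisitContext) { }
    // }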
}
| 1 | 25,708 | I am pretty sure it breaks the visitor pattern, visitor should have no knowledge about the structure of what it is visiting or control over visiting mechanism | NethermindEth-nethermind | .cs |
@@ -20,7 +20,7 @@ import java.util.stream.Collector;
import static javaslang.collection.Comparators.naturalComparator;
/**
- * An {@link HashMap}-based implementation of {@link Multimap}
+ * A {@link TreeMap}-based implementation of {@link Multimap}
*
* @param <K> Key type
* @param <V> Value type | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2017 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Tuple2;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;
import static javaslang.collection.Comparators.naturalComparator;
/**
* An {@link HashMap}-based implementation of {@link Multimap}
*
* @param <K> Key type
* @param <V> Value type
* @author Ruslan Sennov
* @since 2.1.0
*/
public final class TreeMultimap<K, V> extends AbstractMultimap<K, V, TreeMultimap<K, V>> implements Serializable {
private static final long serialVersionUID = 1L;
public static <V> Builder<V> withSeq() {
return new Builder<>(ContainerType.SEQ, List::empty);
}
public static <V> Builder<V> withSet() {
return new Builder<>(ContainerType.SET, HashSet::empty);
}
public static <V extends Comparable<? super V>> Builder<V> withSortedSet() {
return new Builder<>(ContainerType.SORTED_SET, TreeSet::empty);
}
public static <V> Builder<V> withSortedSet(Comparator<? super V> comparator) {
return new Builder<>(ContainerType.SORTED_SET, () -> TreeSet.empty(comparator));
}
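    // A hedged usage sketch of the builders above (example keys and values
    // assumed):
    //
    //   TreeMultimap<String, Integer> mm =
    //           TreeMultimap.<Integer> withSeq().of("a", 1, "a", 2);
    //   mm.get("a"); // Some(List(1, 2)); keys are iterated in sorted order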
public static class Builder<V> {
private final ContainerType containerType;
private final SerializableSupplier<Traversable<?>> emptyContainer;
private Builder(ContainerType containerType, SerializableSupplier<Traversable<?>> emptyContainer) {
this.containerType = containerType;
this.emptyContainer = emptyContainer;
}
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> empty() {
return empty(naturalComparator());
}
public <K, V2 extends V> TreeMultimap<K, V2> empty(Comparator<? super K> keyComparator) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
return new TreeMultimap<>(TreeMap.empty(keyComparator), containerType, emptyContainer);
}
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> ofEntries(Iterable<? extends Tuple2<? extends K, ? extends V2>> entries) {
return ofEntries(naturalComparator(), entries);
}
public <K, V2 extends V> TreeMultimap<K, V2> ofEntries(Comparator<? super K> keyComparator, Iterable<? extends Tuple2<? extends K, ? extends V2>> entries) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
Objects.requireNonNull(entries, "entries is null");
TreeMultimap<K, V2> result = empty(keyComparator);
for (Tuple2<? extends K, ? extends V2> entry : entries) {
result = result.put(entry._1, entry._2);
}
return result;
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public final <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> ofEntries(Tuple2<? extends K, ? extends V2>... entries) {
return ofEntries(naturalComparator(), entries);
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public final <K, V2 extends V> TreeMultimap<K, V2> ofEntries(Comparator<? super K> keyComparator, Tuple2<? extends K, ? extends V2>... entries) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
Objects.requireNonNull(entries, "entries is null");
TreeMultimap<K, V2> result = empty(keyComparator);
for (Tuple2<? extends K, ? extends V2> entry : entries) {
result = result.put(entry._1, entry._2);
}
return result;
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public final <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> ofEntries(java.util.Map.Entry<? extends K, ? extends V2>... entries) {
return ofEntries(naturalComparator(), entries);
}
@SuppressWarnings({ "unchecked", "varargs" })
@SafeVarargs
public final <K, V2 extends V> TreeMultimap<K, V2> ofEntries(Comparator<? super K> keyComparator, java.util.Map.Entry<? extends K, ? extends V2>... entries) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
Objects.requireNonNull(entries, "entries is null");
TreeMultimap<K, V2> result = empty(keyComparator);
for (java.util.Map.Entry<? extends K, ? extends V2> entry : entries) {
result = result.put(entry.getKey(), entry.getValue());
}
return result;
}
@SuppressWarnings("unchecked")
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> tabulate(int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V2>> f) {
return tabulate(naturalComparator(), n, f);
}
@SuppressWarnings("unchecked")
public <K, V2 extends V> TreeMultimap<K, V2> tabulate(Comparator<? super K> keyComparator, int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V2>> f) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
Objects.requireNonNull(f, "f is null");
return ofEntries(keyComparator, Collections.tabulate(n, (Function<? super Integer, ? extends Tuple2<K, V2>>) f));
}
@SuppressWarnings("unchecked")
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> fill(int n, Supplier<? extends Tuple2<? extends K, ? extends V2>> s) {
return fill(naturalComparator(), n, s);
}
@SuppressWarnings("unchecked")
public <K, V2 extends V> TreeMultimap<K, V2> fill(Comparator<? super K> keyComparator, int n, Supplier<? extends Tuple2<? extends K, ? extends V2>> s) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
Objects.requireNonNull(s, "s is null");
return ofEntries(keyComparator, Collections.fill(n, (Supplier<? extends Tuple2<K, V2>>) s));
}
/**
* Creates a TreeMultimap of the given key-value pair.
*
* @param key A singleton map key.
* @param value A singleton map value.
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K key, V2 value) {
return of(naturalComparator(), key, value);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2) {
return of(naturalComparator(), k1, v1, k2, v2);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8, K k9, V2 v9) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param k10 a key for the map
* @param v10 the value for k10
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entries
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8, K k9, V2 v9, K k10, V2 v10) {
return of(naturalComparator(), k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10);
}
/**
 * Creates a TreeMultimap of the given key-value pair.
*
* @param <K> The key type
* @param <V2> The value type
* @param entry The key-value pair used to form a new TreeMultimap.
* @return A new Multimap containing the given entry
*/
public <K extends Comparable<? super K>, V2 extends V> TreeMultimap<K, V2> of(Tuple2<? extends K, ? extends V2> entry) {
return of(naturalComparator(), entry);
}
/**
* Creates a TreeMultimap of the given key-value pair.
*
* @param keyComparator The comparator used to sort the entries by their key.
* @param key A singleton map key.
* @param value A singleton map value.
* @param <K> The key type
* @param <V2> The value type
* @return A new Multimap containing the given entry
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K key, V2 value) {
final TreeMultimap<K, V2> e = empty(keyComparator);
return e.put(key, value);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2) {
return of(keyComparator, k1, v1).put(k2, v2);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3) {
return of(keyComparator, k1, v1, k2, v2).put(k3, v3);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4) {
return of(keyComparator, k1, v1, k2, v2, k3, v3).put(k4, v4);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4).put(k5, v5);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5).put(k6, v6);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6).put(k7, v7);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7).put(k8, v8);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8, K k9, V2 v9) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8).put(k9, v9);
}
/**
* Creates a TreeMultimap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param k10 a key for the map
* @param v10 the value for k10
* @param <K> The key type
* @param <V2> The value type
* @param keyComparator The comparator used to sort the entries by their key.
* @return A new Multimap containing the given entries
*/
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, K k1, V2 v1, K k2, V2 v2, K k3, V2 v3, K k4, V2 v4, K k5, V2 v5, K k6, V2 v6, K k7, V2 v7, K k8, V2 v8, K k9, V2 v9, K k10, V2 v10) {
return of(keyComparator, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9).put(k10, v10);
}
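/**
 * Creates a TreeMultimap of the given key-value pair.
 *
 * @param keyComparator The comparator used to sort the entries by their key.
 * @param entry The key-value pair used to form a new TreeMultimap.
 * @param <K> The key type
 * @param <V2> The value type
 * @return A new Multimap containing the given entry
 */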
public <K, V2 extends V> TreeMultimap<K, V2> of(Comparator<? super K> keyComparator, Tuple2<? extends K, ? extends V2> entry) {
final TreeMultimap<K, V2> e = empty(keyComparator);
return e.put(entry._1, entry._2);
}
public <K extends Comparable<? super K>, V2 extends V> Collector<Tuple2<K, V2>, ArrayList<Tuple2<K, V2>>, TreeMultimap<K, V2>> collector() {
return collector(naturalComparator());
}
public <K, V2 extends V> Collector<Tuple2<K, V2>, ArrayList<Tuple2<K, V2>>, TreeMultimap<K, V2>> collector(Comparator<? super K> keyComparator) {
Objects.requireNonNull(keyComparator, "keyComparator is null");
final Supplier<ArrayList<Tuple2<K, V2>>> supplier = ArrayList::new;
final BiConsumer<ArrayList<Tuple2<K, V2>>, Tuple2<K, V2>> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<Tuple2<K, V2>>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<Tuple2<K, V2>>, TreeMultimap<K, V2>> finisher = list -> ofEntries(keyComparator, list);
return Collector.of(supplier, accumulator, combiner, finisher);
}
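// Illustrative usage sketch, not part of the original file (Tuple.of is vavr's
// tuple factory; withSeq() is an assumed enclosing builder):
//   TreeMultimap<String, Integer> mm = java.util.stream.Stream
//       .of(Tuple.of("a", 1), Tuple.of("b", 2))
//       .collect(TreeMultimap.withSeq().collector());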
}
/**
 * Narrows a widened {@code TreeMultimap<? extends K, ? extends V>} to {@code TreeMultimap<K, V>}
 * by performing a type-safe cast. This is eligible because immutable/read-only
 * collections are covariant.
 *
 * @param map A {@code TreeMultimap}.
* @param <K> Key type
* @param <V> Value type
* @return the given {@code multimap} instance as narrowed type {@code Multimap<K, V>}.
*/
@SuppressWarnings("unchecked")
public static <K, V> TreeMultimap<K, V> narrow(TreeMultimap<? extends K, ? extends V> map) {
return (TreeMultimap<K, V>) map;
}
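// Illustrative sketch, not part of the original file: the cast is safe because the
// collection is read-only, so widening the element types cannot be observed:
//   TreeMultimap<Integer, Integer> ints = TreeMultimap.withSeq().of(1, 1);
//   TreeMultimap<Number, Number> nums = TreeMultimap.narrow(ints);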
private TreeMultimap(Map<K, Traversable<V>> back, ContainerType containerType, SerializableSupplier<Traversable<?>> emptyContainer) {
super(back, containerType, emptyContainer);
}
@Override
protected <K2, V2> Map<K2, V2> emptyMapSupplier() {
return TreeMap.empty(naturalComparator());
}
@SuppressWarnings("unchecked")
@Override
protected <K2, V2> TreeMultimap<K2, V2> emptyInstance() {
return new TreeMultimap<>(emptyMapSupplier(), getContainerType(), emptyContainer);
}
@Override
protected <K2, V2> TreeMultimap<K2, V2> createFromMap(Map<K2, Traversable<V2>> back) {
return new TreeMultimap<>(back, getContainerType(), emptyContainer);
}
}
| 1 | 11,922 | 'A' instead of 'An' here | vavr-io-vavr | java |
@@ -37,7 +37,7 @@ import (
const (
// DefaultTimeout is the default timeout used to make calls
- DefaultTimeout = 10 * time.Second
+ DefaultTimeout = time.Second * 10
// DefaultLongPollTimeout is the long poll default timeout used to make calls
DefaultLongPollTimeout = time.Minute * 3
) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package frontend
import (
"context"
"time"
"github.com/pborman/uuid"
"go.temporal.io/api/workflowservice/v1"
"google.golang.org/grpc"
"go.temporal.io/server/common"
)
const (
// DefaultTimeout is the default timeout used to make calls
DefaultTimeout = 10 * time.Second
// DefaultLongPollTimeout is the long poll default timeout used to make calls
DefaultLongPollTimeout = time.Minute * 3
)
var _ workflowservice.WorkflowServiceClient = (*clientImpl)(nil)
type clientImpl struct {
timeout time.Duration
longPollTimeout time.Duration
clients common.ClientCache
}
// NewClient creates a new frontend service gRPC client
func NewClient(
timeout time.Duration,
longPollTimeout time.Duration,
clients common.ClientCache,
) workflowservice.WorkflowServiceClient {
return &clientImpl{
timeout: timeout,
longPollTimeout: longPollTimeout,
clients: clients,
}
}
func (c *clientImpl) DeprecateNamespace(
ctx context.Context,
request *workflowservice.DeprecateNamespaceRequest,
opts ...grpc.CallOption,
) (*workflowservice.DeprecateNamespaceResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.DeprecateNamespace(ctx, request, opts...)
}
func (c *clientImpl) DescribeNamespace(
ctx context.Context,
request *workflowservice.DescribeNamespaceRequest,
opts ...grpc.CallOption,
) (*workflowservice.DescribeNamespaceResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.DescribeNamespace(ctx, request, opts...)
}
func (c *clientImpl) DescribeTaskQueue(
ctx context.Context,
request *workflowservice.DescribeTaskQueueRequest,
opts ...grpc.CallOption,
) (*workflowservice.DescribeTaskQueueResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.DescribeTaskQueue(ctx, request, opts...)
}
func (c *clientImpl) DescribeWorkflowExecution(
ctx context.Context,
request *workflowservice.DescribeWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.DescribeWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.DescribeWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) GetWorkflowExecutionHistory(
ctx context.Context,
request *workflowservice.GetWorkflowExecutionHistoryRequest,
opts ...grpc.CallOption,
) (*workflowservice.GetWorkflowExecutionHistoryResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.GetWorkflowExecutionHistory(ctx, request, opts...)
}
func (c *clientImpl) ListArchivedWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListArchivedWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListArchivedWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createLongPollContext(ctx)
defer cancel()
return client.ListArchivedWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) ListClosedWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListClosedWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListClosedWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ListClosedWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) ListNamespaces(
ctx context.Context,
request *workflowservice.ListNamespacesRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListNamespacesResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ListNamespaces(ctx, request, opts...)
}
func (c *clientImpl) ListOpenWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListOpenWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListOpenWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ListOpenWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) ListWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ListWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) ScanWorkflowExecutions(
ctx context.Context,
request *workflowservice.ScanWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ScanWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ScanWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) CountWorkflowExecutions(
ctx context.Context,
request *workflowservice.CountWorkflowExecutionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.CountWorkflowExecutionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.CountWorkflowExecutions(ctx, request, opts...)
}
func (c *clientImpl) GetSearchAttributes(
ctx context.Context,
request *workflowservice.GetSearchAttributesRequest,
opts ...grpc.CallOption,
) (*workflowservice.GetSearchAttributesResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.GetSearchAttributes(ctx, request, opts...)
}
func (c *clientImpl) PollActivityTaskQueue(
ctx context.Context,
request *workflowservice.PollActivityTaskQueueRequest,
opts ...grpc.CallOption,
) (*workflowservice.PollActivityTaskQueueResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createLongPollContext(ctx)
defer cancel()
return client.PollActivityTaskQueue(ctx, request, opts...)
}
func (c *clientImpl) PollWorkflowTaskQueue(
ctx context.Context,
request *workflowservice.PollWorkflowTaskQueueRequest,
opts ...grpc.CallOption,
) (*workflowservice.PollWorkflowTaskQueueResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createLongPollContext(ctx)
defer cancel()
return client.PollWorkflowTaskQueue(ctx, request, opts...)
}
func (c *clientImpl) QueryWorkflow(
ctx context.Context,
request *workflowservice.QueryWorkflowRequest,
opts ...grpc.CallOption,
) (*workflowservice.QueryWorkflowResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.QueryWorkflow(ctx, request, opts...)
}
func (c *clientImpl) RecordActivityTaskHeartbeat(
ctx context.Context,
request *workflowservice.RecordActivityTaskHeartbeatRequest,
opts ...grpc.CallOption,
) (*workflowservice.RecordActivityTaskHeartbeatResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RecordActivityTaskHeartbeat(ctx, request, opts...)
}
func (c *clientImpl) RecordActivityTaskHeartbeatById(
ctx context.Context,
request *workflowservice.RecordActivityTaskHeartbeatByIdRequest,
opts ...grpc.CallOption,
) (*workflowservice.RecordActivityTaskHeartbeatByIdResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RecordActivityTaskHeartbeatById(ctx, request, opts...)
}
func (c *clientImpl) RegisterNamespace(
ctx context.Context,
request *workflowservice.RegisterNamespaceRequest,
opts ...grpc.CallOption,
) (*workflowservice.RegisterNamespaceResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RegisterNamespace(ctx, request, opts...)
}
func (c *clientImpl) RequestCancelWorkflowExecution(
ctx context.Context,
request *workflowservice.RequestCancelWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.RequestCancelWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RequestCancelWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) ResetStickyTaskQueue(
ctx context.Context,
request *workflowservice.ResetStickyTaskQueueRequest,
opts ...grpc.CallOption,
) (*workflowservice.ResetStickyTaskQueueResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ResetStickyTaskQueue(ctx, request, opts...)
}
func (c *clientImpl) ResetWorkflowExecution(
ctx context.Context,
request *workflowservice.ResetWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.ResetWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ResetWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskCanceled(
ctx context.Context,
request *workflowservice.RespondActivityTaskCanceledRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskCanceledResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskCanceled(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskCanceledById(
ctx context.Context,
request *workflowservice.RespondActivityTaskCanceledByIdRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskCanceledByIdResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskCanceledById(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskCompleted(
ctx context.Context,
request *workflowservice.RespondActivityTaskCompletedRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskCompletedResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskCompleted(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskCompletedById(
ctx context.Context,
request *workflowservice.RespondActivityTaskCompletedByIdRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskCompletedByIdResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskCompletedById(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskFailed(
ctx context.Context,
request *workflowservice.RespondActivityTaskFailedRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskFailedResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskFailed(ctx, request, opts...)
}
func (c *clientImpl) RespondActivityTaskFailedById(
ctx context.Context,
request *workflowservice.RespondActivityTaskFailedByIdRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondActivityTaskFailedByIdResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondActivityTaskFailedById(ctx, request, opts...)
}
func (c *clientImpl) RespondWorkflowTaskCompleted(
ctx context.Context,
request *workflowservice.RespondWorkflowTaskCompletedRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondWorkflowTaskCompletedResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondWorkflowTaskCompleted(ctx, request, opts...)
}
func (c *clientImpl) RespondWorkflowTaskFailed(
ctx context.Context,
request *workflowservice.RespondWorkflowTaskFailedRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondWorkflowTaskFailedResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondWorkflowTaskFailed(ctx, request, opts...)
}
func (c *clientImpl) RespondQueryTaskCompleted(
ctx context.Context,
request *workflowservice.RespondQueryTaskCompletedRequest,
opts ...grpc.CallOption,
) (*workflowservice.RespondQueryTaskCompletedResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.RespondQueryTaskCompleted(ctx, request, opts...)
}
func (c *clientImpl) SignalWithStartWorkflowExecution(
ctx context.Context,
request *workflowservice.SignalWithStartWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.SignalWithStartWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.SignalWithStartWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) SignalWorkflowExecution(
ctx context.Context,
request *workflowservice.SignalWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.SignalWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.SignalWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) StartWorkflowExecution(
ctx context.Context,
request *workflowservice.StartWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.StartWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.StartWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) TerminateWorkflowExecution(
ctx context.Context,
request *workflowservice.TerminateWorkflowExecutionRequest,
opts ...grpc.CallOption,
) (*workflowservice.TerminateWorkflowExecutionResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.TerminateWorkflowExecution(ctx, request, opts...)
}
func (c *clientImpl) UpdateNamespace(
ctx context.Context,
request *workflowservice.UpdateNamespaceRequest,
opts ...grpc.CallOption,
) (*workflowservice.UpdateNamespaceResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.UpdateNamespace(ctx, request, opts...)
}
func (c *clientImpl) GetClusterInfo(
ctx context.Context,
request *workflowservice.GetClusterInfoRequest,
opts ...grpc.CallOption,
) (*workflowservice.GetClusterInfoResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.GetClusterInfo(ctx, request, opts...)
}
func (c *clientImpl) ListTaskQueuePartitions(
ctx context.Context,
request *workflowservice.ListTaskQueuePartitionsRequest,
opts ...grpc.CallOption,
) (*workflowservice.ListTaskQueuePartitionsResponse, error) {
client, err := c.getRandomClient()
if err != nil {
return nil, err
}
ctx, cancel := c.createContext(ctx)
defer cancel()
return client.ListTaskQueuePartitions(ctx, request, opts...)
}
func (c *clientImpl) createContext(parent context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(parent, c.timeout)
}
func (c *clientImpl) createLongPollContext(parent context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(parent, c.longPollTimeout)
}
func (c *clientImpl) getRandomClient() (workflowservice.WorkflowServiceClient, error) {
// generate a random shard key to do load balancing
key := uuid.New()
client, err := c.clients.GetClientForKey(key)
if err != nil {
return nil, err
}
return client.(workflowservice.WorkflowServiceClient), nil
}
| 1 | 11,778 | Can you flip it back (here and in other places)? It is more natural to represent "10 seconds" as `10 * time.Second`. | temporalio-temporal | go |
@@ -133,6 +133,11 @@ def test_classifier(output, centers, client, listen_port):
centers=centers
)
+ X_1, y_1, w_1, dX_1, dy_1, dw_1 = _create_data(
+ objective='classification',
+ output='array'
+ )
+
params = {
"n_estimators": 10,
"num_leaves": 10 | 1 | # coding: utf-8
"""Tests for lightgbm.dask module"""
import itertools
import os
import socket
import sys
import pytest
if not sys.platform.startswith('linux'):
pytest.skip('lightgbm.dask is currently supported in Linux environments', allow_module_level=True)
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import scipy.sparse
from dask.array.utils import assert_eq
from dask_ml.metrics import accuracy_score, r2_score
from distributed.utils_test import client, cluster_fixture, gen_cluster, loop
from sklearn.datasets import make_blobs, make_regression
from sklearn.utils import check_random_state
import lightgbm
import lightgbm.dask as dlgbm
from .utils import make_ranking
data_output = ['array', 'scipy_csr_matrix', 'dataframe']
data_centers = [[[-4, -4], [4, 4]], [[-4, -4], [4, 4], [-4, 4]]]
group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50]
pytestmark = [
pytest.mark.skipif(os.getenv('TASK', '') == 'mpi', reason='Fails to run with MPI interface'),
pytest.mark.skipif(os.getenv('TASK', '') == 'gpu', reason='Fails to run with GPU interface')
]
@pytest.fixture()
def listen_port():
listen_port.port += 10
return listen_port.port
listen_port.port = 13000
def _create_ranking_data(n_samples=100, output='array', chunk_size=50, **kwargs):
X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs)
rnd = np.random.RandomState(42)
w = rnd.rand(X.shape[0]) * 0.01
g_rle = np.array([len(list(grp)) for _, grp in itertools.groupby(g)])
if output == 'dataframe':
# add target, weight, and group to DataFrame so that partitions abide by group boundaries.
X_df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])])
X = X_df.copy()
X_df = X_df.assign(y=y, g=g, w=w)
# set_index ensures partitions are based on group id.
# See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function.
X_df.set_index('g', inplace=True)
dX = dd.from_pandas(X_df, chunksize=chunk_size)
# separate target, weight from features.
dy = dX['y']
dw = dX['w']
dX = dX.drop(columns=['y', 'w'])
dg = dX.index.to_series()
# encode group identifiers into run-length encoding, the format LightGBMRanker is expecting
# so that within each partition, sum(g) = n_samples.
dg = dg.map_partitions(lambda p: p.groupby('g', sort=False).apply(lambda z: z.shape[0]))
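        # (illustrative note) e.g. group ids [0, 0, 1, 1, 1] collapse to run lengths
        # [2, 3]; within each partition, sum(dg) then equals that partition's row count.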
elif output == 'array':
# ranking arrays: one chunk per group. Each chunk must include all columns.
p = X.shape[1]
dX, dy, dw, dg = [], [], [], []
for g_idx, rhs in enumerate(np.cumsum(g_rle)):
lhs = rhs - g_rle[g_idx]
dX.append(da.from_array(X[lhs:rhs, :], chunks=(rhs - lhs, p)))
dy.append(da.from_array(y[lhs:rhs]))
dw.append(da.from_array(w[lhs:rhs]))
dg.append(da.from_array(np.array([g_rle[g_idx]])))
dX = da.concatenate(dX, axis=0)
dy = da.concatenate(dy, axis=0)
dw = da.concatenate(dw, axis=0)
dg = da.concatenate(dg, axis=0)
else:
raise ValueError('Ranking data creation only supported for Dask arrays and dataframes')
return X, y, w, g_rle, dX, dy, dw, dg
def _create_data(objective, n_samples=100, centers=2, output='array', chunk_size=50):
if objective == 'classification':
X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
elif objective == 'regression':
X, y = make_regression(n_samples=n_samples, random_state=42)
else:
raise ValueError("Unknown objective '%s'" % objective)
rnd = np.random.RandomState(42)
weights = rnd.random(X.shape[0]) * 0.01
if output == 'array':
dX = da.from_array(X, (chunk_size, X.shape[1]))
dy = da.from_array(y, chunk_size)
dw = da.from_array(weights, chunk_size)
elif output == 'dataframe':
X_df = pd.DataFrame(X, columns=['feature_%d' % i for i in range(X.shape[1])])
y_df = pd.Series(y, name='target')
dX = dd.from_pandas(X_df, chunksize=chunk_size)
dy = dd.from_pandas(y_df, chunksize=chunk_size)
dw = dd.from_array(weights, chunksize=chunk_size)
elif output == 'scipy_csr_matrix':
dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(scipy.sparse.csr_matrix)
dy = da.from_array(y, chunks=chunk_size)
dw = da.from_array(weights, chunk_size)
else:
raise ValueError("Unknown output type '%s'" % output)
return X, y, weights, dX, dy, dw
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier(output, centers, client, listen_port):
X, y, w, dX, dy, dw = _create_data(
objective='classification',
output=output,
centers=centers
)
params = {
"n_estimators": 10,
"num_leaves": 10
}
dask_classifier = dlgbm.DaskLGBMClassifier(
time_out=5,
local_listen_port=listen_port,
**params
)
dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw, client=client)
p1 = dask_classifier.predict(dX)
p1_proba = dask_classifier.predict_proba(dX).compute()
s1 = accuracy_score(dy, p1)
p1 = p1.compute()
local_classifier = lightgbm.LGBMClassifier(**params)
local_classifier.fit(X, y, sample_weight=w)
p2 = local_classifier.predict(X)
p2_proba = local_classifier.predict_proba(X)
s2 = local_classifier.score(X, y)
assert_eq(s1, s2)
assert_eq(p1, p2)
assert_eq(y, p1)
assert_eq(y, p2)
assert_eq(p1_proba, p2_proba, atol=0.3)
client.close()
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier_pred_contrib(output, centers, client, listen_port):
X, y, w, dX, dy, dw = _create_data(
objective='classification',
output=output,
centers=centers
)
params = {
"n_estimators": 10,
"num_leaves": 10
}
dask_classifier = dlgbm.DaskLGBMClassifier(
time_out=5,
local_listen_port=listen_port,
tree_learner='data',
**params
)
dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw, client=client)
preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True).compute()
local_classifier = lightgbm.LGBMClassifier(**params)
local_classifier.fit(X, y, sample_weight=w)
local_preds_with_contrib = local_classifier.predict(X, pred_contrib=True)
if output == 'scipy_csr_matrix':
preds_with_contrib = np.array(preds_with_contrib.todense())
# shape depends on whether it is binary or multiclass classification
num_features = dask_classifier.n_features_
num_classes = dask_classifier.n_classes_
if num_classes == 2:
expected_num_cols = num_features + 1
else:
expected_num_cols = (num_features + 1) * num_classes
# * shape depends on whether it is binary or multiclass classification
# * matrix for binary classification is of the form [feature_contrib, base_value],
# for multi-class it's [feat_contrib_class1, base_value_class1, feat_contrib_class2, base_value_class2, etc.]
    # * contrib outputs for distributed training differ from those of local training, so we just test
    #   that the output has the right shape and that base values sit in the right positions
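    # (illustrative, assuming 2 features and 3 classes) the columns would be
    #   [f1_c1, f2_c1, base_c1, f1_c2, f2_c2, base_c2, f1_c3, f2_c3, base_c3]
    # i.e. (2 + 1) * 3 = 9 columns, with each base value at num_features * (i + 1) + i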
assert preds_with_contrib.shape[1] == expected_num_cols
assert preds_with_contrib.shape == local_preds_with_contrib.shape
if num_classes == 2:
        assert len(np.unique(preds_with_contrib[:, num_features])) == 1
else:
for i in range(num_classes):
base_value_col = num_features * (i + 1) + i
            assert len(np.unique(preds_with_contrib[:, base_value_col])) == 1
def test_training_does_not_fail_on_port_conflicts(client):
_, _, _, dX, dy, dw = _create_data('classification', output='array')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 12400))
dask_classifier = dlgbm.DaskLGBMClassifier(
time_out=5,
local_listen_port=12400,
n_estimators=5,
num_leaves=5
)
for _ in range(5):
dask_classifier.fit(
X=dX,
y=dy,
sample_weight=dw,
client=client
)
assert dask_classifier.booster_
client.close()
def test_classifier_local_predict(client, listen_port):
X, y, w, dX, dy, dw = _create_data(
objective='classification',
output='array'
)
params = {
"n_estimators": 10,
"num_leaves": 10
}
dask_classifier = dlgbm.DaskLGBMClassifier(
time_out=5,
local_port=listen_port,
**params
)
dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw, client=client)
p1 = dask_classifier.to_local().predict(dX)
local_classifier = lightgbm.LGBMClassifier(**params)
local_classifier.fit(X, y, sample_weight=w)
p2 = local_classifier.predict(X)
assert_eq(p1, p2)
assert_eq(y, p1)
assert_eq(y, p2)
client.close()
@pytest.mark.parametrize('output', data_output)
def test_regressor(output, client, listen_port):
X, y, w, dX, dy, dw = _create_data(
objective='regression',
output=output
)
params = {
"random_state": 42,
"num_leaves": 10
}
dask_regressor = dlgbm.DaskLGBMRegressor(
time_out=5,
local_listen_port=listen_port,
tree='data',
**params
)
dask_regressor = dask_regressor.fit(dX, dy, client=client, sample_weight=dw)
p1 = dask_regressor.predict(dX)
if output != 'dataframe':
s1 = r2_score(dy, p1)
p1 = p1.compute()
local_regressor = lightgbm.LGBMRegressor(**params)
local_regressor.fit(X, y, sample_weight=w)
s2 = local_regressor.score(X, y)
p2 = local_regressor.predict(X)
# Scores should be the same
if output != 'dataframe':
assert_eq(s1, s2, atol=.01)
# Predictions should be roughly the same
assert_eq(y, p1, rtol=1., atol=100.)
assert_eq(y, p2, rtol=1., atol=50.)
client.close()
@pytest.mark.parametrize('output', data_output)
def test_regressor_pred_contrib(output, client, listen_port):
X, y, w, dX, dy, dw = _create_data(
objective='regression',
output=output
)
params = {
"n_estimators": 10,
"num_leaves": 10
}
dask_regressor = dlgbm.DaskLGBMRegressor(
time_out=5,
local_listen_port=listen_port,
tree_learner='data',
**params
)
dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw, client=client)
preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute()
local_regressor = lightgbm.LGBMRegressor(**params)
local_regressor.fit(X, y, sample_weight=w)
local_preds_with_contrib = local_regressor.predict(X, pred_contrib=True)
if output == "scipy_csr_matrix":
preds_with_contrib = np.array(preds_with_contrib.todense())
    # contrib outputs for distributed training differ from those of local training, so we just test
    # that the output has the right shape and that base values sit in the right positions
num_features = dX.shape[1]
assert preds_with_contrib.shape[1] == num_features + 1
assert preds_with_contrib.shape == local_preds_with_contrib.shape
@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('alpha', [.1, .5, .9])
def test_regressor_quantile(output, client, listen_port, alpha):
X, y, w, dX, dy, dw = _create_data(
objective='regression',
output=output
)
params = {
"objective": "quantile",
"alpha": alpha,
"random_state": 42,
"n_estimators": 10,
"num_leaves": 10
}
dask_regressor = dlgbm.DaskLGBMRegressor(
local_listen_port=listen_port,
tree_learner_type='data_parallel',
**params
)
dask_regressor = dask_regressor.fit(dX, dy, client=client, sample_weight=dw)
p1 = dask_regressor.predict(dX).compute()
q1 = np.count_nonzero(y < p1) / y.shape[0]
local_regressor = lightgbm.LGBMRegressor(**params)
local_regressor.fit(X, y, sample_weight=w)
p2 = local_regressor.predict(X)
q2 = np.count_nonzero(y < p2) / y.shape[0]
# Quantiles should be right
np.testing.assert_allclose(q1, alpha, atol=0.2)
np.testing.assert_allclose(q2, alpha, atol=0.2)
client.close()
def test_regressor_local_predict(client, listen_port):
X, y, _, dX, dy, dw = _create_data('regression', output='array')
dask_regressor = dlgbm.DaskLGBMRegressor(
local_listen_port=listen_port,
random_state=42,
n_estimators=10,
num_leaves=10,
tree_type='data'
)
dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw, client=client)
p1 = dask_regressor.predict(dX)
p2 = dask_regressor.to_local().predict(X)
s1 = r2_score(dy, p1)
p1 = p1.compute()
s2 = dask_regressor.to_local().score(X, y)
# Predictions and scores should be the same
assert_eq(p1, p2)
assert_eq(s1, s2)
client.close()
@pytest.mark.parametrize('output', ['array', 'dataframe'])
@pytest.mark.parametrize('group', [None, group_sizes])
def test_ranker(output, client, listen_port, group):
X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
output=output,
group=group
)
    # use many trees + leaves to overfit, to help ensure that the dask data-parallel strategy
    # matches that of the serial learner. See https://github.com/microsoft/LightGBM/issues/3292#issuecomment-671288210.
params = {
"random_state": 42,
"n_estimators": 50,
"num_leaves": 20,
"min_child_samples": 1
}
dask_ranker = dlgbm.DaskLGBMRanker(
time_out=5,
local_listen_port=listen_port,
tree_learner_type='data_parallel',
**params
)
dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg, client=client)
rnkvec_dask = dask_ranker.predict(dX)
rnkvec_dask = rnkvec_dask.compute()
local_ranker = lightgbm.LGBMRanker(**params)
local_ranker.fit(X, y, sample_weight=w, group=g)
rnkvec_local = local_ranker.predict(X)
    # the distributed ranker should rank decently well, and its scores should
    # correlate strongly (Spearman) with those of the serial ranker.
dcor = spearmanr(rnkvec_dask, y).correlation
assert dcor > 0.6
assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.75
client.close()
@pytest.mark.parametrize('output', ['array', 'dataframe'])
@pytest.mark.parametrize('group', [None, group_sizes])
def test_ranker_local_predict(output, client, listen_port, group):
X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
output=output,
group=group
)
dask_ranker = dlgbm.DaskLGBMRanker(
time_out=5,
local_listen_port=listen_port,
tree_learner='data',
n_estimators=10,
num_leaves=10,
random_state=42,
min_child_samples=1
)
dask_ranker = dask_ranker.fit(dX, dy, group=dg, client=client)
rnkvec_dask = dask_ranker.predict(dX)
rnkvec_dask = rnkvec_dask.compute()
rnkvec_local = dask_ranker.to_local().predict(X)
# distributed and to-local scores should be the same.
assert_eq(rnkvec_dask, rnkvec_local)
client.close()
def test_find_open_port_works():
worker_ip = '127.0.0.1'
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((worker_ip, 12400))
new_port = dlgbm._find_open_port(
worker_ip=worker_ip,
local_listen_port=12400,
ports_to_skip=set()
)
assert new_port == 12401
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s_1:
s_1.bind((worker_ip, 12400))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s_2:
s_2.bind((worker_ip, 12401))
new_port = dlgbm._find_open_port(
worker_ip=worker_ip,
local_listen_port=12400,
ports_to_skip=set()
)
assert new_port == 12402
@gen_cluster(client=True, timeout=None)
def test_errors(c, s, a, b):
def f(part):
raise Exception('foo')
df = dd.demo.make_timeseries()
df = df.map_partitions(f, meta=df._meta)
with pytest.raises(Exception) as info:
yield dlgbm._train(
client=c,
data=df,
label=df.x,
params={},
model_factory=lightgbm.LGBMClassifier
)
assert 'foo' in str(info.value)
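# A sketch of the reviewer's suggestion for test_classifier_local_predict (hypothetical
# placement inside the already-parametrized test_classifier, so that to_local() is
# exercised for arrays, DataFrames, and sparse matrices alike):
#   p_local = dask_classifier.to_local().predict(X)
#   assert_eq(p1, p_local)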
| 1 | 28,063 | Why was this necessary? You should just use the `dask_classifier` defined below this. With this change, you'd only be doing the local predict on arrays each time, but we want to test on all of DataFrame, Array, and sparse matrix. | microsoft-LightGBM | cpp |
@@ -0,0 +1,6 @@
+<div class="text-box-wrapper">
+ <div class="text-box">
+ <h3><%= t('.added_to_github_repo') %></h3>
+ <p>Give it a few seconds for the background job to work, and then <%= link_to 'check out the repo', @purchaseable.github_url %>.</p>
+ </div>
+</div> | 1 | 1 | 6,934 | To me, this is sort of weird to have a completely separate page for this. Can we extract the different stuff (github, videos, downloads) presentation from the top of `app/views/purchases/show.html.erb` and reuse it on this page. I worry that we'll change things with fulfillment or copy and have to change it in multiple places. | thoughtbot-upcase | rb |
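<%# A sketch of the extraction the reviewer suggests (file and partial names are hypothetical):
    keep the fulfillment box in one shared partial and render it from both pages. %>
<%# app/views/purchases/_github_fulfillment.html.erb %>
<div class="text-box-wrapper">
  <div class="text-box">
    <h3><%= t('.added_to_github_repo') %></h3>
  </div>
</div>
<%# each page then renders: %>
<%= render 'purchases/github_fulfillment' %>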
|
@@ -1,6 +1,8 @@
package de.danoeh.antennapod.adapter;
import android.content.Context;
+import android.view.View;
+
import androidx.appcompat.app.AlertDialog;
import de.danoeh.antennapod.R; | 1 | package de.danoeh.antennapod.adapter;
import android.content.Context;
import androidx.appcompat.app.AlertDialog;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.storage.StatisticsItem;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.view.PieChartView;
import java.util.List;
/**
* Adapter for the playback statistics list.
*/
public class PlaybackStatisticsListAdapter extends StatisticsListAdapter {
boolean countAll = true;
public PlaybackStatisticsListAdapter(Context context) {
super(context);
}
public void setCountAll(boolean countAll) {
this.countAll = countAll;
}
@Override
int getHeaderCaptionResourceId() {
return R.string.total_time_listened_to_podcasts;
}
@Override
String getHeaderValue() {
return Converter.shortLocalizedDuration(context, (long) pieChartData.getSum());
}
@Override
PieChartView.PieChartData generateChartData(List<StatisticsItem> statisticsData) {
float[] dataValues = new float[statisticsData.size()];
for (int i = 0; i < statisticsData.size(); i++) {
StatisticsItem item = statisticsData.get(i);
dataValues[i] = countAll ? item.timePlayedCountAll : item.timePlayed;
}
return new PieChartView.PieChartData(dataValues);
}
@Override
void onBindFeedViewHolder(StatisticsHolder holder, StatisticsItem statsItem) {
long time = countAll ? statsItem.timePlayedCountAll : statsItem.timePlayed;
holder.value.setText(Converter.shortLocalizedDuration(context, time));
holder.itemView.setOnClickListener(v -> {
AlertDialog.Builder dialog = new AlertDialog.Builder(context);
dialog.setTitle(statsItem.feed.getTitle());
dialog.setMessage(context.getString(R.string.statistics_details_dialog,
countAll ? statsItem.episodesStartedIncludingMarked : statsItem.episodesStarted,
statsItem.episodes, Converter.shortLocalizedDuration(context,
countAll ? statsItem.timePlayedCountAll : statsItem.timePlayed),
Converter.shortLocalizedDuration(context, statsItem.time)));
dialog.setPositiveButton(android.R.string.ok, null);
dialog.show();
});
}
}
| 1 | 17,483 | Please revert the lines above. Not touching unrelated code keeps the git history clean. | AntennaPod-AntennaPod | java |
@@ -0,0 +1,3 @@
+# Copyright (c) Open-MMLab. All rights reserved.
+
+__version__ = '2.3.0rc0' | 1 | 1 | 20,923 | `short_version` and `version_info` | open-mmlab-mmdetection | py |
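# A sketch of what the reviewer asks for, mirroring the usual open-mmlab
# version-file convention (the parsing helper below is an assumption, not the
# project's confirmed code):
#
#   short_version = __version__
#
#   def parse_version_info(version_str):
#       version_info = []
#       for x in version_str.split('.'):
#           if x.isdigit():
#               version_info.append(int(x))
#           elif x.find('rc') != -1:
#               major, rc = x.split('rc')
#               version_info.append(int(major))
#               version_info.append(f'rc{rc}')
#       return tuple(version_info)
#
#   version_info = parse_version_info(__version__)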
|
@@ -52,7 +52,7 @@ type TaskStateChange struct {
Reason string
// Task is a pointer to the task involved in the state change that gives the event handler a hook into storing
- // what status was sent. This is used to ensure the same event is handled only once.
+ // what stats was sent. This is used to ensure the same event is handled only once.
Task *Task
}
| 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"fmt"
"strconv"
)
// ContainerStateChange represents a state change that needs to be sent to the
// SubmitContainerStateChange API
type ContainerStateChange struct {
// TaskArn is the unique identifier for the task
TaskArn string
// ContainerName is the name of the container
ContainerName string
// Status is the status to send
Status ContainerStatus
// Reason may contain details of why the container stopped
Reason string
// ExitCode is the exit code of the container, if available
ExitCode *int
// PortBindings are the details of the host ports picked for the specified
// container ports
PortBindings []PortBinding
// Container is a pointer to the container involved in the state change that gives the event handler a hook into
// storing what status was sent. This is used to ensure the same event is handled only once.
Container *Container
}
// TaskStateChange represents a state change that needs to be sent to the
// SubmitTaskStateChange API
type TaskStateChange struct {
// TaskArn is the unique identifier for the task
TaskArn string
// Status is the status to send
Status TaskStatus
// Reason may contain details of why the task stopped
Reason string
// Task is a pointer to the task involved in the state change that gives the event handler a hook into storing
// what status was sent. This is used to ensure the same event is handled only once.
Task *Task
}
// String returns a human readable string representation of this object
func (c *ContainerStateChange) String() string {
res := fmt.Sprintf("%s %s -> %s", c.TaskArn, c.ContainerName, c.Status.String())
if c.ExitCode != nil {
res += ", Exit " + strconv.Itoa(*c.ExitCode) + ", "
}
if c.Reason != "" {
res += ", Reason " + c.Reason
}
if len(c.PortBindings) != 0 {
res += fmt.Sprintf(", Ports %v", c.PortBindings)
}
if c.Container != nil {
res += ", Known Sent: " + c.Container.GetSentStatus().String()
}
return res
}
// String returns a human readable string representation of this object
func (t *TaskStateChange) String() string {
res := fmt.Sprintf("%s -> %s", t.TaskArn, t.Status.String())
if t.Task != nil {
res += ", Known Sent: " + t.Task.GetSentStatus().String()
}
return res
}
| 1 | 15,044 | This is supposed to be `status`. Why did you change this? | aws-amazon-ecs-agent | go |
@@ -1,3 +1,6 @@
+// Copyright 2012-2017 Apcera Inc. All rights reserved.
+// Copyright 2018 Synadia Communications Inc. All rights reserved.
+
package server
import ( | 1 | package server
import (
"fmt"
"runtime"
"strings"
"sync"
"testing"
dbg "runtime/debug"
)
func stackFatalf(t *testing.T, f string, args ...interface{}) {
lines := make([]string, 0, 32)
msg := fmt.Sprintf(f, args...)
lines = append(lines, msg)
// Generate the Stack of callers: Skip us and verify* frames.
for i := 2; true; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
msg := fmt.Sprintf("%d - %s:%d", i, file, line)
lines = append(lines, msg)
}
t.Fatalf("%s", strings.Join(lines, "\n"))
}
func verifyCount(s *Sublist, count uint32, t *testing.T) {
if s.Count() != count {
stackFatalf(t, "Count is %d, should be %d", s.Count(), count)
}
}
func verifyLen(r []*subscription, l int, t *testing.T) {
if len(r) != l {
stackFatalf(t, "Results len is %d, should be %d", len(r), l)
}
}
func verifyQLen(r [][]*subscription, l int, t *testing.T) {
if len(r) != l {
stackFatalf(t, "Queue Results len is %d, should be %d", len(r), l)
}
}
func verifyNumLevels(s *Sublist, expected int, t *testing.T) {
dl := s.numLevels()
if dl != expected {
stackFatalf(t, "NumLevels is %d, should be %d", dl, expected)
}
}
func verifyMember(r []*subscription, val *subscription, t *testing.T) {
for _, v := range r {
if v == nil {
continue
}
if v == val {
return
}
}
stackFatalf(t, "Value '%+v' not found in results", val)
}
// Helpera to generate test subscriptions.
func newSub(subject string) *subscription {
return &subscription{subject: []byte(subject)}
}
func newQSub(subject, queue string) *subscription {
return &subscription{subject: []byte(subject), queue: []byte(queue)}
}
func TestSublistInit(t *testing.T) {
s := NewSublist()
verifyCount(s, 0, t)
}
func TestSublistInsertCount(t *testing.T) {
s := NewSublist()
s.Insert(newSub("foo"))
s.Insert(newSub("bar"))
s.Insert(newSub("foo.bar"))
verifyCount(s, 3, t)
}
func TestSublistSimple(t *testing.T) {
s := NewSublist()
subject := "foo"
sub := newSub(subject)
s.Insert(sub)
r := s.Match(subject)
verifyLen(r.psubs, 1, t)
verifyMember(r.psubs, sub, t)
}
func TestSublistSimpleMultiTokens(t *testing.T) {
s := NewSublist()
subject := "foo.bar.baz"
sub := newSub(subject)
s.Insert(sub)
r := s.Match(subject)
verifyLen(r.psubs, 1, t)
verifyMember(r.psubs, sub, t)
}
func TestSublistPartialWildcard(t *testing.T) {
s := NewSublist()
lsub := newSub("a.b.c")
psub := newSub("a.*.c")
s.Insert(lsub)
s.Insert(psub)
r := s.Match("a.b.c")
verifyLen(r.psubs, 2, t)
verifyMember(r.psubs, lsub, t)
verifyMember(r.psubs, psub, t)
}
func TestSublistPartialWildcardAtEnd(t *testing.T) {
s := NewSublist()
lsub := newSub("a.b.c")
psub := newSub("a.b.*")
s.Insert(lsub)
s.Insert(psub)
r := s.Match("a.b.c")
verifyLen(r.psubs, 2, t)
verifyMember(r.psubs, lsub, t)
verifyMember(r.psubs, psub, t)
}
func TestSublistFullWildcard(t *testing.T) {
s := NewSublist()
lsub := newSub("a.b.c")
fsub := newSub("a.>")
s.Insert(lsub)
s.Insert(fsub)
r := s.Match("a.b.c")
verifyLen(r.psubs, 2, t)
verifyMember(r.psubs, lsub, t)
verifyMember(r.psubs, fsub, t)
}
func TestSublistRemove(t *testing.T) {
s := NewSublist()
subject := "a.b.c.d"
sub := newSub(subject)
s.Insert(sub)
verifyCount(s, 1, t)
r := s.Match(subject)
verifyLen(r.psubs, 1, t)
s.Remove(newSub("a.b.c"))
verifyCount(s, 1, t)
s.Remove(sub)
verifyCount(s, 0, t)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
}
func TestSublistRemoveWildcard(t *testing.T) {
s := NewSublist()
subject := "a.b.c.d"
sub := newSub(subject)
psub := newSub("a.b.*.d")
fsub := newSub("a.b.>")
s.Insert(sub)
s.Insert(psub)
s.Insert(fsub)
verifyCount(s, 3, t)
r := s.Match(subject)
verifyLen(r.psubs, 3, t)
s.Remove(sub)
verifyCount(s, 2, t)
s.Remove(fsub)
verifyCount(s, 1, t)
s.Remove(psub)
verifyCount(s, 0, t)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
}
func TestSublistRemoveCleanup(t *testing.T) {
s := NewSublist()
literal := "a.b.c.d.e.f"
depth := len(strings.Split(literal, tsep))
sub := newSub(literal)
verifyNumLevels(s, 0, t)
s.Insert(sub)
verifyNumLevels(s, depth, t)
s.Remove(sub)
verifyNumLevels(s, 0, t)
}
func TestSublistRemoveCleanupWildcards(t *testing.T) {
s := NewSublist()
subject := "a.b.*.d.e.>"
depth := len(strings.Split(subject, tsep))
sub := newSub(subject)
verifyNumLevels(s, 0, t)
s.Insert(sub)
verifyNumLevels(s, depth, t)
s.Remove(sub)
verifyNumLevels(s, 0, t)
}
func TestSublistInvalidSubjectsInsert(t *testing.T) {
s := NewSublist()
	// Inserts (i.e. subscriptions) can have wildcards, but not empty tokens,
	// and cannot have a full wildcard (FWC) that is not the terminal token.
// beginning empty token
if err := s.Insert(newSub(".foo")); err != ErrInvalidSubject {
t.Fatal("Expected invalid subject error")
}
// trailing empty token
if err := s.Insert(newSub("foo.")); err != ErrInvalidSubject {
t.Fatal("Expected invalid subject error")
}
// empty middle token
if err := s.Insert(newSub("foo..bar")); err != ErrInvalidSubject {
t.Fatal("Expected invalid subject error")
}
// empty middle token #2
if err := s.Insert(newSub("foo.bar..baz")); err != ErrInvalidSubject {
t.Fatal("Expected invalid subject error")
}
// fwc not terminal
if err := s.Insert(newSub("foo.>.bar")); err != ErrInvalidSubject {
t.Fatal("Expected invalid subject error")
}
}
func TestSublistCache(t *testing.T) {
s := NewSublist()
	// Test add and remove logistics
subject := "a.b.c.d"
sub := newSub(subject)
psub := newSub("a.b.*.d")
fsub := newSub("a.b.>")
s.Insert(sub)
r := s.Match(subject)
verifyLen(r.psubs, 1, t)
s.Insert(psub)
s.Insert(fsub)
verifyCount(s, 3, t)
r = s.Match(subject)
verifyLen(r.psubs, 3, t)
s.Remove(sub)
verifyCount(s, 2, t)
s.Remove(fsub)
verifyCount(s, 1, t)
s.Remove(psub)
verifyCount(s, 0, t)
// Check that cache is now empty
if cc := s.CacheCount(); cc != 0 {
t.Fatalf("Cache should be zero, got %d\n", cc)
}
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
for i := 0; i < 2*slCacheMax; i++ {
		s.Match(fmt.Sprintf("foo-%d", i))
}
if cc := s.CacheCount(); cc > slCacheMax {
t.Fatalf("Cache should be constrained by cacheMax, got %d for current count\n", cc)
}
}
func TestSublistBasicQueueResults(t *testing.T) {
s := NewSublist()
// Test some basics
subject := "foo"
sub := newSub(subject)
sub1 := newQSub(subject, "bar")
sub2 := newQSub(subject, "baz")
s.Insert(sub1)
r := s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 1, t)
verifyLen(r.qsubs[0], 1, t)
verifyMember(r.qsubs[0], sub1, t)
s.Insert(sub2)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 2, t)
verifyLen(r.qsubs[0], 1, t)
verifyLen(r.qsubs[1], 1, t)
verifyMember(r.qsubs[0], sub1, t)
verifyMember(r.qsubs[1], sub2, t)
s.Insert(sub)
r = s.Match(subject)
verifyLen(r.psubs, 1, t)
verifyQLen(r.qsubs, 2, t)
verifyLen(r.qsubs[0], 1, t)
verifyLen(r.qsubs[1], 1, t)
verifyMember(r.qsubs[0], sub1, t)
verifyMember(r.qsubs[1], sub2, t)
verifyMember(r.psubs, sub, t)
s.Insert(sub1)
s.Insert(sub2)
r = s.Match(subject)
verifyLen(r.psubs, 1, t)
verifyQLen(r.qsubs, 2, t)
verifyLen(r.qsubs[0], 2, t)
verifyLen(r.qsubs[1], 2, t)
verifyMember(r.qsubs[0], sub1, t)
verifyMember(r.qsubs[1], sub2, t)
verifyMember(r.psubs, sub, t)
// Now removal
s.Remove(sub)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 2, t)
verifyLen(r.qsubs[0], 2, t)
verifyLen(r.qsubs[1], 2, t)
verifyMember(r.qsubs[0], sub1, t)
verifyMember(r.qsubs[1], sub2, t)
s.Remove(sub1)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 2, t)
verifyLen(r.qsubs[0], 1, t)
verifyLen(r.qsubs[1], 2, t)
verifyMember(r.qsubs[0], sub1, t)
verifyMember(r.qsubs[1], sub2, t)
s.Remove(sub1) // Last one
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 1, t)
verifyLen(r.qsubs[0], 2, t) // this is sub2/baz now
verifyMember(r.qsubs[0], sub2, t)
s.Remove(sub2)
s.Remove(sub2)
r = s.Match(subject)
verifyLen(r.psubs, 0, t)
verifyQLen(r.qsubs, 0, t)
}
func checkBool(b, expected bool, t *testing.T) {
if b != expected {
dbg.PrintStack()
t.Fatalf("Expected %v, but got %v\n", expected, b)
}
}
func TestSublistValidLiteralSubjects(t *testing.T) {
checkBool(IsValidLiteralSubject("foo"), true, t)
checkBool(IsValidLiteralSubject(".foo"), false, t)
checkBool(IsValidLiteralSubject("foo."), false, t)
checkBool(IsValidLiteralSubject("foo..bar"), false, t)
checkBool(IsValidLiteralSubject("foo.bar.*"), false, t)
checkBool(IsValidLiteralSubject("foo.bar.>"), false, t)
checkBool(IsValidLiteralSubject("*"), false, t)
checkBool(IsValidLiteralSubject(">"), false, t)
	// The following have wildcard characters but are not
// considered as such because they are not individual tokens.
checkBool(IsValidLiteralSubject("foo*"), true, t)
checkBool(IsValidLiteralSubject("foo**"), true, t)
checkBool(IsValidLiteralSubject("foo.**"), true, t)
checkBool(IsValidLiteralSubject("foo*bar"), true, t)
checkBool(IsValidLiteralSubject("foo.*bar"), true, t)
checkBool(IsValidLiteralSubject("foo*.bar"), true, t)
checkBool(IsValidLiteralSubject("*bar"), true, t)
checkBool(IsValidLiteralSubject("foo>"), true, t)
checkBool(IsValidLiteralSubject("foo>>"), true, t)
checkBool(IsValidLiteralSubject("foo.>>"), true, t)
checkBool(IsValidLiteralSubject("foo>bar"), true, t)
checkBool(IsValidLiteralSubject("foo.>bar"), true, t)
checkBool(IsValidLiteralSubject("foo>.bar"), true, t)
checkBool(IsValidLiteralSubject(">bar"), true, t)
}
func TestSublistValidSubjects(t *testing.T) {
checkBool(IsValidSubject("."), false, t)
checkBool(IsValidSubject(".foo"), false, t)
checkBool(IsValidSubject("foo."), false, t)
checkBool(IsValidSubject("foo..bar"), false, t)
checkBool(IsValidSubject(">.bar"), false, t)
checkBool(IsValidSubject("foo.>.bar"), false, t)
checkBool(IsValidSubject("foo"), true, t)
checkBool(IsValidSubject("foo.bar.*"), true, t)
checkBool(IsValidSubject("foo.bar.>"), true, t)
checkBool(IsValidSubject("*"), true, t)
checkBool(IsValidSubject(">"), true, t)
checkBool(IsValidSubject("foo*"), true, t)
checkBool(IsValidSubject("foo**"), true, t)
checkBool(IsValidSubject("foo.**"), true, t)
checkBool(IsValidSubject("foo*bar"), true, t)
checkBool(IsValidSubject("foo.*bar"), true, t)
checkBool(IsValidSubject("foo*.bar"), true, t)
checkBool(IsValidSubject("*bar"), true, t)
checkBool(IsValidSubject("foo>"), true, t)
checkBool(IsValidSubject("foo.>>"), true, t)
checkBool(IsValidSubject("foo>bar"), true, t)
checkBool(IsValidSubject("foo.>bar"), true, t)
checkBool(IsValidSubject("foo>.bar"), true, t)
checkBool(IsValidSubject(">bar"), true, t)
}
func TestSublistMatchLiterals(t *testing.T) {
checkBool(matchLiteral("foo", "foo"), true, t)
checkBool(matchLiteral("foo", "bar"), false, t)
checkBool(matchLiteral("foo", "*"), true, t)
checkBool(matchLiteral("foo", ">"), true, t)
checkBool(matchLiteral("foo.bar", ">"), true, t)
checkBool(matchLiteral("foo.bar", "foo.>"), true, t)
checkBool(matchLiteral("foo.bar", "bar.>"), false, t)
checkBool(matchLiteral("stats.test.22", "stats.>"), true, t)
checkBool(matchLiteral("stats.test.22", "stats.*.*"), true, t)
checkBool(matchLiteral("foo.bar", "foo"), false, t)
checkBool(matchLiteral("stats.test.foos", "stats.test.foos"), true, t)
checkBool(matchLiteral("stats.test.foos", "stats.test.foo"), false, t)
checkBool(matchLiteral("stats.test", "stats.test.*"), false, t)
checkBool(matchLiteral("stats.test.foos", "stats.*"), false, t)
checkBool(matchLiteral("stats.test.foos", "stats.*.*.foos"), false, t)
	// These are cases where wildcard characters should not be considered
// wildcards since they do not follow the rules of wildcards.
checkBool(matchLiteral("*bar", "*bar"), true, t)
checkBool(matchLiteral("foo*", "foo*"), true, t)
checkBool(matchLiteral("foo*bar", "foo*bar"), true, t)
checkBool(matchLiteral("foo.***.bar", "foo.***.bar"), true, t)
checkBool(matchLiteral(">bar", ">bar"), true, t)
checkBool(matchLiteral("foo>", "foo>"), true, t)
checkBool(matchLiteral("foo>bar", "foo>bar"), true, t)
checkBool(matchLiteral("foo.>>>.bar", "foo.>>>.bar"), true, t)
}
func TestSublistBadSubjectOnRemove(t *testing.T) {
bad := "a.b..d"
sub := newSub(bad)
s := NewSublist()
if err := s.Insert(sub); err != ErrInvalidSubject {
t.Fatalf("Expected ErrInvalidSubject, got %v\n", err)
}
if err := s.Remove(sub); err != ErrInvalidSubject {
t.Fatalf("Expected ErrInvalidSubject, got %v\n", err)
}
badfwc := "a.>.b"
if err := s.Remove(newSub(badfwc)); err != ErrInvalidSubject {
t.Fatalf("Expected ErrInvalidSubject, got %v\n", err)
}
}
// This is from bug report #18
func TestSublistTwoTokenPubMatchSingleTokenSub(t *testing.T) {
s := NewSublist()
sub := newSub("foo")
s.Insert(sub)
r := s.Match("foo")
verifyLen(r.psubs, 1, t)
verifyMember(r.psubs, sub, t)
r = s.Match("foo.bar")
verifyLen(r.psubs, 0, t)
}
func TestSublistInsertWithWildcardsAsLiterals(t *testing.T) {
s := NewSublist()
subjects := []string{"foo.*-", "foo.>-"}
for _, subject := range subjects {
sub := newSub(subject)
s.Insert(sub)
// Should find no match
r := s.Match("foo.bar")
verifyLen(r.psubs, 0, t)
// Should find a match
r = s.Match(subject)
verifyLen(r.psubs, 1, t)
}
}
func TestSublistRemoveWithWildcardsAsLiterals(t *testing.T) {
s := NewSublist()
subjects := []string{"foo.*-", "foo.>-"}
for _, subject := range subjects {
sub := newSub(subject)
s.Insert(sub)
// Should find no match
rsub := newSub("foo.bar")
s.Remove(rsub)
if c := s.Count(); c != 1 {
t.Fatalf("Expected sublist to still contain sub, got %v", c)
}
s.Remove(sub)
if c := s.Count(); c != 0 {
t.Fatalf("Expected sublist to be empty, got %v", c)
}
}
}
// -- Benchmarks Setup --
var subs []*subscription
var toks = []string{"apcera", "continuum", "component", "router", "api", "imgr", "jmgr", "auth"}
var sl = NewSublist()
func init() {
subs = make([]*subscription, 0, 256*1024)
subsInit("")
for i := 0; i < len(subs); i++ {
sl.Insert(subs[i])
}
addWildcards()
}
func subsInit(pre string) {
var sub string
for _, t := range toks {
if len(pre) > 0 {
sub = pre + tsep + t
} else {
sub = t
}
subs = append(subs, newSub(sub))
if len(strings.Split(sub, tsep)) < 5 {
subsInit(sub)
}
}
}
func addWildcards() {
sl.Insert(newSub("cloud.>"))
sl.Insert(newSub("cloud.continuum.component.>"))
sl.Insert(newSub("cloud.*.*.router.*"))
}
// -- Benchmarks Setup End --
func Benchmark______________________SublistInsert(b *testing.B) {
s := NewSublist()
for i, l := 0, len(subs); i < b.N; i++ {
index := i % l
s.Insert(subs[index])
}
}
func Benchmark____________SublistMatchSingleToken(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera")
}
}
func Benchmark______________SublistMatchTwoTokens(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera.continuum")
}
}
func Benchmark____________SublistMatchThreeTokens(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera.continuum.component")
}
}
func Benchmark_____________SublistMatchFourTokens(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera.continuum.component.router")
}
}
func Benchmark_SublistMatchFourTokensSingleResult(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera.continuum.component.router")
}
}
func Benchmark_SublistMatchFourTokensMultiResults(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("cloud.continuum.component.router")
}
}
func Benchmark_______SublistMissOnLastTokenOfFive(b *testing.B) {
for i := 0; i < b.N; i++ {
sl.Match("apcera.continuum.component.router.ZZZZ")
}
}
func multiRead(b *testing.B, num int) {
b.StopTimer()
var swg, fwg sync.WaitGroup
swg.Add(num)
fwg.Add(num)
s := "apcera.continuum.component.router"
for i := 0; i < num; i++ {
go func() {
swg.Done()
swg.Wait()
for i := 0; i < b.N; i++ {
sl.Match(s)
}
fwg.Done()
}()
}
swg.Wait()
b.StartTimer()
fwg.Wait()
}
func Benchmark_____________Sublist10XMultipleReads(b *testing.B) {
multiRead(b, 10)
}
func Benchmark____________Sublist100XMultipleReads(b *testing.B) {
multiRead(b, 100)
}
func Benchmark_SublistMatchLiteral(b *testing.B) {
b.StopTimer()
cachedSubj := "foo.foo.foo.foo.foo.foo.foo.foo.foo.foo"
subjects := []string{
"foo.foo.foo.foo.foo.foo.foo.foo.foo.foo",
"foo.foo.foo.foo.foo.foo.foo.foo.foo.>",
"foo.foo.foo.foo.foo.foo.foo.foo.>",
"foo.foo.foo.foo.foo.foo.foo.>",
"foo.foo.foo.foo.foo.foo.>",
"foo.foo.foo.foo.foo.>",
"foo.foo.foo.foo.>",
"foo.foo.foo.>",
"foo.foo.>",
"foo.>",
">",
"foo.foo.foo.foo.foo.foo.foo.foo.foo.*",
"foo.foo.foo.foo.foo.foo.foo.foo.*.*",
"foo.foo.foo.foo.foo.foo.foo.*.*.*",
"foo.foo.foo.foo.foo.foo.*.*.*.*",
"foo.foo.foo.foo.foo.*.*.*.*.*",
"foo.foo.foo.foo.*.*.*.*.*.*",
"foo.foo.foo.*.*.*.*.*.*.*",
"foo.foo.*.*.*.*.*.*.*.*",
"foo.*.*.*.*.*.*.*.*.*",
"*.*.*.*.*.*.*.*.*.*",
}
b.StartTimer()
for i := 0; i < b.N; i++ {
for _, subject := range subjects {
if !matchLiteral(cachedSubj, subject) {
b.Fatalf("Subject %q no match with %q", cachedSubj, subject)
}
}
}
}
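The long runs of checkBool assertions in TestSublistValidLiteralSubjects and the tests after it are a natural fit for Go's table-driven style. Below is a minimal sketch of that refactor, assuming the package name (server) and the IsValidSubject helper exercised above; the test name and table layout are illustrative, not code from this repository.

package server

import "testing"

// Table-driven variant of the checkBool assertion runs above. Assumes
// IsValidSubject(string) bool is defined elsewhere in this package.
func TestSublistValidSubjectsTable(t *testing.T) {
	cases := []struct {
		subject string
		valid   bool
	}{
		{".", false},
		{".foo", false},
		{"foo.", false},
		{"foo..bar", false},
		{"foo", true},
		{"foo.bar.*", true},
		{"foo.bar.>", true},
		{"*", true},
		{">", true},
	}
	for _, c := range cases {
		if got := IsValidSubject(c.subject); got != c.valid {
			t.Errorf("IsValidSubject(%q) = %v, want %v", c.subject, got, c.valid)
		}
	}
}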
| 1 | 7,491 | I will change all of these at some point next week. | nats-io-nats-server | go |
@@ -42,7 +42,7 @@ namespace Datadog.Trace.ClrProfiler
/// </summary>
public string TargetAssembly
{
- get => throw new NotSupportedException("Use property TargetAssemblies instead of TargetAssembly.");
+ get => string.Empty;
set => TargetAssemblies = new[] { value };
}
| 1 | using System;
using System.Linq;
namespace Datadog.Trace.ClrProfiler
{
/// <summary>
/// Attribute that indicates that the decorated method is meant to intercept calls
/// to another method. Used to generate the integration definitions file.
/// </summary>
[AttributeUsage(AttributeTargets.Method, AllowMultiple = true, Inherited = false)]
public class InterceptMethodAttribute : Attribute
{
/// <summary>
/// Gets or sets the name of the integration.
/// </summary>
/// <remarks>
/// Multiple method replacements with the same integration name are grouped together.
/// </remarks>
public string Integration { get; set; }
/// <summary>
/// Gets or sets the name of the assembly where calls to the target method are searched.
/// If null, search in all loaded assemblies.
/// </summary>
public string CallerAssembly { get; set; }
/// <summary>
/// Gets or sets the name of the type where calls to the target method are searched.
/// If null, search in all types defined in loaded assemblies.
/// </summary>
public string CallerType { get; set; }
/// <summary>
/// Gets or sets the name of the method where calls to the target method are searched.
        /// If null, search in all methods defined in loaded types.
/// </summary>
public string CallerMethod { get; set; }
/// <summary>
/// Gets or sets the name of the assembly that contains the target method to be intercepted.
/// Required if <see cref="TargetAssemblies"/> is not set.
/// </summary>
public string TargetAssembly
{
get => throw new NotSupportedException("Use property TargetAssemblies instead of TargetAssembly.");
set => TargetAssemblies = new[] { value };
}
/// <summary>
/// Gets or sets the name of the assemblies that contain the target method to be intercepted.
/// Required if <see cref="TargetAssembly"/> is not set.
/// </summary>
public string[] TargetAssemblies { get; set; }
/// <summary>
/// Gets or sets the name of the type that contains the target method to be intercepted.
/// Required.
/// </summary>
public string TargetType { get; set; }
/// <summary>
/// Gets or sets the name of the target method to be intercepted.
/// If null, default to the name of the decorated method.
/// </summary>
public string TargetMethod { get; set; }
/// <summary>
/// Gets or sets the method signature that is matched to the target method to be intercepted.
/// If null, signature check is not done.
/// </summary>
public string TargetSignature { get; set; }
/// <summary>
/// Gets or sets the explicit type array for the target method to be intercepted.
/// This is a required field.
/// Follows format:
/// new[] { return_type, param_1_type, param_2_type, ..., param_n_type }
/// Must match the wrapper method in count:
/// n (parameters) + 1 (return type) - (is_instance_method : 1 : 0) - 1 (opcode)
/// Indexes with "_" are ignored for comparison purposes.
/// </summary>
public string[] TargetSignatureTypes { get; set; }
/// <summary>
/// Gets the target version range for <see cref="TargetAssembly"/>.
/// </summary>
public IntegrationVersionRange TargetVersionRange { get; } = new IntegrationVersionRange();
/// <summary>
/// Gets or sets the target minimum version.
/// </summary>
public string TargetMinimumVersion
{
get => TargetVersionRange.MinimumVersion;
set => TargetVersionRange.MinimumVersion = value;
}
/// <summary>
/// Gets or sets the target maximum version.
/// </summary>
public string TargetMaximumVersion
{
get => TargetVersionRange.MaximumVersion;
set => TargetVersionRange.MaximumVersion = value;
}
}
}
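The diff above replaces a throwing getter with one returning string.Empty: debugger watch windows and reflection-driven serializers evaluate property getters, so a throwing accessor breaks them. The trade-off is language-agnostic; here is a hedged Go sketch of the before/after shape (hypothetical type and method names — Go has no properties, so accessor methods stand in).

package main

import "fmt"

// interceptMethod is a hypothetical stand-in for the attribute above.
type interceptMethod struct {
	targetAssemblies []string
}

// Panicking accessor: tooling that evaluates it generically (a debugger
// watch, a serializer) blows up, like the original `get => throw ...`.
func (m *interceptMethod) TargetAssemblyStrict() string {
	panic("use TargetAssemblies instead of TargetAssembly")
}

// Debugger-friendly variant, mirroring the `get => string.Empty;` change.
func (m *interceptMethod) TargetAssembly() string {
	return ""
}

// SetTargetAssembly mirrors the setter: the single value fans out into
// the slice-valued field.
func (m *interceptMethod) SetTargetAssembly(v string) {
	m.targetAssemblies = []string{v}
}

func main() {
	m := &interceptMethod{}
	m.SetTargetAssembly("Example.Assembly")
	fmt.Println(m.TargetAssembly(), m.targetAssemblies)
}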
| 1 | 16,400 | I made this change because, while trying to debug, in Visual Studio, the `IntegrationSignatureTests`, this property getter was hit and interfered with my ability to complete the debugging. | DataDog-dd-trace-dotnet | .cs |
@@ -106,7 +106,8 @@ func determineDeploymentHealth(obj *unstructured.Unstructured) (status model.Kub
d := &appsv1.Deployment{}
err := scheme.Scheme.Convert(obj, d, nil)
if err != nil {
- desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, d, err)
+ status = model.KubernetesResourceState_OTHER
+ desc = fmt.Sprintf("Unexpected error while calculating: failed while convert %T to %T: %v", obj, d, err)
return
}
| 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"fmt"
"sort"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes/scheme"
"github.com/pipe-cd/pipe/pkg/model"
)
func MakeKubernetesResourceState(uid string, key ResourceKey, obj *unstructured.Unstructured, now time.Time) model.KubernetesResourceState {
var (
owners = obj.GetOwnerReferences()
ownerIDs = make([]string, 0, len(owners))
creationTime = obj.GetCreationTimestamp()
status, desc = determineResourceHealth(key, obj)
)
for _, owner := range owners {
ownerIDs = append(ownerIDs, string(owner.UID))
}
sort.Strings(ownerIDs)
state := model.KubernetesResourceState{
Id: uid,
OwnerIds: ownerIDs,
// TODO: Think about adding more parents by using label selectors
ParentIds: ownerIDs,
Name: key.Name,
ApiVersion: key.APIVersion,
Kind: key.Kind,
Namespace: obj.GetNamespace(),
HealthStatus: status,
HealthDescription: desc,
CreatedAt: creationTime.Unix(),
UpdatedAt: now.Unix(),
}
return state
}
func determineResourceHealth(key ResourceKey, obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
if !IsKubernetesBuiltInResource(key.APIVersion) {
desc = fmt.Sprintf("Unreadable resource kind %s/%s", key.APIVersion, key.Kind)
return
}
switch key.Kind {
case KindDeployment:
return determineDeploymentHealth(obj)
case KindStatefulSet:
return determineStatefulSetHealth(obj)
case KindDaemonSet:
return determineDaemonSetHealth(obj)
case KindReplicaSet:
return determineReplicaSetHealth(obj)
case KindJob:
return determineJobHealth(obj)
case KindPod:
return determinePodHealth(obj)
case KindService:
return determineServiceHealth(obj)
case KindIngress:
return determineIngressHealth(obj)
case KindConfigMap:
return determineConfigMapHealth(obj)
case KindPersistentVolumeClaim:
return determinePVCHealth(obj)
case KindSecret:
return determineSecretHealth(obj)
case KindServiceAccount:
return determineServiceAccountHealth(obj)
default:
desc = "Unimplemented or unknown resource"
return
}
}
func determineDeploymentHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
d := &appsv1.Deployment{}
err := scheme.Scheme.Convert(obj, d, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, d, err)
return
}
status = model.KubernetesResourceState_OTHER
if d.Spec.Paused {
desc = "Deployment is paused"
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L75
if d.Generation > d.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed deployment generation less than desired generation"
return
}
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
const timedOutReason = "ProgressDeadlineExceeded"
var cond *appsv1.DeploymentCondition
for i := range d.Status.Conditions {
c := d.Status.Conditions[i]
if c.Type == appsv1.DeploymentProgressing {
cond = &c
break
}
}
	if cond != nil && cond.Reason == timedOutReason {
		desc = fmt.Sprintf("Deployment %q exceeded its progress deadline", obj.GetName())
		return
	}
if d.Spec.Replicas == nil {
desc = "The number of desired replicas is unspecified"
return
}
if d.Status.UpdatedReplicas < *d.Spec.Replicas {
desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be updated", d.Status.UpdatedReplicas, *d.Spec.Replicas)
return
}
if d.Status.UpdatedReplicas < d.Status.Replicas {
desc = fmt.Sprintf("%d old replicas are pending termination", d.Status.Replicas-d.Status.UpdatedReplicas)
return
}
if d.Status.AvailableReplicas < d.Status.Replicas {
desc = fmt.Sprintf("Waiting for remaining %d/%d replicas to be available", d.Status.Replicas-d.Status.AvailableReplicas, d.Status.Replicas)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineStatefulSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &appsv1.StatefulSet{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, s, err)
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L130-L149
status = model.KubernetesResourceState_OTHER
if s.Status.ObservedGeneration == 0 || s.Generation > s.Status.ObservedGeneration {
desc = "Waiting for statefulset spec update to be observed"
return
}
if s.Spec.Replicas == nil {
desc = "The number of desired replicas is unspecified"
return
}
if *s.Spec.Replicas != s.Status.ReadyReplicas {
desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", s.Status.ReadyReplicas, *s.Spec.Replicas)
return
}
// Check if the partitioned roll out is in progress.
if s.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && s.Spec.UpdateStrategy.RollingUpdate != nil {
if s.Spec.Replicas != nil && s.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
if s.Status.UpdatedReplicas < (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition) {
desc = fmt.Sprintf("Waiting for partitioned roll out to finish because %d out of %d new pods have been updated",
s.Status.UpdatedReplicas, (*s.Spec.Replicas - *s.Spec.UpdateStrategy.RollingUpdate.Partition))
return
}
}
status = model.KubernetesResourceState_HEALTHY
return
}
if s.Status.UpdateRevision != s.Status.CurrentRevision {
desc = fmt.Sprintf("Waiting for statefulset rolling update to complete %d pods at revision %s", s.Status.UpdatedReplicas, s.Status.UpdateRevision)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineDaemonSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
d := &appsv1.DaemonSet{}
err := scheme.Scheme.Convert(obj, d, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, d, err)
return
}
// Referred to:
// https://github.com/kubernetes/kubernetes/blob/7942dca975b7be9386540df3c17e309c3cb2de60/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L107-L115
status = model.KubernetesResourceState_OTHER
if d.Status.ObservedGeneration == 0 || d.Generation > d.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed daemon set generation less than desired generation"
return
}
if d.Status.UpdatedNumberScheduled < d.Status.DesiredNumberScheduled {
desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d out of %d new pods have been updated", d.Name, d.Status.UpdatedNumberScheduled, d.Status.DesiredNumberScheduled)
return
}
if d.Status.NumberAvailable < d.Status.DesiredNumberScheduled {
desc = fmt.Sprintf("Waiting for daemon set %q rollout to finish because %d of %d updated pods are available", d.Name, d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)
return
}
if d.Status.NumberMisscheduled > 0 {
desc = fmt.Sprintf("%d nodes that are running the daemon pod, but are not supposed to run the daemon pod", d.Status.NumberMisscheduled)
return
}
if d.Status.NumberUnavailable > 0 {
desc = fmt.Sprintf("%d nodes that should be running the daemon pod and have none of the daemon pod running and available", d.Status.NumberUnavailable)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineReplicaSetHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
r := &appsv1.ReplicaSet{}
err := scheme.Scheme.Convert(obj, r, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, r, err)
return
}
status = model.KubernetesResourceState_OTHER
if r.Status.ObservedGeneration == 0 || r.Generation > r.Status.ObservedGeneration {
desc = "Waiting for rollout to finish because observed replica set generation less than desired generation"
return
}
var cond *appsv1.ReplicaSetCondition
for i := range r.Status.Conditions {
c := r.Status.Conditions[i]
if c.Type == appsv1.ReplicaSetReplicaFailure {
cond = &c
break
}
}
if cond != nil && cond.Status == corev1.ConditionTrue {
desc = cond.Message
return
} else if r.Spec.Replicas == nil {
desc = "The number of desired replicas is unspecified"
return
} else if r.Status.AvailableReplicas < *r.Spec.Replicas {
desc = fmt.Sprintf("Waiting for rollout to finish because only %d/%d replicas are available", r.Status.AvailableReplicas, *r.Spec.Replicas)
return
} else if *r.Spec.Replicas != r.Status.ReadyReplicas {
desc = fmt.Sprintf("The number of ready replicas (%d) is different from the desired number (%d)", r.Status.ReadyReplicas, *r.Spec.Replicas)
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineJobHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
job := &batchv1.Job{}
err := scheme.Scheme.Convert(obj, job, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, job, err)
return
}
var (
failed bool
completed bool
message string
)
for _, condition := range job.Status.Conditions {
switch condition.Type {
case batchv1.JobFailed:
failed = true
completed = true
message = condition.Message
case batchv1.JobComplete:
completed = true
message = condition.Message
}
}
if !completed {
status = model.KubernetesResourceState_HEALTHY
desc = "Job is in progress"
} else if failed {
status = model.KubernetesResourceState_OTHER
desc = message
} else {
status = model.KubernetesResourceState_HEALTHY
desc = message
}
return
}
func determinePodHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
p := &corev1.Pod{}
err := scheme.Scheme.Convert(obj, p, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, p, err)
return
}
// Determine based on its container statuses.
if p.Spec.RestartPolicy == corev1.RestartPolicyAlways {
var messages []string
for _, s := range p.Status.ContainerStatuses {
waiting := s.State.Waiting
if waiting == nil {
continue
}
if strings.HasPrefix(waiting.Reason, "Err") || strings.HasSuffix(waiting.Reason, "Error") || strings.HasSuffix(waiting.Reason, "BackOff") {
status = model.KubernetesResourceState_OTHER
messages = append(messages, waiting.Message)
}
}
if status == model.KubernetesResourceState_OTHER {
desc = strings.Join(messages, ", ")
return
}
}
// Determine based on its phase.
switch p.Status.Phase {
case corev1.PodRunning, corev1.PodSucceeded:
status = model.KubernetesResourceState_HEALTHY
desc = p.Status.Message
default:
status = model.KubernetesResourceState_OTHER
desc = p.Status.Message
}
return
}
func determineIngressHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
i := &networkingv1beta1.Ingress{}
err := scheme.Scheme.Convert(obj, i, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, i, err)
return
}
status = model.KubernetesResourceState_OTHER
if len(i.Status.LoadBalancer.Ingress) <= 0 {
desc = "Ingress points for the load-balancer are in progress"
return
}
status = model.KubernetesResourceState_HEALTHY
return
}
func determineServiceHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &corev1.Service{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, s, err)
return
}
status = model.KubernetesResourceState_HEALTHY
if s.Spec.Type != corev1.ServiceTypeLoadBalancer {
return
}
if len(s.Status.LoadBalancer.Ingress) <= 0 {
status = model.KubernetesResourceState_OTHER
desc = "Ingress points for the load-balancer are in progress"
return
}
return
}
func determineConfigMapHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
c := &corev1.ConfigMap{}
err := scheme.Scheme.Convert(obj, c, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, c, err)
return
}
desc = fmt.Sprintf("%q created", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determineSecretHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &corev1.Secret{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, s, err)
return
}
desc = fmt.Sprintf("%q created", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
func determinePVCHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
pvc := &corev1.PersistentVolumeClaim{}
err := scheme.Scheme.Convert(obj, pvc, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, pvc, err)
return
}
switch pvc.Status.Phase {
case corev1.ClaimLost:
status = model.KubernetesResourceState_OTHER
desc = "Lost its underlying PersistentVolume"
case corev1.ClaimPending:
status = model.KubernetesResourceState_OTHER
desc = "Being not yet bound"
case corev1.ClaimBound:
status = model.KubernetesResourceState_HEALTHY
default:
status = model.KubernetesResourceState_OTHER
desc = "The current phase of PersistentVolumeClaim is unexpected"
}
return
}
func determineServiceAccountHealth(obj *unstructured.Unstructured) (status model.KubernetesResourceState_HealthStatus, desc string) {
s := &corev1.ServiceAccount{}
err := scheme.Scheme.Convert(obj, s, nil)
if err != nil {
desc = fmt.Sprintf("Failed while convert %T to %T: %v", obj, s, err)
return
}
desc = fmt.Sprintf("%q created", obj.GetName())
status = model.KubernetesResourceState_HEALTHY
return
}
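Every determine*Health function in this file repeats the same scheme.Scheme.Convert error branch, and the diff for this row touches only the Deployment copy. Factoring the branch into one helper would let the reviewer's suggested wording land everywhere at once. A sketch meant to slot into this same file (so its existing imports apply); the helper name is hypothetical.

// convertResource is a hypothetical helper consolidating the repeated
// conversion-error branch above. The message follows the reviewer's
// suggested wording.
func convertResource(obj *unstructured.Unstructured, out interface{}) (string, bool) {
	if err := scheme.Scheme.Convert(obj, out, nil); err != nil {
		return fmt.Sprintf("Unexpected error while calculating: unable to convert %T to %T: %v", obj, out, err), false
	}
	return "", true
}

// Example use, replacing the opening lines of determineDeploymentHealth:
//
//	d := &appsv1.Deployment{}
//	if msg, ok := convertResource(obj, d); !ok {
//		return model.KubernetesResourceState_OTHER, msg
//	}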
| 1 | 7,912 | nit: `"Unexpected error while calculating: unable to convert %T to %T: %v"` | pipe-cd-pipe | go |
@@ -87,6 +87,10 @@ public abstract class SampleConfig {
@JsonProperty("packagePrefix")
public abstract String packagePrefix();
+ /** Returns a sample application name. */
+ @JsonProperty("appName")
+ public abstract String appName();
+
/** Returns a map of method names to methods. */
@JsonProperty("methods")
public abstract Map<String, MethodInfo> methods(); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.config;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.google.auto.value.AutoValue;
import java.util.Map;
/**
* Contains the set of information required to produce a code sample from a discovery document.
*
* <p>Supports serialization to/from JSON to accommodate overrides for any value. A generated
* SampleConfig will contain some language specific type information that is intended to be pieced
* together at a later stage of generation.
*
* <p>For example, in Java:
*
* <pre>{
* "apiTypeName": "HelloWorld"
* "packagePrefix": "foo.google.bar.helloworld.v1"
* "methods": {
* "helloworld.list": {
* "nameComponents": ["foo", "bar", "list"]
* "requestType": {
* "message": {
* "typeName": "ListWorldsRequest",
* "subpackage": "model"
* }
* }
* }
* }
* }</pre>
*
* The generated API type name might be {@code "foo.google.bar.helloworld.v1.HelloWorld"}, and the
* generated request type name might be {@code
* "foo.google.bar.helloworld.v1.model.ListWorldsRequest"}.
*/
@AutoValue
@JsonDeserialize(builder = AutoValue_SampleConfig.Builder.class)
public abstract class SampleConfig {
/**
* Returns the API's title.
*
* <p>A printable representation of the API's title. For example: "Ad Exchange Buyer API"
*/
@JsonProperty("apiTitle")
public abstract String apiTitle();
/**
* Returns the API's name.
*
* <p>For example: "adexchangebuyer"
*/
@JsonProperty("apiName")
public abstract String apiName();
/** Returns the API's version. */
@JsonProperty("apiVersion")
public abstract String apiVersion();
/**
* Returns the API's type name.
*
* <p>The type name of the message in the target language, but not fully-qualified. To produce a
* fully qualified name, it may be necessary to use {@link #packagePrefix()}}.
*
* <p>For example: "Adexchangebuyer"
*/
@JsonProperty("apiTypeName")
public abstract String apiTypeName();
/** Returns the language specific package prefix for API types. */
@JsonProperty("packagePrefix")
public abstract String packagePrefix();
/** Returns a map of method names to methods. */
@JsonProperty("methods")
public abstract Map<String, MethodInfo> methods();
/** Returns the API's authentication type. */
@JsonProperty("authType")
public abstract AuthType authType();
/** Returns the authentication instructions URL. */
@JsonProperty("authInstructionsUrl")
public abstract String authInstructionsUrl();
public static Builder newBuilder() {
return new AutoValue_SampleConfig.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
@JsonProperty("apiTitle")
public abstract Builder apiTitle(String val);
@JsonProperty("apiName")
public abstract Builder apiName(String val);
@JsonProperty("apiVersion")
public abstract Builder apiVersion(String val);
@JsonProperty("apiTypeName")
public abstract Builder apiTypeName(String val);
@JsonProperty("packagePrefix")
public abstract Builder packagePrefix(String val);
@JsonProperty("methods")
public abstract Builder methods(Map<String, MethodInfo> val);
@JsonProperty("authType")
public abstract Builder authType(AuthType val);
@JsonProperty("authInstructionsUrl")
public abstract Builder authInstructionsUrl(String val);
public abstract SampleConfig build();
}
}
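The review for this row recommends deriving the sample app name in SampleNamer (a getSampleAppName(String apiTypeName) method that language-specific namers override) instead of storing appName on SampleConfig. The shape of that split — config holds facts from the source document, a namer computes presentation names — sketched in Go with hypothetical names throughout.

package main

import "fmt"

// sampleConfig mirrors the idea of SampleConfig holding only facts taken
// from the discovery document.
type sampleConfig struct {
	apiTypeName string // e.g. "Adexchangebuyer"
}

// sampleNamer mirrors SampleNamer: derived, presentation-level names are
// computed here rather than persisted in the config.
type sampleNamer struct{}

// getSampleAppName derives the app name from the API type name; a
// language-specific namer would override this with its own convention.
// The naming scheme below is invented for illustration.
func (sampleNamer) getSampleAppName(apiTypeName string) string {
	return "Codegen" + apiTypeName + "Example"
}

func main() {
	cfg := sampleConfig{apiTypeName: "Adexchangebuyer"}
	fmt.Println(sampleNamer{}.getSampleAppName(cfg.apiTypeName))
}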
| 1 | 19,164 | I'd actually recommend not putting this in the `SampleConfig`, it's intended more for properties that are inherent of the discovery format. Add a method `getSampleAppName(String apiTypeName)` to `SampleNamer` and override it in the language specific `SampleNamer`s if needed. Then assign it in the transformer. | googleapis-gapic-generator | java |
@@ -53,6 +53,8 @@ export * from './standalone';
export * from './storage';
export * from './i18n';
export * from './helpers';
+export * from './sort-object-map-by-key';
+export * from './convert-array-to-keyed-object-map';
/**
* Remove a parameter from a URL string. | 1 | /**
* Utility functions.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import {
map,
isEqual,
isFinite,
get,
unescape,
} from 'lodash';
/**
* WordPress dependencies
*/
import {
addFilter,
} from '@wordpress/hooks';
import {
__,
sprintf,
} from '@wordpress/i18n';
import { addQueryArgs, getQueryString } from '@wordpress/url';
/**
* Internal dependencies
*/
import SvgIcon from './svg-icon';
import { trackEvent } from './tracking';
import { fillFilterWithComponent } from './helpers';
export { trackEvent };
export { SvgIcon };
export * from './sanitize';
export * from './stringify';
export * from './standalone';
export * from './storage';
export * from './i18n';
export * from './helpers';
/**
* Remove a parameter from a URL string.
*
* Fallback for when URL is unable to handle parsedURL.searchParams.delete.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*
* @return {string} URL without the deleted parameter.
*
*/
const removeURLFallBack = ( url, parameter ) => {
const urlparts = url.split( '?' );
if ( 2 <= urlparts.length ) {
const prefix = encodeURIComponent( parameter ) + '=';
const pars = urlparts[ 1 ].split( /[&;]/g );
	// Filter out any occurrence of the parameter.
const newPars = pars.filter( ( param ) => {
return -1 === param.lastIndexOf( prefix, 0 );
} );
url = urlparts[ 0 ] + '/' + ( 0 < newPars.length ? '?' + newPars.join( '&' ) : '' );
return url;
}
return url;
};
/**
* Remove a parameter from a URL string.
*
* Leverages the URL object internally.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*
* @return {string} URL without the deleted parameter.
*/
export const removeURLParameter = ( url, parameter ) => {
const parsedURL = new URL( url );
// If the URL implementation doesn't support ! parsedURL.searchParams, use the fallback handler.
if ( ! parsedURL.searchParams || ! parsedURL.searchParams.delete ) {
return removeURLFallBack( url, parameter );
}
parsedURL.searchParams.delete( parameter );
return parsedURL.href;
};
/**
* Prepares a number to be used in readableLargeNumber.
*
* @param {number} number The large number to prepare.
*
* @return {number} The prepared number
*/
export const prepareForReadableLargeNumber = ( number ) => {
if ( 1000000 <= number ) {
return Math.round( number / 100000 ) / 10;
}
if ( 10000 <= number ) {
return Math.round( number / 1000 );
}
if ( 1000 <= number ) {
return Math.round( number / 100 ) / 10;
}
return number;
};
/**
* Format a large number for shortened display.
*
* @param {number} number The large number to format.
* @param {(string|boolean)} currencyCode Optional currency code to format as amount.
*
* @return {string} The formatted number.
*/
export const readableLargeNumber = ( number, currencyCode = false ) => {
// Cast parseable values to numeric types.
number = isFinite( number ) ? number : Number( number );
if ( ! isFinite( number ) ) {
// eslint-disable-next-line no-console
console.warn( 'Invalid number', number, typeof number );
number = 0;
}
if ( currencyCode ) {
return numberFormat( number, { style: 'currency', currency: currencyCode } );
}
const withSingleDecimal = {
minimumFractionDigits: 1,
maximumFractionDigits: 1,
};
// Numbers over 1,000,000 round normally and display a single decimal unless the decimal is 0.
if ( 1000000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in millions.
__( '%sM', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ), number % 10 === 0 ? {} : withSingleDecimal )
);
}
// Numbers between 10,000 and 1,000,000 round normally and have no decimals
if ( 10000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in thousands.
__( '%sK', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ) )
);
}
// Numbers between 1,000 and 10,000 round normally and display a single decimal unless the decimal is 0.
if ( 1000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in thousands.
__( '%sK', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ), number % 10 === 0 ? {} : withSingleDecimal )
);
}
return number.toString();
};
/**
* Internationalization Number Format.
*
* @param {number} number The number to format.
* @param {Object} [options] Formatting options.
* @param {string} [options.locale] Locale to use for formatting. Defaults to current locale used by Site Kit.
* @see {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/NumberFormat/NumberFormat|`options` parameter}
* For all available formatting options.
*
* @return {string} The formatted number.
*/
export const numberFormat = ( number, options = {} ) => {
const { locale = getLocale(), ...formatOptions } = options;
return new Intl.NumberFormat( locale, formatOptions ).format( number );
};
/**
* Gets the current locale for use with browser APIs.
*
* @param {Object} _global The global window object.
*
* @return {string} Current Site Kit locale if set, otherwise the current language set by the browser.
* E.g. `en-US` or `de-DE`
*/
export const getLocale = ( _global = global ) => {
const siteKitLocale = get( _global, [ 'googlesitekit', 'locale', '', 'lang' ] );
if ( siteKitLocale ) {
const matches = siteKitLocale.match( /^(\w{2})?(_)?(\w{2})/ );
if ( matches && matches[ 0 ] ) {
return matches[ 0 ].replace( /_/g, '-' );
}
}
return _global.navigator.language;
};
/**
* Transform a period string into a number of seconds.
*
* @param {string} period The period to transform.
*
* @return {number} The number of seconds
*/
export const getTimeInSeconds = ( period ) => {
const minute = 60;
const hour = minute * 60;
const day = hour * 24;
const week = day * 7;
const month = day * 30;
const year = day * 365;
switch ( period ) {
case 'minute':
return minute;
case 'hour':
return hour;
case 'day':
return day;
case 'week':
return week;
case 'month':
return month;
case 'year':
return year;
}
};
/**
* Converts seconds to a display ready string indicating
* the number of hours, minutes and seconds that have elapsed.
*
* For example, passing 65 returns '1m 5s'.
*
* @param {number} seconds The number of seconds.
*
* @return {string} Human readable string indicating time elapsed.
*
*/
export const prepareSecondsForDisplay = ( seconds ) => {
seconds = parseInt( seconds, 10 );
if ( isNaN( seconds ) || 0 === seconds ) {
return '0.0s';
}
const results = {};
results.hours = Math.floor( seconds / 60 / 60 );
results.minutes = Math.floor( ( seconds / 60 ) % 60 );
results.seconds = Math.floor( seconds % 60 );
const returnString =
( results.hours ? results.hours + 'h ' : '' ) +
( results.minutes ? results.minutes + 'm ' : '' ) +
( results.seconds ? results.seconds + 's ' : '' );
return returnString.trim();
};
/**
* Retrieve number of days between 2 dates.
*
* @param {Date} dateStart Start date instance.
* @param {Date} dateEnd End date instance.
*
* @return {number} The number of days.
*/
export const getDaysBetweenDates = ( dateStart, dateEnd ) => {
const dayMs = 1000 * getTimeInSeconds( 'day' );
const dateStartMs = dateStart.getTime();
const dateEndMs = dateEnd.getTime();
return Math.round( Math.abs( dateStartMs - dateEndMs ) / dayMs );
};
/**
* Calculate the percent change between two values.
*
* @param {number} previous The previous value.
* @param {number} current The current value.
*
* @return {(number|string)} The percent change.
*/
export const changeToPercent = ( previous, current ) => {
// Prevent divide by zero errors.
if ( '0' === previous || 0 === previous || isNaN( previous ) ) {
return '';
}
const change = ( ( current - previous ) / previous * 100 ).toFixed( 1 );
// Avoid NaN at all costs.
if ( isNaN( change ) || 'Infinity' === change ) {
return '';
}
return change;
};
/**
* Extract a single column of data for a sparkline from a dataset prepared for Google charts.
*
* @param {Array} rowData An array of Google charts row data.
* @param {number} column The column to extract for the sparkline.
*
* @return {Array} Extracted column of dataset prepared for Google charts.
*
*/
export const extractForSparkline = ( rowData, column ) => {
return map( rowData, ( row, i ) => {
return [
row[ 0 ], // row[0] always contains the x axis value (typically date).
row[ column ] || ( 0 === i ? '' : 0 ), // the data for the sparkline.
];
} );
};
/**
* Gets data for all modules.
*
* Because googlesitekit.modules contains both module information (legacy) and
* API functions (new), we should be using this function and never access
* googlesitekit.modules directly to access module data.
*
* This function should be removed once this object is no longer used to store
* legacy module data.
*
* @since 1.7.0
*
* @param {Object} _googlesitekit Optional. googlesitekit global; can be replaced for testing.
* @return {Object} Object with module data, with each module keyed by its slug.
*/
export const getModulesData = ( _googlesitekit = global.googlesitekit ) => {
const modulesObj = _googlesitekit.modules;
if ( ! modulesObj ) {
return {};
}
return Object.keys( modulesObj ).reduce( ( acc, slug ) => {
if ( 'object' !== typeof modulesObj[ slug ] ) {
return acc;
}
if (
'undefined' === typeof modulesObj[ slug ].slug ||
'undefined' === typeof modulesObj[ slug ].name ||
modulesObj[ slug ].slug !== slug
) {
return acc;
}
return { ...acc, [ slug ]: modulesObj[ slug ] };
}, {} );
};
/**
* Get the URL needed to initiate a reAuth flow.
*
* @param {string} slug The module slug. If included redirect URL will include page: page={ `googlesitekit-${slug}`}.
* @param {boolean} status The module activation status.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
* @return {string} Authentication URL
*/
export const getReAuthURL = ( slug, status, _googlesitekit = global.googlesitekit ) => {
const {
connectURL,
adminRoot,
} = _googlesitekit.admin;
const { needReauthenticate } = _googlesitekit.setup;
const { screenID } = getModulesData( _googlesitekit )[ slug ];
// Special case handling for PageSpeed Insights.
// TODO: Refactor this out.
const pageSpeedQueryArgs = 'pagespeed-insights' === slug ? {
notification: 'authentication_success',
reAuth: undefined,
} : {};
let redirect = addQueryArgs(
adminRoot, {
// If the module has a submenu page, and is being activated, redirect back to the module page.
page: ( slug && status && screenID ) ? screenID : 'googlesitekit-dashboard',
slug,
reAuth: status,
...pageSpeedQueryArgs,
}
);
if ( ! needReauthenticate ) {
return redirect;
}
// Encodes the query string to ensure the redirect url is not messing up with the main url.
const queryString = encodeURIComponent( getQueryString( redirect ) );
// Rebuild the redirect url.
redirect = adminRoot + '?' + queryString;
return addQueryArgs(
connectURL, {
redirect,
status,
}
);
};
/**
* Get Site Kit Admin URL Helper
*
* @param {string} page The page slug. Optional. Default is 'googlesitekit-dashboard'.
* @param {Object} args Optional. Object of arguments to add to the URL.
*
* @return {string} Admin URL with appended query params.
*/
export const getSiteKitAdminURL = ( page, args ) => {
const { adminRoot } = global.googlesitekit.admin;
if ( ! page ) {
page = 'googlesitekit-dashboard';
}
args = { page, ...args };
return addQueryArgs( adminRoot, args );
};
/**
* Verifies whether JSON is valid.
*
* @param {string} stringToValidate The string to validate.
*
* @return {boolean} Indicates JSON is valid.
*/
export const validateJSON = ( stringToValidate ) => {
try {
return ( JSON.parse( stringToValidate ) && !! stringToValidate );
} catch ( e ) {
return false;
}
};
/**
* Verifies Optimize ID
*
* @param {string} stringToValidate The string to validate.
*
* @return {boolean} Indicates GTM or OPT tag is valid.
*/
export const validateOptimizeID = ( stringToValidate ) => {
return ( stringToValidate.match( /^(GTM|OPT)-[a-zA-Z\d]{7}$/ ) );
};
/**
* Activate or Deactivate a Module.
*
* @param {Object} restApiClient Rest API client from data module, this needed so we don't need to import data module in helper.
* @param {string} moduleSlug Module slug to activate or deactivate.
* @param {boolean} status True if module should be activated, false if it should be deactivated.
* @return {Promise} A promise for activating/deactivating a module.
*/
export const activateOrDeactivateModule = ( restApiClient, moduleSlug, status ) => {
return restApiClient.setModuleActive( moduleSlug, status ).then( ( responseData ) => {
const modulesData = getModulesData();
// We should really be using state management. This is terrible.
if ( modulesData[ moduleSlug ] ) {
modulesData[ moduleSlug ].active = responseData.active;
}
trackEvent(
`${ moduleSlug }_setup`,
! responseData.active ? 'module_deactivate' : 'module_activate',
moduleSlug,
);
return new Promise( ( resolve ) => {
resolve( responseData );
} );
} );
};
/**
 * Helper to enable/disable the confirm changes button
 * depending on whether the module's settings have changed.
*
* @param {string} moduleSlug The module slug being edited.
* @param {Object} settingsMapping The mapping between form settings names and saved settings.
* @param {Object} settingsState The changed settings component state to compare with.
 * @param {boolean} skipDOM Skip DOM checks/modifications, used for testing.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
 * @return {(void|boolean)} True if any mapped setting has changed.
*/
export const toggleConfirmModuleSettings = ( moduleSlug, settingsMapping, settingsState, skipDOM = false, _googlesitekit = global.googlesitekit ) => {
const { settings, setupComplete } = getModulesData( _googlesitekit )[ moduleSlug ];
const confirm = skipDOM || document.getElementById( `confirm-changes-${ moduleSlug }` );
if ( ! setupComplete || ! confirm ) {
return;
}
// Check if any of the mapped settings differ from the current/saved settings.
const changed = !! Object.keys( settingsMapping ).find( ( stateKey ) => {
const settingsKey = settingsMapping[ stateKey ];
return ! isEqual( settingsState[ stateKey ], settings[ settingsKey ] );
} );
if ( ! skipDOM ) {
confirm.disabled = ! changed;
}
return changed;
};
/**
* Trigger error notification on top of the page.
*
* @param {WPElement} ErrorComponent The error component to render in place.
* @param {Object} props The props to pass down to the error component. Optional.
*/
export const showErrorNotification = ( ErrorComponent, props = {} ) => {
addFilter( 'googlesitekit.ErrorNotification',
'googlesitekit.ErrorNotification',
fillFilterWithComponent( ErrorComponent, props ), 1 );
};
/**
 * Decodes HTML entities in a string into plain text.
 *
 * _.unescape doesn't seem to decode some entities for admin bar titles,
 * so this helper combines both approaches as a workaround.
*
* @param {string} str The string to decode.
*
* @return {string} Decoded HTML entity.
*/
export const decodeHtmlEntity = ( str ) => {
if ( ! str ) {
return '';
}
const decoded = str.replace( /&#(\d+);/g, function( match, dec ) {
return String.fromCharCode( dec );
} ).replace( /(\\)/g, '' );
return unescape( decoded );
};
/**
* Get the icon for a module.
*
* @param {string} module The module slug.
* @param {boolean} blockedByParentModule Whether the module is blocked by a parent module.
* @param {string} width The icon width.
* @param {string} height The icon height.
* @param {string} useClass Class string to use for icon.
*
* @return {HTMLImageElement} <img> tag with module icon.
*/
export function moduleIcon( module, blockedByParentModule, width = '33', height = '33', useClass = '' ) {
if ( ! global.googlesitekit ) {
return;
}
/* Set module icons. Page Speed Insights is a special case because only a .png is available. */
let iconComponent = <SvgIcon id={ module } width={ width } height={ height } className={ useClass } />;
if ( blockedByParentModule ) {
iconComponent = <SvgIcon id={ `${ module }-disabled` } width={ width } height={ height } className={ useClass } />;
} else if ( 'pagespeed-insights' === module ) {
iconComponent = <img src={ global.googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ width } alt="" className={ useClass } />;
}
return iconComponent;
}
/**
* Gets the meta key for the given user option.
*
* @param {string} userOptionName User option name.
* @param {Object} _googlesitekitBaseData Site Kit base data (used for testing).
* @return {string} meta key name.
*/
export function getMetaKeyForUserOption( userOptionName, _googlesitekitBaseData = global._googlesitekitBaseData ) {
const { blogPrefix, isNetworkMode } = _googlesitekitBaseData;
if ( ! isNetworkMode ) {
return blogPrefix + userOptionName;
}
return userOptionName;
}
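The diff for this row re-exports sort-object-map-by-key and convert-array-to-keyed-object-map from this barrel file, and the review questions whether they belong here at all. Neither implementation is shown in this row, so the following is only a guess from the name at what the keyed-map conversion does, written as a self-contained Go sketch.

package main

import "fmt"

// module is a hypothetical record type; convertArrayToKeyedMap indexes a
// slice of records by one of their fields, which is presumably what
// convert-array-to-keyed-object-map does for arrays of module objects.
type module struct {
	Slug string
	Name string
}

func convertArrayToKeyedMap(items []module) map[string]module {
	keyed := make(map[string]module, len(items))
	for _, it := range items {
		keyed[it.Slug] = it
	}
	return keyed
}

func main() {
	mods := []module{{Slug: "analytics", Name: "Analytics"}, {Slug: "adsense", Name: "AdSense"}}
	fmt.Println(convertArrayToKeyedMap(mods))
}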
| 1 | 29,400 | Do we need to export these new functions? I thought the plan going forward was to keep util imports separated to make circular imports less likely and possibly do better chunk splitting. Since only new code references these files, we can scope the import to the specific files. | google-site-kit-wp | js |
@@ -0,0 +1 @@
+require "#{Rails.root}/app/models/acts_as_editable/acts_as_editable.rb" | 1 | 1 | 6,772 | Rails will autorequire `acts_as_editable` once it encounters the constant `ActsAsEditable`. Thus if we move `ActiveRecord::Base.send :include, ActsAsEditable` from _acts_as_editable.rb_ to this file, the require line will not be needed anymore. Instead of requiring the file ourselves, we will let Rails do it for us. This fix will also remove the need to explicitly require `acts_as_editable` in _mock_active_record_base.rb_. | blackducksoftware-ohloh-ui | rb |
|
@@ -23,6 +23,9 @@ public abstract class GrpcStreamingDetailView {
public abstract String methodName();
+ @Nullable
+ public abstract String upperCamelMethodName();
+
public abstract GrpcStreamingType grpcStreamingType();
@Nullable | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType;
import com.google.auto.value.AutoValue;
import javax.annotation.Nullable;
@AutoValue
public abstract class GrpcStreamingDetailView {
public abstract String methodName();
public abstract GrpcStreamingType grpcStreamingType();
@Nullable
public abstract String grpcResourcesField();
public boolean hasGrpcResourcesField() {
return grpcResourcesField() != null;
}
@Nullable
public abstract String streamTypeName();
public static Builder newBuilder() {
return new AutoValue_GrpcStreamingDetailView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder methodName(String val);
public abstract Builder grpcStreamingType(GrpcStreamingType val);
public abstract Builder grpcResourcesField(String val);
public abstract Builder streamTypeName(String val);
public abstract GrpcStreamingDetailView build();
}
}
| 1 | 24,402 | I think we should name this `grpcMethodName` or similar. That is what this refers to, right? The fact that it is upper camel is an implementation. | googleapis-gapic-generator | java |
@@ -47,6 +47,13 @@
} \
} while (false);
+#define VALIDATE_NAME(_N) \
+ do { \
+ if (_N[0] == '.' || _N[0] == '/') { \
+ FPGA_MSG("%s is not a valid input", _N); \
+ return FPGA_INVALID_PARAM; \
+ } \
+ } while (false);
fpga_result __FPGA_API__ xfpga_fpgaTokenGetObject(fpga_token token, const char *name,
fpga_object *object, int flags) | 1 | // Copyright(c) 2017-2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include <sys/types.h>
#include <sys/stat.h>
#include "common_int.h"
#include "sysfs_int.h"
#include "types_int.h"
#include "safe_string/safe_string.h"
#include <opae/types_enum.h>
#include <opae/sysobject.h>
#include <opae/log.h>
#define FREE_IF(var) \
do { \
if (var) { \
free(var); \
var = NULL; \
} \
} while (false);
fpga_result __FPGA_API__ xfpga_fpgaTokenGetObject(fpga_token token, const char *name,
fpga_object *object, int flags)
{
char objpath[SYSFS_PATH_MAX];
fpga_result res = FPGA_EXCEPTION;
ASSERT_NOT_NULL(token);
ASSERT_NOT_NULL(name);
res = cat_token_sysfs_path(objpath, token, name);
if (res) {
return res;
}
return make_sysfs_object(objpath, name, object, flags, NULL);
}
fpga_result __FPGA_API__ xfpga_fpgaHandleGetObject(fpga_token handle, const char *name,
fpga_object *object, int flags)
{
char objpath[SYSFS_PATH_MAX];
fpga_result res = FPGA_EXCEPTION;
ASSERT_NOT_NULL(handle);
ASSERT_NOT_NULL(name);
res = cat_handle_sysfs_path(objpath, handle, name);
if (res) {
return res;
}
return make_sysfs_object(objpath, name, object, flags, handle);
}
fpga_result __FPGA_API__ xfpga_fpgaObjectGetObject(fpga_object parent, const char *name,
fpga_object *object, int flags)
{
char objpath[SYSFS_PATH_MAX] = {0};
fpga_result res = FPGA_EXCEPTION;
ASSERT_NOT_NULL(parent);
ASSERT_NOT_NULL(name);
struct _fpga_object *_obj = (struct _fpga_object *)parent;
if (_obj->type == FPGA_SYSFS_FILE) {
return FPGA_INVALID_PARAM;
}
res = cat_sysfs_path(objpath, _obj->path);
if (res) {
return res;
}
res = cat_sysfs_path(objpath, "/");
if (res) {
return res;
}
res = cat_sysfs_path(objpath, name);
if (res) {
return res;
}
return make_sysfs_object(objpath, name, object, flags, _obj->handle);
}
fpga_result __FPGA_API__ xfpga_fpgaDestroyObject(fpga_object *obj)
{
if (NULL == obj || NULL == *obj) {
FPGA_MSG("Invalid object pointer");
return FPGA_INVALID_PARAM;
}
struct _fpga_object *_obj = (struct _fpga_object *)*obj;
FREE_IF(_obj->path);
FREE_IF(_obj->name);
FREE_IF(_obj->buffer);
while (_obj->size && _obj->objects) {
if (xfpga_fpgaDestroyObject(&_obj->objects[--_obj->size])) {
FPGA_ERR("Error freeing subobject");
}
}
FREE_IF(_obj->objects);
free(_obj);
*obj = NULL;
return FPGA_OK;
}
fpga_result __FPGA_API__ xfpga_fpgaObjectGetSize(fpga_object obj,
uint32_t *size,
int flags)
{
fpga_result res = FPGA_OK;
ASSERT_NOT_NULL(obj);
ASSERT_NOT_NULL(size);
if (flags & FPGA_OBJECT_SYNC) {
res = sync_object(obj);
if (res) {
return res;
}
}
struct _fpga_object *_obj = (struct _fpga_object *)obj;
*size = _obj->size;
return res;
}
fpga_result __FPGA_API__ xfpga_fpgaObjectRead64(fpga_object obj,
uint64_t *value,
int flags)
{
struct _fpga_object *_obj = (struct _fpga_object *)obj;
fpga_result res = FPGA_OK;
if (_obj->type != FPGA_SYSFS_FILE) {
return FPGA_INVALID_PARAM;
}
if (flags & FPGA_OBJECT_SYNC) {
res = sync_object(obj);
}
if (res) {
return res;
}
if (flags & FPGA_OBJECT_RAW) {
*value = *(uint64_t *)_obj->buffer;
} else {
*value = strtoull((char *)_obj->buffer, NULL, 0);
}
return FPGA_OK;
}
fpga_result __FPGA_API__ xfpga_fpgaObjectRead(fpga_object obj,
uint8_t *buffer,
size_t offset,
size_t len,
int flags)
{
struct _fpga_object *_obj = (struct _fpga_object *)obj;
fpga_result res = FPGA_OK;
ASSERT_NOT_NULL(obj);
ASSERT_NOT_NULL(buffer);
if (_obj->type != FPGA_SYSFS_FILE) {
return FPGA_INVALID_PARAM;
}
if (offset + len > _obj->size) {
return FPGA_INVALID_PARAM;
}
if (flags & FPGA_OBJECT_SYNC) {
res = sync_object(obj);
if (res) {
return res;
}
}
if (offset + len > _obj->size) {
FPGA_ERR("Bytes requested exceed object size");
return FPGA_INVALID_PARAM;
}
memcpy_s(buffer, len, _obj->buffer + offset, len);
return FPGA_OK;
}
fpga_result __FPGA_API__ xfpga_fpgaObjectWrite64(fpga_object obj,
uint64_t value,
int flags)
{
struct _fpga_object *_obj = (struct _fpga_object *)obj;
size_t bytes_written = 0;
int fd = -1;
fpga_result res = FPGA_OK;
errno_t err;
ASSERT_NOT_NULL(obj);
ASSERT_NOT_NULL(_obj->handle);
if (_obj->type != FPGA_SYSFS_FILE) {
return FPGA_INVALID_PARAM;
}
res = handle_check_and_lock(_obj->handle);
if (res != FPGA_OK) {
return res;
}
if (_obj->max_size) {
memset_s(_obj->buffer, _obj->max_size, 0);
}
if (flags & FPGA_OBJECT_RAW) {
_obj->size = sizeof(uint64_t);
*(uint64_t *)_obj->buffer = value;
} else {
snprintf_s_l((char *)_obj->buffer, _obj->max_size, "0x%" PRIx64,
value);
_obj->size = (size_t)strlen((const char *)_obj->buffer);
}
fd = open(_obj->path, _obj->perm);
if (fd < 0) {
FPGA_ERR("Error opening %s: %s", _obj->path, strerror(errno));
res = FPGA_EXCEPTION;
goto out_unlock;
}
lseek(fd, 0, SEEK_SET);
bytes_written = eintr_write(fd, _obj->buffer, _obj->size);
if (bytes_written != _obj->size) {
FPGA_ERR("Did not write 64-bit value: %s", strerror(errno));
res = FPGA_EXCEPTION;
}
out_unlock:
if (fd >= 0)
close(fd);
err = pthread_mutex_unlock(
&((struct _fpga_handle *)_obj->handle)->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(errno));
res = FPGA_EXCEPTION;
}
return res;
}
 | 1 | 17,270 | What if `..` appears, but not as the first character, e.g. "errors/../../../../../../../../../../../"? The macro only inspects _N[0]. | OPAE-opae-sdk | c |
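To make the concern concrete, here is a stricter check (a sketch, not OPAE code; name_is_safe is a made-up helper) that rejects absolute paths and any "." or ".." segment anywhere in the name, not only at the first character as VALIDATE_NAME does:

#include <string.h>
#include <stdbool.h>

static bool name_is_safe(const char *name)
{
	const char *p = name;
	size_t seg;

	if (!name || name[0] == '\0' || name[0] == '/')
		return false;
	while (*p) {
		seg = strcspn(p, "/");          /* length of current segment */
		if (seg == 0)                   /* empty segment, e.g. "a//b" */
			return false;
		if ((seg == 1 && p[0] == '.') ||
		    (seg == 2 && p[0] == '.' && p[1] == '.'))
			return false;           /* "." or ".." segment */
		p += seg;
		if (*p == '/')
			p++;
	}
	return true;
}

With this sketch, "errors/../../x" is rejected at the first ".." segment even though its first character is neither '.' nor '/'.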
@@ -81,7 +81,7 @@ class Guidance < ActiveRecord::Base
# Returns whether or not a given user can view a given guidance
# we define guidances viewable to a user by those owned by a guidance group:
- # owned by the managing curation center
+ # owned by the default orgs
# owned by a funder organisation
# owned by an organisation, of which the user is a member
# | 1 | # frozen_string_literal: true
# Guidance provides information from organisations to Users, helping them when
# answering questions. (e.g. "Here's how to think about your data
# protection responsibilities...")
#
# == Schema Information
#
# Table name: guidances
#
# id :integer not null, primary key
# published :boolean
# text :text
# created_at :datetime not null
# updated_at :datetime not null
# guidance_group_id :integer
#
# Indexes
#
# index_guidances_on_guidance_group_id (guidance_group_id)
#
# Foreign Keys
#
# fk_rails_... (guidance_group_id => guidance_groups.id)
#
# [+Project:+] DMPRoadmap
# [+Description:+]
# This class keeps the information organisations enter to support users
# when answering questions.
# It always belongs to a guidance group class and it can be linked directly
# to a question or through one or more themes
# [+Created:+] 07/07/2014
# [+Copyright:+] Digital Curation Centre and California Digital Library
class Guidance < ActiveRecord::Base
include GlobalHelpers
include ValidationMessages
include ValidationValues
# ================
# = Associations =
# ================
belongs_to :guidance_group
has_and_belongs_to_many :themes, join_table: "themes_in_guidance"
# ===============
# = Validations =
# ===============
validates :text, presence: { message: PRESENCE_MESSAGE }
validates :guidance_group, presence: { message: PRESENCE_MESSAGE }
validates :published, inclusion: { message: INCLUSION_MESSAGE,
in: BOOLEAN_VALUES }
validates :themes, presence: { message: PRESENCE_MESSAGE }, if: :published?
# Retrieves every guidance associated to an org
scope :by_org, -> (org) {
joins(:guidance_group).merge(GuidanceGroup.by_org(org))
}
scope :search, -> (term) {
search_pattern = "%#{term}%"
joins(:guidance_group)
.where("lower(guidances.text) LIKE lower(?) OR " +
"lower(guidance_groups.name) LIKE lower(?)",
search_pattern,
search_pattern)
}
# =================
# = Class methods =
# =================
# Returns whether or not a given user can view a given guidance
# we define guidances viewable to a user by those owned by a guidance group:
# owned by the managing curation center
# owned by a funder organisation
# owned by an organisation, of which the user is a member
#
# id - The Integer id for a guidance
# user - A User object
#
# Returns Boolean
def self.can_view?(user, id)
guidance = Guidance.find_by(id: id)
viewable = false
unless guidance.nil?
unless guidance.guidance_group.nil?
# guidances are viewable if they are owned by the user's org
if guidance.guidance_group.org == user.org
viewable = true
end
# guidance groups are viewable if they are owned by the Managing
# Curation Center
if Org.managing_orgs.include?(guidance.guidance_group.org)
viewable = true
end
# guidance groups are viewable if they are owned by a funder
if Org.funder.include?(guidance.guidance_group.org)
viewable = true
end
end
end
return viewable
end
# Returns a list of all guidances which a specified user can view
# we define guidances viewable to a user by those owned by a guidance group:
# owned by the Managing Curation Center
# owned by a funder organisation
# owned by an organisation, of which the user is a member
#
# user - A User object
#
# Returns Array
def self.all_viewable(user)
managing_groups = Org.includes(guidance_groups: :guidances)
.managing_orgs.collect { |o| o.guidance_groups }
# find all groups owned by a Funder organisation
funder_groups = Org.includes(guidance_groups: :guidances)
.funder.collect { |org| org.guidance_groups }
# find all groups owned by any of the user's organisations
organisation_groups = user.org.guidance_groups
# find all guidances belonging to any of the viewable groups
all_viewable_groups = (managing_groups +
funder_groups +
organisation_groups).flatten
all_viewable_guidances = all_viewable_groups.collect do |group|
group.guidances
end
# pass the list of viewable guidances to the view
return all_viewable_guidances.flatten
end
# Determine if a guidance is in a group which belongs to a specified
# organisation
#
# org_id - The Integer id for an organisation
#
# Returns Boolean
def in_group_belonging_to?(org_id)
unless guidance_group.nil?
if guidance_group.org.id == org_id
return true
end
end
return false
end
end
 | 1 | 18,853 | Thanks, this should make things a bit easier for people who pick up the codebase but aren't a `curation center`. | DMPRoadmap-roadmap | rb |
@@ -122,6 +122,8 @@ public class AzkabanWebServer extends AzkabanServer {
private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
private static final String DEFAULT_STATIC_DIR = "";
+
+ @Deprecated
private static AzkabanWebServer app;
private final VelocityEngine velocityEngine;
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.webapp;
import static azkaban.ServiceProvider.SERVICE_PROVIDER;
import static java.util.Objects.requireNonNull;
import azkaban.AzkabanCommonModule;
import azkaban.Constants;
import azkaban.database.AzkabanDatabaseSetup;
import azkaban.executor.ExecutorManager;
import azkaban.jmx.JmxExecutorManager;
import azkaban.jmx.JmxJettyServer;
import azkaban.jmx.JmxTriggerManager;
import azkaban.metrics.MetricsManager;
import azkaban.project.ProjectManager;
import azkaban.scheduler.ScheduleManager;
import azkaban.server.AzkabanServer;
import azkaban.server.session.SessionCache;
import azkaban.trigger.TriggerManager;
import azkaban.trigger.TriggerManagerException;
import azkaban.trigger.builtin.BasicTimeChecker;
import azkaban.trigger.builtin.CreateTriggerAction;
import azkaban.trigger.builtin.ExecuteFlowAction;
import azkaban.trigger.builtin.ExecutionChecker;
import azkaban.trigger.builtin.KillExecutionAction;
import azkaban.trigger.builtin.SlaAlertAction;
import azkaban.trigger.builtin.SlaChecker;
import azkaban.user.UserManager;
import azkaban.utils.FileIOUtils;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import azkaban.utils.StdOutErrRedirect;
import azkaban.utils.Utils;
import azkaban.webapp.plugin.PluginRegistry;
import azkaban.webapp.plugin.TriggerPlugin;
import azkaban.webapp.plugin.ViewerPlugin;
import azkaban.webapp.servlet.AbstractAzkabanServlet;
import azkaban.webapp.servlet.ExecutorServlet;
import azkaban.webapp.servlet.HistoryServlet;
import azkaban.webapp.servlet.IndexRedirectServlet;
import azkaban.webapp.servlet.JMXHttpServlet;
import azkaban.webapp.servlet.ProjectManagerServlet;
import azkaban.webapp.servlet.ProjectServlet;
import azkaban.webapp.servlet.ScheduleServlet;
import azkaban.webapp.servlet.StatsServlet;
import azkaban.webapp.servlet.TriggerManagerServlet;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.linkedin.restli.server.RestliServlet;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Constructor;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.log4j.jmx.HierarchyDynamicMBean;
import org.apache.velocity.app.VelocityEngine;
import org.joda.time.DateTimeZone;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.DefaultServlet;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
/**
* The Azkaban Jetty server class
*
* Global azkaban properties for setup. All of them are optional unless
* otherwise marked: azkaban.name - The displayed name of this instance.
* azkaban.label - Short descriptor of this Azkaban instance. azkaban.color -
* Theme color azkaban.temp.dir - Temp dir used by Azkaban for various file
* uses. web.resource.dir - The directory that contains the static web files.
* default.timezone.id - The timezone code. I.E. America/Los Angeles
*
* user.manager.class - The UserManager class used for the user manager. Default
* is XmlUserManager. project.manager.class - The ProjectManager to load
* projects project.global.properties - The base properties inherited by all
* projects and jobs
*
* jetty.maxThreads - # of threads for jetty jetty.ssl.port - The ssl port used
* for sessionizing. jetty.keystore - Jetty keystore . jetty.keypassword - Jetty
* keystore password jetty.truststore - Jetty truststore jetty.trustpassword -
* Jetty truststore password
*/
public class AzkabanWebServer extends AzkabanServer {
public static final String DEFAULT_CONF_PATH = "conf";
private static final String AZKABAN_ACCESS_LOGGER_NAME =
"azkaban.webapp.servlet.LoginAbstractAzkabanServlet";
private static final Logger logger = Logger.getLogger(AzkabanWebServer.class);
private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
private static final String DEFAULT_STATIC_DIR = "";
private static AzkabanWebServer app;
private final VelocityEngine velocityEngine;
private final Server server;
private final UserManager userManager;
private final ProjectManager projectManager;
private final ExecutorManager executorManager;
private final ScheduleManager scheduleManager;
private final TriggerManager triggerManager;
private final MetricsManager metricsManager;
private final Props props;
private final SessionCache sessionCache;
private final List<ObjectName> registeredMBeans = new ArrayList<>();
//queuedThreadPool is mainly used to monitor jetty threadpool.
private QueuedThreadPool queuedThreadPool;
private Map<String, TriggerPlugin> triggerPlugins;
private MBeanServer mbeanServer;
@Inject
public AzkabanWebServer(final Props props,
final Server server,
final ExecutorManager executorManager,
final ProjectManager projectManager,
final TriggerManager triggerManager,
final MetricsManager metricsManager,
final SessionCache sessionCache,
final UserManager userManager,
final ScheduleManager scheduleManager,
final VelocityEngine velocityEngine) {
this.props = requireNonNull(props, "props is null.");
this.server = requireNonNull(server, "server is null.");
this.executorManager = requireNonNull(executorManager, "executorManager is null.");
this.projectManager = requireNonNull(projectManager, "projectManager is null.");
this.triggerManager = requireNonNull(triggerManager, "triggerManager is null.");
this.metricsManager = requireNonNull(metricsManager, "metricsManager is null.");
this.sessionCache = requireNonNull(sessionCache, "sessionCache is null.");
this.userManager = requireNonNull(userManager, "userManager is null.");
this.scheduleManager = requireNonNull(scheduleManager, "scheduleManager is null.");
this.velocityEngine = requireNonNull(velocityEngine, "velocityEngine is null.");
loadBuiltinCheckersAndActions();
// load all trigger agents here
final String triggerPluginDir =
props.getString("trigger.plugin.dir", "plugins/triggers");
new PluginCheckerAndActionsLoader().load(triggerPluginDir);
// Setup time zone
if (props.containsKey(DEFAULT_TIMEZONE_ID)) {
final String timezone = props.getString(DEFAULT_TIMEZONE_ID);
System.setProperty("user.timezone", timezone);
TimeZone.setDefault(TimeZone.getTimeZone(timezone));
DateTimeZone.setDefault(DateTimeZone.forID(timezone));
logger.info("Setting timezone to " + timezone);
}
configureMBeanServer();
}
public static AzkabanWebServer getInstance() {
return app;
}
/**
* Azkaban using Jetty
*/
public static void main(final String[] args) throws Exception {
// Redirect all std out and err messages into log4j
StdOutErrRedirect.redirectOutAndErrToLog();
logger.info("Starting Jetty Azkaban Web Server...");
final Props props = AzkabanServer.loadProps(args);
if (props == null) {
logger.error("Azkaban Properties not loaded. Exiting..");
System.exit(1);
}
/* Initialize Guice Injector */
final Injector injector = Guice
.createInjector(new AzkabanCommonModule(props), new AzkabanWebServerModule());
SERVICE_PROVIDER.setInjector(injector);
launch(injector.getInstance(AzkabanWebServer.class));
}
public static void launch(final AzkabanWebServer webServer) throws Exception {
/* This creates the Web Server instance */
app = webServer;
// TODO refactor code into ServerProvider
prepareAndStartServer(webServer.getServerProps(), app.server);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
logTopMemoryConsumers();
} catch (final Exception e) {
logger.info(("Exception when logging top memory consumers"), e);
}
logger.info("Shutting down http server...");
try {
app.close();
} catch (final Exception e) {
logger.error("Error while shutting down http server.", e);
}
logger.info("kk thx bye.");
}
public void logTopMemoryConsumers() throws Exception, IOException {
if (new File("/bin/bash").exists() && new File("/bin/ps").exists()
&& new File("/usr/bin/head").exists()) {
logger.info("logging top memeory consumer");
final java.lang.ProcessBuilder processBuilder =
new java.lang.ProcessBuilder("/bin/bash", "-c",
"/bin/ps aux --sort -rss | /usr/bin/head");
final Process p = processBuilder.start();
p.waitFor();
final InputStream is = p.getInputStream();
final java.io.BufferedReader reader =
new java.io.BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
String line = null;
while ((line = reader.readLine()) != null) {
logger.info(line);
}
is.close();
}
}
});
}
private static void prepareAndStartServer(final Props azkabanSettings, final Server server)
throws Exception {
validateDatabaseVersion(azkabanSettings);
configureRoutes(server, azkabanSettings);
if (azkabanSettings.getBoolean(Constants.ConfigurationKeys.IS_METRICS_ENABLED, false)) {
app.startWebMetrics();
}
try {
server.start();
logger.info("Server started");
} catch (final Exception e) {
logger.warn(e);
Utils.croak(e.getMessage(), 1);
}
}
private static void validateDatabaseVersion(final Props azkabanSettings)
throws IOException, SQLException {
final boolean checkDB = azkabanSettings
.getBoolean(AzkabanDatabaseSetup.DATABASE_CHECK_VERSION, false);
if (checkDB) {
final AzkabanDatabaseSetup setup = new AzkabanDatabaseSetup(azkabanSettings);
setup.loadTableInfo();
if (setup.needsUpdating()) {
logger.error("Database is out of date.");
setup.printUpgradePlan();
logger.error("Exiting with error.");
System.exit(-1);
}
}
}
private static void configureRoutes(final Server server, final Props azkabanSettings)
throws TriggerManagerException {
final int maxThreads = azkabanSettings
.getInt("jetty.maxThreads", Constants.DEFAULT_JETTY_MAX_THREAD_COUNT);
final QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads);
app.setThreadPool(httpThreadPool);
server.setThreadPool(httpThreadPool);
final String staticDir =
azkabanSettings.getString("web.resource.dir", DEFAULT_STATIC_DIR);
logger.info("Setting up web resource dir " + staticDir);
final Context root = new Context(server, "/", Context.SESSIONS);
root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE);
final String defaultServletPath =
azkabanSettings.getString("azkaban.default.servlet.path", "/index");
root.setResourceBase(staticDir);
final ServletHolder indexRedirect =
new ServletHolder(new IndexRedirectServlet(defaultServletPath));
root.addServlet(indexRedirect, "/");
final ServletHolder index = new ServletHolder(new ProjectServlet());
root.addServlet(index, "/index");
final ServletHolder staticServlet = new ServletHolder(new DefaultServlet());
root.addServlet(staticServlet, "/css/*");
root.addServlet(staticServlet, "/js/*");
root.addServlet(staticServlet, "/images/*");
root.addServlet(staticServlet, "/fonts/*");
root.addServlet(staticServlet, "/favicon.ico");
root.addServlet(new ServletHolder(new ProjectManagerServlet()), "/manager");
root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor");
root.addServlet(new ServletHolder(new HistoryServlet()), "/history");
root.addServlet(new ServletHolder(new ScheduleServlet()), "/schedule");
root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx");
root.addServlet(new ServletHolder(new TriggerManagerServlet()), "/triggers");
root.addServlet(new ServletHolder(new StatsServlet()), "/stats");
final ServletHolder restliHolder = new ServletHolder(new RestliServlet());
restliHolder.setInitParameter("resourcePackages", "azkaban.restli");
root.addServlet(restliHolder, "/restli/*");
final String viewerPluginDir =
azkabanSettings.getString("viewer.plugin.dir", "plugins/viewer");
loadViewerPlugins(root, viewerPluginDir, app.getVelocityEngine());
// triggerplugin
final Map<String, TriggerPlugin> triggerPlugins =
new TriggerPluginLoader(azkabanSettings).loadTriggerPlugins(root);
app.setTriggerPlugins(triggerPlugins);
// always have basic time trigger
// TODO: find something else to do the job
app.getTriggerManager().start();
root.setAttribute(Constants.AZKABAN_SERVLET_CONTEXT_KEY, app);
}
private static void loadViewerPlugins(final Context root, final String pluginPath,
final VelocityEngine ve) {
final File viewerPluginPath = new File(pluginPath);
if (!viewerPluginPath.exists()) {
return;
}
final ClassLoader parentLoader = AzkabanWebServer.class.getClassLoader();
final File[] pluginDirs = viewerPluginPath.listFiles();
final ArrayList<String> jarPaths = new ArrayList<>();
for (final File pluginDir : pluginDirs) {
if (!pluginDir.exists()) {
logger.error("Error viewer plugin path " + pluginDir.getPath()
+ " doesn't exist.");
continue;
}
if (!pluginDir.isDirectory()) {
logger.error("The plugin path " + pluginDir + " is not a directory.");
continue;
}
// Load the conf directory
final File propertiesDir = new File(pluginDir, "conf");
Props pluginProps = null;
if (propertiesDir.exists() && propertiesDir.isDirectory()) {
final File propertiesFile = new File(propertiesDir, "plugin.properties");
final File propertiesOverrideFile =
new File(propertiesDir, "override.properties");
if (propertiesFile.exists()) {
if (propertiesOverrideFile.exists()) {
pluginProps =
PropsUtils.loadProps(null, propertiesFile,
propertiesOverrideFile);
} else {
pluginProps = PropsUtils.loadProps(null, propertiesFile);
}
} else {
logger.error("Plugin conf file " + propertiesFile + " not found.");
continue;
}
} else {
logger.error("Plugin conf path " + propertiesDir + " not found.");
continue;
}
final String pluginName = pluginProps.getString("viewer.name");
final String pluginWebPath = pluginProps.getString("viewer.path");
final String pluginJobTypes = pluginProps.getString("viewer.jobtypes", null);
final int pluginOrder = pluginProps.getInt("viewer.order", 0);
final boolean pluginHidden = pluginProps.getBoolean("viewer.hidden", false);
final List<String> extLibClasspath =
pluginProps.getStringList("viewer.external.classpaths",
(List<String>) null);
final String pluginClass = pluginProps.getString("viewer.servlet.class");
if (pluginClass == null) {
logger.error("Viewer class is not set.");
} else {
logger.info("Plugin class " + pluginClass);
}
URLClassLoader urlClassLoader = null;
final File libDir = new File(pluginDir, "lib");
if (libDir.exists() && libDir.isDirectory()) {
final File[] files = libDir.listFiles();
final ArrayList<URL> urls = new ArrayList<>();
for (int i = 0; i < files.length; ++i) {
try {
final URL url = files[i].toURI().toURL();
urls.add(url);
} catch (final MalformedURLException e) {
logger.error(e);
}
}
// Load any external libraries.
if (extLibClasspath != null) {
for (final String extLib : extLibClasspath) {
final File extLibFile = new File(pluginDir, extLib);
if (extLibFile.exists()) {
if (extLibFile.isDirectory()) {
// extLibFile is a directory; load all the files in the
// directory.
final File[] extLibFiles = extLibFile.listFiles();
for (int i = 0; i < extLibFiles.length; ++i) {
try {
final URL url = extLibFiles[i].toURI().toURL();
urls.add(url);
} catch (final MalformedURLException e) {
logger.error(e);
}
}
} else { // extLibFile is a file
try {
final URL url = extLibFile.toURI().toURL();
urls.add(url);
} catch (final MalformedURLException e) {
logger.error(e);
}
}
} else {
logger.error("External library path "
+ extLibFile.getAbsolutePath() + " not found.");
continue;
}
}
}
urlClassLoader =
new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader);
} else {
logger
.error("Library path " + libDir.getAbsolutePath() + " not found.");
continue;
}
Class<?> viewerClass = null;
try {
viewerClass = urlClassLoader.loadClass(pluginClass);
} catch (final ClassNotFoundException e) {
logger.error("Class " + pluginClass + " not found.");
continue;
}
final String source = FileIOUtils.getSourcePathFromClass(viewerClass);
logger.info("Source jar " + source);
jarPaths.add("jar:file:" + source);
Constructor<?> constructor = null;
try {
constructor = viewerClass.getConstructor(Props.class);
} catch (final NoSuchMethodException e) {
logger.error("Constructor not found in " + pluginClass);
continue;
}
Object obj = null;
try {
obj = constructor.newInstance(pluginProps);
} catch (final Exception e) {
logger.error(e);
logger.error(e.getCause());
}
if (!(obj instanceof AbstractAzkabanServlet)) {
logger.error("The object is not an AbstractAzkabanServlet");
continue;
}
final AbstractAzkabanServlet avServlet = (AbstractAzkabanServlet) obj;
root.addServlet(new ServletHolder(avServlet), "/" + pluginWebPath + "/*");
PluginRegistry.getRegistry().register(
new ViewerPlugin(pluginName, pluginWebPath, pluginOrder,
pluginHidden, pluginJobTypes));
}
// Velocity needs the jar resource paths to be set.
final String jarResourcePath = StringUtils.join(jarPaths, ", ");
logger.info("Setting jar resource path " + jarResourcePath);
ve.addProperty("jar.resource.loader.path", jarResourcePath);
}
/**
* Loads the Azkaban property file from the AZKABAN_HOME conf directory
*/
private static Props loadConfigurationFromAzkabanHome() {
final String azkabanHome = System.getenv("AZKABAN_HOME");
if (azkabanHome == null) {
logger.error("AZKABAN_HOME not set. Will try default.");
return null;
}
if (!new File(azkabanHome).isDirectory()
|| !new File(azkabanHome).canRead()) {
logger.error(azkabanHome + " is not a readable directory.");
return null;
}
final File confPath = new File(azkabanHome, DEFAULT_CONF_PATH);
if (!confPath.exists() || !confPath.isDirectory() || !confPath.canRead()) {
logger
.error(azkabanHome + " does not contain a readable conf directory.");
return null;
}
return loadAzkabanConfigurationFromDirectory(confPath);
}
private void startWebMetrics() throws Exception {
// The number of idle threads in Jetty thread pool
this.metricsManager.addGauge("JETTY-NumIdleThreads", this.queuedThreadPool::getIdleThreads);
// The number of threads in Jetty thread pool. The formula is:
// threads = idleThreads + busyThreads
this.metricsManager.addGauge("JETTY-NumTotalThreads", this.queuedThreadPool::getThreads);
// The number of requests queued in the Jetty thread pool.
this.metricsManager.addGauge("JETTY-NumQueueSize", this.queuedThreadPool::getQueueSize);
this.metricsManager.addGauge("WEB-NumQueuedFlows", this.executorManager::getQueuedFlowSize);
/*
* TODO: Currently {@link ExecutorManager#getRunningFlows()} includes both running and non-dispatched flows.
* Originally we would like to do a subtraction between getRunningFlows and {@link ExecutorManager#getQueuedFlowSize()},
* in order to have the correct runnable flows.
* However, both getRunningFlows and getQueuedFlowSize are not synchronized, such that we can not make
* a thread safe subtraction. We need to fix this in the future.
*/
this.metricsManager
.addGauge("WEB-NumRunningFlows", () -> this.executorManager.getRunningFlows().size());
logger.info("starting reporting Web Server Metrics");
this.metricsManager.startReporting("AZ-WEB", this.props);
}
private void loadBuiltinCheckersAndActions() {
logger.info("Loading built-in checker and action types");
ExecuteFlowAction.setExecutorManager(this.executorManager);
ExecuteFlowAction.setProjectManager(this.projectManager);
ExecuteFlowAction.setTriggerManager(this.triggerManager);
KillExecutionAction.setExecutorManager(this.executorManager);
CreateTriggerAction.setTriggerManager(this.triggerManager);
ExecutionChecker.setExecutorManager(this.executorManager);
this.triggerManager.registerCheckerType(BasicTimeChecker.type, BasicTimeChecker.class);
this.triggerManager.registerCheckerType(SlaChecker.type, SlaChecker.class);
this.triggerManager.registerCheckerType(ExecutionChecker.type, ExecutionChecker.class);
this.triggerManager.registerActionType(ExecuteFlowAction.type, ExecuteFlowAction.class);
this.triggerManager.registerActionType(KillExecutionAction.type, KillExecutionAction.class);
this.triggerManager.registerActionType(SlaAlertAction.type, SlaAlertAction.class);
this.triggerManager.registerActionType(CreateTriggerAction.type, CreateTriggerAction.class);
}
/**
* Returns the web session cache.
*/
@Override
public SessionCache getSessionCache() {
return this.sessionCache;
}
/**
* Returns the velocity engine for pages to use.
*/
@Override
public VelocityEngine getVelocityEngine() {
return this.velocityEngine;
}
@Override
public UserManager getUserManager() {
return this.userManager;
}
public ProjectManager getProjectManager() {
return this.projectManager;
}
public ExecutorManager getExecutorManager() {
return this.executorManager;
}
public ScheduleManager getScheduleManager() {
return this.scheduleManager;
}
public TriggerManager getTriggerManager() {
return this.triggerManager;
}
/**
* Returns the global azkaban properties
*/
@Override
public Props getServerProps() {
return this.props;
}
public Map<String, TriggerPlugin> getTriggerPlugins() {
return this.triggerPlugins;
}
private void setTriggerPlugins(final Map<String, TriggerPlugin> triggerPlugins) {
this.triggerPlugins = triggerPlugins;
}
private void configureMBeanServer() {
logger.info("Registering MBeans...");
this.mbeanServer = ManagementFactory.getPlatformMBeanServer();
registerMbean("jetty", new JmxJettyServer(this.server));
registerMbean("triggerManager", new JmxTriggerManager(this.triggerManager));
if (this.executorManager instanceof ExecutorManager) {
registerMbean("executorManager", new JmxExecutorManager(
(ExecutorManager) this.executorManager));
}
// Register Log4J loggers as JMX beans so the log level can be
// updated via JConsole or Java VisualVM
final HierarchyDynamicMBean log4jMBean = new HierarchyDynamicMBean();
registerMbean("log4jmxbean", log4jMBean);
final ObjectName accessLogLoggerObjName =
log4jMBean.addLoggerMBean(AZKABAN_ACCESS_LOGGER_NAME);
if (accessLogLoggerObjName == null) {
System.out
.println(
"************* loginLoggerObjName is null, make sure there is a logger with name "
+ AZKABAN_ACCESS_LOGGER_NAME);
} else {
System.out.println("******** loginLoggerObjName: "
+ accessLogLoggerObjName.getCanonicalName());
}
}
public void close() {
try {
for (final ObjectName name : this.registeredMBeans) {
this.mbeanServer.unregisterMBean(name);
logger.info("Jmx MBean " + name.getCanonicalName() + " unregistered.");
}
} catch (final Exception e) {
logger.error("Failed to cleanup MBeanServer", e);
}
this.scheduleManager.shutdown();
this.executorManager.shutdown();
try {
this.server.stop();
} catch (final Exception e) {
// Catch all while closing server
logger.error(e);
}
this.server.destroy();
}
private void registerMbean(final String name, final Object mbean) {
final Class<?> mbeanClass = mbean.getClass();
final ObjectName mbeanName;
try {
mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name);
this.mbeanServer.registerMBean(mbean, mbeanName);
logger.info("Bean " + mbeanClass.getCanonicalName() + " registered.");
this.registeredMBeans.add(mbeanName);
} catch (final Exception e) {
logger.error("Error registering mbean " + mbeanClass.getCanonicalName(),
e);
}
}
public List<ObjectName> getMbeanNames() {
return this.registeredMBeans;
}
public MBeanInfo getMBeanInfo(final ObjectName name) {
try {
return this.mbeanServer.getMBeanInfo(name);
} catch (final Exception e) {
logger.error(e);
return null;
}
}
public Object getMBeanAttribute(final ObjectName name, final String attribute) {
try {
return this.mbeanServer.getAttribute(name, attribute);
} catch (final Exception e) {
logger.error(e);
return null;
}
}
private void setThreadPool(final QueuedThreadPool queuedThreadPool) {
this.queuedThreadPool = queuedThreadPool;
}
}
 | 1 | 14,219 | Why not remove it? | azkaban-azkaban | java |
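For context, the field backs getInstance() and is still read by the static setup path (prepareAndStartServer and configureRoutes above), so it presumably cannot be deleted until those callers are injected; the deprecation marks that intent. A hypothetical sketch of the injected alternative (StatusReporter is made up, not an Azkaban class):

// Guice constructor injection instead of the static
// AzkabanWebServer.getInstance() lookup:
public class StatusReporter {
  private final AzkabanWebServer webServer;

  @Inject
  public StatusReporter(final AzkabanWebServer webServer) {
    this.webServer = webServer; // was: AzkabanWebServer.getInstance()
  }
}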
@@ -84,9 +84,9 @@ func NewCliApp() *cli.App {
EnvVar: "TEMPORAL_CLI_TLS_CA",
},
cli.BoolFlag{
- Name: FlagTLSEnableHostVerification,
- Usage: "validates hostname of temporal cluster against server certificate",
- EnvVar: "TEMPORAL_CLI_TLS_ENABLE_HOST_VERIFICATION",
+ Name: FlagTLSDisableHostVerification,
+ Usage: "whether to validates hostname of temporal cluster against server certificate",
+ EnvVar: "TEMPORAL_CLI_TLS_DISABLE_HOST_VERIFICATION",
},
cli.StringFlag{
Name: FlagTLSServerName, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cli
import (
"github.com/urfave/cli"
"go.temporal.io/server/common/headers"
)
// SetFactory is used to set the ClientFactory global
func SetFactory(factory ClientFactory) {
cFactory = factory
}
// NewCliApp instantiates a new instance of the CLI application.
func NewCliApp() *cli.App {
app := cli.NewApp()
app.Name = "tctl"
app.Usage = "A command-line tool for Temporal users"
app.Version = headers.CLIVersion
app.Flags = []cli.Flag{
cli.StringFlag{
Name: FlagAddressWithAlias,
Value: "",
Usage: "host:port for Temporal frontend service",
EnvVar: "TEMPORAL_CLI_ADDRESS",
},
cli.StringFlag{
Name: FlagNamespaceWithAlias,
Value: "default",
Usage: "Temporal workflow namespace",
EnvVar: "TEMPORAL_CLI_NAMESPACE",
},
cli.IntFlag{
Name: FlagContextTimeoutWithAlias,
Value: defaultContextTimeoutInSeconds,
Usage: "optional timeout for context of RPC call in seconds",
EnvVar: "TEMPORAL_CONTEXT_TIMEOUT",
},
cli.BoolFlag{
Name: FlagAutoConfirm,
Usage: "automatically confirm all prompts",
Hidden: true,
},
cli.StringFlag{
Name: FlagTLSCertPath,
Value: "",
Usage: "path to x509 certificate",
EnvVar: "TEMPORAL_CLI_TLS_CERT",
},
cli.StringFlag{
Name: FlagTLSKeyPath,
Value: "",
Usage: "path to private key",
EnvVar: "TEMPORAL_CLI_TLS_KEY",
},
cli.StringFlag{
Name: FlagTLSCaPath,
Value: "",
Usage: "path to server CA certificate",
EnvVar: "TEMPORAL_CLI_TLS_CA",
},
cli.BoolFlag{
Name: FlagTLSEnableHostVerification,
Usage: "validates hostname of temporal cluster against server certificate",
EnvVar: "TEMPORAL_CLI_TLS_ENABLE_HOST_VERIFICATION",
},
cli.StringFlag{
Name: FlagTLSServerName,
Value: "",
Usage: "override for target server name",
EnvVar: "TEMPORAL_CLI_TLS_SERVER_NAME",
},
}
app.Commands = []cli.Command{
{
Name: "namespace",
Aliases: []string{"n"},
Usage: "Operate Temporal namespace",
Subcommands: newNamespaceCommands(),
},
{
Name: "workflow",
Aliases: []string{"wf"},
Usage: "Operate Temporal workflow",
Subcommands: newWorkflowCommands(),
},
{
Name: "activity",
Aliases: []string{"act"},
Usage: "operate activities of workflow",
Subcommands: newActivityCommands(),
},
{
Name: "taskqueue",
Aliases: []string{"tq"},
Usage: "Operate Temporal task queue",
Subcommands: newTaskQueueCommands(),
},
{
Name: "batch",
Usage: "batch operation on a list of workflows from query.",
Subcommands: newBatchCommands(),
},
{
Name: "admin",
Aliases: []string{"adm"},
Usage: "Run admin operation",
Subcommands: []cli.Command{
{
Name: "workflow",
Aliases: []string{"wf"},
Usage: "Run admin operation on workflow",
Subcommands: newAdminWorkflowCommands(),
},
{
Name: "shard",
Aliases: []string{"shar"},
Usage: "Run admin operation on specific shard",
Subcommands: newAdminShardManagementCommands(),
},
{
Name: "history_host",
Aliases: []string{"hist"},
Usage: "Run admin operation on history host",
Subcommands: newAdminHistoryHostCommands(),
},
{
Name: "namespace",
Aliases: []string{"d"},
Usage: "Run admin operation on namespace",
Subcommands: newAdminNamespaceCommands(),
},
{
Name: "elasticsearch",
Aliases: []string{"es"},
Usage: "Run admin operation on ElasticSearch",
Subcommands: newAdminElasticSearchCommands(),
},
{
Name: "taskqueue",
Aliases: []string{"tq"},
Usage: "Run admin operation on taskQueue",
Subcommands: newAdminTaskQueueCommands(),
},
{
Name: "membership",
Usage: "Run admin operation on membership",
Subcommands: newAdminMembershipCommands(),
},
{
Name: "cluster",
Aliases: []string{"cl"},
Usage: "Run admin operation on cluster",
Subcommands: newAdminClusterCommands(),
},
{
Name: "dlq",
Aliases: []string{"dlq"},
Usage: "Run admin operation on DLQ",
Subcommands: newAdminDLQCommands(),
},
{
Name: "db",
Aliases: []string{"db"},
Usage: "Run admin operations on database",
Subcommands: newDBCommands(),
},
{
Name: "decode",
Usage: "Decode payload",
Subcommands: newDecodeCommands(),
},
},
},
{
Name: "cluster",
Aliases: []string{"cl"},
Usage: "Operate Temporal cluster",
Subcommands: newClusterCommands(),
},
}
// set builder if not customized
if cFactory == nil {
SetFactory(NewClientFactory())
}
return app
}
 | 1 | 11,627 | Same nit here: maybe word it as "disables validation of the temporal cluster's server certificate". | temporalio-temporal | go |
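The practical reason for the inversion: urfave/cli bool flags default to false, so a "disable" flag keeps host verification on unless the user explicitly opts out. A sketch of how the flag would typically feed the TLS config (assumed wiring; tlsFromFlags is not a real tctl function):

// Sketch; assumes c is the *cli.Context handed to the command.
func tlsFromFlags(c *cli.Context) *tls.Config {
	return &tls.Config{
		ServerName: c.GlobalString(FlagTLSServerName),
		// BoolFlag defaults to false, so verification stays enabled
		// unless the user passes the disable flag.
		InsecureSkipVerify: c.GlobalBool(FlagTLSDisableHostVerification),
	}
}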
@@ -59,6 +59,7 @@ public class TestFlinkCatalogDatabase extends FlinkCatalogTestBase {
@Test
public void testDefaultDatabase() {
sql("USE CATALOG %s", catalogName);
+ sql("show tables");
Assert.assertEquals("Should use the current catalog", getTableEnv().getCurrentCatalog(), catalogName);
Assert.assertEquals("Should use the configured default namespace", | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import java.io.File;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException;
import org.apache.iceberg.AssertHelpers;
import org.apache.iceberg.Schema;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.types.Types;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
public class TestFlinkCatalogDatabase extends FlinkCatalogTestBase {
public TestFlinkCatalogDatabase(String catalogName, String[] baseNamepace) {
super(catalogName, baseNamepace);
}
@After
public void clean() {
sql("DROP TABLE IF EXISTS %s.tl", flinkDatabase);
sql("DROP DATABASE IF EXISTS %s", flinkDatabase);
}
@Test
public void testCreateNamespace() {
Assert.assertFalse(
"Database should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
Assert.assertTrue("Database should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
}
@Test
public void testDefaultDatabase() {
sql("USE CATALOG %s", catalogName);
Assert.assertEquals("Should use the current catalog", getTableEnv().getCurrentCatalog(), catalogName);
Assert.assertEquals("Should use the configured default namespace",
getTableEnv().getCurrentDatabase(), "default");
}
@Test
public void testDropEmptyDatabase() {
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("DROP DATABASE %s", flinkDatabase);
Assert.assertFalse(
"Namespace should have been dropped",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
}
@Test
public void testDropNonEmptyNamespace() {
Assume.assumeFalse("Hadoop catalog throws IOException: Directory is not empty.", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
validationCatalog.createTable(
TableIdentifier.of(icebergNamespace, "tl"),
new Schema(Types.NestedField.optional(0, "id", Types.LongType.get())));
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Assert.assertTrue("Table should exist", validationCatalog.tableExists(TableIdentifier.of(icebergNamespace, "tl")));
AssertHelpers.assertThrowsCause(
"Should fail if trying to delete a non-empty database",
DatabaseNotEmptyException.class,
String.format("Database %s in catalog %s is not empty.", DATABASE, catalogName),
() -> sql("DROP DATABASE %s", flinkDatabase));
sql("DROP TABLE %s.tl", flinkDatabase);
}
@Test
public void testListTables() {
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
sql("USE CATALOG %s", catalogName);
sql("USE %s", DATABASE);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Assert.assertEquals("Should not list any tables", 0, sql("SHOW TABLES").size());
validationCatalog.createTable(
TableIdentifier.of(icebergNamespace, "tl"),
new Schema(Types.NestedField.optional(0, "id", Types.LongType.get())));
List<Object[]> tables = sql("SHOW TABLES");
Assert.assertEquals("Only 1 table", 1, tables.size());
Assert.assertEquals("Table name should match", "tl", tables.get(0)[0]);
}
@Test
public void testListNamespace() {
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
sql("USE CATALOG %s", catalogName);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
List<Object[]> databases = sql("SHOW DATABASES");
if (isHadoopCatalog) {
Assert.assertEquals("Should have 1 database", 1, databases.size());
Assert.assertEquals("Should have only db database", "db", databases.get(0)[0]);
if (baseNamespace.length > 0) {
// test namespace not belongs to this catalog
validationNamespaceCatalog.createNamespace(Namespace.of(baseNamespace[0], "UNKNOWN_NAMESPACE"));
databases = sql("SHOW DATABASES");
Assert.assertEquals("Should have 1 database", 1, databases.size());
Assert.assertEquals("Should have only db database", "db", databases.get(0)[0]);
}
} else {
// If there are multiple classes extends FlinkTestBase, TestHiveMetastore may loose the creation for default
// database. See HiveMetaStore.HMSHandler.init.
Assert.assertTrue("Should have db database", databases.stream().anyMatch(d -> d[0].equals("db")));
}
}
@Test
public void testCreateNamespaceWithMetadata() {
Assume.assumeFalse("HadoopCatalog does not support namespace metadata", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s WITH ('prop'='value')", flinkDatabase);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Map<String, String> nsMetadata = validationNamespaceCatalog.loadNamespaceMetadata(icebergNamespace);
Assert.assertEquals("Namespace should have expected prop value", "value", nsMetadata.get("prop"));
}
@Test
public void testCreateNamespaceWithComment() {
Assume.assumeFalse("HadoopCatalog does not support namespace metadata", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s COMMENT 'namespace doc'", flinkDatabase);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Map<String, String> nsMetadata = validationNamespaceCatalog.loadNamespaceMetadata(icebergNamespace);
Assert.assertEquals("Namespace should have expected comment", "namespace doc", nsMetadata.get("comment"));
}
@Test
public void testCreateNamespaceWithLocation() throws Exception {
Assume.assumeFalse("HadoopCatalog does not support namespace metadata", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
File location = TEMPORARY_FOLDER.newFile();
Assert.assertTrue(location.delete());
sql("CREATE DATABASE %s WITH ('location'='%s')", flinkDatabase, location);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Map<String, String> nsMetadata = validationNamespaceCatalog.loadNamespaceMetadata(icebergNamespace);
Assert.assertEquals("Namespace should have expected location",
"file:" + location.getPath(), nsMetadata.get("location"));
}
@Test
public void testSetProperties() {
Assume.assumeFalse("HadoopCatalog does not support namespace metadata", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
sql("CREATE DATABASE %s", flinkDatabase);
Assert.assertTrue("Namespace should exist", validationNamespaceCatalog.namespaceExists(icebergNamespace));
Map<String, String> defaultMetadata = validationNamespaceCatalog.loadNamespaceMetadata(icebergNamespace);
Assert.assertFalse("Default metadata should not have custom property", defaultMetadata.containsKey("prop"));
sql("ALTER DATABASE %s SET ('prop'='value')", flinkDatabase);
Map<String, String> nsMetadata = validationNamespaceCatalog.loadNamespaceMetadata(icebergNamespace);
Assert.assertEquals("Namespace should have expected prop value", "value", nsMetadata.get("prop"));
}
@Test
public void testHadoopNotSupportMeta() {
Assume.assumeTrue("HadoopCatalog does not support namespace metadata", isHadoopCatalog);
Assert.assertFalse(
"Namespace should not already exist",
validationNamespaceCatalog.namespaceExists(icebergNamespace));
AssertHelpers.assertThrowsCause(
"Should fail if trying to create database with location in hadoop catalog.",
UnsupportedOperationException.class,
String.format("Cannot create namespace %s: metadata is not supported", icebergNamespace),
() -> sql("CREATE DATABASE %s WITH ('prop'='value')", flinkDatabase));
}
}
| 1 | 24,398 | Nit: other statements use all caps for SQL reserved words. Should this be `SHOW TABLES`? | apache-iceberg | java |
@@ -60,7 +60,7 @@ static bool is_a_code_line (const unsigned char *line)
static bool isLuaIdentifier (char c)
{
- return (bool) !(isspace(c) || c == '(' || c == ')' || c == '=');
+ return (bool) !(isspace(c) || c == '(' || c == ')' || c == '=' || c == '.' || c == ':');
}
static void extract_next_token (const char *begin, const char *end_sentinel, vString *name) | 1 | /*
* Copyright (c) 2000-2001, Max Ischenko <[email protected]>.
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
* This module contains functions for generating tags for Lua language.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include <string.h>
#include "debug.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
#include "vstring.h"
/*
* DATA DEFINITIONS
*/
typedef enum {
K_FUNCTION
} luaKind;
static kindDefinition LuaKinds [] = {
{ true, 'f', "function", "functions" }
};
/*
* FUNCTION DEFINITIONS
*/
/*
* Helper function.
* Returns 1 if line looks like a line of Lua code.
*
* TODO: Recognize UNIX bang notation.
* (Lua treat first line as a comment if it starts with #!)
*
*/
static bool is_a_code_line (const unsigned char *line)
{
bool result;
const unsigned char *p = line;
while (isspace ((int) *p))
p++;
if (p [0] == '\0')
result = false;
else if (p [0] == '-' && p [1] == '-')
result = false;
else
result = true;
return result;
}
static bool isLuaIdentifier (char c)
{
return (bool) !(isspace(c) || c == '(' || c == ')' || c == '=');
}
static void extract_next_token (const char *begin, const char *end_sentinel, vString *name)
{
if (begin == NULL || end_sentinel == NULL)
return;
Assert (begin <= end_sentinel);
/* Both on '(' */
if (begin == end_sentinel)
return;
/* Trim prefixed white spaces */
while (isspace ((int) *begin))
begin++;
/* Both on '(' */
if (begin == end_sentinel)
return;
const char *end = end_sentinel - 1;
/* Trim suffixed white spaces */
while (isspace ((int) *end))
end--;
Assert (begin <= end);
for (const char *c = begin; c <= end; ++c)
{
if (isLuaIdentifier (*c))
vStringPut (name, (int) *c);
else
{
/* An unexpected character is found
* between "function" and "(" */
vStringClear (name);
return;
}
}
makeSimpleTag (name, K_FUNCTION);
vStringClear (name);
}
static void extract_prev_token (const char *end, const char *begin_sentinel, vString *name)
{
const char *begin;
if (end == NULL || begin_sentinel == NULL)
return;
if (! (begin_sentinel <= end))
return;
while (isspace ((int) *end))
{
end--;
if (! (begin_sentinel <= end))
return;
}
begin = end;
while (begin_sentinel <= begin && isLuaIdentifier (*begin))
begin--;
if (end - begin)
{
vStringNCatS (name, begin + 1, end - begin);
makeSimpleTag (name, K_FUNCTION);
vStringClear (name);
}
}
static void findLuaTags (void)
{
vString *name = vStringNew ();
const unsigned char *line;
while ((line = readLineFromInputFile ()) != NULL)
{
const char *p, *q;
if (! is_a_code_line (line))
continue;
p = (const char*) strstr ((const char*) line, "function");
if (p == NULL)
continue;
q = strchr ((const char*) line, '=');
if (q == NULL) {
p = p + 8; /* skip the `function' word */
/* We expect [ \t(] */
if (! (*p == '(' || isspace ((int)*p)))
continue;
q = strchr ((const char*) p, '(');
if (q)
extract_next_token (p, q, name);
} else if (
(*(q+1) != '=') /* ignore `if type(v) == "function" then ...' */
&& (q < p) /* ignore "function" ~= */
) {
p = (const char*) &line[0];
if (p < q)
extract_prev_token (q - 1, p, name);
}
}
vStringDelete (name);
}
extern parserDefinition* LuaParser (void)
{
static const char* const extensions [] = { "lua", NULL };
parserDefinition* def = parserNew ("Lua");
def->kindTable = LuaKinds;
def->kindCount = ARRAY_SIZE (LuaKinds);
def->extensions = extensions;
def->parser = findLuaTags;
return def;
}
 | 1 | 21,314 | Do we need this? isLuaIdentifier() is used not only in extract_next_token() but also in extract_prev_token(). I wonder whether the change to isLuaIdentifier() has an impact on extract_prev_token() or not. If you are not sure, keep isLuaIdentifier() as is. If you are sure, please explain your reasoning in the commit log. | universal-ctags-ctags | c |
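To make the reviewer's question concrete, a self-contained sketch (not ctags code) comparing the two predicates from the diff. Tracing extract_prev_token with each shows the change does alter its output for dotted names: on "obj.method = function ()" the old predicate collects "obj.method", while the new one stops at '.' and collects only "method".

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Old and new predicates side by side (copied from the diff). */
static bool isLuaIdentifierOld(char c)
{
	return (bool) !(isspace(c) || c == '(' || c == ')' || c == '=');
}

static bool isLuaIdentifierNew(char c)
{
	return (bool) !(isspace(c) || c == '(' || c == ')' || c == '='
			|| c == '.' || c == ':');
}

int main(void)
{
	/* extract_prev_token walks left from '=' while the predicate is
	 * true, so stopping at '.' truncates "obj.method" to "method". */
	printf("'.': old=%d new=%d  ':': old=%d new=%d\n",
	       isLuaIdentifierOld('.'), isLuaIdentifierNew('.'),
	       isLuaIdentifierOld(':'), isLuaIdentifierNew(':'));
	return 0;
}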
@@ -942,7 +942,7 @@ func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
- if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
+ if c.Wireguard.EncryptHostTraffic {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details. | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"k8s.io/client-go/kubernetes"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/arp"
"github.com/projectcalico/felix/bpf/conntrack"
"github.com/projectcalico/felix/bpf/failsafes"
bpfipsets "github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/nat"
bpfproxy "github.com/projectcalico/felix/bpf/proxy"
"github.com/projectcalico/felix/bpf/routes"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/jitter"
"github.com/projectcalico/felix/labelindex"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/throttle"
"github.com/projectcalico/felix/wireguard"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
cprometheus "github.com/projectcalico/libcalico-go/lib/prometheus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
// msgPeekLimit is the maximum number of messages we'll try to grab from the to-dataplane
// channel before we apply the changes. Higher values allow us to batch up more work on
// the channel for greater throughput when we're under load (at cost of higher latency).
msgPeekLimit = 100
// Interface name used by kube-proxy to bind service IPs.
KubeIPVSInterface = "kube-ipvs0"
)
var (
countDataplaneSyncErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_int_dataplane_failures",
Help: "Number of times dataplane updates failed and will be retried.",
})
countMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_int_dataplane_messages",
Help: "Number dataplane messages by type.",
}, []string{"type"})
summaryApplyTime = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_apply_time_seconds",
Help: "Time in seconds that it took to apply a dataplane update.",
})
summaryBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_msg_batch_size",
Help: "Number of messages processed in each batch. Higher values indicate we're " +
"doing more batching to try to keep up.",
})
summaryIfaceBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_iface_msg_batch_size",
Help: "Number of interface state messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
summaryAddrBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_addr_msg_batch_size",
Help: "Number of interface address messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
processStartTime time.Time
zeroKey = wgtypes.Key{}
)
func init() {
prometheus.MustRegister(countDataplaneSyncErrors)
prometheus.MustRegister(summaryApplyTime)
prometheus.MustRegister(countMessages)
prometheus.MustRegister(summaryBatchSize)
prometheus.MustRegister(summaryIfaceBatchSize)
prometheus.MustRegister(summaryAddrBatchSize)
processStartTime = time.Now()
}
type Config struct {
Hostname string
IPv6Enabled bool
RuleRendererOverride rules.RuleRenderer
IPIPMTU int
VXLANMTU int
VXLANPort int
MaxIPSetSize int
IptablesBackend string
IPSetsRefreshInterval time.Duration
RouteRefreshInterval time.Duration
DeviceRouteSourceAddress net.IP
DeviceRouteProtocol int
RemoveExternalRoutes bool
IptablesRefreshInterval time.Duration
IptablesPostWriteCheckInterval time.Duration
IptablesInsertMode string
IptablesLockFilePath string
IptablesLockTimeout time.Duration
IptablesLockProbeInterval time.Duration
XDPRefreshInterval time.Duration
Wireguard wireguard.Config
NetlinkTimeout time.Duration
RulesConfig rules.Config
IfaceMonitorConfig ifacemonitor.Config
StatusReportingInterval time.Duration
ConfigChangedRestartCallback func()
FatalErrorRestartCallback func(error)
PostInSyncCallback func()
HealthAggregator *health.HealthAggregator
RouteTableManager *idalloc.IndexAllocator
DebugSimulateDataplaneHangAfter time.Duration
ExternalNodesCidrs []string
BPFEnabled bool
BPFDisableUnprivileged bool
BPFKubeProxyIptablesCleanupEnabled bool
BPFLogLevel string
BPFExtToServiceConnmark int
BPFDataIfacePattern *regexp.Regexp
XDPEnabled bool
XDPAllowGeneric bool
BPFConntrackTimeouts conntrack.Timeouts
BPFCgroupV2 string
BPFConnTimeLBEnabled bool
BPFMapRepin bool
BPFNodePortDSREnabled bool
KubeProxyMinSyncPeriod time.Duration
KubeProxyEndpointSlicesEnabled bool
SidecarAccelerationEnabled bool
LookPathOverride func(file string) (string, error)
KubeClientSet *kubernetes.Clientset
FeatureDetectOverrides map[string]string
// Populated with the smallest host MTU based on auto-detection.
hostMTU int
MTUIfacePattern *regexp.Regexp
RouteSource string
KubernetesProvider config.Provider
}
type UpdateBatchResolver interface {
// Opportunity for a manager component to resolve state that depends jointly on the updates
// that it has seen since the preceding CompleteDeferredWork call. Processing here can
// include passing resolved state to other managers. It should not include any actual
// dataplane updates yet. (Those should be actioned in CompleteDeferredWork.)
ResolveUpdateBatch() error
}
// InternalDataplane implements an in-process Felix dataplane driver based on iptables
// and ipsets. It communicates with the datastore-facing part of Felix via the
// Send/RecvMessage methods, which operate on the protobuf-defined API objects.
//
// Architecture
//
// The internal dataplane driver is organised around a main event loop, which handles
// update events from the datastore and dataplane.
//
// Each pass around the main loop has two phases. In the first phase, updates are fanned
// out to "manager" objects, which calculate the changes that are needed and pass them to
// the dataplane programming layer. In the second phase, the dataplane layer applies the
// updates in a consistent sequence. The second phase is skipped until the datastore is
// in sync; this ensures that the first update to the dataplane applies a consistent
// snapshot.
//
// Having the dataplane layer batch updates has several advantages. It is much more
// efficient to batch updates, since each call to iptables/ipsets has a high fixed cost.
// In addition, it allows for different managers to make updates without having to
// coordinate on their sequencing.
//
// Requirements on the API
//
// The internal dataplane does not do consistency checks on the incoming data (as the
// old Python-based driver used to do). It expects to be told about dependent resources
// before they are needed and for their lifetime to exceed that of the resources that
// depend on them. For example, it is important that the datastore layer sends an
// IP set create event before it sends a rule that references that IP set.
type InternalDataplane struct {
toDataplane chan interface{}
fromDataplane chan interface{}
allIptablesTables []*iptables.Table
iptablesMangleTables []*iptables.Table
iptablesNATTables []*iptables.Table
iptablesRawTables []*iptables.Table
iptablesFilterTables []*iptables.Table
ipSets []ipsetsDataplane
ipipManager *ipipManager
wireguardManager *wireguardManager
ifaceMonitor *ifacemonitor.InterfaceMonitor
ifaceUpdates chan *ifaceUpdate
ifaceAddrUpdates chan *ifaceAddrsUpdate
endpointStatusCombiner *endpointStatusCombiner
allManagers []Manager
managersWithRouteTables []ManagerWithRouteTables
ruleRenderer rules.RuleRenderer
// dataplaneNeedsSync is set if the dataplane is dirty in some way, i.e. we need to
// call apply().
dataplaneNeedsSync bool
// forceIPSetsRefresh is set by the IP sets refresh timer to indicate that we should
// check the IP sets in the dataplane.
forceIPSetsRefresh bool
// forceRouteRefresh is set by the route refresh timer to indicate that we should
// check the routes in the dataplane.
forceRouteRefresh bool
// forceXDPRefresh is set by the XDP refresh timer to indicate that we should
// check the XDP state in the dataplane.
forceXDPRefresh bool
// doneFirstApply is set after we finish the first update to the dataplane. It indicates
// that the dataplane should now be in sync.
doneFirstApply bool
reschedTimer *time.Timer
reschedC <-chan time.Time
applyThrottle *throttle.Throttle
config Config
debugHangC <-chan time.Time
xdpState *xdpState
sockmapState *sockmapState
endpointsSourceV4 endpointsSource
ipsetsSourceV4 ipsetsSource
callbacks *callbacks
loopSummarizer *logutils.Summarizer
}
const (
healthName = "int_dataplane"
healthInterval = 10 * time.Second
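// Encap overheads subtracted from the host MTU when defaulting tunnel MTUs: IPIP adds a
// 20-byte outer IPv4 header, VXLAN adds 50 bytes (outer Ethernet/IP/UDP/VXLAN headers),
// WireGuard adds 60 bytes (outer IPv4/UDP plus WireGuard headers), and AKS needs an extra
// 100 bytes of headroom because Azure fragments at 1400 bytes despite a 1500-byte MTU.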
ipipMTUOverhead = 20
vxlanMTUOverhead = 50
wireguardMTUOverhead = 60
aksMTUOverhead = 100
)
func NewIntDataplaneDriver(config Config) *InternalDataplane {
log.WithField("config", config).Info("Creating internal dataplane driver.")
ruleRenderer := config.RuleRendererOverride
if ruleRenderer == nil {
ruleRenderer = rules.NewRenderer(config.RulesConfig)
}
epMarkMapper := rules.NewEndpointMarkMapper(
config.RulesConfig.IptablesMarkEndpoint,
config.RulesConfig.IptablesMarkNonCaliEndpoint)
// Auto-detect host MTU.
hostMTU, err := findHostMTU(config.MTUIfacePattern)
if err != nil {
log.WithError(err).Fatal("Unable to detect host MTU, shutting down")
return nil
}
ConfigureDefaultMTUs(hostMTU, &config)
podMTU := determinePodMTU(config)
if err := writeMTUFile(podMTU); err != nil {
log.WithError(err).Error("Failed to write MTU file, pod MTU may not be properly set")
}
dp := &InternalDataplane{
toDataplane: make(chan interface{}, msgPeekLimit),
fromDataplane: make(chan interface{}, 100),
ruleRenderer: ruleRenderer,
ifaceMonitor: ifacemonitor.New(config.IfaceMonitorConfig, config.FatalErrorRestartCallback),
ifaceUpdates: make(chan *ifaceUpdate, 100),
ifaceAddrUpdates: make(chan *ifaceAddrsUpdate, 100),
config: config,
applyThrottle: throttle.New(10),
loopSummarizer: logutils.NewSummarizer("dataplane reconciliation loops"),
}
dp.applyThrottle.Refill() // Allow the first apply() immediately.
dp.ifaceMonitor.StateCallback = dp.onIfaceStateChange
dp.ifaceMonitor.AddrCallback = dp.onIfaceAddrsChange
backendMode := iptables.DetectBackend(config.LookPathOverride, iptables.NewRealCmd, config.IptablesBackend)
// Most iptables tables need the same options.
iptablesOptions := iptables.TableOptions{
HistoricChainPrefixes: rules.AllHistoricChainNamePrefixes,
InsertMode: config.IptablesInsertMode,
RefreshInterval: config.IptablesRefreshInterval,
PostWriteInterval: config.IptablesPostWriteCheckInterval,
LockTimeout: config.IptablesLockTimeout,
LockProbeInterval: config.IptablesLockProbeInterval,
BackendMode: backendMode,
LookPathOverride: config.LookPathOverride,
OnStillAlive: dp.reportHealth,
OpRecorder: dp.loopSummarizer,
}
if config.BPFEnabled && config.BPFKubeProxyIptablesCleanupEnabled {
// If BPF-mode is enabled, clean up kube-proxy's rules too.
log.Info("BPF enabled, configuring iptables layer to clean up kube-proxy's rules.")
iptablesOptions.ExtraCleanupRegexPattern = rules.KubeProxyInsertRuleRegex
iptablesOptions.HistoricChainPrefixes = append(iptablesOptions.HistoricChainPrefixes, rules.KubeProxyChainPrefixes...)
}
// However, the NAT tables need an extra cleanup regex.
iptablesNATOptions := iptablesOptions
if iptablesNATOptions.ExtraCleanupRegexPattern == "" {
iptablesNATOptions.ExtraCleanupRegexPattern = rules.HistoricInsertedNATRuleRegex
} else {
iptablesNATOptions.ExtraCleanupRegexPattern += "|" + rules.HistoricInsertedNATRuleRegex
}
featureDetector := iptables.NewFeatureDetector(config.FeatureDetectOverrides)
iptablesFeatures := featureDetector.GetFeatures()
var iptablesLock sync.Locker
if iptablesFeatures.RestoreSupportsLock {
log.Debug("Calico implementation of iptables lock disabled (because detected version of " +
"iptables-restore will use its own implementation).")
iptablesLock = dummyLock{}
} else if config.IptablesLockTimeout <= 0 {
log.Debug("Calico implementation of iptables lock disabled (by configuration).")
iptablesLock = dummyLock{}
} else {
// Create the shared iptables lock. This allows us to block other processes from
// manipulating iptables while we make our updates. We use a shared lock because we
// actually do multiple updates in parallel (but to different tables), which is safe.
log.WithField("timeout", config.IptablesLockTimeout).Debug(
"Calico implementation of iptables lock enabled")
iptablesLock = iptables.NewSharedLock(
config.IptablesLockFilePath,
config.IptablesLockTimeout,
config.IptablesLockProbeInterval,
)
}
mangleTableV4 := iptables.NewTable(
"mangle",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
natTableV4 := iptables.NewTable(
"nat",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV4 := iptables.NewTable(
"raw",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
filterTableV4 := iptables.NewTable(
"filter",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
ipSetsConfigV4 := config.RulesConfig.IPSetConfigV4
ipSetsV4 := ipsets.NewIPSets(ipSetsConfigV4, dp.loopSummarizer)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV4)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV4)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV4)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV4)
dp.ipSets = append(dp.ipSets, ipSetsV4)
if config.RulesConfig.VXLANEnabled {
routeTableVXLAN := routetable.New([]string{"^vxlan.calico$"}, 4, true, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, true, 0,
dp.loopSummarizer)
vxlanManager := newVXLANManager(
ipSetsV4,
routeTableVXLAN,
"vxlan.calico",
config,
dp.loopSummarizer,
)
go vxlanManager.KeepVXLANDeviceInSync(config.VXLANMTU, iptablesFeatures.ChecksumOffloadBroken, 10*time.Second)
dp.RegisterManager(vxlanManager)
} else {
cleanUpVXLANDevice()
}
dp.endpointStatusCombiner = newEndpointStatusCombiner(dp.fromDataplane, config.IPv6Enabled)
callbacks := newCallbacks()
dp.callbacks = callbacks
if !config.BPFEnabled && config.XDPEnabled {
if err := bpf.SupportsXDP(); err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
st, err := NewXDPState(config.XDPAllowGeneric)
if err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
dp.xdpState = st
dp.xdpState.PopulateCallbacks(callbacks)
dp.RegisterManager(st)
log.Info("XDP acceleration enabled.")
}
}
} else {
log.Info("XDP acceleration disabled.")
}
// TODO Integrate XDP and BPF infra.
if !config.BPFEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil {
log.WithError(err).Warn("Failed to cleanup preexisting XDP state")
}
}
// if we can't create an XDP state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if config.SidecarAccelerationEnabled {
if err := bpf.SupportsSockmap(); err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
st, err := NewSockmapState()
if err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
dp.sockmapState = st
dp.sockmapState.PopulateCallbacks(callbacks)
if err := dp.sockmapState.SetupSockmapAcceleration(); err != nil {
dp.sockmapState = nil
log.WithError(err).Warn("Failed to set up Sockmap acceleration")
} else {
log.Info("Sockmap acceleration enabled.")
}
}
}
}
if dp.sockmapState == nil {
st, err := NewSockmapState()
if err == nil {
st.WipeSockmap(bpf.FindInBPFFSOnly)
}
// if we can't create a sockmap state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if !config.BPFEnabled {
// BPF mode disabled, create the iptables-only managers.
ipsetsManager := newIPSetsManager(ipSetsV4, config.MaxIPSetSize)
dp.RegisterManager(ipsetsManager)
dp.ipsetsSourceV4 = ipsetsManager
// TODO Connect host IP manager to BPF
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV4,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV4, mangleTableV4, filterTableV4, ruleRenderer, 4))
// Clean up any leftover BPF state.
err := nat.RemoveConnectTimeLoadBalancer("")
if err != nil {
log.WithError(err).Info("Failed to remove BPF connect-time load balancer, ignoring.")
}
tc.CleanUpProgramsAndPins()
}
interfaceRegexes := make([]string, len(config.RulesConfig.WorkloadIfacePrefixes))
for i, r := range config.RulesConfig.WorkloadIfacePrefixes {
interfaceRegexes[i] = "^" + r + ".*"
}
bpfMapContext := &bpf.MapContext{
RepinningEnabled: config.BPFMapRepin,
}
var (
bpfEndpointManager *bpfEndpointManager
)
if config.BPFEnabled {
log.Info("BPF enabled, starting BPF endpoint manager and map manager.")
// Register map managers first since they create the maps that will be used by the endpoint manager.
// Important that we create the maps before we load a BPF program with TC since we make sure the map
// metadata name is set whereas TC doesn't set that field.
ipSetIDAllocator := idalloc.New()
ipSetsMap := bpfipsets.Map(bpfMapContext)
err := ipSetsMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ipsets BPF map.")
}
ipSetsV4 := bpfipsets.NewBPFIPSets(
ipSetsConfigV4,
ipSetIDAllocator,
ipSetsMap,
dp.loopSummarizer,
)
dp.ipSets = append(dp.ipSets, ipSetsV4)
dp.RegisterManager(newIPSetsManager(ipSetsV4, config.MaxIPSetSize))
bpfRTMgr := newBPFRouteManager(config.Hostname, config.ExternalNodesCidrs, bpfMapContext, dp.loopSummarizer)
dp.RegisterManager(bpfRTMgr)
// Forwarding into an IPIP tunnel fails silently because IPIP tunnels are L3 devices and support for
// L3 devices in BPF is not available yet. Disable the FIB lookup in that case.
fibLookupEnabled := !config.RulesConfig.IPIPEnabled
stateMap := state.Map(bpfMapContext)
err = stateMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create state BPF map.")
}
arpMap := arp.Map(bpfMapContext)
err = arpMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ARP BPF map.")
}
// The failsafe manager sets up the failsafe port map. It's important that it is registered before the
// endpoint managers so that the map is brought up to date before they run for the first time.
failsafesMap := failsafes.Map(bpfMapContext)
err = failsafesMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create failsafe port BPF map.")
}
failsafeMgr := failsafes.NewManager(
failsafesMap,
config.RulesConfig.FailsafeInboundHostPorts,
config.RulesConfig.FailsafeOutboundHostPorts,
dp.loopSummarizer,
)
dp.RegisterManager(failsafeMgr)
workloadIfaceRegex := regexp.MustCompile(strings.Join(interfaceRegexes, "|"))
bpfEndpointManager = newBPFEndpointManager(
config.BPFLogLevel,
config.Hostname,
fibLookupEnabled,
config.RulesConfig.EndpointToHostAction,
config.BPFDataIfacePattern,
workloadIfaceRegex,
ipSetIDAllocator,
config.VXLANMTU,
uint16(config.VXLANPort),
config.BPFNodePortDSREnabled,
config.BPFExtToServiceConnmark,
ipSetsMap,
stateMap,
ruleRenderer,
filterTableV4,
dp.reportHealth,
dp.loopSummarizer,
)
dp.RegisterManager(bpfEndpointManager)
// Pre-create the NAT maps so that later operations can assume access.
frontendMap := nat.FrontendMap(bpfMapContext)
err = frontendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT frontend BPF map.")
}
backendMap := nat.BackendMap(bpfMapContext)
err = backendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend BPF map.")
}
backendAffinityMap := nat.AffinityMap(bpfMapContext)
err = backendAffinityMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend affinity BPF map.")
}
routeMap := routes.Map(bpfMapContext)
err = routeMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create routes BPF map.")
}
ctMap := conntrack.Map(bpfMapContext)
err = ctMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create conntrack BPF map.")
}
conntrackScanner := conntrack.NewScanner(ctMap,
conntrack.NewLivenessScanner(config.BPFConntrackTimeouts, config.BPFNodePortDSREnabled))
// Before we start, scan for all finished / timed out connections to
// free up the conntrack table asap as it may take time to sync up the
// proxy and kick off the first full cleaner scan.
conntrackScanner.Scan()
bpfproxyOpts := []bpfproxy.Option{
bpfproxy.WithMinSyncPeriod(config.KubeProxyMinSyncPeriod),
}
if config.KubeProxyEndpointSlicesEnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithEndpointsSlices())
}
if config.BPFNodePortDSREnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithDSREnabled())
}
if config.KubeClientSet != nil {
// We have a Kubernetes connection, start watching services and populating the NAT maps.
kp, err := bpfproxy.StartKubeProxy(
config.KubeClientSet,
config.Hostname,
frontendMap,
backendMap,
backendAffinityMap,
ctMap,
bpfproxyOpts...,
)
if err != nil {
log.WithError(err).Panic("Failed to start kube-proxy.")
}
bpfRTMgr.setHostIPUpdatesCallBack(kp.OnHostIPsUpdate)
bpfRTMgr.setRoutesCallBacks(kp.OnRouteUpdate, kp.OnRouteDelete)
conntrackScanner.AddUnlocked(conntrack.NewStaleNATScanner(kp))
conntrackScanner.Start()
} else {
log.Info("BPF enabled but no Kubernetes client available, unable to run kube-proxy module.")
}
if config.BPFConnTimeLBEnabled {
// Activate the connect-time load balancer.
err = nat.InstallConnectTimeLoadBalancer(frontendMap, backendMap, routeMap, config.BPFCgroupV2, config.BPFLogLevel)
if err != nil {
log.WithError(err).Panic("BPFConnTimeLBEnabled but failed to attach connect-time load balancer, bailing out.")
}
} else {
// Deactivate the connect-time load balancer.
err = nat.RemoveConnectTimeLoadBalancer(config.BPFCgroupV2)
if err != nil {
log.WithError(err).Warn("Failed to detach connect-time load balancer. Ignoring.")
}
}
}
routeTableV4 := routetable.New(interfaceRegexes, 4, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
epManager := newEndpointManager(
rawTableV4,
mangleTableV4,
filterTableV4,
ruleRenderer,
routeTableV4,
4,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
bpfEndpointManager,
callbacks)
dp.RegisterManager(epManager)
dp.endpointsSourceV4 = epManager
dp.RegisterManager(newFloatingIPManager(natTableV4, ruleRenderer, 4))
dp.RegisterManager(newMasqManager(ipSetsV4, natTableV4, ruleRenderer, config.MaxIPSetSize, 4))
if config.RulesConfig.IPIPEnabled {
// Add a manager to keep the all-hosts IP set up to date.
dp.ipipManager = newIPIPManager(ipSetsV4, config.MaxIPSetSize, config.ExternalNodesCidrs)
dp.RegisterManager(dp.ipipManager) // IPv4-only
}
// Add a manager for wireguard configuration. This is added irrespective of whether wireguard is actually enabled
// because it may need to tidy up some of the routing rules when disabled.
cryptoRouteTableWireguard := wireguard.New(config.Hostname, &config.Wireguard, config.NetlinkTimeout,
config.DeviceRouteProtocol, func(publicKey wgtypes.Key) error {
if publicKey == zeroKey {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: ""}
} else {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: publicKey.String()}
}
return nil
},
dp.loopSummarizer)
dp.wireguardManager = newWireguardManager(cryptoRouteTableWireguard, config)
dp.RegisterManager(dp.wireguardManager) // IPv4-only
dp.RegisterManager(newServiceLoopManager(filterTableV4, ruleRenderer, 4))
if config.IPv6Enabled {
mangleTableV6 := iptables.NewTable(
"mangle",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
natTableV6 := iptables.NewTable(
"nat",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV6 := iptables.NewTable(
"raw",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
filterTableV6 := iptables.NewTable(
"filter",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
ipSetsConfigV6 := config.RulesConfig.IPSetConfigV6
ipSetsV6 := ipsets.NewIPSets(ipSetsConfigV6, dp.loopSummarizer)
dp.ipSets = append(dp.ipSets, ipSetsV6)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV6)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV6)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV6)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV6)
routeTableV6 := routetable.New(
interfaceRegexes, 6, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
if !config.BPFEnabled {
dp.RegisterManager(newIPSetsManager(ipSetsV6, config.MaxIPSetSize))
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV6,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV6, mangleTableV6, filterTableV6, ruleRenderer, 6))
}
dp.RegisterManager(newEndpointManager(
rawTableV6,
mangleTableV6,
filterTableV6,
ruleRenderer,
routeTableV6,
6,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
nil,
callbacks))
dp.RegisterManager(newFloatingIPManager(natTableV6, ruleRenderer, 6))
dp.RegisterManager(newMasqManager(ipSetsV6, natTableV6, ruleRenderer, config.MaxIPSetSize, 6))
dp.RegisterManager(newServiceLoopManager(filterTableV6, ruleRenderer, 6))
}
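// Gather every table into one slice so that apply() can flush them all in parallel.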
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesMangleTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesNATTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesFilterTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesRawTables...)
// Register that we will report liveness and readiness.
if config.HealthAggregator != nil {
log.Info("Registering to report health.")
config.HealthAggregator.RegisterReporter(
healthName,
&health.HealthReport{Live: true, Ready: true},
healthInterval*2,
)
}
if config.DebugSimulateDataplaneHangAfter != 0 {
log.WithField("delay", config.DebugSimulateDataplaneHangAfter).Warn(
"Simulating a dataplane hang.")
dp.debugHangC = time.After(config.DebugSimulateDataplaneHangAfter)
}
return dp
}
// findHostMTU auto-detects the smallest host interface MTU.
func findHostMTU(matchRegex *regexp.Regexp) (int, error) {
// Find all the interfaces on the host.
links, err := netlink.LinkList()
if err != nil {
log.WithError(err).Error("Failed to list interfaces. Unable to auto-detect MTU")
return 0, err
}
// Iterate through them, keeping track of the lowest MTU.
smallest := 0
for _, l := range links {
// Skip links that we know are not external interfaces.
fields := log.Fields{"mtu": l.Attrs().MTU, "name": l.Attrs().Name}
if matchRegex == nil || !matchRegex.MatchString(l.Attrs().Name) {
log.WithFields(fields).Debug("Skipping interface for MTU detection")
continue
}
log.WithFields(fields).Debug("Examining link for MTU calculation")
if l.Attrs().MTU < smallest || smallest == 0 {
smallest = l.Attrs().MTU
}
}
if smallest == 0 {
// We failed to find a usable interface. Default the MTU of the host
// to 1460 - the smallest among common cloud providers.
log.Warn("Failed to auto-detect host MTU - no interfaces matched the MTU interface pattern. To use auto-MTU, set mtuIfacePattern to match your host's interfaces")
return 1460, nil
}
return smallest, nil
}
// writeMTUFile writes the smallest MTU among enabled encapsulation types to disk
// for use by other components (e.g., CNI plugin).
func writeMTUFile(mtu int) error {
// Make sure directory exists.
if err := os.MkdirAll("/var/lib/calico", os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory /var/lib/calico: %s", err)
}
// Write the smallest MTU to disk so other components can rely on this calculation consistently.
filename := "/var/lib/calico/mtu"
log.Debugf("Writing %d to "+filename, mtu)
if err := ioutil.WriteFile(filename, []byte(fmt.Sprintf("%d", mtu)), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
return err
}
return nil
}
// determinePodMTU looks at the configured MTUs and enabled encapsulations to determine which
// value for MTU should be used for pod interfaces.
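// For example, with a detected host MTU of 1500 and only WireGuard enabled, the defaulted
// WireGuard MTU of 1440 (1500 minus the 60-byte WireGuard overhead) becomes the pod MTU.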
func determinePodMTU(config Config) int {
// Determine the smallest MTU among enabled encap methods. If none of the encap methods are
// enabled, we'll just use the host's MTU.
mtu := 0
type mtuState struct {
mtu int
enabled bool
}
for _, s := range []mtuState{
{config.IPIPMTU, config.RulesConfig.IPIPEnabled},
{config.VXLANMTU, config.RulesConfig.VXLANEnabled},
{config.Wireguard.MTU, config.Wireguard.Enabled},
} {
if s.enabled && s.mtu != 0 && (s.mtu < mtu || mtu == 0) {
mtu = s.mtu
}
}
if mtu == 0 {
// No enabled encapsulation. Just use the host MTU.
mtu = config.hostMTU
} else if mtu > config.hostMTU {
fields := logrus.Fields{"mtu": mtu, "hostMTU": config.hostMTU}
log.WithFields(fields).Warn("Configured MTU is larger than detected host interface MTU")
}
log.WithField("mtu", mtu).Info("Determined pod MTU")
return mtu
}
// ConfigureDefaultMTUs defaults any MTU configurations that have not been set.
// We default the values even if the encap is not enabled, in order to match behavior from earlier versions of Calico.
// However, the MTU will only be considered for allocation to pod interfaces if the encap is enabled.
func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.hostMTU = hostMTU
if c.IPIPMTU == 0 {
log.Debug("Defaulting IPIP MTU based on host")
c.IPIPMTU = hostMTU - ipipMTUOverhead
}
if c.VXLANMTU == 0 {
log.Debug("Defaulting VXLAN MTU based on host")
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details.
// Additionally, Wireguard sets the DF bit on its packets, and so if the MTU is set too high large packets
// will be dropped. Therefore it is necessary to allow for the difference between the MTU of the host and
// the underlying network.
log.Debug("Defaulting Wireguard MTU based on host and AKS with WorkloadIPs")
c.Wireguard.MTU = hostMTU - aksMTUOverhead - wireguardMTUOverhead
} else {
log.Debug("Defaulting Wireguard MTU based on host")
c.Wireguard.MTU = hostMTU - wireguardMTUOverhead
}
}
}
func cleanUpVXLANDevice() {
// If VXLAN is not enabled, check to see if there is a VXLAN device and delete it if there is.
log.Debug("Checking if we need to clean up the VXLAN device")
link, err := netlink.LinkByName("vxlan.calico")
if err != nil {
if _, ok := err.(netlink.LinkNotFoundError); ok {
log.Debug("VXLAN disabled and no VXLAN device found")
return
}
log.WithError(err).Warnf("VXLAN disabled and failed to query VXLAN device. Ignoring.")
return
}
if err = netlink.LinkDel(link); err != nil {
log.WithError(err).Error("VXLAN disabled and failed to delete unwanted VXLAN device. Ignoring.")
}
}
type Manager interface {
// OnUpdate is called for each protobuf message from the datastore. It may either directly
// send updates to the IPSets and iptables.Table objects (which will queue the updates
// until the main loop instructs them to act) or, for efficiency, wait until
// a call to CompleteDeferredWork() to flush updates to the dataplane.
OnUpdate(protoBufMsg interface{})
// Called before the main loop flushes updates to the dataplane to allow for batched
// work to be completed.
CompleteDeferredWork() error
}
type ManagerWithRouteTables interface {
Manager
GetRouteTableSyncers() []routeTableSyncer
}
func (d *InternalDataplane) routeTableSyncers() []routeTableSyncer {
var rts []routeTableSyncer
for _, mrts := range d.managersWithRouteTables {
rts = append(rts, mrts.GetRouteTableSyncers()...)
}
return rts
}
func (d *InternalDataplane) RegisterManager(mgr Manager) {
switch mgr := mgr.(type) {
case ManagerWithRouteTables:
// We used to log the whole manager out here, but doing so caused races when the manager had
// other threads or locks.
log.WithField("manager", reflect.TypeOf(mgr).Name()).Debug("registering ManagerWithRouteTables")
d.managersWithRouteTables = append(d.managersWithRouteTables, mgr)
}
d.allManagers = append(d.allManagers, mgr)
}
func (d *InternalDataplane) Start() {
// Do our start-of-day configuration.
d.doStaticDataplaneConfig()
// Then, start the worker threads.
go d.loopUpdatingDataplane()
go d.loopReportingStatus()
go d.ifaceMonitor.MonitorInterfaces()
go d.monitorHostMTU()
}
// onIfaceStateChange is our interface monitor callback. It gets called from the monitor's thread.
func (d *InternalDataplane) onIfaceStateChange(ifaceName string, state ifacemonitor.State, ifIndex int) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifIndex": ifIndex,
"state": state,
}).Info("Linux interface state changed.")
d.ifaceUpdates <- &ifaceUpdate{
Name: ifaceName,
State: state,
Index: ifIndex,
}
}
type ifaceUpdate struct {
Name string
State ifacemonitor.State
Index int
}
// Check if the current felix ipvs config is correct when felix gets a kube-ipvs0 interface update.
// If KubeIPVSInterface is UP and felix ipvs support is disabled (kube-proxy switched from iptables to ipvs mode),
// or if KubeIPVSInterface is DOWN and felix ipvs support is enabled (kube-proxy switched from ipvs to iptables mode),
// restart felix to pick up correct ipvs support mode.
func (d *InternalDataplane) checkIPVSConfigOnStateUpdate(state ifacemonitor.State) {
if (!d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateUp) ||
(d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateDown) {
log.WithFields(log.Fields{
"ipvsIfaceState": state,
"ipvsSupport": d.config.RulesConfig.KubeIPVSSupportEnabled,
}).Info("kube-proxy mode changed. Restart felix.")
d.config.ConfigChangedRestartCallback()
}
}
// onIfaceAddrsChange is our interface address monitor callback. It gets called
// from the monitor's thread.
func (d *InternalDataplane) onIfaceAddrsChange(ifaceName string, addrs set.Set) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"addrs": addrs,
}).Info("Linux interface addrs changed.")
d.ifaceAddrUpdates <- &ifaceAddrsUpdate{
Name: ifaceName,
Addrs: addrs,
}
}
type ifaceAddrsUpdate struct {
Name string
Addrs set.Set
}
func (d *InternalDataplane) SendMessage(msg interface{}) error {
d.toDataplane <- msg
return nil
}
func (d *InternalDataplane) RecvMessage() (interface{}, error) {
return <-d.fromDataplane, nil
}
func (d *InternalDataplane) monitorHostMTU() {
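// Poll the host MTU every 30s; if it changes, restart Felix so that all MTU-derived
// defaults are recalculated from the new value.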
for {
mtu, err := findHostMTU(d.config.MTUIfacePattern)
if err != nil {
log.WithError(err).Error("Error detecting host MTU")
} else if d.config.hostMTU != mtu {
// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{lclogutils.FieldForceFlush: true}).Info("Host MTU changed")
d.config.ConfigChangedRestartCallback()
}
time.Sleep(30 * time.Second)
}
}
// doStaticDataplaneConfig sets up the kernel and our static iptables chains. Should be called
// once at start of day before starting the main loop. The actual iptables programming is deferred
// to the main loop.
func (d *InternalDataplane) doStaticDataplaneConfig() {
// Check/configure global kernel parameters.
d.configureKernel()
if d.config.BPFEnabled {
d.setUpIptablesBPF()
} else {
d.setUpIptablesNormal()
}
if d.config.RulesConfig.IPIPEnabled {
log.Info("IPIP enabled, starting thread to keep tunnel configuration in sync.")
go d.ipipManager.KeepIPIPDeviceInSync(
d.config.IPIPMTU,
d.config.RulesConfig.IPIPTunnelAddress,
)
} else {
log.Info("IPIP disabled. Not starting tunnel update thread.")
}
}
func (d *InternalDataplane) setUpIptablesBPF() {
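// In BPF mode only a minimal set of iptables rules is needed: accept/drop decisions for
// flows that pre-date the BPF programs, RPF checks in the raw table, NAT postrouting, and
// (optionally) marking connections with the ext-to-service connmark.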
rulesConfig := d.config.RulesConfig
for _, t := range d.iptablesFilterTables {
fwdRules := []iptables.Rule{
{
// Bypass is a strong signal from the BPF program, it means that the flow is approved
// by the program at both ingress and egress.
Comment: []string{"Pre-approved by BPF programs."},
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypass, tc.MarkSeenBypassMask),
Action: iptables.AcceptAction{},
},
}
var inputRules, outputRules []iptables.Rule
// Handle packets for flows that pre-date the BPF programs. The BPF program doesn't have any conntrack
// state for these so it allows them to fall through to iptables with a mark set.
inputRules = append(inputRules,
iptables.Rule{
Match: iptables.Match().
MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask).
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Accept packets from flows that pre-date BPF."},
Action: iptables.AcceptAction{},
},
iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask),
Comment: []string{"Drop packets from unknown flows."},
Action: iptables.DropAction{},
},
)
// Mark traffic leaving the host that already has an established linux conntrack entry.
outputRules = append(outputRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established host flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
fwdRules = append(fwdRules,
// Drop packets that have come from a workload but have not been through our BPF program.
iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
Comment: []string{"From workload without BPF seen mark"},
},
)
if rulesConfig.EndpointToHostAction == "ACCEPT" {
// Only need to worry about ACCEPT here. Drop gets compiled into the BPF program and
// RETURN would be a no-op since there's nothing to RETURN from.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").MarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.AcceptAction{},
})
}
// Catch any workload to host packets that haven't been through the BPF program.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
})
}
if t.IPVersion == 6 {
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// In BPF mode, we don't support IPv6 yet. Drop it.
fwdRules = append(fwdRules, iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.DropAction{},
Comment: []string{"To workload, drop IPv6."},
})
}
} else {
// Let the BPF programs know if Linux conntrack knows about the flow.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
// The packet may be about to go to a local workload. However, the local workload may not have a BPF
// program attached (yet). To catch that case, we send the packet through a dispatch chain. We only
// add interfaces to the dispatch chain if the BPF program is in place.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Make sure iptables rules don't drop packets that we're about to process through BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.JumpAction{Target: rules.ChainToWorkloadDispatch},
Comment: []string{"To workload, check workload is known."},
},
)
}
// Need a final rule to accept traffic that is from a workload and going somewhere else.
// Otherwise, if iptables has a DROP policy on the forward chain, the packet will get dropped.
// This rule must come after the to-workload jump rules above to ensure that we don't accept too
// early before the destination is checked.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Accept forwarded packets that came from a workload; they have already been policed by BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().InInterface(prefix + "+"),
Action: iptables.AcceptAction{},
Comment: []string{"To workload, mark has already been verified."},
},
)
}
}
t.InsertOrAppendRules("INPUT", inputRules)
t.InsertOrAppendRules("FORWARD", fwdRules)
t.InsertOrAppendRules("OUTPUT", outputRules)
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATPostroutingChains(t.IPVersion))
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
}
for _, t := range d.iptablesRawTables {
// Do not RPF check what is marked as to be skipped by RPF check.
rpfRules := []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassSkipRPF, tc.MarkSeenBypassSkipRPFMask),
Action: iptables.ReturnAction{},
}}
// For anything we approved for forward, permit accept_local as it is
// traffic encapped for NodePort, ICMP replies etc. - stuff we trust.
rpfRules = append(rpfRules, iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassForward, tc.MarksMask).RPFCheckPassed(true),
Action: iptables.ReturnAction{},
})
// Do the full RPF check and dis-allow accept_local for anything else.
rpfRules = append(rpfRules, rules.RPFilter(t.IPVersion, tc.MarkSeen, tc.MarkSeenMask,
rulesConfig.OpenStackSpecialCasesEnabled, false)...)
rpfChain := []*iptables.Chain{{
Name: rules.ChainNamePrefix + "RPF",
Rules: rpfRules,
}}
t.UpdateChains(rpfChain)
var rawRules []iptables.Rule
if t.IPVersion == 4 && rulesConfig.WireguardEnabled && len(rulesConfig.WireguardInterfaceName) > 0 &&
rulesConfig.RouteSource == "WorkloadIPs" {
// Set a mark on packets coming from any interface except for lo, wireguard, or pod veths to ensure the RPF
// check allows it.
log.Debug("Adding Wireguard iptables rule chain")
rawRules = append(rawRules, iptables.Rule{
Match: nil,
Action: iptables.JumpAction{Target: rules.ChainSetWireguardIncomingMark},
})
t.UpdateChain(d.ruleRenderer.WireguardIncomingMarkChain())
}
rawRules = append(rawRules, iptables.Rule{
Action: iptables.JumpAction{Target: rpfChain[0].Name},
})
rawChains := []*iptables.Chain{{
Name: rules.ChainRawPrerouting,
Rules: rawRules,
}}
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
}
if d.config.BPFExtToServiceConnmark != 0 {
mark := uint32(d.config.BPFExtToServiceConnmark)
for _, t := range d.iptablesMangleTables {
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(
tc.MarkSeen|mark,
tc.MarkSeenMask|mark,
),
Comment: []string{"Mark connections with ExtToServiceConnmark"},
Action: iptables.SetConnMarkAction{Mark: mark, Mask: mark},
}})
}
}
}
func (d *InternalDataplane) setUpIptablesNormal() {
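// In normal (non-BPF) mode, install Calico's static chains and hook them into the kernel's
// built-in chains for the raw, filter, NAT and mangle tables.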
for _, t := range d.iptablesRawTables {
rawChains := d.ruleRenderer.StaticRawTableChains(t.IPVersion)
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawOutput},
}})
}
for _, t := range d.iptablesFilterTables {
filterChains := d.ruleRenderer.StaticFilterTableChains(t.IPVersion)
t.UpdateChains(filterChains)
t.InsertOrAppendRules("FORWARD", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterForward},
}})
t.InsertOrAppendRules("INPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterInput},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterOutput},
}})
// Include rules which should be appended to the filter table forward chain.
t.AppendRules("FORWARD", d.ruleRenderer.StaticFilterForwardAppendRules())
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATOutput},
}})
}
for _, t := range d.iptablesMangleTables {
t.UpdateChains(d.ruleRenderer.StaticMangleTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePostrouting},
}})
}
if d.xdpState != nil {
if err := d.setXDPFailsafePorts(); err != nil {
log.Warnf("failed to set XDP failsafe ports, disabling XDP: %v", err)
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
}
func stringToProtocol(protocol string) (labelindex.IPSetPortProtocol, error) {
switch protocol {
case "tcp":
return labelindex.ProtocolTCP, nil
case "udp":
return labelindex.ProtocolUDP, nil
case "sctp":
return labelindex.ProtocolSCTP, nil
}
return labelindex.ProtocolNone, fmt.Errorf("unknown protocol %q", protocol)
}
func (d *InternalDataplane) setXDPFailsafePorts() error {
inboundPorts := d.config.RulesConfig.FailsafeInboundHostPorts
if _, err := d.xdpState.common.bpfLib.NewFailsafeMap(); err != nil {
return err
}
for _, p := range inboundPorts {
proto, err := stringToProtocol(p.Protocol)
if err != nil {
return err
}
if err := d.xdpState.common.bpfLib.UpdateFailsafeMap(uint8(proto), p.Port); err != nil {
return err
}
}
log.Infof("Set XDP failsafe ports: %+v", inboundPorts)
return nil
}
// shutdownXDPCompletely attempts to disable XDP state. This could fail in cases where XDP isn't working properly.
func (d *InternalDataplane) shutdownXDPCompletely() error {
if d.xdpState == nil {
return nil
}
if d.callbacks != nil {
d.xdpState.DepopulateCallbacks(d.callbacks)
}
// spend 1 second attempting to wipe XDP, in case of a hiccup.
maxTries := 10
waitInterval := 100 * time.Millisecond
var err error
for i := 0; i < maxTries; i++ {
err = d.xdpState.WipeXDP()
if err == nil {
d.xdpState = nil
return nil
}
log.WithError(err).WithField("try", i).Warn("failed to wipe the XDP state")
time.Sleep(waitInterval)
}
return fmt.Errorf("failed to wipe the XDP state after %v tries over %v: %v", maxTries, time.Duration(maxTries)*waitInterval, err)
}
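// loopUpdatingDataplane is the driver's main event loop: it batches updates from the
// calculation graph and the interface monitors, and, once the datastore is in sync,
// applies them to the dataplane, subject to the apply throttle.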
func (d *InternalDataplane) loopUpdatingDataplane() {
log.Info("Started internal iptables dataplane driver loop")
healthTicks := time.NewTicker(healthInterval).C
d.reportHealth()
// Retry any failed operations every 10s.
retryTicker := time.NewTicker(10 * time.Second)
// If configured, start tickers to refresh the IP sets and routing table entries.
var ipSetsRefreshC <-chan time.Time
if d.config.IPSetsRefreshInterval > 0 {
log.WithField("interval", d.config.IptablesRefreshInterval).Info(
"Will refresh IP sets on timer")
refreshTicker := jitter.NewTicker(
d.config.IPSetsRefreshInterval,
d.config.IPSetsRefreshInterval/10,
)
ipSetsRefreshC = refreshTicker.C
}
var routeRefreshC <-chan time.Time
if d.config.RouteRefreshInterval > 0 {
log.WithField("interval", d.config.RouteRefreshInterval).Info(
"Will refresh routes on timer")
refreshTicker := jitter.NewTicker(
d.config.RouteRefreshInterval,
d.config.RouteRefreshInterval/10,
)
routeRefreshC = refreshTicker.C
}
var xdpRefreshC <-chan time.Time
if d.config.XDPRefreshInterval > 0 && d.xdpState != nil {
log.WithField("interval", d.config.XDPRefreshInterval).Info(
"Will refresh XDP on timer")
refreshTicker := jitter.NewTicker(
d.config.XDPRefreshInterval,
d.config.XDPRefreshInterval/10,
)
xdpRefreshC = refreshTicker.C
}
// Fill the apply throttle leaky bucket.
throttleC := jitter.NewTicker(100*time.Millisecond, 10*time.Millisecond).C
beingThrottled := false
datastoreInSync := false
processMsgFromCalcGraph := func(msg interface{}) {
log.WithField("msg", proto.MsgStringer{Msg: msg}).Infof(
"Received %T update from calculation graph", msg)
d.recordMsgStat(msg)
for _, mgr := range d.allManagers {
mgr.OnUpdate(msg)
}
switch msg.(type) {
case *proto.InSync:
log.WithField("timeSinceStart", time.Since(processStartTime)).Info(
"Datastore in sync, flushing the dataplane for the first time...")
datastoreInSync = true
}
}
processIfaceUpdate := func(ifaceUpdate *ifaceUpdate) {
log.WithField("msg", ifaceUpdate).Info("Received interface update")
if ifaceUpdate.Name == KubeIPVSInterface {
d.checkIPVSConfigOnStateUpdate(ifaceUpdate.State)
return
}
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceUpdate)
}
for _, mgr := range d.managersWithRouteTables {
for _, routeTable := range mgr.GetRouteTableSyncers() {
routeTable.OnIfaceStateChanged(ifaceUpdate.Name, ifaceUpdate.State)
}
}
}
processAddrsUpdate := func(ifaceAddrsUpdate *ifaceAddrsUpdate) {
log.WithField("msg", ifaceAddrsUpdate).Info("Received interface addresses update")
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceAddrsUpdate)
}
}
for {
select {
case msg := <-d.toDataplane:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processMsgFromCalcGraph(msg)
msgLoop1:
for i := 0; i < msgPeekLimit; i++ {
select {
case msg := <-d.toDataplane:
processMsgFromCalcGraph(msg)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop1
}
}
d.dataplaneNeedsSync = true
summaryBatchSize.Observe(float64(batchSize))
case ifaceUpdate := <-d.ifaceUpdates:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processIfaceUpdate(ifaceUpdate)
msgLoop2:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceUpdate := <-d.ifaceUpdates:
processIfaceUpdate(ifaceUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop2
}
}
d.dataplaneNeedsSync = true
summaryIfaceBatchSize.Observe(float64(batchSize))
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
batchSize := 1
processAddrsUpdate(ifaceAddrsUpdate)
msgLoop3:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
processAddrsUpdate(ifaceAddrsUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop3
}
}
summaryAddrBatchSize.Observe(float64(batchSize))
d.dataplaneNeedsSync = true
case <-ipSetsRefreshC:
log.Debug("Refreshing IP sets state")
d.forceIPSetsRefresh = true
d.dataplaneNeedsSync = true
case <-routeRefreshC:
log.Debug("Refreshing routes")
d.forceRouteRefresh = true
d.dataplaneNeedsSync = true
case <-xdpRefreshC:
log.Debug("Refreshing XDP")
d.forceXDPRefresh = true
d.dataplaneNeedsSync = true
case <-d.reschedC:
log.Debug("Reschedule kick received")
d.dataplaneNeedsSync = true
// nil out the channel to record that the timer is now inactive.
d.reschedC = nil
case <-throttleC:
d.applyThrottle.Refill()
case <-healthTicks:
d.reportHealth()
case <-retryTicker.C:
case <-d.debugHangC:
log.Warning("Debug hang simulation timer popped, hanging the dataplane!!")
time.Sleep(1 * time.Hour)
log.Panic("Woke up after 1 hour, something's probably wrong with the test.")
}
if datastoreInSync && d.dataplaneNeedsSync {
// Dataplane is out-of-sync, check if we're throttled.
if d.applyThrottle.Admit() {
if beingThrottled && d.applyThrottle.WouldAdmit() {
log.Info("Dataplane updates no longer throttled")
beingThrottled = false
}
log.Debug("Applying dataplane updates")
applyStart := time.Now()
// Actually apply the changes to the dataplane.
d.apply()
// Record stats.
applyTime := time.Since(applyStart)
summaryApplyTime.Observe(applyTime.Seconds())
if d.dataplaneNeedsSync {
// Dataplane is still dirty, record an error.
countDataplaneSyncErrors.Inc()
}
d.loopSummarizer.EndOfIteration(applyTime)
if !d.doneFirstApply {
log.WithField(
"secsSinceStart", time.Since(processStartTime).Seconds(),
).Info("Completed first update to dataplane.")
d.loopSummarizer.RecordOperation("first-update")
d.doneFirstApply = true
if d.config.PostInSyncCallback != nil {
d.config.PostInSyncCallback()
}
}
d.reportHealth()
} else {
if !beingThrottled {
log.Info("Dataplane updates throttled")
beingThrottled = true
}
}
}
}
}
func (d *InternalDataplane) configureKernel() {
// Attempt to modprobe nf_conntrack_proto_sctp. In some kernels this is a
// module that needs to be loaded, otherwise all SCTP packets are marked
// INVALID by conntrack and dropped by Calico's rules. However, some kernels
// (confirmed in Ubuntu 19.10's build of 5.3.0-24-generic) include this
// conntrack without it being a kernel module, and so modprobe will fail.
// Log result at INFO level for troubleshooting, but otherwise ignore any
// failed modprobe calls.
mp := newModProbe(moduleConntrackSCTP, newRealCmd)
out, err := mp.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleConntrackSCTP)
log.Info("Making sure IPv4 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv4/ip_forward", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv4 forwarding sysctl")
}
if d.config.IPv6Enabled {
log.Info("Making sure IPv6 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv6/conf/all/forwarding", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv6 forwarding sysctl")
}
}
if d.config.BPFEnabled && d.config.BPFDisableUnprivileged {
log.Info("BPF enabled, disabling unprivileged BPF usage.")
err := writeProcSys("/proc/sys/kernel/unprivileged_bpf_disabled", "1")
if err != nil {
log.WithError(err).Error("Failed to set unprivileged_bpf_disabled sysctl")
}
}
if d.config.Wireguard.Enabled {
// wireguard module is available in linux kernel >= 5.6
mpwg := newModProbe(moduleWireguard, newRealCmd)
out, err = mpwg.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleWireguard)
}
}
func (d *InternalDataplane) recordMsgStat(msg interface{}) {
typeName := reflect.ValueOf(msg).Elem().Type().Name()
countMessages.WithLabelValues(typeName).Inc()
}
func (d *InternalDataplane) apply() {
// Update sequencing is important here because iptables rules have dependencies on ipsets.
// Creating a rule that references an unknown IP set fails, as does deleting an IP set that
// is in use.
// Unset the needs-sync flag, we'll set it again if something fails.
d.dataplaneNeedsSync = false
// First, give the managers a chance to resolve any state based on the preceding batch of
// updates. In some cases, e.g. EndpointManager, this can result in an update to another
// manager (BPFEndpointManager.OnHEPUpdate) that must happen before either of those managers
// begins its dataplane programming updates.
for _, mgr := range d.allManagers {
if handler, ok := mgr.(UpdateBatchResolver); ok {
err := handler.ResolveUpdateBatch()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't resolve update batch for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
}
// Now allow managers to complete the dataplane programming updates that they need.
for _, mgr := range d.allManagers {
err := mgr.CompleteDeferredWork()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't complete deferred work for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
if d.xdpState != nil {
if d.forceXDPRefresh {
// Refresh timer popped.
d.xdpState.QueueResync()
d.forceXDPRefresh = false
}
var applyXDPError error
d.xdpState.ProcessPendingDiffState(d.endpointsSourceV4)
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
} else {
err := d.xdpState.ProcessMemberUpdates()
d.xdpState.DropPendingDiffState()
if err != nil {
log.WithError(err).Warning("Failed to process XDP member updates, will resync later...")
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
}
}
d.xdpState.UpdateState()
}
if applyXDPError != nil {
log.WithError(applyXDPError).Info("Applying XDP actions did not succeed, disabling XDP")
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
d.reportHealth()
if d.forceRouteRefresh {
// Refresh timer popped.
for _, r := range d.routeTableSyncers() {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceRouteRefresh = false
}
if d.forceIPSetsRefresh {
// Refresh timer popped.
for _, r := range d.ipSets {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceIPSetsRefresh = false
}
// Next, create/update IP sets. We defer deletions of IP sets until after we update
// iptables.
var ipSetsWG sync.WaitGroup
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(ipSets ipsetsDataplane) {
ipSets.ApplyUpdates()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
// Update the routing table in parallel with the other updates. We'll wait for it to finish
// before we return.
var routesWG sync.WaitGroup
for _, r := range d.routeTableSyncers() {
routesWG.Add(1)
go func(r routeTableSyncer) {
err := r.Apply()
if err != nil {
log.Warn("Failed to synchronize routing table, will retry...")
d.dataplaneNeedsSync = true
}
d.reportHealth()
routesWG.Done()
}(r)
}
// Wait for the IP sets update to finish. We can't update iptables until it has.
ipSetsWG.Wait()
// Update iptables, this should sever any references to now-unused IP sets.
var reschedDelayMutex sync.Mutex
var reschedDelay time.Duration
var iptablesWG sync.WaitGroup
for _, t := range d.allIptablesTables {
iptablesWG.Add(1)
go func(t *iptables.Table) {
tableReschedAfter := t.Apply()
reschedDelayMutex.Lock()
defer reschedDelayMutex.Unlock()
if tableReschedAfter != 0 && (reschedDelay == 0 || tableReschedAfter < reschedDelay) {
reschedDelay = tableReschedAfter
}
d.reportHealth()
iptablesWG.Done()
}(t)
}
iptablesWG.Wait()
// Now clean up any left-over IP sets.
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(s ipsetsDataplane) {
s.ApplyDeletions()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
ipSetsWG.Wait()
// Wait for the route updates to finish.
routesWG.Wait()
// And publish any status updates.
d.endpointStatusCombiner.Apply()
// Set up any needed rescheduling kick.
if d.reschedC != nil {
// We have an active rescheduling timer, stop it so we can restart it with a
// different timeout below if it is still needed.
// This snippet comes from the docs for Timer.Stop().
if !d.reschedTimer.Stop() {
// Timer had already popped, drain its channel.
<-d.reschedC
}
// Nil out our copy of the channel to record that the timer is inactive.
d.reschedC = nil
}
if reschedDelay != 0 {
// We need to reschedule.
log.WithField("delay", reschedDelay).Debug("Asked to reschedule.")
if d.reschedTimer == nil {
// First time, create the timer.
d.reschedTimer = time.NewTimer(reschedDelay)
} else {
// Have an existing timer, reset it.
d.reschedTimer.Reset(reschedDelay)
}
d.reschedC = d.reschedTimer.C
}
}
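// applyXDPActions resyncs the XDP state if needed and applies any pending
// BPF actions, retrying with a fresh resync up to 10 times before giving up.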
func (d *InternalDataplane) applyXDPActions() error {
var err error
for i := 0; i < 10; i++ {
err = d.xdpState.ResyncIfNeeded(d.ipsetsSourceV4)
if err != nil {
return err
}
if err = d.xdpState.ApplyBPFActions(d.ipsetsSourceV4); err == nil {
return nil
} else {
log.WithError(err).Info("Applying XDP BPF actions did not succeed, will retry with resync...")
}
}
return err
}
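// loopReportingStatus periodically emits ProcessStatusUpdate messages
// (timestamp and uptime) on the fromDataplane channel, if reporting is enabled.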
func (d *InternalDataplane) loopReportingStatus() {
log.Info("Started internal status report thread")
if d.config.StatusReportingInterval <= 0 {
log.Info("Process status reports disabled")
return
}
// Wait before first report so that we don't check in if we're in a tight cyclic restart.
time.Sleep(10 * time.Second)
for {
uptimeSecs := time.Since(processStartTime).Seconds()
d.fromDataplane <- &proto.ProcessStatusUpdate{
IsoTimestamp: time.Now().UTC().Format(time.RFC3339),
Uptime: uptimeSecs,
}
time.Sleep(d.config.StatusReportingInterval)
}
}
// iptablesTable is a shim interface for iptables.Table.
type iptablesTable interface {
UpdateChain(chain *iptables.Chain)
UpdateChains([]*iptables.Chain)
RemoveChains([]*iptables.Chain)
RemoveChainByName(name string)
}
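// reportHealth reports liveness to the health aggregator, if one is
// configured; readiness is reported once the first apply has completed.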
func (d *InternalDataplane) reportHealth() {
if d.config.HealthAggregator != nil {
d.config.HealthAggregator.Report(
healthName,
&health.HealthReport{Live: true, Ready: d.doneFirstApply},
)
}
}
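// dummyLock is a no-op implementation of sync.Locker, for use where locking
// is not actually required.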
type dummyLock struct{}
func (d dummyLock) Lock() {
}
func (d dummyLock) Unlock() {
}
| 1 | 19,217 | This line should actually be: `if c.KubernetesProvider == config.ProviderAKS && c.Wireguard.EncryptHostTraffic {` because we only need to tweak the MTU like this on AKS. | projectcalico-felix | c |
@@ -71,7 +71,7 @@ var (
const (
projectFlagDescription = "Name of the project."
- appFlagDescription = "Name of the application."
+ svcFlagDescription = "Name of the service."
envFlagDescription = "Name of the environment."
pipelineFlagDescription = "Name of the pipeline."
profileFlagDescription = "Name of the profile."
| 1 |
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"fmt"
"strconv"
"strings"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
)
// Long flag names.
const (
// Common flags.
projectFlag = "project"
nameFlag = "name"
appFlag = "app"
envFlag = "env"
appTypeFlag = "app-type"
profileFlag = "profile"
yesFlag = "yes"
jsonFlag = "json"
// Command specific flags.
dockerFileFlag = "dockerfile"
imageTagFlag = "tag"
resourceTagsFlag = "resource-tags"
stackOutputDirFlag = "output-dir"
limitFlag = "limit"
followFlag = "follow"
sinceFlag = "since"
startTimeFlag = "start-time"
endTimeFlag = "end-time"
envProfilesFlag = "env-profiles"
prodEnvFlag = "prod"
deployFlag = "deploy"
resourcesFlag = "resources"
githubURLFlag = "github-url"
githubAccessTokenFlag = "github-access-token"
gitBranchFlag = "git-branch"
envsFlag = "environments"
domainNameFlag = "domain"
localAppFlag = "local"
deleteSecretFlag = "delete-secret"
appPortFlag = "port"
)
// Short flag names.
// A short flag only exists if the flag is mandatory by the command.
const (
projectFlagShort = "p"
nameFlagShort = "n"
appFlagShort = "a"
envFlagShort = "e"
appTypeFlagShort = "t"
dockerFileFlagShort = "d"
githubURLFlagShort = "u"
githubAccessTokenFlagShort = "t"
gitBranchFlagShort = "b"
envsFlagShort = "e"
)
// Descriptions for flags.
var (
appTypeFlagDescription = fmt.Sprintf(`Type of application to create. Must be one of:
%s`, strings.Join(quoteAll(manifest.ServiceTypes), ", "))
)
const (
projectFlagDescription = "Name of the project."
appFlagDescription = "Name of the application."
envFlagDescription = "Name of the environment."
pipelineFlagDescription = "Name of the pipeline."
profileFlagDescription = "Name of the profile."
yesFlagDescription = "Skips confirmation prompt."
jsonFlagDescription = "Optional. Outputs in JSON format."
dockerFileFlagDescription = "Path to the Dockerfile."
imageTagFlagDescription = `Optional. The application's image tag.`
resourceTagsFlagDescription = `Optional. Labels with a key and value separated with commas.
Allows you to categorize resources.`
stackOutputDirFlagDescription = "Optional. Writes the stack template and template configuration to a directory."
prodEnvFlagDescription = "If the environment contains production services."
limitFlagDescription = "Optional. The maximum number of log events returned."
followFlagDescription = "Optional. Specifies if the logs should be streamed."
sinceFlagDescription = `Optional. Only return logs newer than a relative duration like 5s, 2m, or 3h.
Defaults to all logs. Only one of start-time / since may be used.`
startTimeFlagDescription = `Optional. Only return logs after a specific date (RFC3339).
Defaults to all logs. Only one of start-time / since may be used.`
endTimeFlagDescription = `Optional. Only return logs before a specific date (RFC3339).
Defaults to all logs. Only one of end-time / follow may be used.`
deployTestFlagDescription = `Deploy your application to a "test" environment.`
githubURLFlagDescription = "GitHub repository URL for your application."
githubAccessTokenFlagDescription = "GitHub personal access token for your repository."
gitBranchFlagDescription = "Branch used to trigger your pipeline."
pipelineEnvsFlagDescription = "Environments to add to the pipeline."
domainNameFlagDescription = "Optional. Your existing custom domain name."
resourcesFlagDescription = "Optional. Show the resources of your application."
localAppFlagDescription = "Only show applications in the current directory."
envProfilesFlagDescription = "Optional. Environments and the profile to use to delete the environment."
deleteSecretFlagDescription = "Deletes AWS Secrets Manager secret associated with a pipeline source repository."
appPortFlagDescription = "Optional. The port on which your Dockerfile listens."
)
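// quoteAll returns a copy of elems with each element wrapped in double quotes.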
func quoteAll(elems []string) []string {
quotedElems := make([]string, len(elems))
for i, el := range elems {
quotedElems[i] = strconv.Quote(el)
}
return quotedElems
}
| 1 | 13,083 | Do we need to change this flag as well? | aws-copilot-cli | go |
@@ -10,7 +10,9 @@ module.exports = function (grunt) {
return grunt.file.read(fn);
}).join('\n');
- grunt.file.write(fileset.dest, 'exports.source = ' + JSON.stringify(source) + ';');
+ var file = grunt.file.read(fileset.dest);
+
+ grunt.file.write(fileset.dest, file + 'module.exports.source = ' + JSON.stringify(source) + ';');
});
});
};
| 1 |
/*jshint node: true */
'use strict';
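// Registers the 'nodeify' multi-task: it reads the concatenated engine
// sources and writes them out as a single exported string for use from node.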
module.exports = function (grunt) {
grunt.registerMultiTask('nodeify', function () {
grunt.task.requires('configure');
grunt.task.requires('concat:engine');
this.files.forEach(function (fileset) {
var source = fileset.src.map(function (fn) {
return grunt.file.read(fn);
}).join('\n');
grunt.file.write(fileset.dest, 'exports.source = ' + JSON.stringify(source) + ';');
});
});
};
| 1 | 10,742 | Including the source twice here makes the filesize jump to 432kb. Is there any way to minimize repeating it? | dequelabs-axe-core | js |
@@ -1656,7 +1656,7 @@ class TargetLocator {
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
- setParameter('name', nameOrHandle),
+ setParameter('handle', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const Session = require('./session').Session;
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
/**
* Creates a new WebDriver client, which provides control over a browser.
*
* Every command.Command returns a {@link promise.Promise} that
* represents the result of that command. Callbacks may be registered on this
* object to manipulate the command result or catch an expected error. Any
* commands scheduled with a callback are considered sub-commands and will
* execute before the next command in the current frame. For example:
*
* var message = [];
* driver.call(message.push, message, 'a').then(function() {
* driver.call(message.push, message, 'b');
* });
* driver.call(message.push, message, 'c');
* driver.call(function() {
* alert('message is abc? ' + (message.join('') == 'abc'));
* });
*
*/
class WebDriver {
/**
* @param {!(Session|promise.Promise<!Session>)} session Either a
* known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
*/
constructor(session, executor, opt_flow) {
/** @private {!promise.Promise<!Session>} */
this.session_ = promise.fulfilled(session);
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {input.FileDetector} */
this.fileDetector_ = null;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
 *   // If the createSession operation failed, then this command will
 *   // also fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
return new WebDriver(session, executor, flow);
}
/**
* @return {!promise.ControlFlow} The control flow used by this
* instance.
*/
controlFlow() {
return this.flow_;
}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {
var self = this;
checkHasNotQuit();
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
// attempts to execute the command. To ensure parameters errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(function() {
// A call to WebDriver.quit() may have been scheduled in the same event
// loop as this |command|, which would prevent us from detecting that the
// driver has quit above. Therefore, we need to make another quick check.
// We still check above so we can fail as early as possible.
checkHasNotQuit();
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(self, value));
}, description);
function checkHasNotQuit() {
if (!self.session_) {
throw new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be ' +
'used.');
}
}
}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {
return this.executor_;
}
/**
* @return {!promise.Promise<!Session>} A promise for this client's
* session.
*/
getSession() {
return this.session_;
}
/**
* @return {!promise.Promise<!Capabilities>} A promise
* that will resolve with the this instance's capabilities.
*/
getCapabilities() {
return this.session_.then(session => session.getCapabilities());
}
/**
* Schedules a command to quit the current session. After calling quit, this
* instance will be invalidated and may no longer be used to issue commands
* against the browser.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return result.finally(() => delete this.session_);
}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {
return new actions.ActionSequence(this);
}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {
return new actions.TouchSequence(this);
}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
 * function's return value:
 *
 * - For an HTML element, the value will resolve to a {@linkplain WebElement}
 * - Null and undefined return values will resolve to null
 * - Booleans, numbers, and strings will resolve as is
 * - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
 * taken for resolving this function's return value against the first argument
 * to the script's callback function:
 *
 * - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeAsyncScript()');
}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|promise.Promise<T>)} fn The function to
* execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
 * @return {!promise.Promise<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
let flow = this.flow_;
return flow.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
 * the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
 * element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(promise.Promise<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
 *     function, or rejected if the condition times out. If the input
 *     condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @template T
*/
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!promise.Promise} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Promise.<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/**
* Schedules a command to close the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {
return this.navigate().to(url);
}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
 * present on the page, use {@link #findElements} and check for a non-empty
 * result instead.
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Promise.<!WebElement>} A
* promise that will resolve to a list of WebElements.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/**
* Schedule a command to search for multiple elements on the page.
*
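 * __Example__ (an illustrative sketch, not from the original docs):
 *
 *     driver.findElements(By.css('a')).then(function(links) {
 *       console.log('Found ' + links.length + ' links');
 *     });
 *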
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Promise.<!Array.<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Promise<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {
return new Options(this);
}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {
return new Navigation(this);
}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
/**
* Provides methods for managing browser and driver state.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
 *     driver.manage().addCookie({name: 'foo', value: 'bar'});
 *
 *     // Set a cookie that expires in 10 minutes.
 *     let expiry = new Date(Date.now() + (10 * 60 * 1000));
 *     driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
 *
 *     // The cookie expiration may also be specified in seconds since epoch.
 *     driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
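 * __Example__ (an illustrative sketch, not from the original docs; 'session'
 * is just an example cookie name):
 *
 *     driver.manage().getCookie('session').then(function(cookie) {
 *       console.log(cookie ? cookie.value : 'no such cookie');
 *     });
 *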
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
 * Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
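 * __Example__ (an illustrative sketch, not from the original docs):
 *
 *     // Poll for up to one second when locating elements.
 *     driver.manage().timeouts().implicitlyWait(1000);
 *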
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
*/
implicitlyWait(ms) {
return this._scheduleCommand(ms, 'implicit', 'implicitlyWait');
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the script timeout has been set.
*/
setScriptTimeout(ms) {
return this._scheduleCommand(ms, 'script', 'setScriptTimeout');
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the timeout has been set.
*/
pageLoadTimeout(ms) {
return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout');
}
_scheduleCommand(ms, timeoutIdentifier, timeoutName) {
return this.driver_.schedule(
new command.Command(command.Name.SET_TIMEOUT).
setParameter('type', timeoutIdentifier).
setParameter('ms', ms),
`WebDriver.manage().timeouts().${timeoutName}(${ms})`);
}
}
/**
* An interface for managing the current window.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Promise.<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
 * @param {number} y The desired vertical position, relative to the top
 *     of the screen.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Promise<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
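 * __Example__ (an illustrative sketch; assumes the browser log type is
 * supported by the target driver):
 *
 *     driver.manage().logs().get(logging.Type.BROWSER).then(function(entries) {
 *       entries.forEach(function(entry) {
 *         console.log('[%s] %s', entry.level.name, entry.message);
 *       });
 *     });
 *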
* @param {!logging.Type} type The desired log type.
* @return {!promise.Promise.<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
/**
* Retrieves the log types available to this driver.
* @return {!promise.Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
   * If the specified frame cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
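   * For example (a sketch; the index and element values are hypothetical):
   *
   *     driver.switchTo().frame(0);              // first frame, by index
   *     driver.switchTo().frame(iframeElement);  // by WebElement reference
   *     driver.switchTo().frame(null);           // back to the topmost frame
   *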
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
* by its handle (as returned by {@link WebDriver#getWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
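   * Example (a sketch; 'popup' is a hypothetical window.name value):
   *
   *     driver.switchTo().window('popup');
   *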
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
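// Over the wire an element reference is a plain JSON object keyed by one or
// both of the IDs above, e.g. (with a hypothetical opaque ID):
//   {'element-6066-11e4-a52e-4f735466cecf': 'abc123', 'ELEMENT': 'abc123'}
// WebElement.buildId and WebElement.extractId below convert between this
// encoding and the raw ID string.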
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.id_ = promise.fulfilled(id);
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
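   *
   * Example (a sketch):
   *
   *     WebElement.equals(el1, el2).then(function(same) {
   *       console.log('same element? ' + same);
   *     });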
*/
static equals(a, b) {
if (a === b) {
return promise.fulfilled(true);
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this.getId());
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Promise<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the keysequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
* this key is encountered, all modifier keys current in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the keysequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
   * the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
   * punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
                  'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.driver_.flow_.execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
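   * Example (a sketch; the locator and attribute are hypothetical):
   *
   *     driver.findElement(By.css('a.nav')).getAttribute('href')
   *         .then(function(href) {
   *           console.log(href);  // the attribute/property value, or null
   *         });
   *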
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Promise.<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Promise.<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
   * instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
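   *
   * Example (a sketch; persists the result with Node's fs module):
   *
   *     element.takeScreenshot(true).then(function(png) {
   *       require('fs').writeFileSync('element.png', png, 'base64');
   *     });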
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.Thenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/** @override */
this.cancel = el.cancel.bind(el);
/** @override */
this.isPending = el.isPending.bind(el);
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/** @override */
this.finally = el.finally.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.Thenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.text_ = promise.fulfilled(text);
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
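   * Example (a sketch with placeholder credentials):
   *
   *     driver.switchTo().alert().authenticateAs('user', 'secret');
   *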
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Promise<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
/**
* Accepts this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.Thenable.<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/** @override */
this.cancel = alert.cancel.bind(alert);
/** @override */
this.isPending = alert.isPending.bind(alert);
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/** @override */
this.finally = alert.finally.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.Thenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 13,587 | This should only be sent if the driver is speaking to a W3C conformant remote, so we need an if-condition check like we have in the Python bindings. | SeleniumHQ-selenium | rb |
@@ -76,7 +76,15 @@ module Bolt
target = request['target']
plan_vars = shadow_vars('plan', request['plan_vars'], target['facts'])
target_vars = shadow_vars('target', target['variables'], target['facts'])
- topscope_vars = target_vars.merge(plan_vars)
+
+ # Merge plan vars with target vars, while maintaining the order of the plan
+ # vars. It's critical that the order of plan vars is not changed, as Puppet
+ # will deserialize the variables in the order they appear. Variables may
+ # contain local references to variables that appear earlier in a plan. If
+ # these variables are moved before the variable they reference, Puppet will
+ # be unable to deserialize the data and raise an error.
+ topscope_vars = target_vars.reject { |k, _v| plan_vars.key?(k) }.merge(plan_vars)
+
env_conf = { modulepath: request['modulepath'],
facts: target['facts'],
variables: topscope_vars } | 1 | # frozen_string_literal: true
require 'bolt/apply_inventory'
require 'bolt/apply_target'
require 'bolt/config'
require 'bolt/error'
require 'bolt/inventory'
require 'bolt/pal'
require 'bolt/puppetdb'
require 'bolt/util'
Bolt::PAL.load_puppet
require 'bolt/catalog/logging'
module Bolt
class Catalog
def initialize(log_level = 'debug')
@log_level = log_level
end
def with_puppet_settings(overrides = {})
Dir.mktmpdir('bolt') do |dir|
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.settings.send(:clear_everything_for_tests)
      # Override module locations; Bolt includes vendored modules in its internal modulepath.
Puppet.settings.override_default(:basemodulepath, '')
Puppet.settings.override_default(:vendormoduledir, '')
Puppet.initialize_settings(cli)
overrides.each do |setting, value|
Puppet.settings[setting] = value
end
# Use a special logdest that serializes all log messages and their level to stderr.
Puppet::Util::Log.newdestination(:stderr)
Puppet.settings[:log_level] = @log_level
yield
end
end
def generate_ast(code, filename = nil)
with_puppet_settings do
Puppet::Pal.in_tmp_environment("bolt_parse") do |pal|
pal.with_catalog_compiler do |compiler|
ast = compiler.parse_string(code, filename)
Puppet::Pops::Serialization::ToDataConverter.convert(ast,
rich_data: true,
symbol_to_string: true)
end
end
end
end
def compile_catalog(request)
pdb_client = Bolt::PuppetDB::Client.new(Bolt::PuppetDB::Config.new(request['pdb_config']))
project = request['project'] || {}
bolt_project = Struct.new(:name, :path).new(project['name'], project['path']) unless project.empty?
inv = Bolt::ApplyInventory.new(request['config'])
puppet_overrides = {
bolt_pdb_client: pdb_client,
bolt_inventory: inv,
bolt_project: bolt_project
}
# Facts will be set by the catalog compiler, so we need to ensure
# that any plan or target variables with the same name are not
# passed into the apply block to avoid a redefinition error.
# Filter out plan and target vars separately and raise a Puppet
# warning if there are any collisions for either. Puppet warning
# is the only way to log a message that will make it back to Bolt
# to be printed.
target = request['target']
plan_vars = shadow_vars('plan', request['plan_vars'], target['facts'])
target_vars = shadow_vars('target', target['variables'], target['facts'])
topscope_vars = target_vars.merge(plan_vars)
env_conf = { modulepath: request['modulepath'],
facts: target['facts'],
variables: topscope_vars }
puppet_settings = {
node_name_value: target['name'],
hiera_config: request['hiera_config']
}
with_puppet_settings(puppet_settings) do
Puppet::Pal.in_tmp_environment('bolt_catalog', env_conf) do |pal|
Puppet.override(puppet_overrides) do
Puppet.lookup(:pal_current_node).trusted_data = target['trusted']
pal.with_catalog_compiler do |compiler|
options = request['puppet_config'] || {}
# Configure language strictness in the CatalogCompiler. We want Bolt to be able
# to compile most Puppet 4+ manifests, so we default to allowing deprecated functions.
Puppet[:strict] = options['strict'] || :warning
Puppet[:strict_variables] = options['strict_variables'] || false
pal_main = request['code_ast'] || request['code_string']
ast = build_program(pal_main)
compiler.evaluate(ast)
compiler.evaluate_ast_node
compiler.compile_additions
compiler.with_json_encoding(&:encode)
end
end
end
end
end
# Warn and remove variables that will be shadowed by facts of the same
# name, which are set in scope earlier.
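    # For example (hypothetical values):
    #   shadow_vars('plan', { 'role' => 'db', 'os' => 'linux' }, { 'os' => 'RedHat' })
    #   # warns that $os will be overridden and returns { 'role' => 'db' }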
def shadow_vars(type, vars, facts)
collisions, valid = vars.partition do |k, _|
facts.include?(k)
end
if collisions.any?
names = collisions.map { |k, _| "$#{k}" }.join(', ')
plural = collisions.length == 1 ? '' : 's'
Puppet.warning("#{type.capitalize} variable#{plural} #{names} will be overridden by fact#{plural} " \
"of the same name in the apply block")
end
valid.to_h
end
def build_program(code)
ast = Puppet::Pops::Serialization::FromDataConverter.convert(code)
# This will be a Program when running via `bolt apply`, but will
# only be a subset of the AST when compiling an apply block in a
# plan. In that case, we need to discover the definitions (which
# would ordinarily be stored on the Program) and construct a Program object.
if ast.is_a?(Puppet::Pops::Model::Program)
ast
else
# Node definitions must be at the top level of the apply block.
# That means the apply body either a) consists of just a
# NodeDefinition, b) consists of a BlockExpression which may
# contain NodeDefinitions, or c) doesn't contain NodeDefinitions.
definitions = case ast
when Puppet::Pops::Model::BlockExpression
ast.statements.select { |st| st.is_a?(Puppet::Pops::Model::NodeDefinition) }
when Puppet::Pops::Model::NodeDefinition
[ast]
else
[]
end
Puppet::Pops::Model::Factory.PROGRAM(ast, definitions, ast.locator).model
end
end
end
end
| 1 | 15,564 | Only one line of code? What a simple issue! | puppetlabs-bolt | rb |
@@ -111,10 +111,8 @@ func NewHandler(
status: common.DaemonStatusInitialized,
config: config,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
- rateLimiter: quotas.NewDynamicRateLimiter(
- func() float64 {
- return float64(config.RPS())
- },
+ rateLimiter: quotas.NewDefaultIncomingDynamicRateLimiter(
+ func() float64 { return float64(config.RPS()) },
),
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"sync"
"sync/atomic"
"go.temporal.io/server/common/convert"
serviceConfig "go.temporal.io/server/common/service/config"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/events"
"go.temporal.io/server/service/history/shard"
"github.com/pborman/uuid"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
namespacespb "go.temporal.io/server/api/namespace/v1"
replicationspb "go.temporal.io/server/api/replication/v1"
tokenspb "go.temporal.io/server/api/token/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/messaging"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/quotas"
"go.temporal.io/server/common/resource"
serviceerrors "go.temporal.io/server/common/serviceerror"
"go.temporal.io/server/common/task"
)
type (
// Handler - gRPC handler interface for historyservice
Handler struct {
resource.Resource
status int32
controller *shard.ControllerImpl
tokenSerializer common.TaskTokenSerializer
startWG sync.WaitGroup
config *configs.Config
eventNotifier events.Notifier
publisher messaging.Producer
rateLimiter quotas.Limiter
replicationTaskFetchers ReplicationTaskFetchers
queueTaskProcessor queueTaskProcessor
}
)
const (
serviceName = "temporal.api.workflowservice.v1.HistoryService"
)
var (
_ shard.EngineFactory = (*Handler)(nil)
_ historyservice.HistoryServiceServer = (*Handler)(nil)
errNamespaceNotSet = serviceerror.NewInvalidArgument("Namespace not set on request.")
errWorkflowExecutionNotSet = serviceerror.NewInvalidArgument("WorkflowExecution not set on request.")
errTaskQueueNotSet = serviceerror.NewInvalidArgument("Task queue not set.")
errWorkflowIDNotSet = serviceerror.NewInvalidArgument("WorkflowId is not set on request.")
errRunIDNotValid = serviceerror.NewInvalidArgument("RunId is not valid UUID.")
errSourceClusterNotSet = serviceerror.NewInvalidArgument("Source Cluster not set on request.")
errShardIDNotSet = serviceerror.NewInvalidArgument("ShardId not set on request.")
errTimestampNotSet = serviceerror.NewInvalidArgument("Timestamp not set on request.")
errInvalidTaskType = serviceerror.NewInvalidArgument("Invalid task type")
errDeserializeTaskToken = serviceerror.NewInvalidArgument("Error to deserialize task token. Error: %v.")
errHistoryHostThrottle = serviceerror.NewResourceExhausted("History host RPS exceeded.")
errShuttingDown = serviceerror.NewInternal("Shutting down")
)
// NewHandler creates a gRPC handler for the history service
func NewHandler(
resource resource.Resource,
config *configs.Config,
) *Handler {
handler := &Handler{
Resource: resource,
status: common.DaemonStatusInitialized,
config: config,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
rateLimiter: quotas.NewDynamicRateLimiter(
func() float64 {
return float64(config.RPS())
},
),
}
// prevent us from trying to serve requests before shard controller is started and ready
handler.startWG.Add(1)
return handler
}
// Start starts the handler
func (h *Handler) Start() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
if h.GetClusterMetadata().IsGlobalNamespaceEnabled() {
if h.GetClusterMetadata().GetReplicationConsumerConfig().Type == serviceConfig.ReplicationConsumerTypeKafka {
var err error
h.publisher, err = h.GetMessagingClient().NewProducerWithClusterName(h.GetClusterMetadata().GetCurrentClusterName())
if err != nil {
h.GetLogger().Fatal("Creating kafka producer failed", tag.Error(err))
}
}
}
h.replicationTaskFetchers = NewReplicationTaskFetchers(
h.GetLogger(),
h.config,
h.GetClusterMetadata().GetReplicationConsumerConfig(),
h.GetClusterMetadata(),
h.GetClientBean(),
)
h.replicationTaskFetchers.Start()
if h.config.EnablePriorityTaskProcessor() {
var err error
taskPriorityAssigner := newTaskPriorityAssigner(
h.GetClusterMetadata().GetCurrentClusterName(),
h.GetNamespaceCache(),
h.GetLogger(),
h.GetMetricsClient(),
h.config,
)
schedulerType := task.SchedulerType(h.config.TaskSchedulerType())
queueTaskProcessorOptions := &queueTaskProcessorOptions{
schedulerType: schedulerType,
}
switch schedulerType {
case task.SchedulerTypeFIFO:
queueTaskProcessorOptions.fifoSchedulerOptions = &task.FIFOTaskSchedulerOptions{
QueueSize: h.config.TaskSchedulerQueueSize(),
WorkerCount: h.config.TaskSchedulerWorkerCount(),
RetryPolicy: common.CreatePersistanceRetryPolicy(),
}
case task.SchedulerTypeWRR:
queueTaskProcessorOptions.wRRSchedulerOptions = &task.WeightedRoundRobinTaskSchedulerOptions{
Weights: h.config.TaskSchedulerRoundRobinWeights,
QueueSize: h.config.TaskSchedulerQueueSize(),
WorkerCount: h.config.TaskSchedulerWorkerCount(),
RetryPolicy: common.CreatePersistanceRetryPolicy(),
}
default:
h.GetLogger().Fatal("Unknown task scheduler type", tag.Value(schedulerType))
}
h.queueTaskProcessor, err = newQueueTaskProcessor(
taskPriorityAssigner,
queueTaskProcessorOptions,
h.GetLogger(),
h.GetMetricsClient(),
)
if err != nil {
h.GetLogger().Fatal("Creating priority task processor failed", tag.Error(err))
}
h.queueTaskProcessor.Start()
}
h.controller = shard.NewController(
h.Resource,
h,
h.config,
)
h.eventNotifier = events.NewNotifier(h.GetTimeSource(), h.GetMetricsClient(), h.config.GetShardID)
	// events notifier must start before controller
h.eventNotifier.Start()
h.controller.Start()
h.startWG.Done()
}
// Stop stops the handler
func (h *Handler) Stop() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
h.replicationTaskFetchers.Stop()
if h.queueTaskProcessor != nil {
h.queueTaskProcessor.Stop()
}
h.controller.Stop()
h.eventNotifier.Stop()
}
func (h *Handler) isStopped() bool {
return atomic.LoadInt32(&h.status) == common.DaemonStatusStopped
}
// CreateEngine is implementation for HistoryEngineFactory used for creating the engine instance for shard
func (h *Handler) CreateEngine(
shardContext shard.Context,
) shard.Engine {
return NewEngineWithShardContext(
shardContext,
h.GetVisibilityManager(),
h.GetMatchingClient(),
h.GetHistoryClient(),
h.GetSDKClient(),
h.eventNotifier,
h.publisher,
h.config,
h.replicationTaskFetchers,
h.GetMatchingRawClient(),
h.queueTaskProcessor,
)
}
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
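// A client-side sketch of how this endpoint is exercised (conn is a
// hypothetical *grpc.ClientConn):
//
//	resp, err := healthpb.NewHealthClient(conn).Check(ctx,
//		&healthpb.HealthCheckRequest{Service: serviceName})
//	// err == nil && resp.Status == healthpb.HealthCheckResponse_SERVING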
func (h *Handler) Check(_ context.Context, request *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
h.GetLogger().Debug("History service health check endpoint (gRPC) reached.")
h.startWG.Wait()
if request.Service != serviceName {
return &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVICE_UNKNOWN,
}, nil
}
hs := &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVING,
}
return hs, nil
}
func (h *Handler) Watch(*healthpb.HealthCheckRequest, healthpb.Health_WatchServer) error {
return serviceerror.NewUnimplemented("Watch is not implemented.")
}
// RecordActivityTaskHeartbeat - Record Activity Task heartbeat.
func (h *Handler) RecordActivityTaskHeartbeat(ctx context.Context, request *historyservice.RecordActivityTaskHeartbeatRequest) (_ *historyservice.RecordActivityTaskHeartbeatResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRecordActivityTaskHeartbeatScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
heartbeatRequest := request.HeartbeatRequest
taskToken, err0 := h.tokenSerializer.Deserialize(heartbeatRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
err0 = validateTaskToken(taskToken)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := taskToken.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
response, err2 := engine.RecordActivityTaskHeartbeat(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return response, nil
}
// RecordActivityTaskStarted - Record Activity Task started.
func (h *Handler) RecordActivityTaskStarted(ctx context.Context, request *historyservice.RecordActivityTaskStartedRequest) (_ *historyservice.RecordActivityTaskStartedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRecordActivityTaskStartedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
if request.GetNamespaceId() == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, workflowID)
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, workflowID)
}
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
response, err2 := engine.RecordActivityTaskStarted(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return response, nil
}
// RecordWorkflowTaskStarted - Record Workflow Task started.
func (h *Handler) RecordWorkflowTaskStarted(ctx context.Context, request *historyservice.RecordWorkflowTaskStartedRequest) (_ *historyservice.RecordWorkflowTaskStartedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
h.GetLogger().Debug("RecordWorkflowTaskStarted",
tag.WorkflowNamespaceID(request.GetNamespaceId()),
tag.WorkflowID(request.WorkflowExecution.GetWorkflowId()),
tag.WorkflowRunID(request.WorkflowExecution.GetRunId()),
tag.WorkflowScheduleID(request.GetScheduleId()))
scope := metrics.HistoryRecordWorkflowTaskStartedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, workflowID)
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, workflowID)
}
if request.PollRequest == nil || request.PollRequest.TaskQueue.GetName() == "" {
return nil, h.error(errTaskQueueNotSet, scope, namespaceID, workflowID)
}
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
h.GetLogger().Error("RecordWorkflowTaskStarted failed.",
tag.Error(err1),
tag.WorkflowID(request.WorkflowExecution.GetWorkflowId()),
tag.WorkflowRunID(request.WorkflowExecution.GetRunId()),
tag.WorkflowScheduleID(request.GetScheduleId()),
)
return nil, h.error(err1, scope, namespaceID, workflowID)
}
response, err2 := engine.RecordWorkflowTaskStarted(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return response, nil
}
// RespondActivityTaskCompleted - records completion of an activity task
func (h *Handler) RespondActivityTaskCompleted(ctx context.Context, request *historyservice.RespondActivityTaskCompletedRequest) (_ *historyservice.RespondActivityTaskCompletedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRespondActivityTaskCompletedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
completeRequest := request.CompleteRequest
taskToken, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
err0 = validateTaskToken(taskToken)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := taskToken.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RespondActivityTaskCompleted(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RespondActivityTaskCompletedResponse{}, nil
}
// RespondActivityTaskFailed - records failure of an activity task
func (h *Handler) RespondActivityTaskFailed(ctx context.Context, request *historyservice.RespondActivityTaskFailedRequest) (_ *historyservice.RespondActivityTaskFailedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRespondActivityTaskFailedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
failRequest := request.FailedRequest
taskToken, err0 := h.tokenSerializer.Deserialize(failRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
err0 = validateTaskToken(taskToken)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := taskToken.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RespondActivityTaskFailed(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RespondActivityTaskFailedResponse{}, nil
}
// RespondActivityTaskCanceled - records failure of an activity task
func (h *Handler) RespondActivityTaskCanceled(ctx context.Context, request *historyservice.RespondActivityTaskCanceledRequest) (_ *historyservice.RespondActivityTaskCanceledResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRespondActivityTaskCanceledScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
cancelRequest := request.CancelRequest
taskToken, err0 := h.tokenSerializer.Deserialize(cancelRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
err0 = validateTaskToken(taskToken)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := taskToken.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RespondActivityTaskCanceled(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RespondActivityTaskCanceledResponse{}, nil
}
// RespondWorkflowTaskCompleted - records completion of a workflow task
func (h *Handler) RespondWorkflowTaskCompleted(ctx context.Context, request *historyservice.RespondWorkflowTaskCompletedRequest) (_ *historyservice.RespondWorkflowTaskCompletedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRespondWorkflowTaskCompletedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
completeRequest := request.CompleteRequest
if len(completeRequest.Commands) == 0 {
h.GetMetricsClient().IncCounter(scope, metrics.EmptyCompletionCommandsCounter)
}
token, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
h.GetLogger().Debug("RespondWorkflowTaskCompleted",
tag.WorkflowNamespaceID(token.GetNamespaceId()),
tag.WorkflowID(token.GetWorkflowId()),
tag.WorkflowRunID(token.GetRunId()),
tag.WorkflowScheduleID(token.GetScheduleId()))
err0 = validateTaskToken(token)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := token.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
response, err2 := engine.RespondWorkflowTaskCompleted(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return response, nil
}
// RespondWorkflowTaskFailed - failed response to workflow task
func (h *Handler) RespondWorkflowTaskFailed(ctx context.Context, request *historyservice.RespondWorkflowTaskFailedRequest) (_ *historyservice.RespondWorkflowTaskFailedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRespondWorkflowTaskFailedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
failedRequest := request.FailedRequest
token, err0 := h.tokenSerializer.Deserialize(failedRequest.TaskToken)
if err0 != nil {
return nil, h.error(errDeserializeTaskToken.MessageArgs(err0), scope, namespaceID, "")
}
h.GetLogger().Debug("RespondWorkflowTaskFailed",
tag.WorkflowNamespaceID(token.GetNamespaceId()),
tag.WorkflowID(token.GetWorkflowId()),
tag.WorkflowRunID(token.GetRunId()),
tag.WorkflowScheduleID(token.GetScheduleId()))
if failedRequest.GetCause() == enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND {
h.GetLogger().Info("Non-Deterministic Error", tag.WorkflowNamespaceID(token.GetNamespaceId()), tag.WorkflowID(token.GetWorkflowId()), tag.WorkflowRunID(token.GetRunId()))
namespace, err := h.GetNamespaceCache().GetNamespaceName(token.GetNamespaceId())
var namespaceTag metrics.Tag
if err == nil {
namespaceTag = metrics.NamespaceTag(namespace)
} else {
namespaceTag = metrics.NamespaceUnknownTag()
}
h.GetMetricsClient().Scope(scope, namespaceTag).IncCounter(metrics.ServiceErrNonDeterministicCounter)
}
err0 = validateTaskToken(token)
if err0 != nil {
return nil, h.error(err0, scope, namespaceID, "")
}
workflowID := token.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RespondWorkflowTaskFailed(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RespondWorkflowTaskFailedResponse{}, nil
}
// StartWorkflowExecution - creates a new workflow execution
func (h *Handler) StartWorkflowExecution(ctx context.Context, request *historyservice.StartWorkflowExecutionRequest) (_ *historyservice.StartWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryStartWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
startRequest := request.StartRequest
workflowID := startRequest.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
response, err2 := engine.StartWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return response, nil
}
// DescribeHistoryHost returns information about the internal states of a history host
func (h *Handler) DescribeHistoryHost(_ context.Context, _ *historyservice.DescribeHistoryHostRequest) (_ *historyservice.DescribeHistoryHostResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
itemsInCacheByIDCount, itemsInCacheByNameCount := h.GetNamespaceCache().GetCacheSize()
status := ""
switch h.controller.Status() {
case common.DaemonStatusInitialized:
status = "initialized"
case common.DaemonStatusStarted:
status = "started"
case common.DaemonStatusStopped:
status = "stopped"
}
resp := &historyservice.DescribeHistoryHostResponse{
ShardsNumber: int32(h.controller.NumShards()),
ShardIds: h.controller.ShardIDs(),
NamespaceCache: &namespacespb.NamespaceCacheInfo{
ItemsInCacheByIdCount: itemsInCacheByIDCount,
ItemsInCacheByNameCount: itemsInCacheByNameCount,
},
ShardControllerStatus: status,
Address: h.GetHostInfo().GetAddress(),
}
return resp, nil
}
// RemoveTask deletes a task from the shard's queue, identified by its category and task ID
func (h *Handler) RemoveTask(_ context.Context, request *historyservice.RemoveTaskRequest) (_ *historyservice.RemoveTaskResponse, retError error) {
executionMgr, err := h.GetExecutionManager(request.GetShardId())
if err != nil {
return nil, err
}
switch request.GetCategory() {
case enumsspb.TASK_CATEGORY_TRANSFER:
err = executionMgr.CompleteTransferTask(&persistence.CompleteTransferTaskRequest{
TaskID: request.GetTaskId(),
})
case enumsspb.TASK_CATEGORY_VISIBILITY:
err = executionMgr.CompleteVisibilityTask(&persistence.CompleteVisibilityTaskRequest{
TaskID: request.GetTaskId(),
})
case enumsspb.TASK_CATEGORY_TIMER:
err = executionMgr.CompleteTimerTask(&persistence.CompleteTimerTaskRequest{
VisibilityTimestamp: timestamp.TimeValue(request.GetVisibilityTime()),
TaskID: request.GetTaskId(),
})
case enumsspb.TASK_CATEGORY_REPLICATION:
err = executionMgr.CompleteReplicationTask(&persistence.CompleteReplicationTaskRequest{
TaskID: request.GetTaskId(),
})
default:
err = errInvalidTaskType
}
return &historyservice.RemoveTaskResponse{}, err
}
// CloseShard closes a shard hosted by this instance
func (h *Handler) CloseShard(_ context.Context, request *historyservice.CloseShardRequest) (_ *historyservice.CloseShardResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.controller.RemoveEngineForShard(request.GetShardId(), nil)
return &historyservice.CloseShardResponse{}, nil
}
// DescribeMutableState - returns the internal analysis of workflow execution state
func (h *Handler) DescribeMutableState(ctx context.Context, request *historyservice.DescribeMutableStateRequest) (_ *historyservice.DescribeMutableStateResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryDescribeMutableStateScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
workflowExecution := request.Execution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.DescribeMutableState(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// GetMutableState - returns the id of the next event in the execution's history
func (h *Handler) GetMutableState(ctx context.Context, request *historyservice.GetMutableStateRequest) (_ *historyservice.GetMutableStateResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryGetMutableStateScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.Execution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.GetMutableState(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// PollMutableState - returns the id of the next event in the execution's history
func (h *Handler) PollMutableState(ctx context.Context, request *historyservice.PollMutableStateRequest) (_ *historyservice.PollMutableStateResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryPollMutableStateScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.Execution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.PollMutableState(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// DescribeWorkflowExecution returns information about the specified workflow execution.
func (h *Handler) DescribeWorkflowExecution(ctx context.Context, request *historyservice.DescribeWorkflowExecutionRequest) (_ *historyservice.DescribeWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryDescribeWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.Request.Execution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.DescribeWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// RequestCancelWorkflowExecution - requests cancellation of a workflow
func (h *Handler) RequestCancelWorkflowExecution(ctx context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest) (_ *historyservice.RequestCancelWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRequestCancelWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" || request.CancelRequest.GetNamespace() == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
cancelRequest := request.CancelRequest
h.GetLogger().Debug("RequestCancelWorkflowExecution",
tag.WorkflowNamespace(cancelRequest.GetNamespace()),
tag.WorkflowNamespaceID(request.GetNamespaceId()),
tag.WorkflowID(cancelRequest.WorkflowExecution.GetWorkflowId()),
tag.WorkflowRunID(cancelRequest.WorkflowExecution.GetRunId()))
workflowID := cancelRequest.WorkflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RequestCancelWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RequestCancelWorkflowExecutionResponse{}, nil
}
// SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in
// WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution.
func (h *Handler) SignalWorkflowExecution(ctx context.Context, request *historyservice.SignalWorkflowExecutionRequest) (_ *historyservice.SignalWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistorySignalWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.SignalRequest.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.SignalWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.SignalWorkflowExecutionResponse{}, nil
}
// SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.
// If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history
// and a workflow task being created for the execution.
// If workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled
// event recorded in history, and a workflow task being created for the execution
func (h *Handler) SignalWithStartWorkflowExecution(ctx context.Context, request *historyservice.SignalWithStartWorkflowExecutionRequest) (_ *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistorySignalWithStartWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
signalWithStartRequest := request.SignalWithStartRequest
workflowID := signalWithStartRequest.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
for {
resp, err2 := engine.SignalWithStartWorkflowExecution(ctx, request)
if err2 == nil {
return resp, nil
}
// Two simultaneous SignalWithStart requests might try to start a workflow at the same time.
// This can result in one of the requests failing with one of two possible errors:
// 1) If it is a brand new WF ID, one of the requests can fail with WorkflowExecutionAlreadyStartedError
// (createMode is persistence.CreateWorkflowModeBrandNew)
// 2) If it is an already existing WF ID, one of the requests can fail with a CurrentWorkflowConditionFailedError
// (createMode is persistence.CreateWorkflowModeWorkflowIDReuse)
// If either error occurs, just go ahead and retry. It should succeed on the subsequent attempt.
// For simplicity, we keep trying unless the context finishes or we get an error that is not one of the
// two mentioned above.
_, isExecutionAlreadyStartedErr := err2.(*persistence.WorkflowExecutionAlreadyStartedError)
_, isWorkflowConditionFailedErr := err2.(*persistence.CurrentWorkflowConditionFailedError)
isContextDone := false
select {
case <-ctx.Done():
isContextDone = true
if ctxErr := ctx.Err(); ctxErr != nil {
err2 = ctxErr
}
default:
}
if (!isExecutionAlreadyStartedErr && !isWorkflowConditionFailedErr) || isContextDone {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
}
}
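// The retry classification in the loop above can be factored out for clarity.
// A minimal sketch (hypothetical helper, not part of the original handler) of
// which persistence errors the SignalWithStart loop treats as retryable:
func isRetryableSignalWithStartError(err error) bool {
	switch err.(type) {
	case *persistence.WorkflowExecutionAlreadyStartedError,
		*persistence.CurrentWorkflowConditionFailedError:
		// Both errors indicate that a concurrent SignalWithStart won the race
		// to create the workflow; a subsequent attempt is expected to succeed.
		return true
	}
	return false
}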
// RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently
// used to clean execution info when signal workflow task finished.
func (h *Handler) RemoveSignalMutableState(ctx context.Context, request *historyservice.RemoveSignalMutableStateRequest) (_ *historyservice.RemoveSignalMutableStateResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRemoveSignalMutableStateScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RemoveSignalMutableState(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RemoveSignalMutableStateResponse{}, nil
}
// TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event
// in the history and immediately terminating the execution instance.
func (h *Handler) TerminateWorkflowExecution(ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest) (_ *historyservice.TerminateWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryTerminateWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.TerminateRequest.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.TerminateWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.TerminateWorkflowExecutionResponse{}, nil
}
// ResetWorkflowExecution resets an existing workflow execution.
func (h *Handler) ResetWorkflowExecution(ctx context.Context, request *historyservice.ResetWorkflowExecutionRequest) (_ *historyservice.ResetWorkflowExecutionResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryResetWorkflowExecutionScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.ResetRequest.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.ResetWorkflowExecution(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// QueryWorkflow queries a workflow.
func (h *Handler) QueryWorkflow(ctx context.Context, request *historyservice.QueryWorkflowRequest) (_ *historyservice.QueryWorkflowResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryQueryWorkflowScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowID := request.GetRequest().GetExecution().GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
resp, err2 := engine.QueryWorkflow(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return resp, nil
}
// ScheduleWorkflowTask is used for creating a workflow task for an already started workflow execution. This is mainly
// used by the transfer queue processor during the processing of a StartChildWorkflowExecution task, where it first starts
// the child execution without creating the workflow task and then calls this API after updating the mutable state of
// the parent execution.
func (h *Handler) ScheduleWorkflowTask(ctx context.Context, request *historyservice.ScheduleWorkflowTaskRequest) (_ *historyservice.ScheduleWorkflowTaskResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryScheduleWorkflowTaskScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
if request.WorkflowExecution == nil {
return nil, h.error(errWorkflowExecutionNotSet, scope, namespaceID, "")
}
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.ScheduleWorkflowTask(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.ScheduleWorkflowTaskResponse{}, nil
}
// RecordChildExecutionCompleted is used for reporting the completion of a child workflow execution to its parent.
// This is mainly called by the transfer queue processor during the processing of a DeleteExecution task.
func (h *Handler) RecordChildExecutionCompleted(ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest) (_ *historyservice.RecordChildExecutionCompletedResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRecordChildExecutionCompletedScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
if request.WorkflowExecution == nil {
return nil, h.error(errWorkflowExecutionNotSet, scope, namespaceID, "")
}
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.RecordChildExecutionCompleted(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.RecordChildExecutionCompletedResponse{}, nil
}
// ResetStickyTaskQueue resets the volatile information in the mutable state of a given workflow.
// Volatile information is the client-related information, such as:
// 1. StickyTaskQueue
// 2. StickyScheduleToStartTimeout
func (h *Handler) ResetStickyTaskQueue(ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest) (_ *historyservice.ResetStickyTaskQueueResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryResetStickyTaskQueueScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowID := request.Execution.GetWorkflowId()
engine, err := h.controller.GetEngine(namespaceID, workflowID)
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
resp, err := engine.ResetStickyTaskQueue(ctx, request)
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
return resp, nil
}
// ReplicateEventsV2 is called by processor to replicate history events for passive namespaces
func (h *Handler) ReplicateEventsV2(ctx context.Context, request *historyservice.ReplicateEventsV2Request) (_ *historyservice.ReplicateEventsV2Response, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
if h.isStopped() {
return nil, errShuttingDown
}
scope := metrics.HistoryReplicateEventsScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
namespaceID := request.GetNamespaceId()
if namespaceID == "" {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
workflowExecution := request.WorkflowExecution
workflowID := workflowExecution.GetWorkflowId()
engine, err1 := h.controller.GetEngine(namespaceID, workflowID)
if err1 != nil {
return nil, h.error(err1, scope, namespaceID, workflowID)
}
err2 := engine.ReplicateEventsV2(ctx, request)
if err2 != nil {
return nil, h.error(err2, scope, namespaceID, workflowID)
}
return &historyservice.ReplicateEventsV2Response{}, nil
}
// SyncShardStatus is called by processor to sync history shard information from another cluster
func (h *Handler) SyncShardStatus(ctx context.Context, request *historyservice.SyncShardStatusRequest) (_ *historyservice.SyncShardStatusResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistorySyncShardStatusScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, "", "")
}
if request.GetSourceCluster() == "" {
return nil, h.error(errSourceClusterNotSet, scope, "", "")
}
if request.GetShardId() == 0 {
return nil, h.error(errShardIDNotSet, scope, "", "")
}
if timestamp.TimeValue(request.GetStatusTime()).IsZero() {
return nil, h.error(errTimestampNotSet, scope, "", "")
}
// shard ID is already provided in the request
engine, err := h.controller.GetEngineForShard(int32(request.GetShardId()))
if err != nil {
return nil, h.error(err, scope, "", "")
}
err = engine.SyncShardStatus(ctx, request)
if err != nil {
return nil, h.error(err, scope, "", "")
}
return &historyservice.SyncShardStatusResponse{}, nil
}
// SyncActivity is called by processor to sync activity
func (h *Handler) SyncActivity(ctx context.Context, request *historyservice.SyncActivityRequest) (_ *historyservice.SyncActivityResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistorySyncActivityScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
if request.GetNamespaceId() == "" || uuid.Parse(request.GetNamespaceId()) == nil {
return nil, h.error(errNamespaceNotSet, scope, namespaceID, "")
}
if ok := h.rateLimiter.Allow(); !ok {
return nil, h.error(errHistoryHostThrottle, scope, namespaceID, "")
}
if request.GetWorkflowId() == "" {
return nil, h.error(errWorkflowIDNotSet, scope, namespaceID, "")
}
if request.GetRunId() == "" || uuid.Parse(request.GetRunId()) == nil {
return nil, h.error(errRunIDNotValid, scope, namespaceID, "")
}
workflowID := request.GetWorkflowId()
engine, err := h.controller.GetEngine(namespaceID, workflowID)
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
err = engine.SyncActivity(ctx, request)
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
return &historyservice.SyncActivityResponse{}, nil
}
// GetReplicationMessages is called by remote peers to get replicated messages for cross DC replication
func (h *Handler) GetReplicationMessages(ctx context.Context, request *historyservice.GetReplicationMessagesRequest) (_ *historyservice.GetReplicationMessagesResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
h.GetLogger().Debug("Received GetReplicationMessages call.")
scope := metrics.HistoryGetReplicationMessagesScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
var wg sync.WaitGroup
wg.Add(len(request.Tokens))
result := new(sync.Map)
for _, token := range request.Tokens {
go func(token *replicationspb.ReplicationToken) {
defer wg.Done()
engine, err := h.controller.GetEngineForShard(token.GetShardId())
if err != nil {
h.GetLogger().Warn("History engine not found for shard", tag.Error(err))
return
}
tasks, err := engine.GetReplicationMessages(
ctx,
request.GetClusterName(),
token.GetLastRetrievedMessageId(),
)
if err != nil {
h.GetLogger().Warn("Failed to get replication tasks for shard", tag.Error(err))
return
}
result.Store(token.GetShardId(), tasks)
}(token)
}
wg.Wait()
messagesByShard := make(map[int32]*replicationspb.ReplicationMessages)
result.Range(func(key, value interface{}) bool {
shardID := key.(int32)
tasks := value.(*replicationspb.ReplicationMessages)
messagesByShard[shardID] = tasks
return true
})
h.GetLogger().Debug("GetReplicationMessages succeeded.")
return &historyservice.GetReplicationMessagesResponse{ShardMessages: messagesByShard}, nil
}
// GetDLQReplicationMessages is called by remote peers to get replicated messages for DLQ merging
func (h *Handler) GetDLQReplicationMessages(ctx context.Context, request *historyservice.GetDLQReplicationMessagesRequest) (_ *historyservice.GetDLQReplicationMessagesResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryGetDLQReplicationMessagesScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
taskInfoPerShard := map[int32][]*replicationspb.ReplicationTaskInfo{}
// do batch based on workflow ID and run ID
for _, taskInfo := range request.GetTaskInfos() {
shardID := h.config.GetShardID(
taskInfo.GetNamespaceId(),
taskInfo.GetWorkflowId(),
)
if _, ok := taskInfoPerShard[shardID]; !ok {
taskInfoPerShard[shardID] = []*replicationspb.ReplicationTaskInfo{}
}
taskInfoPerShard[shardID] = append(taskInfoPerShard[shardID], taskInfo)
}
var wg sync.WaitGroup
wg.Add(len(taskInfoPerShard))
tasksChan := make(chan *replicationspb.ReplicationTask, len(request.GetTaskInfos()))
handleTaskInfoPerShard := func(taskInfos []*replicationspb.ReplicationTaskInfo) {
defer wg.Done()
if len(taskInfos) == 0 {
return
}
engine, err := h.controller.GetEngine(
taskInfos[0].GetNamespaceId(),
taskInfos[0].GetWorkflowId(),
)
if err != nil {
h.GetLogger().Warn("History engine not found for workflow ID.", tag.Error(err))
return
}
tasks, err := engine.GetDLQReplicationMessages(
ctx,
taskInfos,
)
if err != nil {
h.GetLogger().Error("Failed to get dlq replication tasks.", tag.Error(err))
return
}
for _, task := range tasks {
tasksChan <- task
}
}
for _, replicationTaskInfos := range taskInfoPerShard {
go handleTaskInfoPerShard(replicationTaskInfos)
}
wg.Wait()
close(tasksChan)
replicationTasks := make([]*replicationspb.ReplicationTask, 0, len(tasksChan))
for task := range tasksChan {
replicationTasks = append(replicationTasks, task)
}
return &historyservice.GetDLQReplicationMessagesResponse{
ReplicationTasks: replicationTasks,
}, nil
}
// ReapplyEvents applies stale events to the current workflow and the current run
func (h *Handler) ReapplyEvents(ctx context.Context, request *historyservice.ReapplyEventsRequest) (_ *historyservice.ReapplyEventsResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryReapplyEventsScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
workflowID := request.GetRequest().GetWorkflowExecution().GetWorkflowId()
engine, err := h.controller.GetEngine(namespaceID, workflowID)
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
// deserialize history event object
historyEvents, err := h.GetPayloadSerializer().DeserializeEvents(&commonpb.DataBlob{
EncodingType: enumspb.ENCODING_TYPE_PROTO3,
Data: request.GetRequest().GetEvents().GetData(),
})
if err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
execution := request.GetRequest().GetWorkflowExecution()
if err := engine.ReapplyEvents(
ctx,
request.GetNamespaceId(),
execution.GetWorkflowId(),
execution.GetRunId(),
historyEvents,
); err != nil {
return nil, h.error(err, scope, namespaceID, workflowID)
}
return &historyservice.ReapplyEventsResponse{}, nil
}
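// GetDLQMessages reads replication DLQ messages from the shard specified in the request.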
func (h *Handler) GetDLQMessages(ctx context.Context, request *historyservice.GetDLQMessagesRequest) (_ *historyservice.GetDLQMessagesResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryReadDLQMessagesScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
engine, err := h.controller.GetEngineForShard(request.GetShardId())
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
resp, err := engine.GetDLQMessages(ctx, request)
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
return resp, nil
}
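// PurgeDLQMessages deletes replication DLQ messages from the shard specified in the request.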
func (h *Handler) PurgeDLQMessages(ctx context.Context, request *historyservice.PurgeDLQMessagesRequest) (_ *historyservice.PurgeDLQMessagesResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryPurgeDLQMessagesScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
engine, err := h.controller.GetEngineForShard(request.GetShardId())
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
err = engine.PurgeDLQMessages(ctx, request)
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
return &historyservice.PurgeDLQMessagesResponse{}, nil
}
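// MergeDLQMessages merges replication DLQ messages back for processing on the shard specified in the request.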
func (h *Handler) MergeDLQMessages(ctx context.Context, request *historyservice.MergeDLQMessagesRequest) (_ *historyservice.MergeDLQMessagesResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
if h.isStopped() {
return nil, errShuttingDown
}
scope := metrics.HistoryMergeDLQMessagesScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
engine, err := h.controller.GetEngineForShard(request.GetShardId())
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
resp, err := engine.MergeDLQMessages(ctx, request)
if err != nil {
err = h.error(err, scope, "", "")
return nil, err
}
return resp, nil
}
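// RefreshWorkflowTasks refreshes all the tasks of the given workflow execution.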
func (h *Handler) RefreshWorkflowTasks(ctx context.Context, request *historyservice.RefreshWorkflowTasksRequest) (_ *historyservice.RefreshWorkflowTasksResponse, retError error) {
defer log.CapturePanic(h.GetLogger(), &retError)
h.startWG.Wait()
scope := metrics.HistoryRefreshWorkflowTasksScope
h.GetMetricsClient().IncCounter(scope, metrics.ServiceRequests)
sw := h.GetMetricsClient().StartTimer(scope, metrics.ServiceLatency)
defer sw.Stop()
if h.isStopped() {
return nil, errShuttingDown
}
namespaceID := request.GetNamespaceId()
execution := request.GetRequest().GetExecution()
workflowID := execution.GetWorkflowId()
engine, err := h.controller.GetEngine(namespaceID, workflowID)
if err != nil {
err = h.error(err, scope, namespaceID, workflowID)
return nil, err
}
err = engine.RefreshWorkflowTasks(
ctx,
namespaceID,
commonpb.WorkflowExecution{
WorkflowId: execution.WorkflowId,
RunId: execution.RunId,
},
)
if err != nil {
err = h.error(err, scope, namespaceID, workflowID)
return nil, err
}
return &historyservice.RefreshWorkflowTasksResponse{}, nil
}
// convertError is a helper method to convert the ShardOwnershipLostError returned by persistence-layer calls made
// through the various HistoryEngine APIs into the ShardOwnershipLost error returned by the HistoryService, so that
// the client can be redirected to the correct shard.
func (h *Handler) convertError(err error) error {
	switch typedErr := err.(type) {
	case *persistence.ShardOwnershipLostError:
		// Resolve the current owner of the shard so that the client can be redirected.
		info, lookupErr := h.GetHistoryServiceResolver().Lookup(convert.Int32ToString(typedErr.ShardID))
		if lookupErr == nil {
			return serviceerrors.NewShardOwnershipLost(h.GetHostInfo().GetAddress(), info.GetAddress())
		}
		return serviceerrors.NewShardOwnershipLost(h.GetHostInfo().GetAddress(), "<unknown>")
	case *persistence.WorkflowExecutionAlreadyStartedError:
		return serviceerror.NewInternal(typedErr.Msg)
	case *persistence.CurrentWorkflowConditionFailedError:
		return serviceerror.NewInternal(typedErr.Msg)
	case *persistence.TransactionSizeLimitError:
		return serviceerror.NewInvalidArgument(typedErr.Msg)
	}
return err
}
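// updateErrorMetric increments the error counter corresponding to the error type under the
// given metrics scope, and logs internal and uncategorized errors.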
func (h *Handler) updateErrorMetric(
scope int,
namespaceID string,
workflowID string,
err error,
) {
if common.IsContextDeadlineExceededErr(err) || common.IsContextCanceledErr(err) {
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrContextTimeoutCounter)
return
}
switch err := err.(type) {
case *serviceerrors.ShardOwnershipLost:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrShardOwnershipLostCounter)
case *serviceerrors.TaskAlreadyStarted:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrTaskAlreadyStartedCounter)
case *serviceerror.InvalidArgument:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrInvalidArgumentCounter)
case *serviceerror.NamespaceNotActive:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrInvalidArgumentCounter)
case *serviceerror.WorkflowExecutionAlreadyStarted:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrExecutionAlreadyStartedCounter)
case *serviceerror.NotFound:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrNotFoundCounter)
case *serviceerror.CancellationAlreadyRequested:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrCancellationAlreadyRequestedCounter)
case *serviceerror.ResourceExhausted:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrResourceExhaustedCounter)
case *serviceerrors.RetryReplication:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceErrRetryTaskCounter)
case *serviceerror.Internal:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceFailures)
h.GetLogger().Error("Internal service error",
tag.Error(err),
tag.WorkflowID(workflowID),
tag.WorkflowNamespaceID(namespaceID))
default:
h.GetMetricsClient().IncCounter(scope, metrics.ServiceFailures)
h.getLoggerWithTags(namespaceID, workflowID).Error("Uncategorized error", tag.Error(err))
}
}
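// error converts the given error via convertError, records the corresponding error metric,
// and returns the converted error.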
func (h *Handler) error(
err error,
scope int,
namespaceID string,
workflowID string,
) error {
err = h.convertError(err)
h.updateErrorMetric(scope, namespaceID, workflowID, err)
return err
}
func (h *Handler) getLoggerWithTags(
namespaceID string,
workflowID string,
) log.Logger {
logger := h.GetLogger()
if namespaceID != "" {
logger = logger.WithTags(tag.WorkflowNamespaceID(namespaceID))
}
if workflowID != "" {
logger = logger.WithTags(tag.WorkflowID(workflowID))
}
return logger
}
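// validateTaskToken checks that the task token carries a non-empty workflow ID.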
func validateTaskToken(taskToken *tokenspb.Task) error {
if taskToken.GetWorkflowId() == "" {
return errWorkflowIDNotSet
}
return nil
}
| 1 | 11,023 | Conceptual question: why do history and matching need throttler at all? Shouldn't throttling to be handled on FE only? | temporalio-temporal | go |
@@ -126,12 +126,14 @@ func (n *NetworkPolicyController) deleteCNP(old interface{}) {
n.deleteDereferencedAddressGroups(oldInternalNP)
}
-// reprocessCNP is triggered by Namespace ADD/UPDATE/DELETE events when they impact the
-// per-namespace rules of a CNP.
-func (n *NetworkPolicyController) reprocessCNP(cnp *crdv1alpha1.ClusterNetworkPolicy) {
+// reprocessCNP is triggered when a CNP may be impacted by non-ClusterNetworkPolicy events, including Namespace events
+// (for per-namespace rules) and ClusterGroup events (for ClusterGroup reference).
+func (n *NetworkPolicyController) reprocessCNP(cnp *crdv1alpha1.ClusterNetworkPolicy, enqueueAppliedToGroup bool) {
key := internalNetworkPolicyKeyFunc(cnp)
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, exist, _ := n.internalNetworkPolicyStore.Get(key)
+ // The internal NetworkPolicy may not have been created yet. It's fine to skip processing this CNP as addCNP will
+ // create it eventually.
if !exist {
klog.V(2).Infof("Cannot find the original internal NetworkPolicy, skip reprocessCNP")
n.internalNetworkPolicyMutex.Unlock() | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/apis/controlplane"
crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1"
"antrea.io/antrea/pkg/controller/networkpolicy/store"
antreatypes "antrea.io/antrea/pkg/controller/types"
utilsets "antrea.io/antrea/pkg/util/sets"
)
// addCNP receives ClusterNetworkPolicy ADD events and creates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) addCNP(obj interface{}) {
defer n.heartbeat("addCNP")
cnp := obj.(*crdv1alpha1.ClusterNetworkPolicy)
klog.Infof("Processing ClusterNetworkPolicy %s ADD event", cnp.Name)
// Create an internal NetworkPolicy object corresponding to this
// ClusterNetworkPolicy and enqueue task to internal NetworkPolicy Workqueue.
internalNP := n.processClusterNetworkPolicy(cnp)
klog.V(2).Infof("Creating new internal NetworkPolicy %s for %s", internalNP.Name, internalNP.SourceRef.ToString())
n.internalNetworkPolicyStore.Create(internalNP)
key := internalNetworkPolicyKeyFunc(cnp)
n.enqueueInternalNetworkPolicy(key)
}
// updateCNP receives ClusterNetworkPolicy UPDATE events and updates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) updateCNP(old, cur interface{}) {
defer n.heartbeat("updateCNP")
curCNP := cur.(*crdv1alpha1.ClusterNetworkPolicy)
klog.Infof("Processing ClusterNetworkPolicy %s UPDATE event", curCNP.Name)
// Update an internal NetworkPolicy, corresponding to this NetworkPolicy and
// enqueue task to internal NetworkPolicy Workqueue.
curInternalNP := n.processClusterNetworkPolicy(curCNP)
klog.V(2).Infof("Updating existing internal NetworkPolicy %s for %s", curInternalNP.Name, curInternalNP.SourceRef.ToString())
// Retrieve the old crdv1alpha1.ClusterNetworkPolicy object.
oldCNP := old.(*crdv1alpha1.ClusterNetworkPolicy)
// Old and current NetworkPolicy share the same key.
key := internalNetworkPolicyKeyFunc(oldCNP)
// Lock access to internal NetworkPolicy store such that concurrent access
// to an internal NetworkPolicy is not allowed. This will avoid the
// case in which an Update to an internal NetworkPolicy object may
// cause the SpanMeta member to be overridden with stale SpanMeta members
// from an older internal NetworkPolicy.
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
// Must preserve old internal NetworkPolicy Span.
curInternalNP.SpanMeta = oldInternalNP.SpanMeta
n.internalNetworkPolicyStore.Update(curInternalNP)
// Unlock the internal NetworkPolicy store.
n.internalNetworkPolicyMutex.Unlock()
// Enqueue addressGroup keys to update their Node span.
for _, rule := range curInternalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
}
n.enqueueInternalNetworkPolicy(key)
for _, atg := range oldInternalNP.AppliedToGroups {
// Delete the old AppliedToGroup object if it is not referenced
// by any internal NetworkPolicy.
n.deleteDereferencedAppliedToGroup(atg)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// deleteCNP receives ClusterNetworkPolicy DELETED events and deletes resources
// which can be consumed by agents to delete corresponding rules on the Nodes.
func (n *NetworkPolicyController) deleteCNP(old interface{}) {
cnp, ok := old.(*crdv1alpha1.ClusterNetworkPolicy)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting ClusterNetworkPolicy, invalid type: %v", old)
return
}
cnp, ok = tombstone.Obj.(*crdv1alpha1.ClusterNetworkPolicy)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting ClusterNetworkPolicy, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteCNP")
klog.Infof("Processing ClusterNetworkPolicy %s DELETE event", cnp.Name)
key := internalNetworkPolicyKeyFunc(cnp)
// Lock access to internal NetworkPolicy store so that concurrent reprocessCNP
// calls will not re-process and add a CNP that has already been deleted.
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
klog.V(2).Infof("Deleting internal NetworkPolicy %s for %s", oldInternalNP.Name, oldInternalNP.SourceRef.ToString())
err := n.internalNetworkPolicyStore.Delete(key)
n.internalNetworkPolicyMutex.Unlock()
if err != nil {
klog.Errorf("Error deleting internal NetworkPolicy during NetworkPolicy %s delete: %v", cnp.Name, err)
return
}
for _, atg := range oldInternalNP.AppliedToGroups {
n.deleteDereferencedAppliedToGroup(atg)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// reprocessCNP is triggered by Namespace ADD/UPDATE/DELETE events when they impact the
// per-namespace rules of a CNP.
func (n *NetworkPolicyController) reprocessCNP(cnp *crdv1alpha1.ClusterNetworkPolicy) {
key := internalNetworkPolicyKeyFunc(cnp)
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, exist, _ := n.internalNetworkPolicyStore.Get(key)
if !exist {
klog.V(2).Infof("Cannot find the original internal NetworkPolicy, skip reprocessCNP")
n.internalNetworkPolicyMutex.Unlock()
return
}
defer n.heartbeat("reprocessCNP")
klog.Infof("Processing ClusterNetworkPolicy %s REPROCESS event", cnp.Name)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
curInternalNP := n.processClusterNetworkPolicy(cnp)
// Must preserve old internal NetworkPolicy Span.
curInternalNP.SpanMeta = oldInternalNP.SpanMeta
n.internalNetworkPolicyStore.Update(curInternalNP)
n.internalNetworkPolicyMutex.Unlock()
// Enqueue addressGroup keys to update their Node span.
for _, rule := range curInternalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
}
n.enqueueInternalNetworkPolicy(key)
for _, atg := range oldInternalNP.AppliedToGroups {
// Delete the old AppliedToGroup object if it is not referenced
// by any internal NetworkPolicy.
n.deleteDereferencedAppliedToGroup(atg)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// filterPerNamespaceRuleACNPsByNSLabels gets all ClusterNetworkPolicy names that will need to be
// re-processed based on the entire label set of an added/updated/deleted Namespace.
func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels labels.Set) sets.String {
n.internalNetworkPolicyMutex.Lock()
defer n.internalNetworkPolicyMutex.Unlock()
affectedPolicies := sets.String{}
nps, err := n.internalNetworkPolicyStore.GetByIndex(store.PerNamespaceRuleIndex, store.HasPerNamespaceRule)
if err != nil {
klog.Errorf("Error fetching internal NetworkPolicies that have per-Namespace rules: %v", err)
return affectedPolicies
}
for _, np := range nps {
internalNP := np.(*antreatypes.NetworkPolicy)
for _, sel := range internalNP.PerNamespaceSelectors {
if sel.Matches(nsLabels) {
affectedPolicies.Insert(internalNP.SourceRef.Name)
break
}
}
}
return affectedPolicies
}
// addNamespace receives Namespace ADD events and triggers all ClusterNetworkPolicies that have a
// per-namespace rule applied to this Namespace to be re-processed.
func (n *NetworkPolicyController) addNamespace(obj interface{}) {
defer n.heartbeat("addNamespace")
namespace := obj.(*v1.Namespace)
klog.V(2).Infof("Processing Namespace %s ADD event, labels: %v", namespace.Name, namespace.Labels)
affectedACNPs := n.filterPerNamespaceRuleACNPsByNSLabels(namespace.Labels)
for cnpName := range affectedACNPs {
if cnp, err := n.cnpLister.Get(cnpName); err == nil {
n.reprocessCNP(cnp)
}
}
}
// updateNamespace receives Namespace UPDATE events and triggers all ClusterNetworkPolicies that have a
// per-namespace rule applied to either the original or the new Namespace to be re-processed.
func (n *NetworkPolicyController) updateNamespace(oldObj, curObj interface{}) {
defer n.heartbeat("updateNamespace")
oldNamespace, curNamespace := oldObj.(*v1.Namespace), curObj.(*v1.Namespace)
klog.V(2).Infof("Processing Namespace %s UPDATE event, labels: %v", curNamespace.Name, curNamespace.Labels)
oldLabelSet, curLabelSet := labels.Set(oldNamespace.Labels), labels.Set(curNamespace.Labels)
affectedACNPsByOldLabels := n.filterPerNamespaceRuleACNPsByNSLabels(oldLabelSet)
affectedACNPsByCurLabels := n.filterPerNamespaceRuleACNPsByNSLabels(curLabelSet)
affectedACNPs := utilsets.SymmetricDifferenceString(affectedACNPsByOldLabels, affectedACNPsByCurLabels)
for cnpName := range affectedACNPs {
if cnp, err := n.cnpLister.Get(cnpName); err == nil {
n.reprocessCNP(cnp)
}
}
}
// deleteNamespace receives Namespace DELETE events and triggers all ClusterNetworkPolicies that have a
// per-namespace rule applied to this Namespace to be re-processed.
func (n *NetworkPolicyController) deleteNamespace(old interface{}) {
namespace, ok := old.(*v1.Namespace)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting Namespace, invalid type: %v", old)
return
}
namespace, ok = tombstone.Obj.(*v1.Namespace)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting Namespace, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteNamespace")
klog.V(2).Infof("Processing Namespace %s DELETE event, labels: %v", namespace.Name, namespace.Labels)
affectedACNPs := n.filterPerNamespaceRuleACNPsByNSLabels(labels.Set(namespace.Labels))
for _, cnpName := range affectedACNPs.List() {
cnp, err := n.cnpLister.Get(cnpName)
if err != nil {
klog.Errorf("Error getting Antrea ClusterNetworkPolicy %s", cnpName)
continue
}
n.reprocessCNP(cnp)
}
}
// processClusterNetworkPolicy creates an internal NetworkPolicy instance
// corresponding to the crdv1alpha1.ClusterNetworkPolicy object. This method
// does not commit the internal NetworkPolicy in store, instead returns an
// instance to the caller wherein, it will be either stored as a new Object
// in case of ADD event or modified and store the updated instance, in case
// of an UPDATE event.
func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.ClusterNetworkPolicy) *antreatypes.NetworkPolicy {
hasPerNamespaceRule := hasPerNamespaceRule(cnp)
// If one of the ACNP rules is a per-namespace rule (a peer in that rule has namespaces.Match set
// to Self), the policy will need to be converted to an appliedTo-per-rule policy, as the appliedTo
// will be different for the rules created for each namespace.
appliedToPerRule := len(cnp.Spec.AppliedTo) == 0 || hasPerNamespaceRule
// atgNamesSet tracks all distinct appliedToGroups referred to by the ClusterNetworkPolicy,
// either in the spec section or in ingress/egress rules.
// The span calculation and stale appliedToGroup cleanup logic would work seamlessly for both cases.
atgNamesSet := sets.String{}
// affectedNamespaceSelectors tracks all the appliedTo namespaceSelectors of per-namespace rules.
// It is used by the PerNamespaceRuleIndex of the internalNetworkPolicyStore to filter out internal NPs
// that have per-namespace rules; on Namespace ADD/UPDATE/DELETE events, it triggers ACNPs that select
// this Namespace's labels to be re-processed, and the corresponding rules to re-calculate the affected Namespaces.
var affectedNamespaceSelectors []labels.Selector
// If appliedTo is set at spec level and the ACNP has per-namespace rules, then each appliedTo needs
// to be split into appliedToGroups for each of its affected Namespace.
var clusterAppliedToAffectedNS []string
// atgForNamespace is the list of appliedToGroups split by Namespace.
var atgForNamespace []string
if hasPerNamespaceRule && len(cnp.Spec.AppliedTo) > 0 {
for _, at := range cnp.Spec.AppliedTo {
affectedNS, selectors := n.getAffectedNamespacesForAppliedTo(at)
affectedNamespaceSelectors = append(affectedNamespaceSelectors, selectors...)
for _, ns := range affectedNS {
atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector)
atgNamesSet.Insert(atg)
clusterAppliedToAffectedNS = append(clusterAppliedToAffectedNS, ns)
atgForNamespace = append(atgForNamespace, atg)
}
}
}
var rules []controlplane.NetworkPolicyRule
processRules := func(cnpRules []crdv1alpha1.Rule, direction controlplane.Direction) {
for idx, cnpRule := range cnpRules {
services, namedPortExists := toAntreaServicesForCRD(cnpRule.Ports)
clusterPeers, perNSPeers := splitPeersByScope(cnpRule, direction)
addRule := func(peer *controlplane.NetworkPolicyPeer, dir controlplane.Direction, ruleAppliedTos []string) {
rule := controlplane.NetworkPolicyRule{
Direction: dir,
Services: services,
Name: cnpRule.Name,
Action: cnpRule.Action,
Priority: int32(idx),
EnableLogging: cnpRule.EnableLogging,
AppliedToGroups: ruleAppliedTos,
}
if dir == controlplane.DirectionIn {
rule.From = *peer
} else if dir == controlplane.DirectionOut {
rule.To = *peer
}
rules = append(rules, rule)
}
// When a rule's NetworkPolicyPeer is empty, a cluster level rule should be created
// with an Antrea peer matching all addresses.
if len(clusterPeers) > 0 || len(perNSPeers) == 0 {
ruleAppliedTos := cnpRule.AppliedTo
// For ACNPs that have per-namespace rules, cluster-level rules will be created with appliedTo
// set as the spec appliedTo for each rule.
if appliedToPerRule && len(cnp.Spec.AppliedTo) > 0 {
ruleAppliedTos = cnp.Spec.AppliedTo
}
ruleATGNames := n.processClusterAppliedTo(ruleAppliedTos, atgNamesSet)
klog.V(4).Infof("Adding a new cluster-level rule with appliedTos %v for %s", ruleATGNames, cnp.Name)
addRule(n.toAntreaPeerForCRD(clusterPeers, cnp, direction, namedPortExists), direction, ruleATGNames)
}
if len(perNSPeers) > 0 {
if len(cnp.Spec.AppliedTo) > 0 {
// Create a rule for each affected Namespace of appliedTo at spec level
for i := range clusterAppliedToAffectedNS {
klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", clusterAppliedToAffectedNS[i], idx, cnp.Name)
addRule(n.toNamespacedPeerForCRD(perNSPeers, clusterAppliedToAffectedNS[i]), direction, []string{atgForNamespace[i]})
}
} else {
// Create a rule for each affected Namespace of appliedTo at rule level
for _, at := range cnpRule.AppliedTo {
affectedNS, selectors := n.getAffectedNamespacesForAppliedTo(at)
affectedNamespaceSelectors = append(affectedNamespaceSelectors, selectors...)
for _, ns := range affectedNS {
atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector)
atgNamesSet.Insert(atg)
klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name)
addRule(n.toNamespacedPeerForCRD(perNSPeers, ns), direction, []string{atg})
}
}
}
}
}
}
// Compute NetworkPolicyRules for Ingress Rules.
processRules(cnp.Spec.Ingress, controlplane.DirectionIn)
// Compute NetworkPolicyRules for Egress Rules.
processRules(cnp.Spec.Egress, controlplane.DirectionOut)
// Create AppliedToGroup for each AppliedTo present in ClusterNetworkPolicy spec.
if !hasPerNamespaceRule {
n.processClusterAppliedTo(cnp.Spec.AppliedTo, atgNamesSet)
}
tierPriority := n.getTierPriority(cnp.Spec.Tier)
internalNetworkPolicy := &antreatypes.NetworkPolicy{
Name: internalNetworkPolicyKeyFunc(cnp),
Generation: cnp.Generation,
SourceRef: &controlplane.NetworkPolicyReference{
Type: controlplane.AntreaClusterNetworkPolicy,
Name: cnp.Name,
UID: cnp.UID,
},
UID: cnp.UID,
AppliedToGroups: atgNamesSet.List(),
Rules: rules,
Priority: &cnp.Spec.Priority,
TierPriority: &tierPriority,
AppliedToPerRule: appliedToPerRule,
PerNamespaceSelectors: getUniqueNSSelectors(affectedNamespaceSelectors),
}
return internalNetworkPolicy
}
// hasPerNamespaceRule returns true if there is at least one per-namespace rule
func hasPerNamespaceRule(cnp *crdv1alpha1.ClusterNetworkPolicy) bool {
for _, ingress := range cnp.Spec.Ingress {
for _, peer := range ingress.From {
if peer.Namespaces != nil && peer.Namespaces.Match == crdv1alpha1.NamespaceMatchSelf {
return true
}
}
}
for _, egress := range cnp.Spec.Egress {
for _, peer := range egress.To {
if peer.Namespaces != nil && peer.Namespaces.Match == crdv1alpha1.NamespaceMatchSelf {
return true
}
}
}
return false
}
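// For illustration, a hedged sketch of a peer that makes hasPerNamespaceRule
// return true (hypothetical helper; assumes the Namespaces field is of type
// crdv1alpha1.PeerNamespaces, as suggested by its usage above):
func examplePerNamespacePeer() crdv1alpha1.NetworkPolicyPeer {
	return crdv1alpha1.NetworkPolicyPeer{
		// Match set to Self turns the enclosing rule into a per-namespace rule.
		Namespaces: &crdv1alpha1.PeerNamespaces{Match: crdv1alpha1.NamespaceMatchSelf},
	}
}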
// processClusterAppliedTo processes the appliedTo groups of an Antrea ClusterNetworkPolicy that are set
// at the cluster level (appliedTo groups which do not need to be split by Namespace).
func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1alpha1.NetworkPolicyPeer, appliedToGroupNamesSet sets.String) []string {
var appliedToGroupNames []string
for _, at := range appliedTo {
var atg string
if at.Group != "" {
atg = n.processAppliedToGroupForCG(at.Group)
} else {
atg = n.createAppliedToGroup("", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector)
}
if atg != "" {
appliedToGroupNames = append(appliedToGroupNames, atg)
appliedToGroupNamesSet.Insert(atg)
}
}
return appliedToGroupNames
}
// splitPeersByScope splits the ClusterNetworkPolicy peers in the rule by whether the peer
// is cluster-scoped or per-namespace.
func splitPeersByScope(rule crdv1alpha1.Rule, dir controlplane.Direction) ([]crdv1alpha1.NetworkPolicyPeer, []crdv1alpha1.NetworkPolicyPeer) {
var clusterPeers, perNSPeers []crdv1alpha1.NetworkPolicyPeer
peers := rule.From
if dir == controlplane.DirectionOut {
peers = rule.To
}
for _, peer := range peers {
if peer.Namespaces != nil && peer.Namespaces.Match == crdv1alpha1.NamespaceMatchSelf {
perNSPeers = append(perNSPeers, peer)
} else {
clusterPeers = append(clusterPeers, peer)
}
}
return clusterPeers, perNSPeers
}
// getAffectedNamespacesForAppliedTo computes the Namespaces currently affected by the appliedTo
// Namespace selectors. It also returns the list of Namespace selectors used to compute affected
// Namespaces.
func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1alpha1.NetworkPolicyPeer) ([]string, []labels.Selector) {
var affectedNS []string
var affectedNamespaceSelectors []labels.Selector
nsLabelSelector := appliedTo.NamespaceSelector
if appliedTo.Group != "" {
cg, err := n.cgLister.Get(appliedTo.Group)
if err != nil {
return affectedNS, affectedNamespaceSelectors
}
if cg.Spec.NamespaceSelector != nil || cg.Spec.PodSelector != nil {
nsLabelSelector = cg.Spec.NamespaceSelector
}
}
nsSel, _ := metav1.LabelSelectorAsSelector(nsLabelSelector)
// An empty nsLabelSelector means select from all Namespaces
if nsLabelSelector == nil {
nsSel = labels.Everything()
}
affectedNamespaceSelectors = append(affectedNamespaceSelectors, nsSel)
namespaces, _ := n.namespaceLister.List(nsSel)
for _, ns := range namespaces {
affectedNS = append(affectedNS, ns.Name)
}
return affectedNS, affectedNamespaceSelectors
}
// getUniqueNSSelectors dedups the Namespace selectors, which are used as indexes to re-process
// affected ClusterNetworkPolicies when there are Namespace CRUD events. Note that when there is
// an empty selector in the list, this function simply returns a list with only one empty
// selector, because all Namespace events will affect this ClusterNetworkPolicy no matter
// what the other Namespace selectors are.
func getUniqueNSSelectors(selectors []labels.Selector) []labels.Selector {
selectorStrings := sets.String{}
i := 0
for _, sel := range selectors {
if sel.Empty() {
return []labels.Selector{labels.Everything()}
}
if selectorStrings.Has(sel.String()) {
continue
}
selectorStrings.Insert(sel.String())
selectors[i] = sel
i++
}
return selectors[:i]
}
// processRefCG processes the ClusterGroup reference present in the rule and returns the
// NetworkPolicyPeer with the corresponding AddressGroup or IPBlock.
func (n *NetworkPolicyController) processRefCG(g string) (string, []controlplane.IPBlock) {
// Retrieve ClusterGroup for corresponding entry in the rule.
cg, err := n.cgLister.Get(g)
if err != nil {
// The ClusterGroup referred to has not been created yet.
return "", nil
}
key := internalGroupKeyFunc(cg)
// Find the internal Group corresponding to this ClusterGroup
ig, found, _ := n.internalGroupStore.Get(key)
if !found {
// Internal Group was not found. Once the internal Group is created, the sync
// worker for internal group will re-enqueue the ClusterNetworkPolicy processing
// which will trigger the creation of AddressGroup.
return "", nil
}
intGrp := ig.(*antreatypes.Group)
if len(intGrp.IPBlocks) > 0 {
return "", intGrp.IPBlocks
}
agKey := n.createAddressGroupForClusterGroupCRD(intGrp)
// Return if addressGroup was created or found.
return agKey, nil
}
func (n *NetworkPolicyController) processAppliedToGroupForCG(g string) string {
// Retrieve ClusterGroup for corresponding entry in the AppliedToGroup.
cg, err := n.cgLister.Get(g)
if err != nil {
// The ClusterGroup referred to has not been created yet.
return ""
}
key := internalGroupKeyFunc(cg)
// Find the internal Group corresponding to this ClusterGroup
ig, found, _ := n.internalGroupStore.Get(key)
if !found {
// Internal Group was not found. Once the internal Group is created, the sync
// worker for internal group will re-enqueue the ClusterNetworkPolicy processing
// which will trigger the creation of AddressGroup.
return ""
}
intGrp := ig.(*antreatypes.Group)
if len(intGrp.IPBlocks) > 0 {
klog.V(2).Infof("ClusterGroup %s with IPBlocks will not be processed as AppliedTo", g)
return ""
}
return n.createAppliedToGroupForClusterGroupCRD(intGrp)
}
| 1 | 45,214 | Would this be possible: `addCNP` has already processed the CNP to an internalNP, just hasn't added this internalNP to the `internalNetworkPolicyStore`. In this case, `reprocessCNP` will skip processing this CNP and `addCNP` will just add the "old" internalNP to `internalNetworkPolicyStore`. | antrea-io-antrea | go |
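To make the interleaving described in this comment concrete, here is a minimal, self-contained Go sketch of the same check-then-act race. The store type, goroutine bodies, and key names are toy stand-ins labelled after the comment's `addCNP`/`reprocessCNP`, not Antrea APIs; it only demonstrates why a skip-if-absent check can lose the update.

```go
package main

import (
	"fmt"
	"sync"
)

// Toy stand-in for internalNetworkPolicyStore; only the check-then-act
// shape of the race matters, none of the Antrea APIs are used.
type store struct {
	mu sync.Mutex
	m  map[string]string
}

func (s *store) get(k string) (string, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	v, ok := s.m[k]
	return v, ok
}

func (s *store) put(k, v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[k] = v
}

func main() {
	s := &store{m: map[string]string{}}
	var wg sync.WaitGroup
	wg.Add(2)

	// addCNP: has already computed the internal policy from the old spec,
	// but has not stored it yet.
	go func() {
		defer wg.Done()
		s.put("cnp-a", "old-internalNP") // may store the stale object last
	}()

	// reprocessCNP: checks the store first; if the key is absent it skips,
	// so the updated spec is never reflected.
	go func() {
		defer wg.Done()
		if _, ok := s.get("cnp-a"); !ok {
			return
		}
		s.put("cnp-a", "new-internalNP")
	}()

	wg.Wait()
	v, _ := s.get("cnp-a")
	fmt.Println(v) // with the unlucky interleaving: "old-internalNP"
}
```

Run it a few times (or under `go run -race`) and the "old" value can win, which is exactly the lost update the reviewer is asking about.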
@@ -22,6 +22,11 @@ module CartsHelper
current_linear_approval?(cart, user)
end
+ # Todo: Move this to an NCR specific template?
+ def display_restart?(cart, user)
+ cart.ncr? && user == cart.requester && (cart.pending? || cart.rejected?)
+ end
+
def parallel_approval_is_pending?(cart, user)
return false unless cart.parallel?
if approval = Approval.find_by(cart_id: cart.id, user_id: user.id) | 1 | module CartsHelper
# need to pass in the user because the current_user controller helper can't be stubbed
# https://github.com/rspec/rspec-rails/issues/1076
def display_status(cart, user)
if cart.pending?
approvers = cart.currently_awaiting_approvers
if approvers.include?(user)
content_tag('strong', "Please review")
else
names = approvers.map{|approver| approver.full_name }
content_tag('em', "Waiting for review from:") + ' ' + names.join(', ')
end
else
cart.status.titlecase
end
end
def display_response_actions?(cart, user)
return false unless user.approver_of? cart
parallel_approval_is_pending?(cart, user) ||
current_linear_approval?(cart, user)
end
def parallel_approval_is_pending?(cart, user)
return false unless cart.parallel?
if approval = Approval.find_by(cart_id: cart.id, user_id: user.id)
approval.pending?
else
false
end
end
def current_linear_approval?(cart, user)
approval = Approval.find_by(cart_id: cart.id, user_id: user.id)
cart.linear? && cart.ordered_awaiting_approvals.first == approval
end
end
| 1 | 12,507 | Minor: you can access `current_user` in here directly - don't need to pass it in. Unless you prefer passing it explicitly? | 18F-C2 | rb |
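For reference, the reviewer's suggestion would make the new helper look roughly like the sketch below. This assumes `current_user` is exposed to helpers (e.g. via `helper_method`) as in a typical Rails app; the file's own top comment explains why the author passed the user in explicitly (stubbing `current_user` in helper specs is awkward, per rspec-rails#1076).

```ruby
# Hypothetical variant relying on the controller-provided current_user
# instead of an explicit user argument.
def display_restart?(cart)
  cart.ncr? && current_user == cart.requester && (cart.pending? || cart.rejected?)
end
```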
@@ -45,6 +45,7 @@ module Mongoid
# @param [ Hash ] attributes The attributes hash to attempt to set.
# @param [ Hash ] options The options defined.
def initialize(association, attributes, options = {})
+ attributes = attributes&.to_h if attributes.respond_to?(:keys)
if attributes.respond_to?(:with_indifferent_access)
@attributes = attributes.with_indifferent_access.sort do |a, b|
a[0].to_i <=> b[0].to_i | 1 | # frozen_string_literal: true
# encoding: utf-8
module Mongoid
module Association
module Nested
class Many
include Buildable
# Builds the association depending on the attributes and the options
# passed to the macro.
#
# This attempts to perform one of 3 operations: an update of
# the existing association, a replacement of the association with a new
# document, or a removal of the association.
#
# @example Build the nested attrs.
# many.build(person)
#
# @param [ Document ] parent The parent document of the association.
# @param [ Hash ] options The options.
#
# @return [ Array ] The attributes.
def build(parent, options = {})
@existing = parent.send(association.name)
if over_limit?(attributes)
raise Errors::TooManyNestedAttributeRecords.new(existing, options[:limit])
end
attributes.each do |attrs|
if attrs.is_a?(::Hash)
process_attributes(parent, attrs.with_indifferent_access)
else
process_attributes(parent, attrs[1].with_indifferent_access)
end
end
end
# Create the new builder for nested attributes on one-to-many
# associations.
#
# @example Initialize the builder.
# Many.new(association, attributes, options)
#
# @param [ Association ] association The association metadata.
# @param [ Hash ] attributes The attributes hash to attempt to set.
# @param [ Hash ] options The options defined.
def initialize(association, attributes, options = {})
if attributes.respond_to?(:with_indifferent_access)
@attributes = attributes.with_indifferent_access.sort do |a, b|
a[0].to_i <=> b[0].to_i
end
else
@attributes = attributes
end
@association = association
@options = options
@class_name = options[:class_name] ? options[:class_name].constantize : association.klass
end
private
# Can the existing association potentially be deleted?
#
# @example Is the document destroyable?
# destroyable?({ :_destroy => "1" })
#
# @param [ Hash ] attributes The attributes to pull the flag from.
#
# @return [ true, false ] If the association can potentially be deleted.
def destroyable?(attributes)
destroy = attributes.delete(:_destroy)
Nested::DESTROY_FLAGS.include?(destroy) && allow_destroy?
end
# Are the supplied attributes of greater number than the supplied
# limit?
#
# @example Are we over the set limit?
# builder.over_limit?({ "street" => "Bond" })
#
# @param [ Hash ] attributes The attributes being set.
#
# @return [ true, false ] If the attributes exceed the limit.
def over_limit?(attributes)
limit = options[:limit]
limit ? attributes.size > limit : false
end
# Process each set of attributes one at a time for each potential
# new, existing, or ignored document.
#
# @api private
#
# @example Process the attributes
# builder.process_attributes({ "id" => 1, "street" => "Bond" })
#
# @param [ Document ] parent The parent document.
# @param [ Hash ] attrs The single document attributes to process.
#
# @since 2.0.0
def process_attributes(parent, attrs)
return if reject?(parent, attrs)
if id = attrs.extract_id
update_nested_relation(parent, id, attrs)
else
existing.push(Factory.build(@class_name, attrs)) unless destroyable?(attrs)
end
end
# Destroy the child document, needs to do some checking for embedded
# associations and delay the destroy in case parent validation fails.
#
# @api private
#
# @example Destroy the child.
# builder.destroy(parent, relation, doc)
#
# @param [ Document ] parent The parent document.
# @param [ Proxy ] relation The association proxy.
# @param [ Document ] doc The doc to destroy.
#
# @since 3.0.10
def destroy(parent, relation, doc)
doc.flagged_for_destroy = true
if !doc.embedded? || parent.new_record?
destroy_document(relation, doc)
else
parent.flagged_destroys.push(-> { destroy_document(relation, doc) })
end
end
# Destroy the document.
#
# @api private
#
# @example Destroy the document.
# builder.destroy_document(relation, doc)
#
# @param [ Proxy ] relation The association proxy.
# @param [ Document ] doc The document to delete.
#
# @since 3.0.10
def destroy_document(relation, doc)
relation.delete(doc)
doc.destroy unless doc.embedded? || doc.destroyed?
end
# Update the document.
#
# @api private
#
# @example Update the document.
# builder.update_document(doc, {}, options)
#
# @param [ Document ] doc The document to update.
# @param [ Hash ] attrs The attributes.
#
# @since 3.0.10
def update_document(doc, attrs)
attrs.delete_id
if association.embedded?
doc.assign_attributes(attrs)
else
doc.update_attributes(attrs)
end
end
# Update nested association.
#
# @api private
#
# @example Update nested association.
# builder.update_nested_relation(parent, id, attrs)
#
# @param [ Document ] parent The parent document.
# @param [ String, BSON::ObjectId ] id of the related document.
# @param [ Hash ] attrs The single document attributes to process.
#
# @since 6.0.0
def update_nested_relation(parent, id, attrs)
first = existing.first
converted = first ? convert_id(first.class, id) : id
if existing.where(_id: converted).exists?
# document exists in association
doc = existing.find(converted)
if destroyable?(attrs)
destroy(parent, existing, doc)
else
update_document(doc, attrs)
end
else
# push existing document to association
doc = association.klass.unscoped.find(converted)
update_document(doc, attrs)
existing.push(doc) unless destroyable?(attrs)
end
end
end
end
end
end
| 1 | 13,154 | What is the purpose of `&` on this line? | mongodb-mongoid | rb |
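Answering the question for the record: the `&` is part of Ruby's safe-navigation operator `&.` (Ruby 2.3+). `attributes&.to_h` calls `to_h` when `attributes` is non-nil and returns nil otherwise, instead of raising NoMethodError. A standalone illustration:

```ruby
attributes = nil
attributes&.to_h  # => nil, no NoMethodError raised

attributes = { "0" => { "street" => "Bond" } }
attributes&.to_h  # => { "0" => { "street" => "Bond" } }
```

Since the trailing `if attributes.respond_to?(:keys)` guard on the patched line already rules out nil (nil does not respond to `keys`), the `&.` there is effectively redundant.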
@@ -58,5 +58,5 @@ export function isValidPropertySelection( value ) {
* @return {boolean} TRUE if the web data stream ID is valid, otherwise FALSE.
*/
export function isValidWebDataStreamID( webDataStreamID ) {
- return typeof webDataStreamID === 'string' && /\d+/.test( webDataStreamID );
+ return typeof webDataStreamID === 'string' && /^[a-zA-Z0-9]+$/.test( webDataStreamID );
} | 1 | /**
* Validation utilities.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import { PROPERTY_CREATE } from '../datastore/constants';
/**
* Checks whether the given property ID appears to be valid.
*
* @since 1.31.0
*
* @param {*} propertyID Property ID to check.
* @return {boolean} Whether or not the given property ID is valid.
*/
export function isValidPropertyID( propertyID ) {
return typeof propertyID === 'string' && /^\w+$/.test( propertyID );
}
/**
* Checks if the given value is a valid selection for a Property.
*
* @since 1.31.0
*
* @param {?string} value Selected value.
* @return {boolean} True if valid, otherwise false.
*/
export function isValidPropertySelection( value ) {
if ( value === PROPERTY_CREATE ) {
return true;
}
return isValidPropertyID( value );
}
/**
* Checks whether the given web data stream ID appears to be valid.
*
* @since 1.33.0
*
* @param {*} webDataStreamID Web data stream ID to check.
* @return {boolean} TRUE if the web data stream ID is valid, otherwise FALSE.
*/
export function isValidWebDataStreamID( webDataStreamID ) {
return typeof webDataStreamID === 'string' && /\d+/.test( webDataStreamID );
}
| 1 | 38,861 | See above, the `isValidWebDataStreamID` implementation from before is actually correct. What we need here instead is a new `isValidMeasurementID` function. | google-site-kit-wp | js |
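A possible shape for the `isValidMeasurementID` helper the reviewer asks for, following the JSDoc style of this file. The `G-` prefix pattern reflects the usual GA4 measurement ID format and is an assumption here, not something taken from the source:

```js
/**
 * Checks whether the given measurement ID appears to be valid.
 *
 * @param {*} measurementID Measurement ID to check.
 * @return {boolean} TRUE if the measurement ID is valid, otherwise FALSE.
 */
export function isValidMeasurementID( measurementID ) {
	// Assumed GA4 format: a "G-" prefix followed by alphanumerics.
	return typeof measurementID === 'string' && /^G-[a-zA-Z0-9]+$/.test( measurementID );
}
```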
@@ -628,7 +628,12 @@ func (s *Server) createLeafNode(conn net.Conn, remote *leafNodeCfg) *client {
c.mu.Unlock()
// Error will be handled below, so ignore here.
- c.parse([]byte(info))
+ err = c.parse([]byte(info))
+ if err != nil {
+ c.Debugf("Error reading remote leafnode's INFO: %s", err)
+ c.closeConnection(ReadError)
+ return nil
+ }
c.mu.Lock()
if !c.flags.isSet(infoReceived) { | 1 | // Copyright 2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/url"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
// Warning when user configures leafnode TLS insecure
const leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
// When a loop is detected, delay the reconnect of solicited connection.
const leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
// Prefix for loop detection subject
const leafNodeLoopDetectionSubjectPrefix = "lds."
type leaf struct {
// Used to suppress sub and unsub interest. Same as routes but our audience
// here is tied to this leaf node. This will hold all subscriptions except this
// leaf nodes. This represents all the interest we want to send to the other side.
smap map[string]int32
// We have any auth stuff here for solicited connections.
remote *leafNodeCfg
}
// Used for remote (solicited) leafnodes.
type leafNodeCfg struct {
sync.RWMutex
*RemoteLeafOpts
urls []*url.URL
curURL *url.URL
tlsName string
username string
password string
loopDelay time.Duration // A loop condition was detected
}
// Check to see if this is a solicited leafnode. We do special processing for solicited.
func (c *client) isSolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote != nil
}
func (c *client) isUnsolicitedLeafNode() bool {
return c.kind == LEAF && c.leaf.remote == nil
}
// This will spin up go routines to solicit the remote leaf node connections.
func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) {
for _, r := range remotes {
remote := newLeafNodeCfg(r)
s.startGoRoutine(func() { s.connectToRemoteLeafNode(remote, true) })
}
}
func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool {
for _, ri := range s.getOpts().LeafNode.Remotes {
// FIXME(dlc) - What about auth changes?
if reflect.DeepEqual(ri.URLs, remote.URLs) {
return true
}
}
return false
}
// Ensure that leafnode is properly configured.
func validateLeafNode(o *Options) error {
if err := validateLeafNodeAuthOptions(o); err != nil {
return err
}
if o.LeafNode.Port == 0 {
return nil
}
if o.Gateway.Name == "" && o.Gateway.Port == 0 {
return nil
}
// If we are here we have both leaf nodes and gateways defined, make sure there
// is a system account defined.
if o.SystemAccount == "" {
return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured")
}
return nil
}
// Used to validate user names in LeafNode configuration.
// - rejects mix of single and multiple users.
// - rejects duplicate user names.
func validateLeafNodeAuthOptions(o *Options) error {
if len(o.LeafNode.Users) == 0 {
return nil
}
if o.LeafNode.Username != _EMPTY_ {
return fmt.Errorf("can not have a single user/pass and a users array")
}
users := map[string]struct{}{}
for _, u := range o.LeafNode.Users {
if _, exists := users[u.Username]; exists {
return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username)
}
users[u.Username] = struct{}{}
}
return nil
}
func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) {
delay := s.getOpts().LeafNode.ReconnectInterval
select {
case <-time.After(delay):
case <-s.quitCh:
s.grWG.Done()
return
}
s.connectToRemoteLeafNode(remote, false)
}
// Creates a leafNodeCfg object that wraps the RemoteLeafOpts.
func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
cfg := &leafNodeCfg{
RemoteLeafOpts: remote,
urls: make([]*url.URL, 0, len(remote.URLs)),
}
// Start with the one that is configured. We will add to this
// array when receiving async leafnode INFOs.
cfg.urls = append(cfg.urls, cfg.URLs...)
// If we are TLS make sure we save off a proper servername if possible.
// Do same for user/password since we may need them to connect to
// a bare URL that we get from INFO protocol.
for _, u := range cfg.urls {
cfg.saveTLSHostname(u)
cfg.saveUserPassword(u)
}
return cfg
}
// Will pick a URL from the list of available URLs.
func (cfg *leafNodeCfg) pickNextURL() *url.URL {
cfg.Lock()
defer cfg.Unlock()
// If the current URL is the first in the list and we have more than
// one URL, then move that one to end of the list.
if cfg.curURL != nil && len(cfg.urls) > 1 && urlsAreEqual(cfg.curURL, cfg.urls[0]) {
first := cfg.urls[0]
copy(cfg.urls, cfg.urls[1:])
cfg.urls[len(cfg.urls)-1] = first
}
cfg.curURL = cfg.urls[0]
return cfg.curURL
}
// Returns the current URL
func (cfg *leafNodeCfg) getCurrentURL() *url.URL {
cfg.RLock()
defer cfg.RUnlock()
return cfg.curURL
}
// Returns how long the server should wait before attempting
// to solicit a remote leafnode connection following the
// detection of a loop.
// Returns 0 if no loop was detected.
func (cfg *leafNodeCfg) getLoopDelay() time.Duration {
cfg.RLock()
delay := cfg.loopDelay
cfg.RUnlock()
return delay
}
// Reset the loop delay.
func (cfg *leafNodeCfg) resetLoopDelay() {
cfg.Lock()
cfg.loopDelay = 0
cfg.Unlock()
}
// Ensure that non-exported options (used in tests) have
// been properly set.
func (s *Server) setLeafNodeNonExportedOptions() {
opts := s.getOpts()
s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout
if s.leafNodeOpts.dialTimeout == 0 {
// Use same timeouts as routes for now.
s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL
}
s.leafNodeOpts.resolver = opts.LeafNode.resolver
if s.leafNodeOpts.resolver == nil {
s.leafNodeOpts.resolver = net.DefaultResolver
}
}
func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) {
defer s.grWG.Done()
if remote == nil || len(remote.URLs) == 0 {
s.Debugf("Empty remote leafnode definition, nothing to connect")
return
}
opts := s.getOpts()
reconnectDelay := opts.LeafNode.ReconnectInterval
s.mu.Lock()
dialTimeout := s.leafNodeOpts.dialTimeout
resolver := s.leafNodeOpts.resolver
s.mu.Unlock()
if loopDelay := remote.getLoopDelay(); loopDelay > 0 {
select {
case <-time.After(loopDelay):
case <-s.quitCh:
return
}
remote.resetLoopDelay()
}
var conn net.Conn
const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v"
attempts := 0
for s.isRunning() && s.remoteLeafNodeStillValid(remote) {
rURL := remote.pickNextURL()
url, err := s.getRandomIP(resolver, rURL.Host)
if err == nil {
var ipStr string
if url != rURL.Host {
ipStr = fmt.Sprintf(" (%s)", url)
}
s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr)
conn, err = net.DialTimeout("tcp", url, dialTimeout)
}
if err != nil {
attempts++
if s.shouldReportConnectErr(firstConnect, attempts) {
s.Errorf(connErrFmt, rURL.Host, attempts, err)
} else {
s.Debugf(connErrFmt, rURL.Host, attempts, err)
}
select {
case <-s.quitCh:
return
case <-time.After(reconnectDelay):
continue
}
}
if !s.remoteLeafNodeStillValid(remote) {
conn.Close()
return
}
// We have a connection here to a remote server.
// Go ahead and create our leaf node and return.
s.createLeafNode(conn, remote)
// We will put this in the normal log if first connect, does not force -DV mode to know
// that the connect worked.
if firstConnect {
s.Noticef("Connected leafnode to %q", rURL.Hostname())
}
return
}
}
// Save off the tlsName for when we use TLS and mix hostnames and IPs. IPs usually
// come from the server we connect to.
func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) {
isTLS := cfg.TLSConfig != nil || u.Scheme == "tls"
if isTLS && cfg.tlsName == "" && net.ParseIP(u.Hostname()) == nil {
cfg.tlsName = u.Hostname()
}
}
// Save off the username/password for when we connect using a bare URL
// that we get from the INFO protocol.
func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) {
if cfg.username == _EMPTY_ && u.User != nil {
cfg.username = u.User.Username()
cfg.password, _ = u.User.Password()
}
}
// This is the leafnode's accept loop. This runs as a go-routine.
// The listen specification is resolved (if use of random port),
// then a listener is started. After that, this routine enters
// a loop (until the server is shutdown) accepting incoming
// leaf node connections from remote servers.
func (s *Server) leafNodeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
port := opts.LeafNode.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e)
return
}
s.Noticef("Listening for leafnode connections on %s",
net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
s.mu.Lock()
tlsRequired := opts.LeafNode.TLSConfig != nil
tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
info := Info{
ID: s.info.ID,
Version: s.info.Version,
GitCommit: gitCommit,
GoVersion: runtime.Version(),
AuthRequired: true,
TLSRequired: tlsRequired,
TLSVerify: tlsVerify,
MaxPayload: s.info.MaxPayload, // TODO(dlc) - Allow override?
Proto: 1, // Fixed for now.
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port
}
s.leafNodeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setLeafNodeInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", s.opts.LeafNode.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Add our LeafNode URL to the list that we send to servers connecting
// to our LeafNode accept URL. This call also regenerates leafNodeInfoJSON.
s.addLeafNodeURL(s.leafNodeInfo.IP)
// Setup state that can enable shutdown
s.leafNodeListener = l
// As of now, a server that does not have remotes configured would
// never solicit a connection, so we should not have to warn if
// InsecureSkipVerify is set in main LeafNodes config (since
// this TLS setting matters only when soliciting a connection).
// Still, warn if insecure is set in any of LeafNode block.
// We need to check remotes, even if tls is not required on accept.
warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify
if !warn {
for _, r := range opts.LeafNode.Remotes {
if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify {
warn = true
break
}
}
}
if warn {
s.Warnf(leafnodeTLSInsecureWarning)
}
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
tmpDelay = s.acceptError("LeafNode", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createLeafNode(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Leafnode accept loop exiting..")
s.done <- true
}
// RegEx to match a creds file with user JWT and Seed.
var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
// Lock should be held entering here.
func (c *client) sendLeafConnect(tlsRequired bool) {
// We support basic user/pass and operator based user JWT with signatures.
cinfo := leafConnectInfo{
TLS: tlsRequired,
Name: c.srv.info.ID,
}
// Check for credentials first, that will take precedence..
if creds := c.leaf.remote.Credentials; creds != "" {
c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
contents, err := ioutil.ReadFile(creds)
if err != nil {
c.Errorf("%v", err)
return
}
defer wipeSlice(contents)
items := credsRe.FindAllSubmatch(contents, -1)
if len(items) < 2 {
c.Errorf("Credentials file malformed")
return
}
// First result should be the user JWT.
// We copy here so that the file containing the seed will be wiped appropriately.
raw := items[0][1]
tmp := make([]byte, len(raw))
copy(tmp, raw)
// Seed is second item.
kp, err := nkeys.FromSeed(items[1][1])
if err != nil {
c.Errorf("Credentials file has malformed seed")
return
}
// Wipe our key on exit.
defer kp.Wipe()
sigraw, _ := kp.Sign(c.nonce)
sig := base64.RawURLEncoding.EncodeToString(sigraw)
cinfo.JWT = string(tmp)
cinfo.Sig = sig
} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
cinfo.User = userInfo.Username()
cinfo.Pass, _ = userInfo.Password()
} else if c.leaf.remote.username != _EMPTY_ {
cinfo.User = c.leaf.remote.username
cinfo.Pass = c.leaf.remote.password
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection(ProtocolViolation)
return
}
// Although this call is made before the writeLoop is created,
// we don't really need to send in place. The protocol will be
// sent out by the writeLoop.
c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
}
// Makes a deep copy of the LeafNode Info structure.
// The server lock is held on entry.
func (s *Server) copyLeafNodeInfo() *Info {
clone := s.leafNodeInfo
// Copy the array of urls.
if len(s.leafNodeInfo.LeafNodeURLs) > 0 {
clone.LeafNodeURLs = append([]string(nil), s.leafNodeInfo.LeafNodeURLs...)
}
return &clone
}
// Adds a LeafNode URL that we get when a route connects to the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was added or not.
// Server lock is held on entry
func (s *Server) addLeafNodeURL(urlStr string) bool {
// Make sure we already don't have it.
for _, url := range s.leafNodeInfo.LeafNodeURLs {
if url == urlStr {
return false
}
}
s.leafNodeInfo.LeafNodeURLs = append(s.leafNodeInfo.LeafNodeURLs, urlStr)
s.generateLeafNodeInfoJSON()
return true
}
// Removes a LeafNode URL of the route that is disconnecting from the Info structure.
// Regenerates the JSON byte array so that it can be sent to LeafNode connections.
// Returns a boolean indicating if the URL was removed or not.
// Server lock is held on entry.
func (s *Server) removeLeafNodeURL(urlStr string) bool {
// Don't need to do this if we are removing the route connection because
// we are shuting down...
if s.shutdown {
return false
}
removed := false
urls := s.leafNodeInfo.LeafNodeURLs
for i, url := range urls {
if url == urlStr {
// If not last, move last into the position we remove.
last := len(urls) - 1
if i != last {
urls[i] = urls[last]
}
s.leafNodeInfo.LeafNodeURLs = urls[0:last]
removed = true
break
}
}
if removed {
s.generateLeafNodeInfoJSON()
}
return removed
}
func (s *Server) generateLeafNodeInfoJSON() {
b, _ := json.Marshal(s.leafNodeInfo)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
s.leafNodeInfoJSON = bytes.Join(pcs, []byte(" "))
}
// Sends an async INFO protocol so that the connected servers can update
// their list of LeafNode urls.
func (s *Server) sendAsyncLeafNodeInfo() {
for _, c := range s.leafs {
c.mu.Lock()
c.enqueueProto(s.leafNodeInfoJSON)
c.mu.Unlock()
}
}
// Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode.
func (s *Server) createLeafNode(conn net.Conn, remote *leafNodeCfg) *client {
// Snapshot server options.
opts := s.getOpts()
maxPay := int32(opts.MaxPayload)
maxSubs := int32(opts.MaxSubs)
// For system, maxSubs of 0 means unlimited, so re-adjust here.
if maxSubs == 0 {
maxSubs = -1
}
now := time.Now()
c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
c.leaf = &leaf{smap: map[string]int32{}}
// Determines if we are soliciting the connection or not.
var solicited bool
c.mu.Lock()
c.initClient()
if remote != nil {
solicited = true
// Users can bind to any local account, if its empty
// we will assume the $G account.
if remote.LocalAccount == "" {
remote.LocalAccount = globalAccountName
}
c.leaf.remote = remote
c.mu.Unlock()
// TODO: Decide what should be the optimal behavior here.
// For now, if lookup fails, we will constantly try
// to recreate this LN connection.
acc, err := s.LookupAccount(remote.LocalAccount)
if err != nil {
c.Errorf("No local account %q for leafnode: %v", remote.LocalAccount, err)
c.closeConnection(MissingAccount)
return nil
}
c.mu.Lock()
c.acc = acc
} else {
c.flags.set(expectConnect)
}
c.mu.Unlock()
var nonce [nonceLen]byte
// Grab server variables
s.mu.Lock()
info := s.copyLeafNodeInfo()
if !solicited {
s.generateNonce(nonce[:])
}
s.mu.Unlock()
// Grab lock
c.mu.Lock()
if solicited {
// We need to wait here for the info, but not for too long.
c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
info, err := br.ReadString('\n')
if err != nil {
c.mu.Unlock()
if err == io.EOF {
c.closeConnection(ClientClosed)
} else {
c.closeConnection(ReadError)
}
return nil
}
c.nc.SetReadDeadline(time.Time{})
c.mu.Unlock()
// Error will be handled below, so ignore here.
c.parse([]byte(info))
c.mu.Lock()
if !c.flags.isSet(infoReceived) {
c.mu.Unlock()
c.Debugf("Did not get the remote leafnode's INFO, timed-out")
c.closeConnection(ReadError)
return nil
}
// Do TLS here as needed.
tlsRequired := remote.TLS || remote.TLSConfig != nil
if tlsRequired {
c.Debugf("Starting TLS leafnode client handshake")
// Specify the ServerName we are expecting.
var tlsConfig *tls.Config
if remote.TLSConfig != nil {
tlsConfig = remote.TLSConfig.Clone()
} else {
tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
var host string
// If ServerName was given to us from the option, use that, always.
if tlsConfig.ServerName == "" {
url := remote.getCurrentURL()
host = url.Hostname()
// We need to check if this host is an IP. If so, we probably
// had this advertised to us and should use the configured host
// name for the TLS server name.
if remote.tlsName != "" && net.ParseIP(host) != nil {
host = remote.tlsName
}
tlsConfig.ServerName = host
}
c.nc = tls.Client(c.nc, tlsConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
var wait time.Duration
if remote.TLSTimeout == 0 {
wait = TLS_TIMEOUT
} else {
wait = secondsToDuration(remote.TLSTimeout)
}
time.AfterFunc(wait, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(wait))
// Force handshake
c.mu.Unlock()
if err = conn.Handshake(); err != nil {
if solicited {
// If we overrode and used the saved tlsName but that failed
// we will clear that here. This is for the case that another server
// does not have the same tlsName, maybe only IPs.
// https://github.com/nats-io/nats-server/issues/1256
if _, ok := err.(x509.HostnameError); ok {
remote.Lock()
if host == remote.tlsName {
remote.tlsName = ""
}
remote.Unlock()
}
}
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
}
c.sendLeafConnect(tlsRequired)
c.Debugf("Remote leafnode connect msg sent")
} else {
// Send our info to the other side.
// Remember the nonce we sent here for signatures, etc.
c.nonce = make([]byte, nonceLen)
copy(c.nonce, nonce[:])
info.Nonce = string(c.nonce)
info.CID = c.cid
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
// We have to send from this go routine because we may
// have to block for TLS handshake before we start our
// writeLoop go routine. The other side needs to receive
// this before it can initiate the TLS handshake.
c.sendProtoNow(bytes.Join(pcs, []byte(" ")))
// Check to see if we need to spin up TLS.
if info.TLSRequired {
c.Debugf("Starting TLS leafnode server handshake")
c.nc = tls.Server(c.nc, opts.LeafNode.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.LeafNode.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
}
// Leaf nodes will always require a CONNECT to let us know
// when we are properly bound to an account.
// The connection may have been closed
if !c.isClosed() {
c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout))
}
}
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
// Set the Ping timer
s.setFirstPingTimer(c)
c.mu.Unlock()
c.Debugf("Leafnode connection created")
// Update server's accounting here if we solicited.
// Also send our local subs.
if solicited {
// Make sure we register with the account here.
c.registerWithAccount(c.acc)
s.addLeafNodeConnection(c)
s.initLeafNodeSmap(c)
c.sendAllLeafSubs()
}
return c
}
func (c *client) processLeafnodeInfo(info *Info) {
c.mu.Lock()
defer c.mu.Unlock()
if c.leaf == nil || c.isClosed() {
return
}
// Mark that the INFO protocol has been received.
// Note: For now, only the initial INFO has a nonce. We
// will probably do auto key rotation at some point.
if c.flags.setIfNotSet(infoReceived) {
// Capture a nonce here.
c.nonce = []byte(info.Nonce)
if info.TLSRequired && c.leaf.remote != nil {
c.leaf.remote.TLS = true
}
}
// For both the initial INFO and async INFO protocols, possibly
// update our list of remote leafnode URLs we can connect to.
if c.leaf.remote != nil && len(info.LeafNodeURLs) > 0 {
// Consider the incoming array as the most up-to-date
// representation of the remote cluster's list of URLs.
c.updateLeafNodeURLs(info)
}
}
// When getting a leaf node INFO protocol, use the provided
// array of urls to update the list of possible endpoints.
func (c *client) updateLeafNodeURLs(info *Info) {
cfg := c.leaf.remote
cfg.Lock()
defer cfg.Unlock()
cfg.urls = make([]*url.URL, 0, 1+len(info.LeafNodeURLs))
// Add the ones we receive in the protocol
for _, surl := range info.LeafNodeURLs {
url, err := url.Parse("nats-leaf://" + surl)
if err != nil {
c.Errorf("Error parsing url %q: %v", surl, err)
continue
}
// Do not add if it's the same as what we already have configured.
var dup bool
for _, u := range cfg.URLs {
// URLs that we receive never have user info, but the
// ones that were configured may have. Simply compare
// host and port to decide if they are equal or not.
if url.Host == u.Host && url.Port() == u.Port() {
dup = true
break
}
}
if !dup {
cfg.urls = append(cfg.urls, url)
cfg.saveTLSHostname(url)
}
}
// Add the configured one
cfg.urls = append(cfg.urls, cfg.URLs...)
}
// Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo.
func (s *Server) setLeafNodeInfoHostPortAndIP() error {
opts := s.getOpts()
if opts.LeafNode.Advertise != _EMPTY_ {
advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port)
if err != nil {
return err
}
s.leafNodeInfo.Host = advHost
s.leafNodeInfo.Port = advPort
} else {
s.leafNodeInfo.Host = opts.LeafNode.Host
s.leafNodeInfo.Port = opts.LeafNode.Port
// If the host is "0.0.0.0" or "::" we need to resolve to a public IP.
// This will return at most 1 IP.
hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false)
if err != nil {
return err
}
if hostIsIPAny {
if len(ips) == 0 {
s.Errorf("Could not find any non-local IP for leafnode's listen specification %q",
s.leafNodeInfo.Host)
} else {
// Take the first from the list...
s.leafNodeInfo.Host = ips[0]
}
}
}
// Use just host:port for the IP
s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port))
if opts.LeafNode.Advertise != _EMPTY_ {
s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP)
}
return nil
}
func (s *Server) addLeafNodeConnection(c *client) {
c.mu.Lock()
cid := c.cid
c.mu.Unlock()
s.mu.Lock()
s.leafs[cid] = c
s.mu.Unlock()
}
func (s *Server) removeLeafNodeConnection(c *client) {
c.mu.Lock()
cid := c.cid
c.mu.Unlock()
s.mu.Lock()
delete(s.leafs, cid)
s.mu.Unlock()
}
type leafConnectInfo struct {
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Comp bool `json:"compression,omitempty"`
Name string `json:"name,omitempty"`
// Just used to detect wrong connection attempts.
Gateway string `json:"gateway,omitempty"`
}
// processLeafNodeConnect will process the inbound connect args.
// Once we are here we are bound to an account, so can send any interest that
// we would have to the other side.
func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error {
// Way to detect clients that incorrectly connect to the route listen
// port. Client provided "lang" in the CONNECT protocol while LEAFNODEs don't.
if lang != "" {
c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
c.closeConnection(WrongPort)
return ErrClientConnectedToLeafNodePort
}
// Unmarshal as a leaf node connect protocol
proto := &leafConnectInfo{}
if err := json.Unmarshal(arg, proto); err != nil {
return err
}
// Reject if this has Gateway which means that it would be from a gateway
// connection that incorrectly connects to the leafnode port.
if proto.Gateway != "" {
errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongGateway)
return ErrWrongGateway
}
// Leaf Nodes do not do echo or verbose or pedantic.
c.opts.Verbose = false
c.opts.Echo = false
c.opts.Pedantic = false
// Create and initialize the smap since we know our bound account now.
lm := s.initLeafNodeSmap(c)
// We are good to go, send over all the bound account subscriptions.
if lm <= 128 {
c.sendAllLeafSubs()
} else {
s.startGoRoutine(func() {
c.sendAllLeafSubs()
s.grWG.Done()
})
}
// Add in the leafnode here since we passed through auth at this point.
s.addLeafNodeConnection(c)
// Announce the account connect event for a leaf node.
// This will no-op as needed.
s.sendLeafNodeConnect(c.acc)
return nil
}
// Snapshot the current subscriptions from the sublist into our smap which
// we will keep updated from now on.
func (s *Server) initLeafNodeSmap(c *client) int {
acc := c.acc
if acc == nil {
c.Debugf("Leafnode does not have an account bound")
return 0
}
// Collect all account subs here.
_subs := [32]*subscription{}
subs := _subs[:0]
ims := []string{}
acc.mu.Lock()
accName := acc.Name
// If we are solicited we only send interest for local clients.
if c.isSolicitedLeafNode() {
acc.sl.localSubs(&subs)
} else {
acc.sl.All(&subs)
}
// Since leaf nodes only send on interest, if the bound
// account has import services we need to send those over.
for isubj := range acc.imports.services {
ims = append(ims, isubj)
}
// Create a unique subject that will be used for loop detection.
lds := acc.lds
if lds == _EMPTY_ {
lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next()
acc.lds = lds
}
acc.mu.Unlock()
// Now check for gateway interest. Leafnodes will put this into
// the proper mode to propagate, but they are not held in the account.
gwsa := [16]*client{}
gws := gwsa[:0]
s.getOutboundGatewayConnections(&gws)
for _, cgw := range gws {
cgw.mu.Lock()
gw := cgw.gw
cgw.mu.Unlock()
if gw != nil {
if ei, _ := gw.outsim.Load(accName); ei != nil {
if e := ei.(*outsie); e != nil && e.sl != nil {
e.sl.All(&subs)
}
}
}
}
applyGlobalRouting := s.gateway.enabled
if c.isSolicitedLeafNode() {
// Add a fake subscription for this solicited leafnode connection
// so that we can send back directly for mapped GW replies.
c.srv.gwLeafSubs.Insert(&subscription{client: c, subject: []byte(gwReplyPrefix + ">")})
}
// Now walk the results and add them to our smap
c.mu.Lock()
for _, sub := range subs {
// We ignore ourselves here.
if c != sub.client {
c.leaf.smap[keyFromSub(sub)]++
}
}
// FIXME(dlc) - We need to update appropriately on an account claims update.
for _, isubj := range ims {
c.leaf.smap[isubj]++
}
// If we have gateways enabled we need to make sure the other side sends us responses
// that have been augmented from the original subscription.
// TODO(dlc) - Should we lock this down more?
if applyGlobalRouting {
c.leaf.smap[oldGWReplyPrefix+"*.>"]++
c.leaf.smap[gwReplyPrefix+">"]++
}
// Detect loop by subscribing to a specific subject and checking
// if this is coming back to us.
if c.leaf.remote == nil {
c.leaf.smap[lds]++
}
lenMap := len(c.leaf.smap)
c.mu.Unlock()
return lenMap
}
// updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-.
func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) {
acc, err := s.LookupAccount(accName)
if acc == nil || err != nil {
s.Debugf("No or bad account for %q, failed to update interest from gateway", accName)
return
}
s.updateLeafNodes(acc, sub, delta)
}
// updateLeafNodes will make sure to update the smap for the subscription. Will
// also forward to all leaf nodes as needed.
func (s *Server) updateLeafNodes(acc *Account, sub *subscription, delta int32) {
if acc == nil || sub == nil {
return
}
_l := [32]*client{}
leafs := _l[:0]
// Grab all leaf nodes. Ignore a leafnode if sub's client is a leafnode and matches.
acc.mu.RLock()
for _, ln := range acc.lleafs {
if ln != sub.client {
leafs = append(leafs, ln)
}
}
acc.mu.RUnlock()
for _, ln := range leafs {
ln.updateSmap(sub, delta)
}
}
// This will make an update to our internal smap and determine if we should send out
// an interest update to the remote side.
func (c *client) updateSmap(sub *subscription, delta int32) {
key := keyFromSub(sub)
c.mu.Lock()
// If we are solicited make sure this is a local client or a non-solicited leaf node
skind := sub.client.kind
if c.isSolicitedLeafNode() && !(skind == CLIENT || (skind == LEAF && !sub.client.isSolicitedLeafNode())) {
c.mu.Unlock()
return
}
n := c.leaf.smap[key]
// We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0.
update := sub.queue != nil || n == 0 || n+delta <= 0
n += delta
if n > 0 {
c.leaf.smap[key] = n
} else {
delete(c.leaf.smap, key)
}
if update {
c.sendLeafNodeSubUpdate(key, n)
}
c.mu.Unlock()
}
// Send the subscription interest change to the other side.
// Lock should be held.
func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
_b := [64]byte{}
b := bytes.NewBuffer(_b[:0])
c.writeLeafSub(b, key, n)
c.enqueueProto(b.Bytes())
}
// Helper function to build the key.
func keyFromSub(sub *subscription) string {
var _rkey [1024]byte
var key []byte
if sub.queue != nil {
// Just make the key subject spc group, e.g. 'foo bar'
key = _rkey[:0]
key = append(key, sub.subject...)
key = append(key, byte(' '))
key = append(key, sub.queue...)
} else {
key = sub.subject
}
return string(key)
}
// Send all subscriptions for this account that include local
// and possibly all other remote subscriptions.
func (c *client) sendAllLeafSubs() {
// Hold all at once for now.
var b bytes.Buffer
c.mu.Lock()
for key, n := range c.leaf.smap {
c.writeLeafSub(&b, key, n)
}
buf := b.Bytes()
if len(buf) > 0 {
c.queueOutbound(buf)
c.flushSignal()
}
c.mu.Unlock()
}
func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
if key == "" {
return
}
if n > 0 {
w.WriteString("LS+ " + key)
// Check for queue semantics, if found write n.
if strings.Contains(key, " ") {
w.WriteString(" ")
var b [12]byte
var i = len(b)
for l := n; l > 0; l /= 10 {
i--
b[i] = digits[l%10]
}
w.Write(b[i:])
if c.trace {
arg := fmt.Sprintf("%s %d", key, n)
c.traceOutOp("LS+", []byte(arg))
}
} else if c.trace {
c.traceOutOp("LS+", []byte(key))
}
} else {
w.WriteString("LS- " + key)
if c.trace {
c.traceOutOp("LS-", []byte(key))
}
}
w.WriteString(CR_LF)
}
// processLeafSub will process an inbound sub request for the remote leaf node.
func (c *client) processLeafSub(argo []byte) (err error) {
c.traceInOp("LS+", argo)
// Indicate activity.
c.in.subs++
srv := c.srv
if srv == nil {
return nil
}
// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 1:
sub.queue = nil
case 3:
sub.queue = args[1]
sub.qw = int32(parseSize(args[2]))
default:
return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
}
sub.subject = args[0]
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return nil
}
acc := c.acc
// Check if we have a loop.
if string(sub.subject) == acc.getLds() {
c.mu.Unlock()
srv.reportLeafNodeLoop(c)
return nil
}
// Check permissions if applicable.
if !c.canExport(string(sub.subject)) {
c.mu.Unlock()
c.Debugf("Can not export %q, ignoring remote subscription request", sub.subject)
return nil
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil
}
// Like Routes, we store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
sub.sid = arg[:len(arg)-len(args[2])-1]
} else {
sub.sid = arg
}
key := string(sub.sid)
osub := c.subs[key]
updateGWs := false
if osub == nil {
c.subs[key] = sub
// Now place into the account sl.
if err = acc.sl.Insert(sub); err != nil {
delete(c.subs, key)
c.mu.Unlock()
c.Errorf("Could not insert subscription: %v", err)
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
atomic.StoreInt32(&osub.qw, sub.qw)
acc.sl.UpdateRemoteQSub(osub)
}
solicited := c.isSolicitedLeafNode()
c.mu.Unlock()
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
// If we are not solicited, treat leaf node subscriptions similar to a
// client subscription, meaning we forward them to routes, gateways and
// other leaf nodes as needed.
if !solicited {
// If we are routing add to the route map for the associated account.
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates for other leaf nodes. We understand solicited
// and non-solicited state in this call so we will do the right thing.
srv.updateLeafNodes(acc, sub, 1)
return nil
}
func (s *Server) reportLeafNodeLoop(c *client) {
delay := leafNodeReconnectDelayAfterLoopDetected
opts := s.getOpts()
if opts.LeafNode.loopDelay != 0 {
delay = opts.LeafNode.loopDelay
}
c.mu.Lock()
if c.leaf.remote != nil {
c.leaf.remote.Lock()
c.leaf.remote.loopDelay = delay
c.leaf.remote.Unlock()
}
accName := c.acc.Name
c.mu.Unlock()
c.sendErrAndErr(fmt.Sprintf("Loop detected for leafnode account=%q. Delaying attempt to reconnect for %v",
accName, delay))
}
// processLeafUnsub will process an inbound unsub request for the remote leaf node.
func (c *client) processLeafUnsub(arg []byte) error {
c.traceInOp("LS-", arg)
// Indicate any activity, so pub and sub or unsubs.
c.in.subs++
acc := c.acc
srv := c.srv
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return nil
}
updateGWs := false
// We store local subs by account and subject and optionally queue name.
// LS- will have the arg exactly as the key.
sub, ok := c.subs[string(arg)]
c.mu.Unlock()
if ok {
c.unsubscribe(acc, sub, true, true)
updateGWs = srv.gateway.enabled
}
// If we are routing subtract from the route map for the associated account.
srv.updateRouteSubscriptionMap(acc, sub, -1)
// Gateways
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
// Now check on leafnode updates for other leaf nodes.
srv.updateLeafNodes(acc, sub, -1)
return nil
}
func (c *client) processLeafMsgArgs(trace bool, arg []byte) error {
if trace {
c.traceInOp("LMSG", arg)
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 0, 1:
return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args)
case 2:
c.pa.reply = nil
c.pa.queues = nil
c.pa.szb = args[1]
c.pa.size = parseSize(args[1])
case 3:
c.pa.reply = args[1]
c.pa.queues = nil
c.pa.szb = args[2]
c.pa.size = parseSize(args[2])
default:
// args[1] is our reply indicator. Should be + or | normally.
if len(args[1]) != 1 {
return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
}
switch args[1][0] {
case '+':
c.pa.reply = args[2]
case '|':
c.pa.reply = nil
default:
return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
}
// Grab size.
c.pa.szb = args[len(args)-1]
c.pa.size = parseSize(c.pa.szb)
// Grab queue names.
if c.pa.reply != nil {
c.pa.queues = args[3 : len(args)-1]
} else {
c.pa.queues = args[2 : len(args)-1]
}
}
if c.pa.size < 0 {
return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args)
}
// Common ones processed after check for arg length
c.pa.subject = args[0]
return nil
}
// processInboundLeafMsg is called to process an inbound msg from a leaf node.
func (c *client) processInboundLeafMsg(msg []byte) {
// Update statistics
c.in.msgs++
// The msg includes the CR_LF, so pull back out for accounting.
c.in.bytes += int32(len(msg) - LEN_CR_LF)
if c.trace {
c.traceMsg(msg)
}
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return
}
srv := c.srv
acc := c.acc
// Mostly under testing scenarios.
if srv == nil || acc == nil {
return
}
// Check to see if we need to map/route to another account.
if acc.imports.services != nil {
c.checkForImportServices(acc, msg)
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
// Collect queue names if needed.
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If we have queue subs in this cluster, then if we run in gateway
// mode and the remote gateways have queue subs, then we need to
// collect the queue groups this message was sent to so that we
// exclude them when sending to gateways.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
qnames = c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
}
}
| 1 | 9,892 | Should be an error... it is important for the admin of the server attempting to create the leafnode connection to see the error asap. | nats-io-nats-server | go |
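Sketching the reviewer's suggestion against the hunk above — promoting the log from Debugf to Errorf so the admin soliciting the leafnode connection sees the failure without running with -DV (`c.Errorf` is already used elsewhere in this file, e.g. for TLS handshake errors):

```go
err = c.parse([]byte(info))
if err != nil {
	// Surface the parse failure at error level so it is visible to the
	// admin immediately, not only in debug/trace mode.
	c.Errorf("Error parsing remote leafnode's INFO: %s", err)
	c.closeConnection(ReadError)
	return nil
}
```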
@@ -313,6 +313,15 @@ def speakObjectProperties(obj,reason=controlTypes.REASON_QUERY,index=None,**allo
newPropertyValues['current']=obj.isCurrent
if allowedProperties.get('placeholder', False):
newPropertyValues['placeholder']=obj.placeholder
+ # When speaking an object due to a focus change, the 'selected' state should not be reported if only one item is selected.
+ # This is because that one item will be the focused object, and saying selected is redundant.
+ # Rather, 'unselected' will be spoken for an unselected object if 1 or more items are selected.
+
+ states=newPropertyValues.get('states')
+ if states is not None and reason==controlTypes.REASON_FOCUS:
+ if controlTypes.STATE_SELECTABLE in states and controlTypes.STATE_SELECTED in states and obj.selectionContainer and obj.selectionContainer.getSelectedItemsCount(2)==1:
+ states.discard(controlTypes.STATE_SELECTED)
+ states.discard(controlTypes.STATE_SELECTABLE)
#Get the speech text for the properties we want to speak, and then speak it
text=getSpeechTextForProperties(reason,**newPropertyValues)
if text: | 1 | # -*- coding: UTF-8 -*-
#speech.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V.
"""High-level functions to speak information.
"""
import itertools
import weakref
import unicodedata
import time
import colors
import globalVars
from logHandler import log
import api
import controlTypes
import config
import tones
import synthDriverHandler
from synthDriverHandler import *
import re
import textInfos
import queueHandler
import speechDictHandler
import characterProcessing
import languageHandler
speechMode_off=0
speechMode_beeps=1
speechMode_talk=2
#: How speech should be handled; one of speechMode_off, speechMode_beeps or speechMode_talk.
speechMode=speechMode_talk
speechMode_beeps_ms=15
beenCanceled=True
isPaused=False
curWordChars=[]
#Set containing locale codes for languages supporting conjunct characters
LANGS_WITH_CONJUNCT_CHARS = {'hi', 'as', 'bn', 'gu', 'kn', 'kok', 'ml', 'mni', 'mr', 'pa', 'te', 'ur', 'ta'}
#: The string used to separate distinct chunks of text when multiple chunks should be spoken without pauses.
# #555: Use two spaces so that numbers from adjacent chunks aren't treated as a single number
# for languages such as French and German which use space as a thousands separator.
CHUNK_SEPARATOR = " "
oldTreeLevel=None
oldTableID=None
oldRowNumber=None
oldRowSpan=None
oldColumnNumber=None
oldColumnSpan=None
def initialize():
"""Loads and sets the synth driver configured in nvda.ini."""
synthDriverHandler.initialize()
setSynth(config.conf["speech"]["synth"])
def terminate():
setSynth(None)
speechViewerObj=None
#: If a chunk of text contains only these characters, it will be considered blank.
BLANK_CHUNK_CHARS = frozenset((" ", "\n", "\r", "\0", u"\xa0"))
def isBlank(text):
"""Determine whether text should be reported as blank.
@param text: The text in question.
@type text: str
@return: C{True} if the text is blank, C{False} if not.
@rtype: bool
"""
return not text or set(text) <= BLANK_CHUNK_CHARS
RE_CONVERT_WHITESPACE = re.compile("[\0\r\n]")
def processText(locale,text,symbolLevel):
text = speechDictHandler.processText(text)
text = characterProcessing.processSpeechSymbols(locale, text, symbolLevel)
text = RE_CONVERT_WHITESPACE.sub(u" ", text)
return text.strip()
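# Illustrative note (not part of the original source): the processing order
# above matters - speech dictionary substitutions run first, then symbol
# pronunciation for the given locale and symbolLevel, and finally null,
# carriage return and line feed characters are collapsed into spaces before
# the result is stripped.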
def getLastSpeechIndex():
"""Gets the last index passed by the synthesizer. Indexing is used so that its possible to find out when a certain peace of text has been spoken yet. Usually the character position of the text is passed to speak functions as the index.
@returns: the last index encountered
@rtype: int
"""
return getSynth().lastIndex
def cancelSpeech():
"""Interupts the synthesizer from currently speaking"""
global beenCanceled, isPaused, _speakSpellingGenerator
# Import only for this function to avoid circular import.
import sayAllHandler
sayAllHandler.stop()
speakWithoutPauses._pendingSpeechSequence=[]
speakWithoutPauses.lastSentIndex=None
if _speakSpellingGenerator:
_speakSpellingGenerator.close()
if beenCanceled:
return
elif speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
return
getSynth().cancel()
beenCanceled=True
isPaused=False
def pauseSpeech(switch):
global isPaused, beenCanceled
getSynth().pause(switch)
isPaused=switch
beenCanceled=False
def speakMessage(text,index=None):
"""Speaks a given message.
@param text: the message to speak
@type text: string
	@param index: the index to mark this current text with; it's best to use the character position of the text if you know it
@type index: int
"""
speakText(text,index=index,reason=controlTypes.REASON_MESSAGE)
def getCurrentLanguage():
synth=getSynth()
language=None
if synth:
try:
language=synth.language if config.conf['speech']['trustVoiceLanguage'] else None
except NotImplementedError:
pass
if language:
language=languageHandler.normalizeLanguage(language)
if not language:
language=languageHandler.getLanguage()
return language
def spellTextInfo(info,useCharacterDescriptions=False):
"""Spells the text from the given TextInfo, honouring any LangChangeCommand objects it finds if autoLanguageSwitching is enabled."""
if not config.conf['speech']['autoLanguageSwitching']:
speakSpelling(info.text,useCharacterDescriptions=useCharacterDescriptions)
return
curLanguage=None
for field in info.getTextWithFields({}):
if isinstance(field,basestring):
speakSpelling(field,curLanguage,useCharacterDescriptions=useCharacterDescriptions)
elif isinstance(field,textInfos.FieldCommand) and field.command=="formatChange":
curLanguage=field.field.get('language')
_speakSpellingGenerator=None
def speakSpelling(text,locale=None,useCharacterDescriptions=False):
global beenCanceled, _speakSpellingGenerator
import speechViewer
if speechViewer.isActive:
speechViewer.appendText(text)
if speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
return
if isPaused:
cancelSpeech()
beenCanceled=False
defaultLanguage=getCurrentLanguage()
if not locale or (not config.conf['speech']['autoDialectSwitching'] and locale.split('_')[0]==defaultLanguage.split('_')[0]):
locale=defaultLanguage
if not text:
# Translators: This is spoken when NVDA moves to an empty line.
return getSynth().speak((_("blank"),))
if not text.isspace():
text=text.rstrip()
if _speakSpellingGenerator and _speakSpellingGenerator.gi_frame:
_speakSpellingGenerator.send((text,locale,useCharacterDescriptions))
else:
_speakSpellingGenerator=_speakSpellingGen(text,locale,useCharacterDescriptions)
try:
# Speak the first character before this function returns.
next(_speakSpellingGenerator)
except StopIteration:
return
queueHandler.registerGeneratorObject(_speakSpellingGenerator)
def getCharDescListFromText(text,locale):
"""This method prepares a list, which contains character and its description for all characters the text is made up of, by checking the presence of character descriptions in characterDescriptions.dic of that locale for all possible combination of consecutive characters in the text.
This is done to take care of conjunct characters present in several languages such as Hindi, Urdu, etc.
"""
charDescList = []
charDesc=None
i = len(text)
while i:
subText = text[:i]
charDesc = characterProcessing.getCharacterDescription(locale,subText)
if charDesc or i==1:
if not charDesc:
# #5375: We're down to a single character (i == 1) and we don't have a description.
# Try converting to lower case.
# This provides for upper case English characters (which only have lower case descriptions).
charDesc = characterProcessing.getCharacterDescription(locale,subText.lower())
charDescList.append((subText,charDesc))
text = text[i:]
i = len(text)
else:
i = i - 1
return charDescList
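# Illustrative sketch (the cluster mentioned below is hypothetical): because
# the loop above tries the longest prefix first, a conjunct such as a
# three-character Devanagari cluster is looked up as a whole before falling
# back to shorter prefixes, so a single (cluster, description) pair can
# replace three separate per-character pairs when characterDescriptions.dic
# has an entry for the whole cluster.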
def _speakSpellingGen(text,locale,useCharacterDescriptions):
synth=getSynth()
synthConfig=config.conf["speech"][synth.name]
buf=[(text,locale,useCharacterDescriptions)]
for text,locale,useCharacterDescriptions in buf:
textLength=len(text)
count = 0
		localeHasConjuncts = locale.split('_',1)[0] in LANGS_WITH_CONJUNCT_CHARS
charDescList = getCharDescListFromText(text,locale) if localeHasConjuncts else text
for item in charDescList:
if localeHasConjuncts:
# item is a tuple containing character and its description
char = item[0]
charDesc = item[1]
else:
# item is just a character.
char = item
if useCharacterDescriptions:
charDesc=characterProcessing.getCharacterDescription(locale,char.lower())
uppercase=char.isupper()
if useCharacterDescriptions and charDesc:
#Consider changing to multiple synth speech calls
char=charDesc[0] if textLength>1 else u"\u3001".join(charDesc)
else:
char=characterProcessing.processSpeechSymbol(locale,char)
if uppercase and synthConfig["sayCapForCapitals"]:
# Translators: cap will be spoken before the given letter when it is capitalized.
char=_("cap %s")%char
if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
oldPitch=synthConfig["pitch"]
synth.pitch=max(0,min(oldPitch+synthConfig["capPitchChange"],100))
count = len(char)
index=count+1
log.io("Speaking character %r"%char)
speechSequence=[LangChangeCommand(locale)] if config.conf['speech']['autoLanguageSwitching'] else []
if len(char) == 1 and synthConfig["useSpellingFunctionality"]:
speechSequence.append(CharacterModeCommand(True))
if index is not None:
speechSequence.append(IndexCommand(index))
speechSequence.append(char)
synth.speak(speechSequence)
if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
synth.pitch=oldPitch
while textLength>1 and (isPaused or getLastSpeechIndex()!=index):
for x in xrange(2):
args=yield
if args: buf.append(args)
if uppercase and synthConfig["beepForCapitals"]:
tones.beep(2000,50)
args=yield
if args: buf.append(args)
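# Illustrative note (not part of the original source): _speakSpellingGen acts
# as a coroutine; while it is still running, speakSpelling feeds further
# (text, locale, useCharacterDescriptions) tuples into it via send() rather
# than constructing a new generator, so queued spelling requests are spoken
# in order.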
def speakObjectProperties(obj,reason=controlTypes.REASON_QUERY,index=None,**allowedProperties):
#Fetch the values for all wanted properties
newPropertyValues={}
positionInfo=None
for name,value in allowedProperties.iteritems():
if name=="includeTableCellCoords":
# This is verbosity info.
newPropertyValues[name]=value
elif name.startswith('positionInfo_') and value:
if positionInfo is None:
positionInfo=obj.positionInfo
elif value:
try:
newPropertyValues[name]=getattr(obj,name)
except NotImplementedError:
pass
if positionInfo:
if allowedProperties.get('positionInfo_level',False) and 'level' in positionInfo:
newPropertyValues['positionInfo_level']=positionInfo['level']
if allowedProperties.get('positionInfo_indexInGroup',False) and 'indexInGroup' in positionInfo:
newPropertyValues['positionInfo_indexInGroup']=positionInfo['indexInGroup']
if allowedProperties.get('positionInfo_similarItemsInGroup',False) and 'similarItemsInGroup' in positionInfo:
newPropertyValues['positionInfo_similarItemsInGroup']=positionInfo['similarItemsInGroup']
	#Fetch the cached properties and update them with the new ones
oldCachedPropertyValues=getattr(obj,'_speakObjectPropertiesCache',{}).copy()
cachedPropertyValues=oldCachedPropertyValues.copy()
cachedPropertyValues.update(newPropertyValues)
obj._speakObjectPropertiesCache=cachedPropertyValues
#If we should only cache we can stop here
if reason==controlTypes.REASON_ONLYCACHE:
return
	#If only speaking changes, filter out all values that haven't changed
if reason==controlTypes.REASON_CHANGE:
for name in set(newPropertyValues)&set(oldCachedPropertyValues):
if newPropertyValues[name]==oldCachedPropertyValues[name]:
del newPropertyValues[name]
elif name=="states": #states need specific handling
oldStates=oldCachedPropertyValues[name]
newStates=newPropertyValues[name]
newPropertyValues['states']=newStates-oldStates
newPropertyValues['negativeStates']=oldStates-newStates
#properties such as states need to know the role to speak properly, give it as a _ name
newPropertyValues['_role']=newPropertyValues.get('role',obj.role)
# The real states are needed also, as the states entry might be filtered.
newPropertyValues['_states']=obj.states
if "rowNumber" in newPropertyValues or "columnNumber" in newPropertyValues:
# We're reporting table cell info, so pass the table ID.
try:
newPropertyValues["_tableID"]=obj.tableID
except NotImplementedError:
pass
newPropertyValues['current']=obj.isCurrent
if allowedProperties.get('placeholder', False):
newPropertyValues['placeholder']=obj.placeholder
#Get the speech text for the properties we want to speak, and then speak it
text=getSpeechTextForProperties(reason,**newPropertyValues)
if text:
speakText(text,index=index)
def _speakPlaceholderIfEmpty(info, obj, reason):
""" attempt to speak placeholder attribute if the textInfo 'info' is empty
@return: True if info was considered empty, and we attempted to speak the placeholder value.
False if info was not considered empty.
"""
textEmpty = obj._isTextEmpty
if textEmpty:
speakObjectProperties(obj,reason=reason,placeholder=True)
return True
return False
def speakObject(obj,reason=controlTypes.REASON_QUERY,index=None):
from NVDAObjects import NVDAObjectTextInfo
role=obj.role
# Choose when we should report the content of this object's textInfo, rather than just the object's value
import browseMode
shouldReportTextContent=not (
# focusEntered should never present text content
(reason==controlTypes.REASON_FOCUSENTERED) or
# The rootNVDAObject of a browseMode document in browse mode (not passThrough) should never present text content
(isinstance(obj.treeInterceptor,browseMode.BrowseModeDocumentTreeInterceptor) and not obj.treeInterceptor.passThrough and obj==obj.treeInterceptor.rootNVDAObject) or
# objects that do not report as having navigableText should not report their text content either
not obj._hasNavigableText
)
allowProperties={'name':True,'role':True,'roleText':True,'states':True,'value':True,'description':True,'keyboardShortcut':True,'positionInfo_level':True,'positionInfo_indexInGroup':True,'positionInfo_similarItemsInGroup':True,"cellCoordsText":True,"rowNumber":True,"columnNumber":True,"includeTableCellCoords":True,"columnCount":True,"rowCount":True,"rowHeaderText":True,"columnHeaderText":True,"rowSpan":True,"columnSpan":True}
if reason==controlTypes.REASON_FOCUSENTERED:
allowProperties["value"]=False
allowProperties["keyboardShortcut"]=False
allowProperties["positionInfo_level"]=False
# Aside from excluding some properties, focus entered should be spoken like focus.
reason=controlTypes.REASON_FOCUS
if not config.conf["presentation"]["reportObjectDescriptions"]:
allowProperties["description"]=False
if not config.conf["presentation"]["reportKeyboardShortcuts"]:
allowProperties["keyboardShortcut"]=False
if not config.conf["presentation"]["reportObjectPositionInformation"]:
allowProperties["positionInfo_level"]=False
allowProperties["positionInfo_indexInGroup"]=False
allowProperties["positionInfo_similarItemsInGroup"]=False
if reason!=controlTypes.REASON_QUERY:
allowProperties["rowCount"]=False
allowProperties["columnCount"]=False
formatConf=config.conf["documentFormatting"]
if not formatConf["reportTableCellCoords"]:
allowProperties["cellCoordsText"]=False
# rowNumber and columnNumber might be needed even if we're not reporting coordinates.
allowProperties["includeTableCellCoords"]=False
if not formatConf["reportTableHeaders"]:
allowProperties["rowHeaderText"]=False
allowProperties["columnHeaderText"]=False
if (not formatConf["reportTables"]
or (not formatConf["reportTableCellCoords"] and not formatConf["reportTableHeaders"])):
# We definitely aren't reporting any table info at all.
allowProperties["rowNumber"]=False
allowProperties["columnNumber"]=False
allowProperties["rowSpan"]=False
allowProperties["columnSpan"]=False
if shouldReportTextContent:
allowProperties['value']=False
speakObjectProperties(obj,reason=reason,index=index,**allowProperties)
if reason==controlTypes.REASON_ONLYCACHE:
return
if shouldReportTextContent:
try:
info=obj.makeTextInfo(textInfos.POSITION_SELECTION)
if not info.isCollapsed:
# if there is selected text, then there is a value and we do not report placeholder
# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
speakSelectionMessage(_("selected %s"),info.text)
else:
info.expand(textInfos.UNIT_LINE)
_speakPlaceholderIfEmpty(info, obj, reason)
speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET)
except:
newInfo=obj.makeTextInfo(textInfos.POSITION_ALL)
if not _speakPlaceholderIfEmpty(newInfo, obj, reason):
speakTextInfo(newInfo,unit=textInfos.UNIT_PARAGRAPH,reason=controlTypes.REASON_CARET)
elif role==controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.speechProvider:
try:
speak(mathPres.speechProvider.getSpeechForMathMl(obj.mathMl))
except (NotImplementedError, LookupError):
pass
def speakText(text,index=None,reason=controlTypes.REASON_MESSAGE,symbolLevel=None):
"""Speaks some text.
@param text: The text to speak.
@type text: str
@param index: The index to mark this text with, which can be used later to determine whether this piece of text has been spoken.
@type index: int
@param reason: The reason for this speech; one of the controlTypes.REASON_* constants.
@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
"""
speechSequence=[]
if index is not None:
speechSequence.append(IndexCommand(index))
if text is not None:
if isBlank(text):
# Translators: This is spoken when the line is considered blank.
text=_("blank")
speechSequence.append(text)
speak(speechSequence,symbolLevel=symbolLevel)
RE_INDENTATION_SPLIT = re.compile(r"^([^\S\r\n\f\v]*)(.*)$", re.UNICODE | re.DOTALL)
def splitTextIndentation(text):
"""Splits indentation from the rest of the text.
@param text: The text to split.
@type text: basestring
@return: Tuple of indentation and content.
@rtype: (basestring, basestring)
"""
return RE_INDENTATION_SPLIT.match(text).groups()
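# Illustrative example (not part of the original source):
# splitTextIndentation(u"\t  if x:") returns (u"\t  ", u"if x:"); the first
# group captures any leading horizontal whitespace and the second captures
# everything else, including line breaks thanks to re.DOTALL.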
RE_INDENTATION_CONVERT = re.compile(r"(?P<char>\s)(?P=char)*", re.UNICODE)
IDT_BASE_FREQUENCY = 220 #One octave below middle A.
IDT_TONE_DURATION = 80 #Milliseconds
IDT_MAX_SPACES = 72
def getIndentationSpeech(indentation, formatConfig):
"""Retrieves the phrase to be spoken for a given string of indentation.
@param indentation: The string of indentation.
@type indentation: unicode
@param formatConfig: The configuration to use.
@type formatConfig: dict
@return: The phrase to be spoken.
@rtype: unicode
"""
speechIndentConfig = formatConfig["reportLineIndentation"]
toneIndentConfig = formatConfig["reportLineIndentationWithTones"] and speechMode == speechMode_talk
if not indentation:
if toneIndentConfig:
tones.beep(IDT_BASE_FREQUENCY, IDT_TONE_DURATION)
# Translators: This is spoken when the given line has no indentation.
return (_("no indent") if speechIndentConfig else "")
#The non-breaking space is semantically a space, so we replace it here.
indentation = indentation.replace(u"\xa0", u" ")
res = []
locale=languageHandler.getLanguage()
quarterTones = 0
for m in RE_INDENTATION_CONVERT.finditer(indentation):
raw = m.group()
symbol = characterProcessing.processSpeechSymbol(locale, raw[0])
count = len(raw)
if symbol == raw[0]:
# There is no replacement for this character, so do nothing.
res.append(raw)
elif count == 1:
res.append(symbol)
else:
res.append(u"{count} {symbol}".format(count=count, symbol=symbol))
quarterTones += (count*4 if raw[0]== "\t" else count)
speak = speechIndentConfig
if toneIndentConfig:
if quarterTones <= IDT_MAX_SPACES:
#Remove me during speech refactor.
pitch = IDT_BASE_FREQUENCY*2**(quarterTones/24.0) #24 quarter tones per octave.
tones.beep(pitch, IDT_TONE_DURATION)
else:
			#we have more than 72 spaces (18 tabs), and must speak the indentation since we don't want to hurt the user's ears.
speak = True
return (" ".join(res) if speak else "")
def speak(speechSequence,symbolLevel=None):
"""Speaks a sequence of text and speech commands
@param speechSequence: the sequence of text and L{SpeechCommand} objects to speak
@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
"""
if not speechSequence: #Pointless - nothing to speak
return
import speechViewer
if speechViewer.isActive:
for item in speechSequence:
if isinstance(item, basestring):
speechViewer.appendText(item)
global beenCanceled, curWordChars
curWordChars=[]
if speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
return
if isPaused:
cancelSpeech()
beenCanceled=False
#Filter out redundant LangChangeCommand objects
#And also fill in default values
autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
autoDialectSwitching=config.conf['speech']['autoDialectSwitching']
curLanguage=defaultLanguage=getCurrentLanguage()
prevLanguage=None
defaultLanguageRoot=defaultLanguage.split('_')[0]
oldSpeechSequence=speechSequence
speechSequence=[]
for item in oldSpeechSequence:
if isinstance(item,LangChangeCommand):
if not autoLanguageSwitching: continue
curLanguage=item.lang
if not curLanguage or (not autoDialectSwitching and curLanguage.split('_')[0]==defaultLanguageRoot):
curLanguage=defaultLanguage
elif isinstance(item,basestring):
if not item: continue
if autoLanguageSwitching and curLanguage!=prevLanguage:
speechSequence.append(LangChangeCommand(curLanguage))
prevLanguage=curLanguage
speechSequence.append(item)
else:
speechSequence.append(item)
if not speechSequence:
# After normalisation, the sequence is empty.
# There's nothing to speak.
return
log.io("Speaking %r" % speechSequence)
if symbolLevel is None:
symbolLevel=config.conf["speech"]["symbolLevel"]
curLanguage=defaultLanguage
inCharacterMode=False
for index in xrange(len(speechSequence)):
item=speechSequence[index]
if isinstance(item,CharacterModeCommand):
inCharacterMode=item.state
if autoLanguageSwitching and isinstance(item,LangChangeCommand):
curLanguage=item.lang
if isinstance(item,basestring):
speechSequence[index]=processText(curLanguage,item,symbolLevel)
if not inCharacterMode:
speechSequence[index]+=CHUNK_SEPARATOR
getSynth().speak(speechSequence)
def speakSelectionMessage(message,text):
if len(text) < 512:
speakMessage(message % text)
else:
# Translators: This is spoken when the user has selected a large portion of text. Example output "1000 characters"
speakMessage(message % _("%d characters") % len(text))
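# Illustrative usage (not part of the original source): a call such as
# speakSelectionMessage(_("%s selected"), text) speaks the text itself when it
# is shorter than 512 characters, and otherwise substitutes a character count,
# producing output like "1000 characters selected".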
def speakSelectionChange(oldInfo,newInfo,speakSelected=True,speakUnselected=True,generalize=False):
"""Speaks a change in selection, either selected or unselected text.
@param oldInfo: a TextInfo instance representing what the selection was before
@type oldInfo: L{textInfos.TextInfo}
@param newInfo: a TextInfo instance representing what the selection is now
@type newInfo: L{textInfos.TextInfo}
@param generalize: if True, then this function knows that the text may have changed between the creation of the oldInfo and newInfo objects, meaning that changes need to be spoken more generally, rather than speaking the specific text, as the bounds may be all wrong.
@type generalize: boolean
"""
selectedTextList=[]
unselectedTextList=[]
if newInfo.isCollapsed and oldInfo.isCollapsed:
return
startToStart=newInfo.compareEndPoints(oldInfo,"startToStart")
startToEnd=newInfo.compareEndPoints(oldInfo,"startToEnd")
endToStart=newInfo.compareEndPoints(oldInfo,"endToStart")
endToEnd=newInfo.compareEndPoints(oldInfo,"endToEnd")
if speakSelected and oldInfo.isCollapsed:
selectedTextList.append(newInfo.text)
elif speakUnselected and newInfo.isCollapsed:
unselectedTextList.append(oldInfo.text)
else:
if startToEnd>0 or endToStart<0:
if speakSelected and not newInfo.isCollapsed:
selectedTextList.append(newInfo.text)
if speakUnselected and not oldInfo.isCollapsed:
unselectedTextList.append(oldInfo.text)
else:
if speakSelected and startToStart<0 and not newInfo.isCollapsed:
tempInfo=newInfo.copy()
tempInfo.setEndPoint(oldInfo,"endToStart")
selectedTextList.append(tempInfo.text)
if speakSelected and endToEnd>0 and not newInfo.isCollapsed:
tempInfo=newInfo.copy()
tempInfo.setEndPoint(oldInfo,"startToEnd")
selectedTextList.append(tempInfo.text)
if startToStart>0 and not oldInfo.isCollapsed:
tempInfo=oldInfo.copy()
tempInfo.setEndPoint(newInfo,"endToStart")
unselectedTextList.append(tempInfo.text)
if endToEnd<0 and not oldInfo.isCollapsed:
tempInfo=oldInfo.copy()
tempInfo.setEndPoint(newInfo,"startToEnd")
unselectedTextList.append(tempInfo.text)
locale=getCurrentLanguage()
if speakSelected:
if not generalize:
for text in selectedTextList:
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken while the user is in the process of selecting something, For example: "hello selected"
speakSelectionMessage(_("%s selected"),text)
elif len(selectedTextList)>0:
text=newInfo.text
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
speakSelectionMessage(_("selected %s"),text)
if speakUnselected:
if not generalize:
for text in unselectedTextList:
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate what has been unselected. for example 'hello unselected'
speakSelectionMessage(_("%s unselected"),text)
elif len(unselectedTextList)>0:
if not newInfo.isCollapsed:
text=newInfo.text
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate when the previous selection was removed and a new selection was made. for example 'hello world selected instead'
speakSelectionMessage(_("%s selected instead"),text)
else:
# Translators: Reported when selection is removed.
speakMessage(_("selection removed"))
#: The number of typed characters for which to suppress speech.
_suppressSpeakTypedCharactersNumber = 0
#: The time at which suppressed typed characters were sent.
_suppressSpeakTypedCharactersTime = None
def _suppressSpeakTypedCharacters(number):
"""Suppress speaking of typed characters.
This should be used when sending a string of characters to the system
and those characters should not be spoken individually as if the user were typing them.
@param number: The number of characters to suppress.
@type number: int
"""
global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
_suppressSpeakTypedCharactersNumber += number
_suppressSpeakTypedCharactersTime = time.time()
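# Illustrative usage (hypothetical caller, not part of the original source):
# an input handler that is about to send the string "hello" to the system
# could call _suppressSpeakTypedCharacters(5) so the five generated key
# presses are not echoed; speakTypedCharacters below enforces a 0.1 second
# timeout so a stale count cannot silence genuine typing.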
#: The character to use when masking characters in protected fields.
PROTECTED_CHAR = "*"
#: The first character which is not a Unicode control character.
#: This is used to test whether a character should be spoken as a typed character;
#: i.e. it should have a visual or spatial representation.
FIRST_NONCONTROL_CHAR = u" "
def speakTypedCharacters(ch):
global curWordChars
typingIsProtected=api.isTypingProtected()
if typingIsProtected:
realChar=PROTECTED_CHAR
else:
realChar=ch
if unicodedata.category(ch)[0] in "LMN":
curWordChars.append(realChar)
elif ch=="\b":
# Backspace, so remove the last character from our buffer.
del curWordChars[-1:]
elif ch==u'\u007f':
# delete character produced in some apps with control+backspace
return
elif len(curWordChars)>0:
typedWord="".join(curWordChars)
curWordChars=[]
if log.isEnabledFor(log.IO):
log.io("typed word: %s"%typedWord)
if config.conf["keyboard"]["speakTypedWords"] and not typingIsProtected:
speakText(typedWord)
global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
if _suppressSpeakTypedCharactersNumber > 0:
# We primarily suppress based on character count and still have characters to suppress.
# However, we time out after a short while just in case.
suppress = time.time() - _suppressSpeakTypedCharactersTime <= 0.1
if suppress:
_suppressSpeakTypedCharactersNumber -= 1
else:
_suppressSpeakTypedCharactersNumber = 0
_suppressSpeakTypedCharactersTime = None
else:
suppress = False
if not suppress and config.conf["keyboard"]["speakTypedCharacters"] and ch >= FIRST_NONCONTROL_CHAR:
speakSpelling(realChar)
class SpeakTextInfoState(object):
"""Caches the state of speakTextInfo such as the current controlField stack, current formatfield and indentation."""
__slots__=[
'objRef',
'controlFieldStackCache',
'formatFieldAttributesCache',
'indentationCache',
]
def __init__(self,obj):
if isinstance(obj,SpeakTextInfoState):
oldState=obj
self.objRef=oldState.objRef
else:
self.objRef=weakref.ref(obj)
oldState=getattr(obj,'_speakTextInfoState',None)
self.controlFieldStackCache=list(oldState.controlFieldStackCache) if oldState else []
self.formatFieldAttributesCache=oldState.formatFieldAttributesCache if oldState else {}
self.indentationCache=oldState.indentationCache if oldState else ""
def updateObj(self):
obj=self.objRef()
if obj:
obj._speakTextInfoState=self.copy()
def copy(self):
return self.__class__(self)
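# Illustrative usage (not part of the original source): a caller may pass a
# SpeakTextInfoState instance as the useCache argument of speakTextInfo to
# manage the control field and format field caches explicitly, for example
# when speaking several chunks of the same object in one pass.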
def _speakTextInfo_addMath(speechSequence, info, field):
import mathPres
mathPres.ensureInit()
if not mathPres.speechProvider:
return
try:
speechSequence.extend(mathPres.speechProvider.getSpeechForMathMl(info.getMathMl(field)))
except (NotImplementedError, LookupError):
return
def speakTextInfo(info,useCache=True,formatConfig=None,unit=None,reason=controlTypes.REASON_QUERY,index=None,onlyInitialFields=False,suppressBlanks=False):
onlyCache=reason==controlTypes.REASON_ONLYCACHE
if isinstance(useCache,SpeakTextInfoState):
speakTextInfoState=useCache
elif useCache:
speakTextInfoState=SpeakTextInfoState(info.obj)
else:
speakTextInfoState=None
autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
extraDetail=unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD)
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
if extraDetail:
formatConfig=formatConfig.copy()
formatConfig['extraDetail']=True
reportIndentation=unit==textInfos.UNIT_LINE and ( formatConfig["reportLineIndentation"] or formatConfig["reportLineIndentationWithTones"])
speechSequence=[]
#Fetch the last controlFieldStack, or make a blank one
controlFieldStackCache=speakTextInfoState.controlFieldStackCache if speakTextInfoState else []
formatFieldAttributesCache=speakTextInfoState.formatFieldAttributesCache if speakTextInfoState else {}
textWithFields=info.getTextWithFields(formatConfig)
# We don't care about node bounds, especially when comparing fields.
# Remove them.
for command in textWithFields:
if not isinstance(command,textInfos.FieldCommand):
continue
field=command.field
if not field:
continue
try:
del field["_startOfNode"]
except KeyError:
pass
try:
del field["_endOfNode"]
except KeyError:
pass
#Make a new controlFieldStack and formatField from the textInfo's initialFields
newControlFieldStack=[]
newFormatField=textInfos.FormatField()
initialFields=[]
for field in textWithFields:
if isinstance(field,textInfos.FieldCommand) and field.command in ("controlStart","formatChange"):
initialFields.append(field.field)
else:
break
if len(initialFields)>0:
del textWithFields[0:len(initialFields)]
endFieldCount=0
for field in reversed(textWithFields):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd":
endFieldCount+=1
else:
break
if endFieldCount>0:
del textWithFields[0-endFieldCount:]
for field in initialFields:
if isinstance(field,textInfos.ControlField):
newControlFieldStack.append(field)
elif isinstance(field,textInfos.FormatField):
newFormatField.update(field)
else:
raise ValueError("unknown field: %s"%field)
#Calculate how many fields in the old and new controlFieldStacks are the same
commonFieldCount=0
for count in xrange(min(len(newControlFieldStack),len(controlFieldStackCache))):
		# #2199: When comparing controlFields, try using uniqueID if it exists before resorting to comparing the entire dictionary
oldUniqueID=controlFieldStackCache[count].get('uniqueID')
newUniqueID=newControlFieldStack[count].get('uniqueID')
if ((oldUniqueID is not None or newUniqueID is not None) and newUniqueID==oldUniqueID) or (newControlFieldStack[count]==controlFieldStackCache[count]):
commonFieldCount+=1
else:
break
	# #2591: Only if the reason is not focus, speak the exit of any controlFields not in the new stack.
# We don't do this for focus because hearing "out of list", etc. isn't useful when tabbing or using quick navigation and makes navigation less efficient.
if reason!=controlTypes.REASON_FOCUS:
endingBlock=False
for count in reversed(xrange(commonFieldCount,len(controlFieldStackCache))):
text=info.getControlFieldSpeech(controlFieldStackCache[count],controlFieldStackCache[0:count],"end_removedFromControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
if not endingBlock and reason==controlTypes.REASON_SAYALL:
endingBlock=bool(int(controlFieldStackCache[count].get('isBlock',0)))
if endingBlock:
speechSequence.append(SpeakWithoutPausesBreakCommand())
# The TextInfo should be considered blank if we are only exiting fields (i.e. we aren't entering any new fields and there is no text).
isTextBlank=True
# Even when there's no speakable text, we still need to notify the synth of the index.
if index is not None:
speechSequence.append(IndexCommand(index))
#Get speech text for any fields that are in both controlFieldStacks, if extra detail is not requested
if not extraDetail:
for count in xrange(commonFieldCount):
field=newControlFieldStack[count]
text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_inControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
if field.get("role")==controlTypes.ROLE_MATH:
isTextBlank=False
_speakTextInfo_addMath(speechSequence,info,field)
#Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack
for count in xrange(commonFieldCount,len(newControlFieldStack)):
field=newControlFieldStack[count]
text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_addedToControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
if field.get("role")==controlTypes.ROLE_MATH:
isTextBlank=False
_speakTextInfo_addMath(speechSequence,info,field)
commonFieldCount+=1
#Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField.
text=info.getFormatFieldSpeech(newFormatField,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail,initialFormat=True)
if text:
speechSequence.append(text)
if autoLanguageSwitching:
language=newFormatField.get('language')
speechSequence.append(LangChangeCommand(language))
lastLanguage=language
if onlyInitialFields or (unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD) and len(textWithFields)>0 and len(textWithFields[0])==1 and all((isinstance(x,textInfos.FieldCommand) and x.command=="controlEnd") for x in itertools.islice(textWithFields,1,None) )):
if not onlyCache:
if onlyInitialFields or any(isinstance(x,basestring) for x in speechSequence):
speak(speechSequence)
if not onlyInitialFields:
speakSpelling(textWithFields[0],locale=language if autoLanguageSwitching else None)
if useCache:
speakTextInfoState.controlFieldStackCache=newControlFieldStack
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
return
#Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands
#But also keep newControlFieldStack up to date as we will need it for the ends
# Add any text to a separate list, as it must be handled differently.
#Also make sure that LangChangeCommand objects are added before any controlField or formatField speech
relativeSpeechSequence=[]
inTextChunk=False
allIndentation=""
indentationDone=False
for command in textWithFields:
if isinstance(command,basestring):
if reportIndentation and not indentationDone:
indentation,command=splitTextIndentation(command)
# Combine all indentation into one string for later processing.
allIndentation+=indentation
if command:
# There was content after the indentation, so there is no more indentation.
indentationDone=True
if command:
if inTextChunk:
relativeSpeechSequence[-1]+=command
else:
relativeSpeechSequence.append(command)
inTextChunk=True
elif isinstance(command,textInfos.FieldCommand):
newLanguage=None
if command.command=="controlStart":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldText=info.getControlFieldSpeech(command.field,newControlFieldStack,"start_relative",formatConfig,extraDetail,reason=reason)
newControlFieldStack.append(command.field)
elif command.command=="controlEnd":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldText=info.getControlFieldSpeech(newControlFieldStack[-1],newControlFieldStack[0:-1],"end_relative",formatConfig,extraDetail,reason=reason)
del newControlFieldStack[-1]
if commonFieldCount>len(newControlFieldStack):
commonFieldCount=len(newControlFieldStack)
elif command.command=="formatChange":
fieldText=info.getFormatFieldSpeech(command.field,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail)
if fieldText:
inTextChunk=False
if autoLanguageSwitching:
newLanguage=command.field.get('language')
if lastLanguage!=newLanguage:
# The language has changed, so this starts a new text chunk.
inTextChunk=False
if not inTextChunk:
if fieldText:
if autoLanguageSwitching and lastLanguage is not None:
# Fields must be spoken in the default language.
relativeSpeechSequence.append(LangChangeCommand(None))
lastLanguage=None
relativeSpeechSequence.append(fieldText)
if command.command=="controlStart" and command.field.get("role")==controlTypes.ROLE_MATH:
_speakTextInfo_addMath(relativeSpeechSequence,info,command.field)
if autoLanguageSwitching and newLanguage!=lastLanguage:
relativeSpeechSequence.append(LangChangeCommand(newLanguage))
lastLanguage=newLanguage
if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache:
indentationSpeech=getIndentationSpeech(allIndentation, formatConfig)
if autoLanguageSwitching and speechSequence[-1].lang is not None:
# Indentation must be spoken in the default language,
# but the initial format field specified a different language.
# Insert the indentation before the LangChangeCommand.
speechSequence.insert(-1, indentationSpeech)
else:
speechSequence.append(indentationSpeech)
if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation
# Don't add this text if it is blank.
relativeBlank=True
for x in relativeSpeechSequence:
if isinstance(x,basestring) and not isBlank(x):
relativeBlank=False
break
if not relativeBlank:
speechSequence.extend(relativeSpeechSequence)
isTextBlank=False
#Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested
if autoLanguageSwitching and lastLanguage is not None:
speechSequence.append(LangChangeCommand(None))
lastLanguage=None
if not extraDetail:
for count in reversed(xrange(min(len(newControlFieldStack),commonFieldCount))):
text=info.getControlFieldSpeech(newControlFieldStack[count],newControlFieldStack[0:count],"end_inControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
# If there is nothing that should cause the TextInfo to be considered non-blank, blank should be reported, unless we are doing a say all.
if not suppressBlanks and reason != controlTypes.REASON_SAYALL and isTextBlank:
# Translators: This is spoken when the line is considered blank.
speechSequence.append(_("blank"))
#Cache a copy of the new controlFieldStack for future use
if useCache:
speakTextInfoState.controlFieldStackCache=list(newControlFieldStack)
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
if not onlyCache and speechSequence:
if reason==controlTypes.REASON_SAYALL:
speakWithoutPauses(speechSequence)
else:
speak(speechSequence)
def getSpeechTextForProperties(reason=controlTypes.REASON_QUERY,**propertyValues):
global oldTreeLevel, oldTableID, oldRowNumber, oldRowSpan, oldColumnNumber, oldColumnSpan
textList=[]
name=propertyValues.get('name')
if name:
textList.append(name)
if 'role' in propertyValues:
role=propertyValues['role']
speakRole=True
elif '_role' in propertyValues:
speakRole=False
role=propertyValues['_role']
else:
speakRole=False
role=controlTypes.ROLE_UNKNOWN
value=propertyValues.get('value') if role not in controlTypes.silentValuesForRoles else None
cellCoordsText=propertyValues.get('cellCoordsText')
rowNumber=propertyValues.get('rowNumber')
columnNumber=propertyValues.get('columnNumber')
includeTableCellCoords=propertyValues.get('includeTableCellCoords',True)
if role==controlTypes.ROLE_CHARTELEMENT:
speakRole=False
roleText=propertyValues.get('roleText')
if speakRole and (roleText or reason not in (controlTypes.REASON_SAYALL,controlTypes.REASON_CARET,controlTypes.REASON_FOCUS) or not (name or value or cellCoordsText or rowNumber or columnNumber) or role not in controlTypes.silentRolesOnFocus) and (role!=controlTypes.ROLE_MATH or reason not in (controlTypes.REASON_CARET,controlTypes.REASON_SAYALL)):
textList.append(roleText if roleText else controlTypes.roleLabels[role])
if value:
textList.append(value)
states=propertyValues.get('states',set())
realStates=propertyValues.get('_states',states)
negativeStates=propertyValues.get('negativeStates',set())
if states or negativeStates:
textList.extend(controlTypes.processAndLabelStates(role, realStates, reason, states, negativeStates))
if 'description' in propertyValues:
textList.append(propertyValues['description'])
if 'keyboardShortcut' in propertyValues:
textList.append(propertyValues['keyboardShortcut'])
if includeTableCellCoords and cellCoordsText:
textList.append(cellCoordsText)
if cellCoordsText or rowNumber or columnNumber:
tableID = propertyValues.get("_tableID")
# Always treat the table as different if there is no tableID.
sameTable = (tableID and tableID == oldTableID)
# Don't update the oldTableID if no tableID was given.
if tableID and not sameTable:
oldTableID = tableID
rowSpan = propertyValues.get("rowSpan")
columnSpan = propertyValues.get("columnSpan")
if rowNumber and (not sameTable or rowNumber != oldRowNumber or rowSpan != oldRowSpan):
rowHeaderText = propertyValues.get("rowHeaderText")
if rowHeaderText:
textList.append(rowHeaderText)
if includeTableCellCoords and not cellCoordsText:
# Translators: Speaks current row number (example output: row 3).
textList.append(_("row %s")%rowNumber)
if rowSpan>1 and columnSpan<=1:
# Translators: Speaks the row span added to the current row number (example output: through 5).
textList.append(_("through %s")%(rowNumber+rowSpan-1))
oldRowNumber = rowNumber
oldRowSpan = rowSpan
if columnNumber and (not sameTable or columnNumber != oldColumnNumber or columnSpan != oldColumnSpan):
columnHeaderText = propertyValues.get("columnHeaderText")
if columnHeaderText:
textList.append(columnHeaderText)
if includeTableCellCoords and not cellCoordsText:
# Translators: Speaks current column number (example output: column 3).
textList.append(_("column %s")%columnNumber)
if columnSpan>1 and rowSpan<=1:
# Translators: Speaks the column span added to the current column number (example output: through 5).
textList.append(_("through %s")%(columnNumber+columnSpan-1))
oldColumnNumber = columnNumber
oldColumnSpan = columnSpan
if includeTableCellCoords and not cellCoordsText and rowSpan>1 and columnSpan>1:
# Translators: Speaks the row and column span added to the current row and column numbers
# (example output: through row 5 column 3).
textList.append(_("through row {row} column {column}").format(
row=rowNumber+rowSpan-1,
column=columnNumber+columnSpan-1
))
rowCount=propertyValues.get('rowCount',0)
columnCount=propertyValues.get('columnCount',0)
if rowCount and columnCount:
# Translators: Speaks number of columns and rows in a table (example output: with 3 rows and 2 columns).
textList.append(_("with {rowCount} rows and {columnCount} columns").format(rowCount=rowCount,columnCount=columnCount))
elif columnCount and not rowCount:
# Translators: Speaks number of columns (example output: with 4 columns).
textList.append(_("with %s columns")%columnCount)
elif rowCount and not columnCount:
# Translators: Speaks number of rows (example output: with 2 rows).
textList.append(_("with %s rows")%rowCount)
if rowCount or columnCount:
# The caller is entering a table, so ensure that it is treated as a new table, even if the previous table was the same.
oldTableID = None
ariaCurrent = propertyValues.get('current', False)
if ariaCurrent:
try:
textList.append(controlTypes.isCurrentLabels[ariaCurrent])
except KeyError:
log.debugWarning("Aria-current value not handled: %s"%ariaCurrent)
textList.append(controlTypes.isCurrentLabels[True])
placeholder = propertyValues.get('placeholder', None)
if placeholder:
textList.append(placeholder)
indexInGroup=propertyValues.get('positionInfo_indexInGroup',0)
similarItemsInGroup=propertyValues.get('positionInfo_similarItemsInGroup',0)
if 0<indexInGroup<=similarItemsInGroup:
# Translators: Spoken to indicate the position of an item in a group of items (such as a list).
# {number} is replaced with the number of the item in the group.
# {total} is replaced with the total number of items in the group.
textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup))
if 'positionInfo_level' in propertyValues:
level=propertyValues.get('positionInfo_level',None)
role=propertyValues.get('role',None)
if level is not None:
if role in (controlTypes.ROLE_TREEVIEWITEM,controlTypes.ROLE_LISTITEM) and level!=oldTreeLevel:
textList.insert(0,_("level %s")%level)
oldTreeLevel=level
else:
# Translators: Speaks the item level in treeviews (example output: level 2).
textList.append(_('level %s')%propertyValues['positionInfo_level'])
return CHUNK_SEPARATOR.join([x for x in textList if x])
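# Illustrative example (not part of the original source): a call such as
# getSpeechTextForProperties(reason=controlTypes.REASON_QUERY, name=u"OK",
# role=controlTypes.ROLE_BUTTON) would produce roughly u"OK  button" - the
# chunks joined with CHUNK_SEPARATOR - though the exact role label depends on
# controlTypes.roleLabels and the active translation.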
def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraDetail=False,reason=None):
if attrs.get('isHidden'):
return u""
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
presCat=attrs.getPresentationCategory(ancestorAttrs,formatConfig, reason=reason)
childControlCount=int(attrs.get('_childcontrolcount',"0"))
if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportName',False):
name=attrs.get('name',"")
else:
name=""
role=attrs.get('role',controlTypes.ROLE_UNKNOWN)
states=attrs.get('states',set())
keyboardShortcut=attrs.get('keyboardShortcut', "")
ariaCurrent=attrs.get('current', None)
placeholderValue=attrs.get('placeholder', None)
value=attrs.get('value',"")
if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportDescription',False):
description=attrs.get('description',"")
else:
description=""
level=attrs.get('level',None)
if presCat != attrs.PRESCAT_LAYOUT:
tableID = attrs.get("table-id")
else:
tableID = None
roleText=attrs.get('roleText')
if not roleText:
roleText=getSpeechTextForProperties(reason=reason,role=role)
stateText=getSpeechTextForProperties(reason=reason,states=states,_role=role)
keyboardShortcutText=getSpeechTextForProperties(reason=reason,keyboardShortcut=keyboardShortcut) if config.conf["presentation"]["reportKeyboardShortcuts"] else ""
ariaCurrentText=getSpeechTextForProperties(reason=reason,current=ariaCurrent)
placeholderText=getSpeechTextForProperties(reason=reason,placeholder=placeholderValue)
nameText=getSpeechTextForProperties(reason=reason,name=name)
valueText=getSpeechTextForProperties(reason=reason,value=value)
descriptionText=(getSpeechTextForProperties(reason=reason,description=description)
if config.conf["presentation"]["reportObjectDescriptions"] else "")
levelText=getSpeechTextForProperties(reason=reason,positionInfo_level=level)
# Determine under what circumstances this node should be spoken.
# speakEntry: Speak when the user enters the control.
# speakWithinForLine: When moving by line, speak when the user is already within the control.
# speakExitForLine: When moving by line, speak when the user exits the control.
# speakExitForOther: When moving by word or character, speak when the user exits the control.
speakEntry=speakWithinForLine=speakExitForLine=speakExitForOther=False
if presCat == attrs.PRESCAT_SINGLELINE:
speakEntry=True
speakWithinForLine=True
speakExitForOther=True
elif presCat in (attrs.PRESCAT_MARKER, attrs.PRESCAT_CELL):
speakEntry=True
elif presCat == attrs.PRESCAT_CONTAINER:
speakEntry=True
speakExitForLine=True
speakExitForOther=True
# Determine the order of speech.
# speakContentFirst: Speak the content before the control field info.
speakContentFirst = reason == controlTypes.REASON_FOCUS and presCat != attrs.PRESCAT_CONTAINER and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX) and not tableID and controlTypes.STATE_EDITABLE not in states
# speakStatesFirst: Speak the states before the role.
speakStatesFirst=role==controlTypes.ROLE_LINK
containerContainsText="" #: used for item counts for lists
# Determine what text to speak.
# Special cases
if childControlCount and fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states:
# List.
# #7652: containerContainsText variable is set here, but the actual generation of all other output is handled further down in the general cases section.
# This ensures that properties such as name, states and level etc still get reported appropriately.
# Translators: Number of items in a list (example output: list with 5 items).
containerContainsText=_("with %s items")%childControlCount
elif fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_TABLE and tableID:
# Table.
return " ".join((nameText,roleText,stateText, getSpeechTextForProperties(_tableID=tableID, rowCount=attrs.get("table-rowcount"), columnCount=attrs.get("table-columncount")),levelText))
elif nameText and reason==controlTypes.REASON_FOCUS and fieldType == "start_addedToControlFieldStack" and role==controlTypes.ROLE_GROUPING:
# #3321: Report the name of groupings (such as fieldsets) for quicknav and focus jumps
return " ".join((nameText,roleText))
elif fieldType in ("start_addedToControlFieldStack","start_relative") and role in (controlTypes.ROLE_TABLECELL,controlTypes.ROLE_TABLECOLUMNHEADER,controlTypes.ROLE_TABLEROWHEADER) and tableID:
# Table cell.
reportTableHeaders = formatConfig["reportTableHeaders"]
reportTableCellCoords = formatConfig["reportTableCellCoords"]
getProps = {
'rowNumber': attrs.get("table-rownumber"),
'columnNumber': attrs.get("table-columnnumber"),
'rowSpan': attrs.get("table-rowsspanned"),
'columnSpan': attrs.get("table-columnsspanned"),
'includeTableCellCoords': reportTableCellCoords
}
if reportTableHeaders:
getProps['rowHeaderText'] = attrs.get("table-rowheadertext")
getProps['columnHeaderText'] = attrs.get("table-columnheadertext")
return (getSpeechTextForProperties(_tableID=tableID, **getProps)
+ (" %s" % stateText if stateText else "")
+ (" %s" % ariaCurrentText if ariaCurrent else ""))
# General cases.
if (
(speakEntry and ((speakContentFirst and fieldType in ("end_relative","end_inControlFieldStack")) or (not speakContentFirst and fieldType in ("start_addedToControlFieldStack","start_relative"))))
or (speakWithinForLine and not speakContentFirst and not extraDetail and fieldType=="start_inControlFieldStack")
):
out = []
content = attrs.get("content")
if content and speakContentFirst:
out.append(content)
if placeholderValue:
if valueText:
log.error("valueText exists when expected none: valueText:'%s' placeholderText:'%s'"%(valueText,placeholderText))
valueText = placeholderText
out.extend(x for x in (nameText,(stateText if speakStatesFirst else roleText),(roleText if speakStatesFirst else stateText),containerContainsText,ariaCurrentText,valueText,descriptionText,levelText,keyboardShortcutText) if x)
if content and not speakContentFirst:
out.append(content)
return CHUNK_SEPARATOR.join(out)
elif fieldType in ("end_removedFromControlFieldStack","end_relative") and roleText and ((not extraDetail and speakExitForLine) or (extraDetail and speakExitForOther)):
# Translators: Indicates end of something (example output: at the end of a list, speaks out of list).
return _("out of %s")%roleText
# Special cases
elif not speakEntry and fieldType in ("start_addedToControlFieldStack","start_relative"):
out = []
if not extraDetail and controlTypes.STATE_CLICKABLE in states:
# Clickable.
out.append(getSpeechTextForProperties(states=set([controlTypes.STATE_CLICKABLE])))
if ariaCurrent:
out.append(ariaCurrentText)
return CHUNK_SEPARATOR.join(out)
else:
return ""
def getFormatFieldSpeech(attrs,attrsCache=None,formatConfig=None,reason=None,unit=None,extraDetail=False , initialFormat=False, separator=CHUNK_SEPARATOR):
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
textList=[]
if formatConfig["reportTables"]:
tableInfo=attrs.get("table-info")
oldTableInfo=attrsCache.get("table-info") if attrsCache is not None else None
text=getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=extraDetail)
if text:
textList.append(text)
if formatConfig["reportPage"]:
pageNumber=attrs.get("page-number")
oldPageNumber=attrsCache.get("page-number") if attrsCache is not None else None
if pageNumber and pageNumber!=oldPageNumber:
# Translators: Indicates the page number in a document.
# %s will be replaced with the page number.
text=_("page %s")%pageNumber
textList.append(text)
sectionNumber=attrs.get("section-number")
oldSectionNumber=attrsCache.get("section-number") if attrsCache is not None else None
if sectionNumber and sectionNumber!=oldSectionNumber:
# Translators: Indicates the section number in a document.
# %s will be replaced with the section number.
text=_("section %s")%sectionNumber
textList.append(text)
textColumnCount=attrs.get("text-column-count")
oldTextColumnCount=attrsCache.get("text-column-count") if attrsCache is not None else None
textColumnNumber=attrs.get("text-column-number")
oldTextColumnNumber=attrsCache.get("text-column-number") if attrsCache is not None else None
	# We do not want to report the number of columns when a document is just opened and there is only
	# one column, as this would be needlessly verbose in the standard case.
	# Report if the column number has changed or the columnCount has changed,
	# but not if the columnCount is 1 or less and there is no old columnCount.
if (((textColumnNumber and textColumnNumber!=oldTextColumnNumber) or
(textColumnCount and textColumnCount!=oldTextColumnCount)) and not
(textColumnCount and int(textColumnCount) <=1 and oldTextColumnCount == None)) :
if textColumnNumber and textColumnCount:
# Translators: Indicates the text column number in a document.
# {0} will be replaced with the text column number.
# {1} will be replaced with the number of text columns.
text=_("column {0} of {1}").format(textColumnNumber,textColumnCount)
textList.append(text)
elif textColumnCount:
# Translators: Indicates the text column number in a document.
# %s will be replaced with the number of text columns.
text=_("%s columns")%(textColumnCount)
textList.append(text)
sectionBreakType=attrs.get("section-break")
if sectionBreakType:
if sectionBreakType == "0" : # Continuous section break.
text=_("continuous section break")
elif sectionBreakType == "1" : # New column section break.
text=_("new column section break")
elif sectionBreakType == "2" : # New page section break.
text=_("new page section break")
elif sectionBreakType == "3" : # Even pages section break.
text=_("even pages section break")
elif sectionBreakType == "4" : # Odd pages section break.
text=_("odd pages section break")
else:
text=""
textList.append(text)
columnBreakType=attrs.get("column-break")
if columnBreakType:
textList.append(_("column break"))
if formatConfig["reportHeadings"]:
headingLevel=attrs.get("heading-level")
oldHeadingLevel=attrsCache.get("heading-level") if attrsCache is not None else None
		# Headings should be spoken not only when they change, but also when beginning to speak lines or paragraphs,
		# ensuring an experience similar to what would occur if the heading were a controlField.
if headingLevel and (initialFormat and (reason==controlTypes.REASON_FOCUS or unit in (textInfos.UNIT_LINE,textInfos.UNIT_PARAGRAPH)) or headingLevel!=oldHeadingLevel):
# Translators: Speaks the heading level (example output: heading level 2).
text=_("heading level %d")%headingLevel
textList.append(text)
if formatConfig["reportStyle"]:
style=attrs.get("style")
oldStyle=attrsCache.get("style") if attrsCache is not None else None
if style!=oldStyle:
if style:
# Translators: Indicates the style of text.
# A style is a collection of formatting settings and depends on the application.
# %s will be replaced with the name of the style.
text=_("style %s")%style
else:
# Translators: Indicates that text has reverted to the default style.
# A style is a collection of formatting settings and depends on the application.
text=_("default style")
textList.append(text)
if formatConfig["reportBorderStyle"]:
borderStyle=attrs.get("border-style")
oldBorderStyle=attrsCache.get("border-style") if attrsCache is not None else None
if borderStyle!=oldBorderStyle:
if borderStyle:
text=borderStyle
else:
				# Translators: Indicates that a cell does not have border lines.
text=_("no border lines")
textList.append(text)
if formatConfig["reportFontName"]:
fontFamily=attrs.get("font-family")
oldFontFamily=attrsCache.get("font-family") if attrsCache is not None else None
if fontFamily and fontFamily!=oldFontFamily:
textList.append(fontFamily)
fontName=attrs.get("font-name")
oldFontName=attrsCache.get("font-name") if attrsCache is not None else None
if fontName and fontName!=oldFontName:
textList.append(fontName)
if formatConfig["reportFontSize"]:
fontSize=attrs.get("font-size")
oldFontSize=attrsCache.get("font-size") if attrsCache is not None else None
if fontSize and fontSize!=oldFontSize:
textList.append(fontSize)
if formatConfig["reportColor"]:
color=attrs.get("color")
oldColor=attrsCache.get("color") if attrsCache is not None else None
backgroundColor=attrs.get("background-color")
oldBackgroundColor=attrsCache.get("background-color") if attrsCache is not None else None
backgroundColor2=attrs.get("background-color2")
oldBackgroundColor2=attrsCache.get("background-color2") if attrsCache is not None else None
bgColorChanged=backgroundColor!=oldBackgroundColor or backgroundColor2!=oldBackgroundColor2
bgColorText=backgroundColor.name if isinstance(backgroundColor,colors.RGB) else unicode(backgroundColor)
if backgroundColor2:
bg2Name=backgroundColor2.name if isinstance(backgroundColor2,colors.RGB) else unicode(backgroundColor2)
# Translators: Reported when there are two background colors.
# This occurs when, for example, a gradient pattern is applied to a spreadsheet cell.
# {color1} will be replaced with the first background color.
# {color2} will be replaced with the second background color.
bgColorText=_("{color1} to {color2}").format(color1=bgColorText,color2=bg2Name)
if color and backgroundColor and color!=oldColor and bgColorChanged:
# Translators: Reported when both the text and background colors change.
# {color} will be replaced with the text color.
# {backgroundColor} will be replaced with the background color.
textList.append(_("{color} on {backgroundColor}").format(
color=color.name if isinstance(color,colors.RGB) else unicode(color),
backgroundColor=bgColorText))
elif color and color!=oldColor:
# Translators: Reported when the text color changes (but not the background color).
# {color} will be replaced with the text color.
textList.append(_("{color}").format(color=color.name if isinstance(color,colors.RGB) else unicode(color)))
elif backgroundColor and bgColorChanged:
# Translators: Reported when the background color changes (but not the text color).
# {backgroundColor} will be replaced with the background color.
textList.append(_("{backgroundColor} background").format(backgroundColor=bgColorText))
backgroundPattern=attrs.get("background-pattern")
oldBackgroundPattern=attrsCache.get("background-pattern") if attrsCache is not None else None
		if backgroundPattern and backgroundPattern!=oldBackgroundPattern:
			# Translators: Reported when the background pattern of text changes.
			# {pattern} will be replaced with the name of the background pattern.
			textList.append(_("background pattern {pattern}").format(pattern=backgroundPattern))
if formatConfig["reportLineNumber"]:
lineNumber=attrs.get("line-number")
oldLineNumber=attrsCache.get("line-number") if attrsCache is not None else None
if lineNumber is not None and lineNumber!=oldLineNumber:
# Translators: Indicates the line number of the text.
# %s will be replaced with the line number.
text=_("line %s")%lineNumber
textList.append(text)
if formatConfig["reportRevisions"]:
# Insertion
revision=attrs.get("revision-insertion")
oldRevision=attrsCache.get("revision-insertion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been inserted
text=(_("inserted") if revision
# Translators: Reported when text is no longer marked as having been inserted.
else _("not inserted"))
textList.append(text)
revision=attrs.get("revision-deletion")
oldRevision=attrsCache.get("revision-deletion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been deleted
text=(_("deleted") if revision
# Translators: Reported when text is no longer marked as having been deleted.
else _("not deleted"))
textList.append(text)
revision=attrs.get("revision")
oldRevision=attrsCache.get("revision") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is revised.
text=(_("revised %s"%revision) if revision
# Translators: Reported when text is not revised.
else _("no revised %s")%oldRevision)
textList.append(text)
if formatConfig["reportEmphasis"]:
# marked text
marked=attrs.get("marked")
oldMarked=attrsCache.get("marked") if attrsCache is not None else None
if (marked or oldMarked is not None) and marked!=oldMarked:
# Translators: Reported when text is marked
text=(_("marked") if marked
# Translators: Reported when text is no longer marked
else _("not marked"))
textList.append(text)
# strong text
strong=attrs.get("strong")
oldStrong=attrsCache.get("strong") if attrsCache is not None else None
if (strong or oldStrong is not None) and strong!=oldStrong:
# Translators: Reported when text is marked as strong (e.g. bold)
text=(_("strong") if strong
# Translators: Reported when text is no longer marked as strong (e.g. bold)
else _("not strong"))
textList.append(text)
# emphasised text
emphasised=attrs.get("emphasised")
oldEmphasised=attrsCache.get("emphasised") if attrsCache is not None else None
if (emphasised or oldEmphasised is not None) and emphasised!=oldEmphasised:
# Translators: Reported when text is marked as emphasised
text=(_("emphasised") if emphasised
# Translators: Reported when text is no longer marked as emphasised
else _("not emphasised"))
textList.append(text)
if formatConfig["reportFontAttributes"]:
bold=attrs.get("bold")
oldBold=attrsCache.get("bold") if attrsCache is not None else None
if (bold or oldBold is not None) and bold!=oldBold:
# Translators: Reported when text is bolded.
text=(_("bold") if bold
# Translators: Reported when text is not bolded.
else _("no bold"))
textList.append(text)
italic=attrs.get("italic")
oldItalic=attrsCache.get("italic") if attrsCache is not None else None
if (italic or oldItalic is not None) and italic!=oldItalic:
# Translators: Reported when text is italicized.
text=(_("italic") if italic
# Translators: Reported when text is not italicized.
else _("no italic"))
textList.append(text)
strikethrough=attrs.get("strikethrough")
oldStrikethrough=attrsCache.get("strikethrough") if attrsCache is not None else None
if (strikethrough or oldStrikethrough is not None) and strikethrough!=oldStrikethrough:
if strikethrough:
# Translators: Reported when text is formatted with double strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=(_("double strikethrough") if strikethrough=="double"
# Translators: Reported when text is formatted with strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
else _("strikethrough"))
else:
# Translators: Reported when text is formatted without strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=_("no strikethrough")
textList.append(text)
underline=attrs.get("underline")
oldUnderline=attrsCache.get("underline") if attrsCache is not None else None
if (underline or oldUnderline is not None) and underline!=oldUnderline:
# Translators: Reported when text is underlined.
text=(_("underlined") if underline
# Translators: Reported when text is not underlined.
else _("not underlined"))
textList.append(text)
textPosition=attrs.get("text-position")
oldTextPosition=attrsCache.get("text-position") if attrsCache is not None else None
if (textPosition or oldTextPosition is not None) and textPosition!=oldTextPosition:
textPosition=textPosition.lower() if textPosition else textPosition
if textPosition=="super":
# Translators: Reported for superscript text.
text=_("superscript")
elif textPosition=="sub":
# Translators: Reported for subscript text.
text=_("subscript")
else:
# Translators: Reported for text which is at the baseline position;
# i.e. not superscript or subscript.
text=_("baseline")
textList.append(text)
if formatConfig["reportAlignment"]:
textAlign=attrs.get("text-align")
oldTextAlign=attrsCache.get("text-align") if attrsCache is not None else None
if (textAlign or oldTextAlign is not None) and textAlign!=oldTextAlign:
textAlign=textAlign.lower() if textAlign else textAlign
if textAlign=="left":
# Translators: Reported when text is left-aligned.
text=_("align left")
elif textAlign=="center":
# Translators: Reported when text is centered.
text=_("align center")
elif textAlign=="right":
# Translators: Reported when text is right-aligned.
text=_("align right")
elif textAlign=="justify":
# Translators: Reported when text is justified.
# See http://en.wikipedia.org/wiki/Typographic_alignment#Justified
text=_("align justify")
elif textAlign=="distribute":
# Translators: Reported when text is justified with character spacing (Japanese etc)
# See http://kohei.us/2010/01/21/distributed-text-justification/
text=_("align distributed")
else:
# Translators: Reported when text has reverted to default alignment.
text=_("align default")
textList.append(text)
verticalAlign=attrs.get("vertical-align")
oldverticalAlign=attrsCache.get("vertical-align") if attrsCache is not None else None
if (verticalAlign or oldverticalAlign is not None) and verticalAlign!=oldverticalAlign:
verticalAlign=verticalAlign.lower() if verticalAlign else verticalAlign
if verticalAlign=="top":
# Translators: Reported when text is vertically top-aligned.
text=_("vertical align top")
elif verticalAlign in("center","middle"):
# Translators: Reported when text is vertically middle aligned.
text=_("vertical align middle")
elif verticalAlign=="bottom":
# Translators: Reported when text is vertically bottom-aligned.
text=_("vertical align bottom")
elif verticalAlign=="baseline":
# Translators: Reported when text is vertically aligned on the baseline.
text=_("vertical align baseline")
elif verticalAlign=="justify":
# Translators: Reported when text is vertically justified.
text=_("vertical align justified")
elif verticalAlign=="distributed":
# Translators: Reported when text is vertically justified but with character spacing (For some Asian content).
text=_("vertical align distributed")
else:
# Translators: Reported when text has reverted to default vertical alignment.
text=_("vertical align default")
textList.append(text)
if formatConfig["reportParagraphIndentation"]:
indentLabels={
'left-indent':(
# Translators: the label for paragraph format left indent
_("left indent"),
# Translators: the message when there is no paragraph format left indent
_("no left indent"),
),
'right-indent':(
# Translators: the label for paragraph format right indent
_("right indent"),
# Translators: the message when there is no paragraph format right indent
_("no right indent"),
),
'hanging-indent':(
# Translators: the label for paragraph format hanging indent
_("hanging indent"),
# Translators: the message when there is no paragraph format hanging indent
_("no hanging indent"),
),
'first-line-indent':(
# Translators: the label for paragraph format first line indent
_("first line indent"),
# Translators: the message when there is no paragraph format first line indent
_("no first line indent"),
),
}
for attr,(label,noVal) in indentLabels.iteritems():
newVal=attrs.get(attr)
oldVal=attrsCache.get(attr) if attrsCache else None
if (newVal or oldVal is not None) and newVal!=oldVal:
if newVal:
textList.append(u"%s %s"%(label,newVal))
else:
textList.append(noVal)
if formatConfig["reportLineSpacing"]:
lineSpacing=attrs.get("line-spacing")
oldLineSpacing=attrsCache.get("line-spacing") if attrsCache is not None else None
if (lineSpacing or oldLineSpacing is not None) and lineSpacing!=oldLineSpacing:
# Translators: a type of line spacing (E.g. single line spacing)
textList.append(_("line spacing %s")%lineSpacing)
if formatConfig["reportLinks"]:
link=attrs.get("link")
oldLink=attrsCache.get("link") if attrsCache is not None else None
if (link or oldLink is not None) and link!=oldLink:
text=_("link") if link else _("out of %s")%_("link")
textList.append(text)
if formatConfig["reportComments"]:
comment=attrs.get("comment")
oldComment=attrsCache.get("comment") if attrsCache is not None else None
if (comment or oldComment is not None) and comment!=oldComment:
if comment:
# Translators: Reported when text contains a comment.
text=_("has comment")
textList.append(text)
elif extraDetail:
# Translators: Reported when text no longer contains a comment.
text=_("out of comment")
textList.append(text)
if formatConfig["reportSpellingErrors"]:
invalidSpelling=attrs.get("invalid-spelling")
oldInvalidSpelling=attrsCache.get("invalid-spelling") if attrsCache is not None else None
if (invalidSpelling or oldInvalidSpelling is not None) and invalidSpelling!=oldInvalidSpelling:
if invalidSpelling:
# Translators: Reported when text contains a spelling error.
text=_("spelling error")
elif extraDetail:
# Translators: Reported when moving out of text containing a spelling error.
text=_("out of spelling error")
else:
text=""
if text:
textList.append(text)
invalidGrammar=attrs.get("invalid-grammar")
oldInvalidGrammar=attrsCache.get("invalid-grammar") if attrsCache is not None else None
if (invalidGrammar or oldInvalidGrammar is not None) and invalidGrammar!=oldInvalidGrammar:
if invalidGrammar:
# Translators: Reported when text contains a grammar error.
text=_("grammar error")
elif extraDetail:
# Translators: Reported when moving out of text containing a grammar error.
text=_("out of grammar error")
else:
text=""
if text:
textList.append(text)
# The line-prefix formatField attribute contains the text for a bullet or number for a list item, when the bullet or number does not appear in the actual text content.
# Normally this attribute could be repeated across formatFields within a list item and therefore is not safe to speak when the unit is word or character.
	# However, some implementations (such as MS Word with UIA) do limit its usage to the very first formatField of the list item.
# Therefore, they also expose a line-prefix_speakAlways attribute to allow its usage for any unit.
linePrefix_speakAlways=attrs.get('line-prefix_speakAlways',False)
	if linePrefix_speakAlways or unit in (
		textInfos.UNIT_LINE,
		textInfos.UNIT_SENTENCE,
		textInfos.UNIT_PARAGRAPH,
		textInfos.UNIT_READINGCHUNK,
	):
linePrefix=attrs.get("line-prefix")
if linePrefix:
textList.append(linePrefix)
if attrsCache is not None:
attrsCache.clear()
attrsCache.update(attrs)
return separator.join(textList)
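# --- Illustrative sketch; an assumption of this edit, not part of the original module. ---
# The formatting-speech logic above reports an attribute only when it differs
# from the value cached for the previous chunk. A minimal distillation of that
# attrs/attrsCache pattern (the helper name is hypothetical):
def _speakIfChanged(attrs,attrsCache,key,label):
	newVal=attrs.get(key)
	oldVal=attrsCache.get(key) if attrsCache is not None else None
	if (newVal or oldVal is not None) and newVal!=oldVal:
		# Speak the new value, or report its absence when it has been cleared.
		return u"%s %s"%(label,newVal) if newVal else u"no %s"%label
	return None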
def getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=False):
if tableInfo is None and oldTableInfo is None:
return ""
if tableInfo is None and oldTableInfo is not None:
# Translators: Indicates end of a table.
return _("out of table")
if not oldTableInfo or tableInfo.get("table-id")!=oldTableInfo.get("table-id"):
newTable=True
else:
newTable=False
textList=[]
if newTable:
columnCount=tableInfo.get("column-count",0)
rowCount=tableInfo.get("row-count",0)
# Translators: reports number of columns and rows in a table (example output: table with 3 columns and 5 rows).
text=_("table with {columnCount} columns and {rowCount} rows").format(columnCount=columnCount,rowCount=rowCount)
textList.append(text)
oldColumnNumber=oldTableInfo.get("column-number",0) if oldTableInfo else 0
columnNumber=tableInfo.get("column-number",0)
if columnNumber!=oldColumnNumber:
textList.append(_("column %s")%columnNumber)
oldRowNumber=oldTableInfo.get("row-number",0) if oldTableInfo else 0
rowNumber=tableInfo.get("row-number",0)
if rowNumber!=oldRowNumber:
textList.append(_("row %s")%rowNumber)
return " ".join(textList)
re_last_pause=re.compile(ur"^(.*(?<=[^\s.!?])[.!?][\"'”’)]?(?:\s+|$))(.*$)",re.DOTALL|re.UNICODE)
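# --- Illustrative note; an assumption of this edit, not part of the original module. ---
# re_last_pause captures everything up to and including the last sentence or
# phrase boundary in group 1 and the trailing incomplete fragment in group 2:
# re_last_pause.match(u"One. Two. Thr").groups() == (u"One. Two. ",u"Thr")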
def speakWithoutPauses(speechSequence,detectBreaks=True):
"""
Speaks the speech sequences given over multiple calls, only sending to the synth at acceptable phrase or sentence boundaries, or when given None for the speech sequence.
"""
lastStartIndex=0
#Break on all explicit break commands
if detectBreaks and speechSequence:
sequenceLen=len(speechSequence)
for index in xrange(sequenceLen):
if isinstance(speechSequence[index],SpeakWithoutPausesBreakCommand):
if index>0 and lastStartIndex<index:
speakWithoutPauses(speechSequence[lastStartIndex:index],detectBreaks=False)
speakWithoutPauses(None)
lastStartIndex=index+1
if lastStartIndex<sequenceLen:
speakWithoutPauses(speechSequence[lastStartIndex:],detectBreaks=False)
return
finalSpeechSequence=[] #To be spoken now
pendingSpeechSequence=[] #To be saved off for speaking later
if speechSequence is None: #Requesting flush
if speakWithoutPauses._pendingSpeechSequence:
#Place the last incomplete phrase in to finalSpeechSequence to be spoken now
finalSpeechSequence=speakWithoutPauses._pendingSpeechSequence
speakWithoutPauses._pendingSpeechSequence=[]
else: #Handling normal speech
#Scan the given speech and place all completed phrases in finalSpeechSequence to be spoken,
#And place the final incomplete phrase in pendingSpeechSequence
for index in xrange(len(speechSequence)-1,-1,-1):
item=speechSequence[index]
if isinstance(item,basestring):
m=re_last_pause.match(item)
if m:
before,after=m.groups()
if after:
pendingSpeechSequence.append(after)
if before:
finalSpeechSequence.extend(speakWithoutPauses._pendingSpeechSequence)
speakWithoutPauses._pendingSpeechSequence=[]
finalSpeechSequence.extend(speechSequence[0:index])
finalSpeechSequence.append(before)
# Apply the last language change to the pending sequence.
# This will need to be done for any other speech change commands introduced in future.
for changeIndex in xrange(index-1,-1,-1):
change=speechSequence[changeIndex]
if not isinstance(change,LangChangeCommand):
continue
pendingSpeechSequence.append(change)
break
break
else:
pendingSpeechSequence.append(item)
else:
pendingSpeechSequence.append(item)
if pendingSpeechSequence:
pendingSpeechSequence.reverse()
speakWithoutPauses._pendingSpeechSequence.extend(pendingSpeechSequence)
#Scan the final speech sequence backwards
for item in reversed(finalSpeechSequence):
if isinstance(item,IndexCommand):
speakWithoutPauses.lastSentIndex=item.index
break
if finalSpeechSequence:
speak(finalSpeechSequence)
speakWithoutPauses.lastSentIndex=None
speakWithoutPauses._pendingSpeechSequence=[]
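# --- Illustrative usage sketch; an assumption of this edit, not part of the original module. ---
# speakWithoutPauses buffers the trailing incomplete phrase across calls and only
# sends complete sentences to the synth; passing None flushes the buffer:
# speakWithoutPauses([u"The first sentence. The second"]) # speaks: "The first sentence. "
# speakWithoutPauses([u" half arrives later."]) # speaks: "The second half arrives later."
# speakWithoutPauses(None) # flushes anything still buffered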
class SpeechCommand(object):
"""
	The base class for objects that can be inserted between strings of text for particular speech functions that convey things such as indexing or voice parameter changes.
"""
class IndexCommand(SpeechCommand):
"""Represents an index within some speech."""
def __init__(self,index):
"""
@param index: the value of this index
@type index: integer
"""
if not isinstance(index,int): raise ValueError("index must be int, not %s"%type(index))
self.index=index
def __repr__(self):
return "IndexCommand(%r)" % self.index
class CharacterModeCommand(SpeechCommand):
"""Turns character mode on and off for speech synths."""
def __init__(self,state):
"""
		@param state: if true, character mode is turned on; if false, it is turned off.
@type state: boolean
"""
if not isinstance(state,bool): raise ValueError("state must be boolean, not %s"%type(state))
self.state=state
def __repr__(self):
return "CharacterModeCommand(%r)" % self.state
class LangChangeCommand(SpeechCommand):
"""A command to switch the language within speech."""
def __init__(self,lang):
"""
@param lang: the language to switch to: If None then the NVDA locale will be used.
@type lang: string
"""
self.lang=lang # if lang else languageHandler.getLanguage()
def __repr__(self):
return "LangChangeCommand (%r)"%self.lang
class SpeakWithoutPausesBreakCommand(SpeechCommand):
"""Forces speakWithoutPauses to flush its buffer and therefore break the sentence at this point.
This should only be used with the L{speakWithoutPauses} function.
This will be removed during processing.
"""
class BreakCommand(SpeechCommand):
"""Insert a break between words.
"""
def __init__(self, time=0):
"""
@param time: The duration of the pause to be inserted in milliseconds.
		@type time: int
"""
self.time = time
def __repr__(self):
return "BreakCommand(time=%d)" % self.time
class PitchCommand(SpeechCommand):
"""Change the pitch of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current pitch setting;
e.g. 0.5 is half, 1 returns to the current pitch setting.
		@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "PitchCommand(multiplier=%g)" % self.multiplier
class VolumeCommand(SpeechCommand):
"""Change the volume of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current volume setting;
e.g. 0.5 is half, 1 returns to the current volume setting.
		@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "VolumeCommand(multiplier=%g)" % self.multiplier
class RateCommand(SpeechCommand):
"""Change the rate of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current rate setting;
e.g. 0.5 is half, 1 returns to the current rate setting.
		@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "RateCommand(multiplier=%g)" % self.multiplier
class PhonemeCommand(SpeechCommand):
"""Insert a specific pronunciation.
This command accepts Unicode International Phonetic Alphabet (IPA) characters.
Note that this is not well supported by synthesizers.
"""
def __init__(self, ipa, text=None):
"""
@param ipa: Unicode IPA characters.
@type ipa: unicode
@param text: Text to speak if the synthesizer does not support
some or all of the specified IPA characters,
C{None} to ignore this command instead.
@type text: unicode
"""
self.ipa = ipa
self.text = text
def __repr__(self):
out = "PhonemeCommand(%r" % self.ipa
if self.text:
out += ", text=%r" % self.text
return out + ")"
| 1 | 23,534 | Could you split these conditions up over multiple lines please. | nvaccess-nvda | py |
@@ -33,7 +33,7 @@ func (t *tag) parseJSONTag(structTag reflect.StructTag) {
}
func (t *tag) parseTagStr(tagStr string) {
- parts := strings.SplitN(tagStr, ",", 2)
+ parts := strings.SplitN(tagStr, ",", 3)
if len(parts) == 0 {
return
} | 1 | package dynamodbattribute
import (
"reflect"
"strings"
)
type tag struct {
Name string
Ignore bool
OmitEmpty bool
OmitEmptyElem bool
AsString bool
AsBinSet, AsNumSet, AsStrSet bool
}
func (t *tag) parseAVTag(structTag reflect.StructTag) {
tagStr := structTag.Get("dynamodbav")
if len(tagStr) == 0 {
return
}
t.parseTagStr(tagStr)
}
func (t *tag) parseJSONTag(structTag reflect.StructTag) {
tagStr := structTag.Get("json")
if len(tagStr) == 0 {
return
}
t.parseTagStr(tagStr)
}
func (t *tag) parseTagStr(tagStr string) {
parts := strings.SplitN(tagStr, ",", 2)
if len(parts) == 0 {
return
}
if name := parts[0]; name == "-" {
t.Name = ""
t.Ignore = true
} else {
t.Name = name
t.Ignore = false
}
for _, opt := range parts[1:] {
switch opt {
case "omitempty":
t.OmitEmpty = true
case "omitemptyelem":
t.OmitEmptyElem = true
case "string":
t.AsString = true
case "binaryset":
t.AsBinSet = true
case "numberset":
t.AsNumSet = true
case "stringset":
t.AsStrSet = true
}
}
}
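// --- Illustrative note; an assumption of this edit, not part of the original file. ---
// With SplitN(tagStr, ",", 2), a tag such as `dynamodbav:"n,omitempty,stringset"`
// yields parts = ["n", "omitempty,stringset"]; the combined option string matches
// no case above, so both options are silently dropped. strings.Split with no
// limit yields ["n", "omitempty", "stringset"], and every option is applied:
//	parts := strings.Split(tagStr, ",")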
| 1 | 8,334 | I think we can just change this to `Split` instead of `SplitN`. I don't think we need to limit the number of parts in the tag. | aws-aws-sdk-go | go |
@@ -69,10 +69,8 @@ services:
image: {{ .router_image }}:{{ .router_tag }}
container_name: nginx-proxy
ports:
- - "80:80"
- - {{ .mailhogport }}:{{ .mailhogport }}
- - {{ .dbaport }}:{{ .dbaport }}
- - {{ .dbport }}:{{ .dbport }}
+ {{ range $port := .ports }}- "{{ $port }}:{{ $port }}"
+ {{ end }}
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
networks: | 1 | package platform
// SequelproTemplate is the template for Sequelpro config.
var SequelproTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ContentFilters</key>
<dict/>
<key>auto_connect</key>
<true/>
<key>data</key>
<dict>
<key>connection</key>
<dict>
<key>database</key>
<string>%s</string>
<key>host</key>
<string>%s</string>
<key>name</key>
<string>drud/%s</string>
<key>password</key>
<string>%s</string>
<key>port</key>
<integer>%s</integer>
<key>rdbms_type</key>
<string>mysql</string>
<key>sslCACertFileLocation</key>
<string></string>
<key>sslCACertFileLocationEnabled</key>
<integer>0</integer>
<key>sslCertificateFileLocation</key>
<string></string>
<key>sslCertificateFileLocationEnabled</key>
<integer>0</integer>
<key>sslKeyFileLocation</key>
<string></string>
<key>sslKeyFileLocationEnabled</key>
<integer>0</integer>
<key>type</key>
<string>SPTCPIPConnection</string>
<key>useSSL</key>
<integer>0</integer>
<key>user</key>
<string>%s</string>
</dict>
</dict>
<key>encrypted</key>
<false/>
<key>format</key>
<string>connection</string>
<key>queryFavorites</key>
<array/>
<key>queryHistory</key>
<array/>
<key>rdbms_type</key>
<string>mysql</string>
<key>rdbms_version</key>
<string>5.5.44</string>
<key>version</key>
<integer>1</integer>
</dict>
</plist>`
// DrudRouterTemplate is the template for the generic router container.
const DrudRouterTemplate = `version: '2'
services:
nginx-proxy:
image: {{ .router_image }}:{{ .router_tag }}
container_name: nginx-proxy
ports:
- "80:80"
- {{ .mailhogport }}:{{ .mailhogport }}
- {{ .dbaport }}:{{ .dbaport }}
- {{ .dbport }}:{{ .dbport }}
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
networks:
default:
external:
name: ddev_default
`
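// --- Illustrative usage sketch; an assumption of this edit, not part of the
// original file. Values below are hypothetical. ---
//	t := template.Must(template.New("router").Parse(DrudRouterTemplate))
//	err := t.Execute(os.Stdout, map[string]interface{}{
//		"router_image": "drud/nginx-proxy",
//		"router_tag":   "latest",
//		"mailhogport":  "1025",
//		"dbaport":      "8036",
//		"dbport":       "3306",
//	})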
| 1 | 11,142 | So this is actually a mistake that I introduced without realizing it, and have known would need to be fixed when we get here . The format of the ports directive is "host:container". We only want the host port to change, not the internal container ports. The ports variable probably needs to be a map which maps external -> internal ports. | drud-ddev | php |
@@ -1176,7 +1176,9 @@ func (s *ContextImpl) transitionLocked(request contextRequest) {
s.state = contextStateStopping
// The change in state should cause all write methods to fail, but just in case, set this also,
// which will cause failures at the persistence level
- s.shardInfo.RangeId = -1
+ if s.shardInfo != nil {
+ s.shardInfo.RangeId = -1
+ }
// This will cause the controller to remove this shard from the map and then call s.stop()
go s.closeCallback(s)
} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package shard
import (
"errors"
"sync"
"time"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/api/serviceerror"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/resource"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/events"
"go.temporal.io/server/service/history/tasks"
)
var (
defaultTime = time.Unix(0, 0)
)
const (
// See transitionLocked for overview of state transitions.
// These are the possible values of ContextImpl.state:
contextStateInitialized contextState = iota
contextStateAcquiring
contextStateAcquired
contextStateStopping
contextStateStopped
// These are the requests that can be passed to transitionLocked to change state:
contextRequestAcquire contextRequest = iota
contextRequestAcquired
contextRequestLost
contextRequestStop
contextRequestFinishStop
)
type (
contextState int32
contextRequest int
ContextImpl struct {
// These fields are constant:
resource.Resource
shardID int32
executionManager persistence.ExecutionManager
metricsClient metrics.Client
eventsCache events.Cache
closeCallback func(*ContextImpl)
config *configs.Config
logger log.Logger
throttledLogger log.Logger
engineFactory EngineFactory
// All following fields are protected by rwLock, and only valid if state >= Acquiring:
rwLock sync.RWMutex
state contextState
engine Engine
lastUpdated time.Time
shardInfo *persistence.ShardInfoWithFailover
transferSequenceNumber int64
maxTransferSequenceNumber int64
transferMaxReadLevel int64
timerMaxReadLevelMap map[string]time.Time // cluster -> timerMaxReadLevel
// exist only in memory
remoteClusterCurrentTime map[string]time.Time
}
)
var _ Context = (*ContextImpl)(nil)
var (
// ErrShardClosed is returned when shard is closed and a req cannot be processed
ErrShardClosed = errors.New("shard closed")
// ErrShardStatusUnknown means we're not sure if we have the shard lock or not. This may be returned
// during short windows at initialization and if we've lost the connection to the database.
ErrShardStatusUnknown = serviceerror.NewUnavailable("shard status unknown")
// errStoppingContext is an internal error used to abort acquireShard
errStoppingContext = errors.New("stopping context")
)
const (
logWarnTransferLevelDiff = 3000000 // 3 million
logWarnTimerLevelDiff = time.Duration(30 * time.Minute)
historySizeLogThreshold = 10 * 1024 * 1024
)
func (s *ContextImpl) GetShardID() int32 {
// constant from initialization, no need for locks
return s.shardID
}
func (s *ContextImpl) GetService() resource.Resource {
// constant from initialization, no need for locks
return s.Resource
}
func (s *ContextImpl) GetExecutionManager() persistence.ExecutionManager {
// constant from initialization, no need for locks
return s.executionManager
}
func (s *ContextImpl) GetEngine() (Engine, error) {
s.rLock()
defer s.rUnlock()
if err := s.errorByStateLocked(); err != nil {
return nil, err
}
return s.engine, nil
}
func (s *ContextImpl) GenerateTransferTaskID() (int64, error) {
s.wLock()
defer s.wUnlock()
return s.generateTransferTaskIDLocked()
}
func (s *ContextImpl) GenerateTransferTaskIDs(number int) ([]int64, error) {
s.wLock()
defer s.wUnlock()
result := []int64{}
for i := 0; i < number; i++ {
id, err := s.generateTransferTaskIDLocked()
if err != nil {
return nil, err
}
result = append(result, id)
}
return result, nil
}
func (s *ContextImpl) GetTransferMaxReadLevel() int64 {
s.rLock()
defer s.rUnlock()
return s.transferMaxReadLevel
}
func (s *ContextImpl) GetTransferAckLevel() int64 {
s.rLock()
defer s.rUnlock()
return s.shardInfo.TransferAckLevel
}
func (s *ContextImpl) UpdateTransferAckLevel(ackLevel int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.TransferAckLevel = ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetTransferClusterAckLevel(cluster string) int64 {
s.rLock()
defer s.rUnlock()
// if we can find corresponding ack level
if ackLevel, ok := s.shardInfo.ClusterTransferAckLevel[cluster]; ok {
return ackLevel
}
// otherwise, default to existing ack level, which belongs to local cluster
// this can happen if you add more cluster
return s.shardInfo.TransferAckLevel
}
func (s *ContextImpl) UpdateTransferClusterAckLevel(cluster string, ackLevel int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.ClusterTransferAckLevel[cluster] = ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetVisibilityAckLevel() int64 {
s.rLock()
defer s.rUnlock()
return s.shardInfo.VisibilityAckLevel
}
func (s *ContextImpl) UpdateVisibilityAckLevel(ackLevel int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.VisibilityAckLevel = ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetReplicatorAckLevel() int64 {
s.rLock()
defer s.rUnlock()
return s.shardInfo.ReplicationAckLevel
}
func (s *ContextImpl) UpdateReplicatorAckLevel(ackLevel int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.ReplicationAckLevel = ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetReplicatorDLQAckLevel(sourceCluster string) int64 {
s.rLock()
defer s.rUnlock()
if ackLevel, ok := s.shardInfo.ReplicationDlqAckLevel[sourceCluster]; ok {
return ackLevel
}
return -1
}
func (s *ContextImpl) UpdateReplicatorDLQAckLevel(
sourceCluster string,
ackLevel int64,
) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.ReplicationDlqAckLevel[sourceCluster] = ackLevel
s.shardInfo.StolenSinceRenew = 0
if err := s.updateShardInfoLocked(); err != nil {
return err
}
s.GetMetricsClient().Scope(
metrics.ReplicationDLQStatsScope,
metrics.TargetClusterTag(sourceCluster),
metrics.InstanceTag(convert.Int32ToString(s.shardID)),
).UpdateGauge(
metrics.ReplicationDLQAckLevelGauge,
float64(ackLevel),
)
return nil
}
func (s *ContextImpl) GetClusterReplicationLevel(cluster string) int64 {
s.rLock()
defer s.rUnlock()
// if we can find corresponding replication level
if replicationLevel, ok := s.shardInfo.ClusterReplicationLevel[cluster]; ok {
return replicationLevel
}
// New cluster always starts from -1
return persistence.EmptyQueueMessageID
}
func (s *ContextImpl) UpdateClusterReplicationLevel(cluster string, ackTaskID int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.ClusterReplicationLevel[cluster] = ackTaskID
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetTimerAckLevel() time.Time {
s.rLock()
defer s.rUnlock()
return timestamp.TimeValue(s.shardInfo.TimerAckLevelTime)
}
func (s *ContextImpl) UpdateTimerAckLevel(ackLevel time.Time) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.TimerAckLevelTime = &ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetTimerClusterAckLevel(cluster string) time.Time {
s.rLock()
defer s.rUnlock()
// if we can find corresponding ack level
if ackLevel, ok := s.shardInfo.ClusterTimerAckLevel[cluster]; ok {
return timestamp.TimeValue(ackLevel)
}
	// otherwise, default to the existing ack level, which belongs to the local cluster.
	// This can happen if more clusters are added.
return timestamp.TimeValue(s.shardInfo.TimerAckLevelTime)
}
func (s *ContextImpl) UpdateTimerClusterAckLevel(cluster string, ackLevel time.Time) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.ClusterTimerAckLevel[cluster] = &ackLevel
s.shardInfo.StolenSinceRenew = 0
return s.updateShardInfoLocked()
}
func (s *ContextImpl) UpdateTransferFailoverLevel(failoverID string, level persistence.TransferFailoverLevel) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.TransferFailoverLevels[failoverID] = level
return s.updateShardInfoLocked()
}
func (s *ContextImpl) DeleteTransferFailoverLevel(failoverID string) error {
s.wLock()
defer s.wUnlock()
if level, ok := s.shardInfo.TransferFailoverLevels[failoverID]; ok {
s.GetMetricsClient().RecordTimer(metrics.ShardInfoScope, metrics.ShardInfoTransferFailoverLatencyTimer, time.Since(level.StartTime))
delete(s.shardInfo.TransferFailoverLevels, failoverID)
}
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetAllTransferFailoverLevels() map[string]persistence.TransferFailoverLevel {
s.rLock()
defer s.rUnlock()
ret := map[string]persistence.TransferFailoverLevel{}
for k, v := range s.shardInfo.TransferFailoverLevels {
ret[k] = v
}
return ret
}
func (s *ContextImpl) UpdateTimerFailoverLevel(failoverID string, level persistence.TimerFailoverLevel) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.TimerFailoverLevels[failoverID] = level
return s.updateShardInfoLocked()
}
func (s *ContextImpl) DeleteTimerFailoverLevel(failoverID string) error {
s.wLock()
defer s.wUnlock()
if level, ok := s.shardInfo.TimerFailoverLevels[failoverID]; ok {
s.GetMetricsClient().RecordTimer(metrics.ShardInfoScope, metrics.ShardInfoTimerFailoverLatencyTimer, time.Since(level.StartTime))
delete(s.shardInfo.TimerFailoverLevels, failoverID)
}
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetAllTimerFailoverLevels() map[string]persistence.TimerFailoverLevel {
s.rLock()
defer s.rUnlock()
ret := map[string]persistence.TimerFailoverLevel{}
for k, v := range s.shardInfo.TimerFailoverLevels {
ret[k] = v
}
return ret
}
func (s *ContextImpl) GetNamespaceNotificationVersion() int64 {
s.rLock()
defer s.rUnlock()
return s.shardInfo.NamespaceNotificationVersion
}
func (s *ContextImpl) UpdateNamespaceNotificationVersion(namespaceNotificationVersion int64) error {
s.wLock()
defer s.wUnlock()
s.shardInfo.NamespaceNotificationVersion = namespaceNotificationVersion
return s.updateShardInfoLocked()
}
func (s *ContextImpl) GetTimerMaxReadLevel(cluster string) time.Time {
s.rLock()
defer s.rUnlock()
return s.timerMaxReadLevelMap[cluster]
}
func (s *ContextImpl) UpdateTimerMaxReadLevel(cluster string) time.Time {
s.wLock()
defer s.wUnlock()
currentTime := s.GetTimeSource().Now()
if cluster != "" && cluster != s.GetClusterMetadata().GetCurrentClusterName() {
currentTime = s.remoteClusterCurrentTime[cluster]
}
s.timerMaxReadLevelMap[cluster] = currentTime.Add(s.config.TimerProcessorMaxTimeShift()).Truncate(time.Millisecond)
return s.timerMaxReadLevelMap[cluster]
}
func (s *ContextImpl) CreateWorkflowExecution(
request *persistence.CreateWorkflowExecutionRequest,
) (*persistence.CreateWorkflowExecutionResponse, error) {
if err := s.errorByState(); err != nil {
return nil, err
}
namespaceID := namespace.ID(request.NewWorkflowSnapshot.ExecutionInfo.NamespaceId)
workflowID := request.NewWorkflowSnapshot.ExecutionInfo.WorkflowId
// do not try to get namespace cache within shard lock
namespaceEntry, err := s.GetNamespaceRegistry().GetNamespaceByID(namespaceID)
if err != nil {
return nil, err
}
s.wLock()
defer s.wUnlock()
transferMaxReadLevel := int64(0)
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.NewWorkflowSnapshot.TransferTasks,
request.NewWorkflowSnapshot.ReplicationTasks,
request.NewWorkflowSnapshot.TimerTasks,
request.NewWorkflowSnapshot.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
defer s.updateMaxReadLevelLocked(transferMaxReadLevel)
currentRangeID := s.getRangeIDLocked()
request.RangeID = currentRangeID
resp, err := s.executionManager.CreateWorkflowExecution(request)
if err = s.handleErrorLocked(err); err != nil {
return nil, err
}
return resp, nil
}
func (s *ContextImpl) UpdateWorkflowExecution(
request *persistence.UpdateWorkflowExecutionRequest,
) (*persistence.UpdateWorkflowExecutionResponse, error) {
if err := s.errorByState(); err != nil {
return nil, err
}
namespaceID := namespace.ID(request.UpdateWorkflowMutation.ExecutionInfo.NamespaceId)
workflowID := request.UpdateWorkflowMutation.ExecutionInfo.WorkflowId
// do not try to get namespace cache within shard lock
namespaceEntry, err := s.GetNamespaceRegistry().GetNamespaceByID(namespaceID)
if err != nil {
return nil, err
}
s.wLock()
defer s.wUnlock()
transferMaxReadLevel := int64(0)
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.UpdateWorkflowMutation.TransferTasks,
request.UpdateWorkflowMutation.ReplicationTasks,
request.UpdateWorkflowMutation.TimerTasks,
request.UpdateWorkflowMutation.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
if request.NewWorkflowSnapshot != nil {
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.NewWorkflowSnapshot.TransferTasks,
request.NewWorkflowSnapshot.ReplicationTasks,
request.NewWorkflowSnapshot.TimerTasks,
request.NewWorkflowSnapshot.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
}
defer s.updateMaxReadLevelLocked(transferMaxReadLevel)
currentRangeID := s.getRangeIDLocked()
request.RangeID = currentRangeID
resp, err := s.executionManager.UpdateWorkflowExecution(request)
if err = s.handleErrorLocked(err); err != nil {
return nil, err
}
return resp, nil
}
func (s *ContextImpl) ConflictResolveWorkflowExecution(
request *persistence.ConflictResolveWorkflowExecutionRequest,
) (*persistence.ConflictResolveWorkflowExecutionResponse, error) {
if err := s.errorByState(); err != nil {
return nil, err
}
namespaceID := namespace.ID(request.ResetWorkflowSnapshot.ExecutionInfo.NamespaceId)
workflowID := request.ResetWorkflowSnapshot.ExecutionInfo.WorkflowId
// do not try to get namespace cache within shard lock
namespaceEntry, err := s.GetNamespaceRegistry().GetNamespaceByID(namespaceID)
if err != nil {
return nil, err
}
s.wLock()
defer s.wUnlock()
transferMaxReadLevel := int64(0)
if request.CurrentWorkflowMutation != nil {
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.CurrentWorkflowMutation.TransferTasks,
request.CurrentWorkflowMutation.ReplicationTasks,
request.CurrentWorkflowMutation.TimerTasks,
request.CurrentWorkflowMutation.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
}
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.ResetWorkflowSnapshot.TransferTasks,
request.ResetWorkflowSnapshot.ReplicationTasks,
request.ResetWorkflowSnapshot.TimerTasks,
request.ResetWorkflowSnapshot.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
if request.NewWorkflowSnapshot != nil {
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.NewWorkflowSnapshot.TransferTasks,
request.NewWorkflowSnapshot.ReplicationTasks,
request.NewWorkflowSnapshot.TimerTasks,
request.NewWorkflowSnapshot.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return nil, err
}
}
defer s.updateMaxReadLevelLocked(transferMaxReadLevel)
currentRangeID := s.getRangeIDLocked()
request.RangeID = currentRangeID
resp, err := s.executionManager.ConflictResolveWorkflowExecution(request)
if err := s.handleErrorLocked(err); err != nil {
return nil, err
}
return resp, nil
}
func (s *ContextImpl) AddTasks(
request *persistence.AddTasksRequest,
) error {
if err := s.errorByState(); err != nil {
return err
}
namespaceID := namespace.ID(request.NamespaceID)
workflowID := request.WorkflowID
// do not try to get namespace cache within shard lock
namespaceEntry, err := s.GetNamespaceRegistry().GetNamespaceByID(namespaceID)
if err != nil {
return err
}
s.wLock()
defer s.wUnlock()
transferMaxReadLevel := int64(0)
if err := s.allocateTaskIDsLocked(
namespaceEntry,
workflowID,
request.TransferTasks,
request.ReplicationTasks,
request.TimerTasks,
request.VisibilityTasks,
&transferMaxReadLevel,
); err != nil {
return err
}
defer s.updateMaxReadLevelLocked(transferMaxReadLevel)
request.RangeID = s.getRangeIDLocked()
err = s.executionManager.AddTasks(request)
if err = s.handleErrorLocked(err); err != nil {
return err
}
s.engine.NotifyNewTransferTasks(request.TransferTasks)
s.engine.NotifyNewTimerTasks(request.TimerTasks)
s.engine.NotifyNewVisibilityTasks(request.VisibilityTasks)
s.engine.NotifyNewReplicationTasks(request.ReplicationTasks)
return nil
}
func (s *ContextImpl) AppendHistoryEvents(
request *persistence.AppendHistoryNodesRequest,
namespaceID namespace.ID,
execution commonpb.WorkflowExecution,
) (int, error) {
if err := s.errorByState(); err != nil {
return 0, err
}
request.ShardID = s.shardID
size := 0
defer func() {
// N.B. - Dual emit here makes sense so that we can see aggregate timer stats across all
// namespaces along with the individual namespaces stats
s.GetMetricsClient().RecordDistribution(metrics.SessionSizeStatsScope, metrics.HistorySize, size)
if entry, err := s.GetNamespaceRegistry().GetNamespaceByID(namespaceID); err == nil && entry != nil {
s.GetMetricsClient().Scope(
metrics.SessionSizeStatsScope,
metrics.NamespaceTag(entry.Name().String()),
).RecordDistribution(metrics.HistorySize, size)
}
if size >= historySizeLogThreshold {
s.throttledLogger.Warn("history size threshold breached",
tag.WorkflowID(execution.GetWorkflowId()),
tag.WorkflowRunID(execution.GetRunId()),
tag.WorkflowNamespaceID(namespaceID.String()),
tag.WorkflowHistorySizeBytes(size))
}
}()
resp, err0 := s.GetExecutionManager().AppendHistoryNodes(request)
if resp != nil {
size = resp.Size
}
return size, err0
}
func (s *ContextImpl) GetConfig() *configs.Config {
// constant from initialization, no need for locks
return s.config
}
func (s *ContextImpl) GetEventsCache() events.Cache {
// constant from initialization (except for tests), no need for locks
return s.eventsCache
}
func (s *ContextImpl) GetLogger() log.Logger {
// constant from initialization, no need for locks
return s.logger
}
func (s *ContextImpl) GetThrottledLogger() log.Logger {
// constant from initialization, no need for locks
return s.throttledLogger
}
func (s *ContextImpl) getRangeIDLocked() int64 {
return s.shardInfo.GetRangeId()
}
func (s *ContextImpl) errorByState() error {
s.rLock()
defer s.rUnlock()
return s.errorByStateLocked()
}
func (s *ContextImpl) errorByStateLocked() error {
switch s.state {
case contextStateInitialized, contextStateAcquiring:
return ErrShardStatusUnknown
case contextStateAcquired:
return nil
case contextStateStopping, contextStateStopped:
return ErrShardClosed
default:
panic("invalid state")
}
}
func (s *ContextImpl) generateTransferTaskIDLocked() (int64, error) {
if err := s.updateRangeIfNeededLocked(); err != nil {
return -1, err
}
taskID := s.transferSequenceNumber
s.transferSequenceNumber++
return taskID, nil
}
func (s *ContextImpl) updateRangeIfNeededLocked() error {
if s.transferSequenceNumber < s.maxTransferSequenceNumber {
return nil
}
return s.renewRangeLocked(false)
}
func (s *ContextImpl) renewRangeLocked(isStealing bool) error {
updatedShardInfo := copyShardInfo(s.shardInfo)
updatedShardInfo.RangeId++
if isStealing {
updatedShardInfo.StolenSinceRenew++
}
err := s.GetShardManager().UpdateShard(&persistence.UpdateShardRequest{
ShardInfo: updatedShardInfo.ShardInfo,
PreviousRangeID: s.shardInfo.GetRangeId()})
if err != nil {
// Failure in updating shard to grab new RangeID
s.logger.Error("Persistent store operation failure",
tag.StoreOperationUpdateShard,
tag.Error(err),
tag.ShardRangeID(updatedShardInfo.GetRangeId()),
tag.PreviousShardRangeID(s.shardInfo.GetRangeId()),
)
return s.handleErrorLocked(err)
}
// Range is successfully updated in cassandra now update shard context to reflect new range
s.logger.Info("Range updated for shardID",
tag.ShardRangeID(updatedShardInfo.RangeId),
tag.PreviousShardRangeID(s.shardInfo.RangeId),
tag.Number(s.transferSequenceNumber),
tag.NextNumber(s.maxTransferSequenceNumber),
)
s.transferSequenceNumber = updatedShardInfo.GetRangeId() << s.config.RangeSizeBits
s.maxTransferSequenceNumber = (updatedShardInfo.GetRangeId() + 1) << s.config.RangeSizeBits
s.transferMaxReadLevel = s.transferSequenceNumber - 1
s.shardInfo = updatedShardInfo
return nil
}
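// --- Illustrative note; an assumption of this edit, not part of the original file. ---
// Each acquired range maps to a contiguous block of task IDs. With
// RangeSizeBits = 20 and the renewed RangeId = 5 (hypothetical values):
//	transferSequenceNumber    = 5 << 20 = 5242880 // first usable task ID
//	maxTransferSequenceNumber = 6 << 20 = 6291456 // start of the next range
// so the shard can hand out 2^20 task IDs before renewing the range again.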
func (s *ContextImpl) updateMaxReadLevelLocked(rl int64) {
if rl > s.transferMaxReadLevel {
s.logger.Debug("Updating MaxTaskID", tag.MaxLevel(rl))
s.transferMaxReadLevel = rl
}
}
func (s *ContextImpl) updateShardInfoLocked() error {
if err := s.errorByStateLocked(); err != nil {
return err
}
var err error
now := clock.NewRealTimeSource().Now()
if s.lastUpdated.Add(s.config.ShardUpdateMinInterval()).After(now) {
return nil
}
updatedShardInfo := copyShardInfo(s.shardInfo)
s.emitShardInfoMetricsLogsLocked()
err = s.GetShardManager().UpdateShard(&persistence.UpdateShardRequest{
ShardInfo: updatedShardInfo.ShardInfo,
PreviousRangeID: s.shardInfo.GetRangeId(),
})
if err != nil {
return s.handleErrorLocked(err)
}
s.lastUpdated = now
return nil
}
func (s *ContextImpl) emitShardInfoMetricsLogsLocked() {
currentCluster := s.GetClusterMetadata().GetCurrentClusterName()
minTransferLevel := s.shardInfo.ClusterTransferAckLevel[currentCluster]
maxTransferLevel := s.shardInfo.ClusterTransferAckLevel[currentCluster]
for _, v := range s.shardInfo.ClusterTransferAckLevel {
if v < minTransferLevel {
minTransferLevel = v
}
if v > maxTransferLevel {
maxTransferLevel = v
}
}
diffTransferLevel := maxTransferLevel - minTransferLevel
minTimerLevel := timestamp.TimeValue(s.shardInfo.ClusterTimerAckLevel[currentCluster])
maxTimerLevel := timestamp.TimeValue(s.shardInfo.ClusterTimerAckLevel[currentCluster])
for _, v := range s.shardInfo.ClusterTimerAckLevel {
t := timestamp.TimeValue(v)
if t.Before(minTimerLevel) {
minTimerLevel = t
}
if t.After(maxTimerLevel) {
maxTimerLevel = t
}
}
diffTimerLevel := maxTimerLevel.Sub(minTimerLevel)
replicationLag := s.transferMaxReadLevel - s.shardInfo.ReplicationAckLevel
transferLag := s.transferMaxReadLevel - s.shardInfo.TransferAckLevel
timerLag := time.Since(timestamp.TimeValue(s.shardInfo.TimerAckLevelTime))
transferFailoverInProgress := len(s.shardInfo.TransferFailoverLevels)
timerFailoverInProgress := len(s.shardInfo.TimerFailoverLevels)
if s.config.EmitShardDiffLog() &&
(logWarnTransferLevelDiff < diffTransferLevel ||
logWarnTimerLevelDiff < diffTimerLevel ||
logWarnTransferLevelDiff < transferLag ||
logWarnTimerLevelDiff < timerLag) {
s.logger.Warn("Shard ack levels diff exceeds warn threshold.",
tag.ShardTime(s.remoteClusterCurrentTime),
tag.ShardReplicationAck(s.shardInfo.ReplicationAckLevel),
tag.ShardTimerAcks(s.shardInfo.ClusterTimerAckLevel),
tag.ShardTransferAcks(s.shardInfo.ClusterTransferAckLevel))
}
s.GetMetricsClient().RecordDistribution(metrics.ShardInfoScope, metrics.ShardInfoTransferDiffTimer, int(diffTransferLevel))
s.GetMetricsClient().RecordTimer(metrics.ShardInfoScope, metrics.ShardInfoTimerDiffTimer, diffTimerLevel)
s.GetMetricsClient().RecordDistribution(metrics.ShardInfoScope, metrics.ShardInfoReplicationLagTimer, int(replicationLag))
s.GetMetricsClient().RecordDistribution(metrics.ShardInfoScope, metrics.ShardInfoTransferLagTimer, int(transferLag))
s.GetMetricsClient().RecordTimer(metrics.ShardInfoScope, metrics.ShardInfoTimerLagTimer, timerLag)
s.GetMetricsClient().RecordDistribution(metrics.ShardInfoScope, metrics.ShardInfoTransferFailoverInProgressTimer, transferFailoverInProgress)
s.GetMetricsClient().RecordDistribution(metrics.ShardInfoScope, metrics.ShardInfoTimerFailoverInProgressTimer, timerFailoverInProgress)
}
func (s *ContextImpl) allocateTaskIDsLocked(
namespaceEntry *namespace.Namespace,
workflowID string,
transferTasks []tasks.Task,
replicationTasks []tasks.Task,
timerTasks []tasks.Task,
visibilityTasks []tasks.Task,
transferMaxReadLevel *int64,
) error {
if err := s.allocateTransferIDsLocked(
transferTasks,
transferMaxReadLevel); err != nil {
return err
}
if err := s.allocateTransferIDsLocked(
replicationTasks,
transferMaxReadLevel); err != nil {
return err
}
if err := s.allocateTransferIDsLocked(
visibilityTasks,
transferMaxReadLevel); err != nil {
return err
}
return s.allocateTimerIDsLocked(
namespaceEntry,
workflowID,
timerTasks)
}
func (s *ContextImpl) allocateTransferIDsLocked(
tasks []tasks.Task,
transferMaxReadLevel *int64,
) error {
for _, task := range tasks {
id, err := s.generateTransferTaskIDLocked()
if err != nil {
return err
}
s.logger.Debug("Assigning task ID", tag.TaskID(id))
task.SetTaskID(id)
*transferMaxReadLevel = id
}
return nil
}
// NOTE: allocateTimerIDsLocked should always be called after assigning taskIDs for transferTasks when assigning taskIDs together,
// because the Temporal indexer assumes that the timer taskID of deleteWorkflowExecution is larger than the transfer taskID of
// closeWorkflowExecution for a given workflow.
func (s *ContextImpl) allocateTimerIDsLocked(
namespaceEntry *namespace.Namespace,
workflowID string,
timerTasks []tasks.Task,
) error {
// assign IDs for the timer tasks. They need to be assigned under shard lock.
currentCluster := s.GetClusterMetadata().GetCurrentClusterName()
for _, task := range timerTasks {
ts := task.GetVisibilityTime()
if task.GetVersion() != common.EmptyVersion {
// cannot use version to determine the corresponding cluster for timer task
// this is because during failover, timer task should be created as active
// or otherwise, failover + active processing logic may not pick up the task.
currentCluster = namespaceEntry.ActiveClusterName()
}
readCursorTS := s.timerMaxReadLevelMap[currentCluster]
if ts.Before(readCursorTS) {
			// This can happen if the shard moves and the new host has a clock skew, or if there is a db write delay.
// We generate a new timer ID using timerMaxReadLevel.
s.logger.Debug("New timer generated is less than read level",
tag.WorkflowNamespaceID(namespaceEntry.ID().String()),
tag.WorkflowID(workflowID),
tag.Timestamp(ts),
tag.CursorTimestamp(readCursorTS),
tag.ValueShardAllocateTimerBeforeRead)
task.SetVisibilityTime(s.timerMaxReadLevelMap[currentCluster].Add(time.Millisecond))
}
seqNum, err := s.generateTransferTaskIDLocked()
if err != nil {
return err
}
task.SetTaskID(seqNum)
visibilityTs := task.GetVisibilityTime()
s.logger.Debug("Assigning new timer",
tag.Timestamp(visibilityTs), tag.TaskID(task.GetTaskID()), tag.AckLevel(s.shardInfo.TimerAckLevelTime))
}
return nil
}
func (s *ContextImpl) SetCurrentTime(cluster string, currentTime time.Time) {
s.wLock()
defer s.wUnlock()
if cluster != s.GetClusterMetadata().GetCurrentClusterName() {
prevTime := s.remoteClusterCurrentTime[cluster]
if prevTime.Before(currentTime) {
s.remoteClusterCurrentTime[cluster] = currentTime
}
} else {
panic("Cannot set current time for current cluster")
}
}
func (s *ContextImpl) GetCurrentTime(cluster string) time.Time {
s.rLock()
defer s.rUnlock()
if cluster != s.GetClusterMetadata().GetCurrentClusterName() {
return s.remoteClusterCurrentTime[cluster]
}
return s.GetTimeSource().Now().UTC()
}
func (s *ContextImpl) GetLastUpdatedTime() time.Time {
s.rLock()
defer s.rUnlock()
return s.lastUpdated
}
func (s *ContextImpl) handleErrorLocked(err error) error {
switch err.(type) {
case nil:
return nil
case *persistence.CurrentWorkflowConditionFailedError,
*persistence.WorkflowConditionFailedError,
*persistence.ConditionFailedError,
*serviceerror.ResourceExhausted:
// No special handling required for these errors
return err
case *persistence.ShardOwnershipLostError:
// Shard is stolen, trigger shutdown of history engine
s.transitionLocked(contextRequestStop)
return err
default:
// We have no idea if the write failed or will eventually make it to persistence. Try to re-acquire
// the shard in the background. If successful, we'll get a new RangeID, to guarantee that subsequent
// reads will either see that write, or know for certain that it failed. This allows the callers to
// reliably check the outcome by performing a read. If we fail, we'll shut down the shard.
s.transitionLocked(contextRequestLost)
return err
}
}
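// --- Illustrative summary; an assumption of this edit, not part of the original file. ---
// handleErrorLocked sorts persistence errors into three buckets:
//	condition/resource-exhausted errors -> returned as-is, shard state unchanged
//	ShardOwnershipLostError             -> contextRequestStop (unload the shard)
//	anything else (outcome unknown)     -> contextRequestLost (re-acquire; the
//	                                       new RangeID fences in-flight writes)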
func (s *ContextImpl) maybeRecordShardAcquisitionLatency(ownershipChanged bool) {
if ownershipChanged {
s.GetMetricsClient().RecordTimer(metrics.ShardInfoScope, metrics.ShardContextAcquisitionLatency,
s.GetCurrentTime(s.GetClusterMetadata().GetCurrentClusterName()).Sub(s.GetLastUpdatedTime()))
}
}
func (s *ContextImpl) createEngine() Engine {
s.logger.Info("", tag.LifeCycleStarting, tag.ComponentShardEngine)
engine := s.engineFactory.CreateEngine(s)
engine.Start()
s.logger.Info("", tag.LifeCycleStarted, tag.ComponentShardEngine)
return engine
}
func (s *ContextImpl) getOrCreateEngine() (engine Engine, retErr error) {
// Wait on shard acquisition for 1s. Note that this retry is just polling a value in memory.
// Another goroutine is doing the actual work.
// TODO: use context to do timeout here
policy := backoff.NewExponentialRetryPolicy(5 * time.Millisecond)
policy.SetExpirationInterval(1 * time.Second)
isRetryable := func(err error) bool { return err == ErrShardStatusUnknown }
op := func() error {
s.rLock()
defer s.rUnlock()
err := s.errorByStateLocked()
if err == nil {
engine = s.engine
}
return err
}
retErr = backoff.Retry(op, policy, isRetryable)
if retErr == nil && engine == nil {
// This shouldn't ever happen, but don't let it return nil error.
retErr = ErrShardStatusUnknown
}
return
}
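// --- Illustrative note; an assumption of this edit, not part of the original file.
// The exact backoff schedule is an assumption. ---
// The retry above only polls in-memory state: starting at 5ms and backing off
// exponentially within a 1s budget, it returns the engine once the shard is
// Acquired, ErrShardClosed once it is Stopping/Stopped, or
// ErrShardStatusUnknown if the budget expires first.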
// start should only be called by the controller.
func (s *ContextImpl) start() {
s.wLock()
defer s.wUnlock()
s.transitionLocked(contextRequestAcquire)
}
// stop should only be called by the controller.
func (s *ContextImpl) stop() {
s.wLock()
s.transitionLocked(contextRequestFinishStop)
engine := s.engine
s.engine = nil
s.wUnlock()
// Stop the engine if it was running (outside the lock but before returning)
if engine != nil {
s.logger.Info("", tag.LifeCycleStopping, tag.ComponentShardEngine)
engine.Stop()
s.logger.Info("", tag.LifeCycleStopped, tag.ComponentShardEngine)
}
}
func (s *ContextImpl) isValid() bool {
s.rLock()
defer s.rUnlock()
return s.state < contextStateStopping
}
func (s *ContextImpl) wLock() {
scope := metrics.ShardInfoScope
s.metricsClient.IncCounter(scope, metrics.LockRequests)
sw := s.metricsClient.StartTimer(scope, metrics.LockLatency)
defer sw.Stop()
s.rwLock.Lock()
}
func (s *ContextImpl) rLock() {
scope := metrics.ShardInfoScope
s.metricsClient.IncCounter(scope, metrics.LockRequests)
sw := s.metricsClient.StartTimer(scope, metrics.LockLatency)
defer sw.Stop()
s.rwLock.RLock()
}
func (s *ContextImpl) wUnlock() {
s.rwLock.Unlock()
}
func (s *ContextImpl) rUnlock() {
s.rwLock.RUnlock()
}
func (s *ContextImpl) transitionLocked(request contextRequest) {
/* State transitions:
The normal pattern:
Initialized
controller calls start()
Acquiring
acquireShard gets the shard
Acquired
If we get a transient error from persistence:
Acquired
transient error: handleErrorLocked calls transitionLocked(contextRequestLost)
Acquiring
acquireShard gets the shard
Acquired
If we get shard ownership lost:
Acquired
ShardOwnershipLostError: handleErrorLocked calls transitionLocked(contextRequestStop)
Stopping
controller removes from map and calls stop()
Stopped
Stopping can be triggered internally (if we get a ShardOwnershipLostError, or fail to acquire the rangeid
lock after several minutes) or externally (from controller, e.g. controller shutting down or admin force-
unload shard). If it's triggered internally, we transition to Stopping, then make an asynchronous callback
to controller, which will remove us from the map and call stop(), which will transition to Stopped and
stop the engine. If it's triggered externally, we'll skip over Stopping and go straight to Stopped.
If we want to stop, and the acquireShard goroutine is still running, we can't kill it, but we need a
mechanism to make sure it doesn't make any persistence calls or state transitions. We make acquireShard
check the state each time it acquires the lock, and do nothing if the state has changed to Stopping (or
Stopped).
Invariants:
- Once state is Stopping, it can only go to Stopped.
- Once state is Stopped, it can't go anywhere else.
- At the start of acquireShard, state must be Acquiring.
- By the end of acquireShard, state must not be Acquiring: either acquireShard set it to Acquired, or the
controller set it to Stopped.
- If state is Acquiring, acquireShard should be running in the background.
- Only acquireShard can use contextRequestAcquired (i.e. transition from Acquiring to Acquired).
- Once state has reached Acquired at least once, and not reached Stopped, engine must be non-nil.
- Only the controller may call start() and stop().
- The controller must call stop() for every ContextImpl it creates.
*/
setStateAcquiring := func() {
s.state = contextStateAcquiring
go s.acquireShard()
}
setStateStopping := func() {
s.state = contextStateStopping
// The change in state should cause all write methods to fail, but just in case, set this also,
// which will cause failures at the persistence level
s.shardInfo.RangeId = -1
// This will cause the controller to remove this shard from the map and then call s.stop()
go s.closeCallback(s)
}
setStateStopped := func() {
s.state = contextStateStopped
}
switch s.state {
case contextStateInitialized:
switch request {
case contextRequestAcquire:
setStateAcquiring()
return
case contextRequestStop:
setStateStopping()
return
case contextRequestFinishStop:
setStateStopped()
return
}
case contextStateAcquiring:
switch request {
case contextRequestAcquire:
return // nothing to do, already acquiring
case contextRequestAcquired:
s.state = contextStateAcquired
return
case contextRequestLost:
return // nothing to do, already acquiring
case contextRequestStop:
setStateStopping()
return
case contextRequestFinishStop:
setStateStopped()
return
}
case contextStateAcquired:
switch request {
case contextRequestAcquire:
			return // nothing to do, already acquired
case contextRequestLost:
setStateAcquiring()
return
case contextRequestStop:
setStateStopping()
return
case contextRequestFinishStop:
setStateStopped()
return
}
case contextStateStopping:
switch request {
case contextRequestStop:
// nothing to do, already stopping
return
case contextRequestFinishStop:
setStateStopped()
return
}
}
s.logger.Warn("invalid state transition request",
tag.ShardContextState(int(s.state)),
tag.ShardContextStateRequest(int(request)),
)
}
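// loadOrCreateShardMetadata fetches this shard's record from persistence,
// first creating an empty record if one does not exist yet.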
func (s *ContextImpl) loadOrCreateShardMetadata() (*persistence.ShardInfoWithFailover, error) {
resp, err := s.GetShardManager().GetShard(&persistence.GetShardRequest{
ShardID: s.shardID,
})
if _, ok := err.(*serviceerror.NotFound); ok {
// EntityNotExistsError: doesn't exist in db yet, try to create it
req := &persistence.CreateShardRequest{
ShardInfo: &persistencespb.ShardInfo{
ShardId: s.shardID,
},
}
err = s.GetShardManager().CreateShard(req)
if err != nil {
return nil, err
}
return &persistence.ShardInfoWithFailover{ShardInfo: req.ShardInfo}, nil
}
if err != nil {
return nil, err
}
return &persistence.ShardInfoWithFailover{ShardInfo: resp.ShardInfo}, nil
}
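// loadShardMetadata ensures shardInfo is loaded into the context, marking
// ownershipChanged if the previous owner was a different host, and seeds the
// per-cluster current time and timer max read level maps.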
func (s *ContextImpl) loadShardMetadata(ownershipChanged *bool) error {
	// We only have to do this once; after that we can just re-acquire the rangeid lock.
	s.rLock()
	if s.state >= contextStateStopping {
		s.rUnlock()
		return errStoppingContext
	}
if s.shardInfo != nil {
s.rUnlock()
return nil
}
s.rUnlock()
// We don't have any shardInfo yet, load it (outside of context rwlock)
shardInfo, err := s.loadOrCreateShardMetadata()
if err != nil {
s.logger.Error("Failed to load shard", tag.Error(err))
return err
}
updatedShardInfo := copyShardInfo(shardInfo)
*ownershipChanged = shardInfo.Owner != s.GetHostInfo().Identity()
updatedShardInfo.Owner = s.GetHostInfo().Identity()
// initialize the cluster current time to be the same as ack level
remoteClusterCurrentTime := make(map[string]time.Time)
timerMaxReadLevelMap := make(map[string]time.Time)
for clusterName, info := range s.GetClusterMetadata().GetAllClusterInfo() {
if !info.Enabled {
continue
}
currentReadTime := timestamp.TimeValue(shardInfo.TimerAckLevelTime)
if clusterName != s.GetClusterMetadata().GetCurrentClusterName() {
if currentTime, ok := shardInfo.ClusterTimerAckLevel[clusterName]; ok {
currentReadTime = timestamp.TimeValue(currentTime)
}
remoteClusterCurrentTime[clusterName] = currentReadTime
timerMaxReadLevelMap[clusterName] = currentReadTime
} else { // active cluster
timerMaxReadLevelMap[clusterName] = currentReadTime
}
timerMaxReadLevelMap[clusterName] = timerMaxReadLevelMap[clusterName].Truncate(time.Millisecond)
}
s.wLock()
defer s.wUnlock()
if s.state >= contextStateStopping {
return errStoppingContext
}
s.shardInfo = updatedShardInfo
s.remoteClusterCurrentTime = remoteClusterCurrentTime
s.timerMaxReadLevelMap = timerMaxReadLevelMap
return nil
}
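// acquireShard runs in its own goroutine. It loads the shard metadata,
// acquires the rangeid lock (retrying for up to five minutes), creates the
// engine on first acquisition, and transitions the context to Acquired, or
// initiates a stop if the shard could not be acquired.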
func (s *ContextImpl) acquireShard() {
// Retry for 5m, with interval up to 10s (default)
policy := backoff.NewExponentialRetryPolicy(50 * time.Millisecond)
policy.SetExpirationInterval(5 * time.Minute)
isRetryable := func(err error) bool {
if common.IsPersistenceTransientError(err) {
return true
}
// Retry this in case we need to create the shard and race with someone else doing it.
if _, ok := err.(*persistence.ShardAlreadyExistError); ok {
return true
}
return false
}
// Remember this value across attempts
ownershipChanged := false
op := func() error {
// Initial load of shard metadata
err := s.loadShardMetadata(&ownershipChanged)
if err != nil {
return err
}
s.wLock()
defer s.wUnlock()
// Check that we should still be running
if s.state >= contextStateStopping {
return errStoppingContext
}
// Try to acquire RangeID lock. If this gets a persistence error, it may call:
// transitionLocked(contextRequestStop) for ShardOwnershipLostError:
// This will transition to Stopping right here, and the transitionLocked call at the end of the
// outer function will do nothing, since the state was already changed.
// transitionLocked(contextRequestLost) for other transient errors:
// This will do nothing, since state is already Acquiring.
err = s.renewRangeLocked(true)
if err != nil {
return err
}
s.logger.Info("Acquired shard")
// The first time we get the shard, we have to create the engine. We have to release the lock to
// create the engine, and then reacquire it. This is safe because:
// 1. We know we're currently in the Acquiring state. The only thing we can transition to (without
// doing it ourselves) is Stopped. In that case, we'll have to stop the engine that we just
// created, since the stop transition didn't do it.
// 2. We don't have an engine yet, so no one should be calling any of our methods that mutate things.
if s.engine == nil {
s.wUnlock()
s.maybeRecordShardAcquisitionLatency(ownershipChanged)
engine := s.createEngine()
s.wLock()
if s.state >= contextStateStopping {
engine.Stop()
return errStoppingContext
}
s.engine = engine
}
s.transitionLocked(contextRequestAcquired)
return nil
}
err := backoff.Retry(op, policy, isRetryable)
if err == errStoppingContext {
// State changed since this goroutine started, exit silently.
return
} else if err != nil {
// We got an unretryable error (perhaps ShardOwnershipLostError) or timed out.
s.logger.Error("Couldn't acquire shard", tag.Error(err))
// If there's been another state change since we started (e.g. to Stopping), then don't do anything
// here. But if not (i.e. timed out or error), initiate shutting down the shard.
s.wLock()
defer s.wUnlock()
if s.state >= contextStateStopping {
return
}
s.transitionLocked(contextRequestStop)
}
}
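// newContext creates a ContextImpl in the Initialized state, along with its
// events cache. The controller is expected to call start() on the result.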
func newContext(
resource resource.Resource,
shardID int32,
factory EngineFactory,
config *configs.Config,
closeCallback func(*ContextImpl),
) (*ContextImpl, error) {
hostIdentity := resource.GetHostInfo().Identity()
shardContext := &ContextImpl{
Resource: resource,
state: contextStateInitialized,
shardID: shardID,
executionManager: resource.GetExecutionManager(),
metricsClient: resource.GetMetricsClient(),
closeCallback: closeCallback,
config: config,
logger: log.With(resource.GetLogger(), tag.ShardID(shardID), tag.Address(hostIdentity)),
throttledLogger: log.With(resource.GetThrottledLogger(), tag.ShardID(shardID), tag.Address(hostIdentity)),
engineFactory: factory,
}
shardContext.eventsCache = events.NewEventsCache(
shardContext.GetShardID(),
shardContext.GetConfig().EventsCacheInitialSize(),
shardContext.GetConfig().EventsCacheMaxSize(),
shardContext.GetConfig().EventsCacheTTL(),
shardContext.GetExecutionManager(),
false,
shardContext.GetLogger(),
shardContext.GetMetricsClient(),
)
return shardContext, nil
}
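// copyShardInfo returns a deep copy of shardInfo, cloning the ack level and
// failover maps so the copy can be mutated independently. Zero timer ack
// levels are normalized to defaultTime.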
func copyShardInfo(shardInfo *persistence.ShardInfoWithFailover) *persistence.ShardInfoWithFailover {
transferFailoverLevels := map[string]persistence.TransferFailoverLevel{}
for k, v := range shardInfo.TransferFailoverLevels {
transferFailoverLevels[k] = v
}
timerFailoverLevels := map[string]persistence.TimerFailoverLevel{}
for k, v := range shardInfo.TimerFailoverLevels {
timerFailoverLevels[k] = v
}
clusterTransferAckLevel := make(map[string]int64)
for k, v := range shardInfo.ClusterTransferAckLevel {
clusterTransferAckLevel[k] = v
}
clusterTimerAckLevel := make(map[string]*time.Time)
for k, v := range shardInfo.ClusterTimerAckLevel {
if timestamp.TimeValue(v).IsZero() {
v = timestamp.TimePtr(defaultTime)
}
clusterTimerAckLevel[k] = v
}
clusterReplicationLevel := make(map[string]int64)
for k, v := range shardInfo.ClusterReplicationLevel {
clusterReplicationLevel[k] = v
}
clusterReplicationDLQLevel := make(map[string]int64)
for k, v := range shardInfo.ReplicationDlqAckLevel {
clusterReplicationDLQLevel[k] = v
}
if timestamp.TimeValue(shardInfo.TimerAckLevelTime).IsZero() {
shardInfo.TimerAckLevelTime = timestamp.TimePtr(defaultTime)
}
shardInfoCopy := &persistence.ShardInfoWithFailover{
ShardInfo: &persistencespb.ShardInfo{
ShardId: shardInfo.GetShardId(),
Owner: shardInfo.Owner,
RangeId: shardInfo.GetRangeId(),
StolenSinceRenew: shardInfo.StolenSinceRenew,
ReplicationAckLevel: shardInfo.ReplicationAckLevel,
TransferAckLevel: shardInfo.TransferAckLevel,
TimerAckLevelTime: shardInfo.TimerAckLevelTime,
ClusterTransferAckLevel: clusterTransferAckLevel,
ClusterTimerAckLevel: clusterTimerAckLevel,
NamespaceNotificationVersion: shardInfo.NamespaceNotificationVersion,
ClusterReplicationLevel: clusterReplicationLevel,
ReplicationDlqAckLevel: clusterReplicationDLQLevel,
UpdateTime: shardInfo.UpdateTime,
VisibilityAckLevel: shardInfo.VisibilityAckLevel,
},
TransferFailoverLevels: transferFailoverLevels,
TimerFailoverLevels: timerFailoverLevels,
}
return shardInfoCopy
}
| 1 | 13,196 | An alternate approach would be to always initialize shardInfo with a dummy (non-nil) value, which could protect against other uses before it's initialized (I couldn't find any though). But I can change it to do it that way instead. | temporalio-temporal | go |
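A minimal sketch of the reviewer's alternative — initializing shardInfo with a non-nil placeholder in newContext — using the types from the file above; newDummyShardInfo is a hypothetical helper, not part of the codebase:

// newDummyShardInfo is a hypothetical helper: it builds a safe, non-nil
// placeholder so reads of s.shardInfo before acquisition see a dummy value
// instead of nil. A RangeId of -1 makes any premature persistence call fail.
func newDummyShardInfo(shardID int32) *persistence.ShardInfoWithFailover {
	return &persistence.ShardInfoWithFailover{
		ShardInfo: &persistencespb.ShardInfo{
			ShardId: shardID,
			RangeId: -1,
		},
	}
}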
@@ -960,3 +960,8 @@ func SessionInfoFromProtocol(session keybase1.Session) (SessionInfo, error) {
VerifyingKey: verifyingKey,
}, nil
}
+
+// NodeMetadata has metadata about a node needed for higher level operations.
+type NodeMetadata struct {
+ LastWriter keybase1.UID
+} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"encoding/hex"
"fmt"
"reflect"
"strings"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
)
const (
// ReaderSep is the string that separates readers from writers in a
// TLF name.
ReaderSep = "#"
// TlfHandleExtensionSep is the string that separates the folder
// participants from an extension suffix in the TLF name.
TlfHandleExtensionSep = " "
// PublicUIDName is the name given to keybase1.PublicUID. This string
// should correspond to an illegal or reserved Keybase user name.
PublicUIDName = "_public"
)
// disallowedPrefixes lists prefixes that are not allowed at the beginning
// of any user-created directory entry name.
var disallowedPrefixes = [...]string{".kbfs"}
// UserInfo contains all the info about a keybase user that kbfs cares
// about.
type UserInfo struct {
Name libkb.NormalizedUsername
UID keybase1.UID
VerifyingKeys []VerifyingKey
CryptPublicKeys []CryptPublicKey
KIDNames map[keybase1.KID]string
// Revoked keys, and the time at which they were revoked.
RevokedVerifyingKeys map[VerifyingKey]keybase1.KeybaseTime
RevokedCryptPublicKeys map[CryptPublicKey]keybase1.KeybaseTime
}
// SessionInfo contains all the info about the keybase session that
// kbfs cares about.
type SessionInfo struct {
Name libkb.NormalizedUsername
UID keybase1.UID
Token string
CryptPublicKey CryptPublicKey
VerifyingKey VerifyingKey
}
// SigVer denotes a signature version.
type SigVer int
const (
// SigED25519 is the signature type for ED25519
SigED25519 SigVer = 1
)
// IsNil returns true if this SigVer is nil.
func (v SigVer) IsNil() bool {
return int(v) == 0
}
// SignatureInfo contains all the info needed to verify a signature
// for a message.
type SignatureInfo struct {
// Exported only for serialization purposes.
Version SigVer `codec:"v"`
Signature []byte `codec:"s"`
VerifyingKey VerifyingKey `codec:"k"`
}
// IsNil returns true if this SignatureInfo is nil.
func (s SignatureInfo) IsNil() bool {
return s.Version.IsNil() && len(s.Signature) == 0 && s.VerifyingKey.IsNil()
}
// deepCopy makes a complete copy of this SignatureInfo.
func (s SignatureInfo) deepCopy() SignatureInfo {
signature := make([]byte, len(s.Signature))
copy(signature[:], s.Signature[:])
return SignatureInfo{s.Version, signature, s.VerifyingKey}
}
// String implements the fmt.Stringer interface for SignatureInfo.
func (s SignatureInfo) String() string {
return fmt.Sprintf("SignatureInfo{Version: %d, Signature: %s, "+
"VerifyingKey: %s}", s.Version, hex.EncodeToString(s.Signature[:]),
&s.VerifyingKey)
}
// TLFEphemeralPublicKeys stores a list of TLFEphemeralPublicKey
type TLFEphemeralPublicKeys []TLFEphemeralPublicKey
// EncryptionVer denotes a version for the encryption method.
type EncryptionVer int
const (
// EncryptionSecretbox is the encryption version that uses
// nacl/secretbox or nacl/box.
EncryptionSecretbox EncryptionVer = 1
)
// encryptedData is encrypted data with a nonce and a version.
type encryptedData struct {
// Exported only for serialization purposes. Should only be
// used by implementations of Crypto.
Version EncryptionVer `codec:"v"`
EncryptedData []byte `codec:"e"`
Nonce []byte `codec:"n"`
}
// EncryptedTLFCryptKeyClientHalf is an encrypted
// TLFCryptKeyClientHalf object.
type EncryptedTLFCryptKeyClientHalf encryptedData
// EncryptedPrivateMetadata is an encrypted PrivateMetadata object.
type EncryptedPrivateMetadata encryptedData
// EncryptedBlock is an encrypted Block.
type EncryptedBlock encryptedData
// EncryptedMerkleLeaf is an encrypted Merkle leaf.
type EncryptedMerkleLeaf struct {
_struct bool `codec:",toarray"`
Version EncryptionVer
EncryptedData []byte
}
// EncryptedTLFCryptKeyClientAndEphemeral has what's needed to
// request a client half decryption.
type EncryptedTLFCryptKeyClientAndEphemeral struct {
	// PubKey contains the wrapped Key ID of the public key
PubKey CryptPublicKey
// ClientHalf contains the encrypted client half of the TLF key
ClientHalf EncryptedTLFCryptKeyClientHalf
// EPubKey contains the ephemeral public key used to encrypt ClientHalf
EPubKey TLFEphemeralPublicKey
}
// KeyGen is the type of a key generation for a top-level folder.
type KeyGen int
const (
// PublicKeyGen is the value used for public TLFs. Note that
// it is not considered a valid key generation.
PublicKeyGen KeyGen = -1
// FirstValidKeyGen is the first value that is considered a
// valid key generation. Note that the nil value is not
// considered valid.
FirstValidKeyGen = 1
)
// MetadataVer is the type of a version for marshalled KBFS metadata
// structures.
type MetadataVer int
const (
// FirstValidMetadataVer is the first value that is considered a
// valid data version. For historical reasons 0 is considered
// valid.
FirstValidMetadataVer = 0
// PreExtraMetadataVer is the latest metadata version that did not include
// support for extra MD fields.
PreExtraMetadataVer = 1
// InitialExtraMetadataVer is the first metadata version that did
// include support for extra MD fields.
InitialExtraMetadataVer = 2
)
// DataVer is the type of a version for marshalled KBFS data
// structures.
type DataVer int
const (
// FirstValidDataVer is the first value that is considered a
// valid data version. Note that the nil value is not
// considered valid.
FirstValidDataVer = 1
// FilesWithHolesDataVer is the data version for files
// with holes.
FilesWithHolesDataVer = 2
)
// BlockRefNonce is a 64-bit unique sequence of bytes for identifying
// this reference of a block ID from other references to the same
// (duplicated) block.
type BlockRefNonce [8]byte
// zeroBlockRefNonce is a special BlockRefNonce used for the initial
// reference to a block.
var zeroBlockRefNonce = BlockRefNonce([8]byte{0, 0, 0, 0, 0, 0, 0, 0})
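// String implements the fmt.Stringer interface for BlockRefNonce.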
func (nonce BlockRefNonce) String() string {
return hex.EncodeToString(nonce[:])
}
// blockRef is a block ID/ref nonce pair, which defines a unique
// reference to a block.
type blockRef struct {
id BlockID
refNonce BlockRefNonce
}
func (r blockRef) IsValid() bool {
return r.id.IsValid()
}
func (r blockRef) String() string {
s := fmt.Sprintf("blockRef{id: %s", r.id)
if r.refNonce != zeroBlockRefNonce {
s += fmt.Sprintf(", refNonce: %s", r.refNonce)
}
s += "}"
return s
}
// BlockContext contains all the information used by the server to
// identify blocks (other than the ID).
//
// NOTE: Don't add or modify anything in this struct without
// considering how old clients will handle them.
type BlockContext struct {
// Creator is the UID that was first charged for the initial
// reference to this block.
Creator keybase1.UID `codec:"c"`
// Writer is the UID that should be charged for this reference to
// the block. If empty, it defaults to Creator.
Writer keybase1.UID `codec:"w,omitempty"`
// When RefNonce is all 0s, this is the initial reference to a
// particular block. Using a constant refnonce for the initial
// reference allows the server to identify and optimize for the
// common case where there is only one reference for a block. Two
// initial references cannot happen simultaneously, because the
// encrypted block contents (and thus the block ID) will be
// randomized by the server-side block crypt key half. All
// subsequent references to the same block must have a random
// RefNonce (it can't be a monotonically increasing number because
// that would require coordination among clients).
RefNonce BlockRefNonce `codec:"r,omitempty"`
}
// GetCreator returns the creator of the associated block.
func (c BlockContext) GetCreator() keybase1.UID {
return c.Creator
}
// GetWriter returns the writer of the associated block.
func (c BlockContext) GetWriter() keybase1.UID {
if !c.Writer.IsNil() {
return c.Writer
}
return c.Creator
}
// SetWriter sets the Writer field, if necessary.
func (c *BlockContext) SetWriter(newWriter keybase1.UID) {
if c.Creator != newWriter {
c.Writer = newWriter
} else {
// save some bytes by not populating the separate Writer
// field if it matches the creator.
c.Writer = ""
}
}
// GetRefNonce returns the ref nonce of the associated block.
func (c BlockContext) GetRefNonce() BlockRefNonce {
return c.RefNonce
}
// IsFirstRef returns whether or not c represents the first reference
// to the corresponding BlockID.
func (c BlockContext) IsFirstRef() bool {
return c.RefNonce == zeroBlockRefNonce
}
func (c BlockContext) String() string {
s := fmt.Sprintf("BlockContext{Creator: %s", c.Creator)
if len(c.Writer) > 0 {
s += fmt.Sprintf(", Writer: %s", c.Writer)
}
if c.RefNonce != zeroBlockRefNonce {
s += fmt.Sprintf(", RefNonce: %s", c.RefNonce)
}
s += "}"
return s
}
// BlockPointer contains the identifying information for a block in KBFS.
//
// NOTE: Don't add or modify anything in this struct without
// considering how old clients will handle them.
type BlockPointer struct {
ID BlockID `codec:"i"`
KeyGen KeyGen `codec:"k"` // if valid, which generation of the TLF{Writer,Reader}KeyBundle to use.
DataVer DataVer `codec:"d"` // if valid, which version of the KBFS data structures is pointed to
BlockContext
}
// IsValid returns whether the block pointer is valid. A zero block
// pointer is considered invalid.
func (p BlockPointer) IsValid() bool {
if !p.ID.IsValid() {
return false
}
// TODO: Should also check KeyGen, DataVer, and Creator. (A
// bunch of tests use invalid values for one of these.)
return true
}
func (p BlockPointer) String() string {
return fmt.Sprintf("BlockPointer{ID: %s, KeyGen: %d, DataVer: %d, Context: %s}", p.ID, p.KeyGen, p.DataVer, p.BlockContext)
}
// IsInitialized returns whether or not this BlockPointer has non-nil data.
func (p BlockPointer) IsInitialized() bool {
return p.ID != BlockID{}
}
func (p BlockPointer) ref() blockRef {
return blockRef{
id: p.ID,
refNonce: p.RefNonce,
}
}
// BlockInfo contains all information about a block in KBFS and its
// contents.
//
// NOTE: Don't add or modify anything in this struct without
// considering how old clients will handle them.
type BlockInfo struct {
BlockPointer
// When non-zero, the size of the encoded (and possibly
// encrypted) data contained in the block. When non-zero,
// always at least the size of the plaintext data contained in
// the block.
EncodedSize uint32 `codec:"e"`
}
func (bi BlockInfo) String() string {
return fmt.Sprintf("BlockInfo{BlockPointer: %s, EncodedSize: %d}",
bi.BlockPointer, bi.EncodedSize)
}
var bpSize = uint64(reflect.TypeOf(BlockPointer{}).Size())
// ReadyBlockData is a block that has been encoded (and encrypted).
type ReadyBlockData struct {
// These fields should not be used outside of BlockOps.Put().
buf []byte
serverHalf BlockCryptKeyServerHalf
}
// GetEncodedSize returns the size of the encoded (and encrypted)
// block data.
func (r ReadyBlockData) GetEncodedSize() int {
return len(r.buf)
}
// Favorite is a top-level favorited folder name.
type Favorite struct {
Name string
Public bool
}
// NewFavoriteFromFolder creates a Favorite from a
// keybase1.Folder.
func NewFavoriteFromFolder(folder keybase1.Folder) *Favorite {
name := folder.Name
if !folder.Private {
// Old versions of the client still use an outdated "#public"
// suffix for favorited public folders. TODO: remove this once
// those old versions of the client are retired.
const oldPublicSuffix = ReaderSep + "public"
name = strings.TrimSuffix(folder.Name, oldPublicSuffix)
}
return &Favorite{
Name: name,
Public: !folder.Private,
}
}
func (f Favorite) toKBFolder(created bool) keybase1.Folder {
return keybase1.Folder{
Name: f.Name,
Private: !f.Public,
Created: created,
}
}
// pathNode is a single node along a KBFS path, pointing to the top
// block for that node of the path.
type pathNode struct {
BlockPointer
Name string
}
func (n pathNode) isValid() bool {
return n.BlockPointer.IsValid()
}
// DebugString returns a string representation of the node with all
// pointer information.
func (n pathNode) DebugString() string {
return fmt.Sprintf("%s(ptr=%s)", n.Name, n.BlockPointer)
}
// BranchName is the name given to a KBFS branch, for a particular
// top-level folder. Currently, the notion of a "branch" is
// client-side only, and can be used to specify which root to use for
// a top-level folder. (For example, viewing a historical archive
// could use a different branch name.)
type BranchName string
const (
// MasterBranch represents the mainline branch for a top-level
// folder. Set to the empty string so that the default will be
// the master branch.
MasterBranch BranchName = ""
)
// FolderBranch represents a unique pair of top-level folder and a
// branch of that folder.
type FolderBranch struct {
Tlf TlfID
Branch BranchName // master branch, by default
}
func (fb FolderBranch) String() string {
s := fb.Tlf.String()
if len(fb.Branch) > 0 {
s += fmt.Sprintf("(branch=%s)", fb.Branch)
}
return s
}
// path represents the full KBFS path to a particular location, so
// that a flush can traverse backwards and fix up ids along the way.
type path struct {
FolderBranch
path []pathNode
}
// isValid() returns true if the path has at least one node (for the
// root) and every node in it is valid.
func (p path) isValid() bool {
if len(p.path) < 1 {
return false
}
for _, n := range p.path {
if !n.isValid() {
return false
}
}
return true
}
// hasValidParent() returns true if this path is valid and
// parentPath() is a valid path.
func (p path) hasValidParent() bool {
return len(p.path) >= 2 && p.parentPath().isValid()
}
// tailName returns the name of the final node in the Path. Must be
// called with a valid path.
func (p path) tailName() string {
return p.path[len(p.path)-1].Name
}
// tailPointer returns the BlockPointer of the final node in the Path.
// Must be called with a valid path.
func (p path) tailPointer() BlockPointer {
return p.path[len(p.path)-1].BlockPointer
}
// DebugString returns a string representation of the path with all
// branch and pointer information.
func (p path) DebugString() string {
debugNames := make([]string, 0, len(p.path))
for _, node := range p.path {
debugNames = append(debugNames, node.DebugString())
}
return fmt.Sprintf("%s:%s", p.FolderBranch, strings.Join(debugNames, "/"))
}
// String implements the fmt.Stringer interface for Path.
func (p path) String() string {
names := make([]string, 0, len(p.path))
for _, node := range p.path {
names = append(names, node.Name)
}
return strings.Join(names, "/")
}
// parentPath returns a new Path representing the parent subdirectory
// of this Path. Must be called with a valid path. Should not be
// called with a path of only a single node, as that would produce an
// invalid path.
func (p path) parentPath() *path {
return &path{p.FolderBranch, p.path[:len(p.path)-1]}
}
// ChildPath returns a new Path with the addition of a new entry
// with the given name and BlockPointer.
func (p path) ChildPath(name string, ptr BlockPointer) path {
child := path{
FolderBranch: p.FolderBranch,
path: make([]pathNode, len(p.path), len(p.path)+1),
}
copy(child.path, p.path)
child.path = append(child.path, pathNode{Name: name, BlockPointer: ptr})
return child
}
// ChildPathNoPtr returns a new Path with the addition of a new entry
// with the given name. That final PathNode will have no BlockPointer.
func (p path) ChildPathNoPtr(name string) path {
return p.ChildPath(name, BlockPointer{})
}
// hasPublic returns whether or not this is a top-level folder that
// should have a "public" subdirectory.
func (p path) hasPublic() bool {
// This directory has a corresponding public subdirectory if the
// path has only one node and the top-level directory is not
	// already public. TODO: Ideally, we'd also check if there are no
// explicit readers, but for now we expect the caller to check
// that.
return len(p.path) == 1 && !p.Tlf.IsPublic()
}
// BlockChanges tracks the set of blocks that changed in a commit, and
// the operations that made the changes. It might consist of just a
// BlockPointer if the list is too big to embed in the MD structure
// directly.
//
// If this commit represents a conflict-resolution merge, which may
// comprise multiple individual operations, then there will be an
// ordered list of the changes for individual operations. This lets
// the notification and conflict resolution strategies figure out the
// difference between a renamed file and a modified file, for example.
//
// NOTE: Don't add or modify anything in this struct without
// considering how old clients will handle them.
type BlockChanges struct {
// If this is set, the actual changes are stored in a block (where
// the block contains a serialized version of BlockChanges)
Info BlockInfo `codec:"p,omitempty"`
// An ordered list of operations completed in this update
Ops opsList `codec:"o,omitempty"`
// Estimate the number of bytes that this set of changes will take to encode
sizeEstimate uint64
}
// Equals returns true if the given BlockChanges is equal to this
// BlockChanges. Currently does not check for equality at the
// operation level.
func (bc BlockChanges) Equals(other BlockChanges) bool {
if bc.Info != other.Info || len(bc.Ops) != len(other.Ops) ||
(bc.sizeEstimate != 0 && other.sizeEstimate != 0 &&
bc.sizeEstimate != other.sizeEstimate) {
return false
}
// TODO: check for op equality?
return true
}
// AddRefBlock adds the newly-referenced block to this BlockChanges
// and updates the size estimate.
func (bc *BlockChanges) AddRefBlock(ptr BlockPointer) {
if bc.sizeEstimate != 0 {
panic("Can't alter block changes after the size is estimated")
}
bc.Ops[len(bc.Ops)-1].AddRefBlock(ptr)
}
// AddUnrefBlock adds the newly unreferenced block to this BlockChanges
// and updates the size estimate.
func (bc *BlockChanges) AddUnrefBlock(ptr BlockPointer) {
if bc.sizeEstimate != 0 {
panic("Can't alter block changes after the size is estimated")
}
bc.Ops[len(bc.Ops)-1].AddUnrefBlock(ptr)
}
// AddUpdate adds the newly updated block to this BlockChanges
// and updates the size estimate.
func (bc *BlockChanges) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) {
if bc.sizeEstimate != 0 {
panic("Can't alter block changes after the size is estimated")
}
bc.Ops[len(bc.Ops)-1].AddUpdate(oldPtr, newPtr)
}
// AddOp starts a new operation for this BlockChanges. Subsequent
// Add* calls will populate this operation.
func (bc *BlockChanges) AddOp(o op) {
if bc.sizeEstimate != 0 {
panic("Can't alter block changes after the size is estimated")
}
bc.Ops = append(bc.Ops, o)
}
// SizeEstimate calculates the estimated size of the encoded version
// of this BlockChanges.
func (bc *BlockChanges) SizeEstimate() uint64 {
if bc.sizeEstimate == 0 {
for _, op := range bc.Ops {
numPtrs := len(op.Refs()) + len(op.Unrefs()) +
2*len(op.AllUpdates())
bc.sizeEstimate += uint64(numPtrs)*bpSize + op.SizeExceptUpdates()
}
}
return bc.sizeEstimate
}
// EntryType is the type of a directory entry.
type EntryType int
const (
// File is a regular file.
File EntryType = iota
// Exec is an executable file.
Exec
// Dir is a directory.
Dir
// Sym is a symbolic link.
Sym
)
// String implements the fmt.Stringer interface for EntryType
func (et EntryType) String() string {
switch et {
case File:
return "FILE"
case Exec:
return "EXEC"
case Dir:
return "DIR"
case Sym:
return "SYM"
}
return "<invalid EntryType>"
}
// Excl indicates whether O_EXCL is set on a fuse call
type Excl bool
const (
// NoExcl indicates O_EXCL is not set
NoExcl Excl = false
// WithExcl indicates O_EXCL is set
WithExcl Excl = true
)
func (o Excl) String() string {
switch o {
case NoExcl:
return "O_EXCL unset"
case WithExcl:
return "O_EXCL set"
default:
return "<invalid Excl>"
}
}
// EntryInfo is the (non-block-related) info a directory knows about
// its child.
//
// NOTE: Don't add or modify anything in this struct without
// considering how old clients will handle them (since this is
// embedded in DirEntry).
type EntryInfo struct {
Type EntryType
Size uint64
SymPath string `codec:",omitempty"` // must be within the same root dir
// Mtime is in unix nanoseconds
Mtime int64
// Ctime is in unix nanoseconds
Ctime int64
}
// extCode is used to register codec extensions
type extCode uint64
// these track the start of a range of unique extCodes for various
// types of extensions.
const (
extCodeOpsRangeStart = 1
extCodeListRangeStart = 101
)
// ReportedError represents an error reported by KBFS.
type ReportedError struct {
Time time.Time
Error error
Stack []uintptr
}
// MergeStatus represents the merge status of a TLF.
type MergeStatus int
const (
// Merged means that the TLF is merged and no conflict
// resolution needs to be done.
Merged MergeStatus = iota
// Unmerged means that the TLF is unmerged and conflict
// resolution needs to be done. Metadata blocks which
// represent unmerged history should have a non-null
// branch ID defined.
Unmerged
)
func (m MergeStatus) String() string {
switch m {
case Merged:
return "merged"
case Unmerged:
return "unmerged"
default:
return "unknown"
}
}
// UsageType indicates the type of usage that quota manager is keeping stats of
type UsageType int
const (
// UsageWrite indicates a block is written (written blocks include archived blocks)
UsageWrite UsageType = iota
// UsageArchive indicates an existing block is archived
UsageArchive
// UsageRead indicates a block is read
UsageRead
// NumUsage indicates the number of usage types
NumUsage
)
// UsageStat tracks the amount of bytes/blocks used, broken down by usage types
type UsageStat struct {
Bytes map[UsageType]int64
Blocks map[UsageType]int64
// Mtime is in unix nanoseconds
Mtime int64
}
// NewUsageStat creates a new UsageStat
func NewUsageStat() *UsageStat {
return &UsageStat{
Bytes: make(map[UsageType]int64),
Blocks: make(map[UsageType]int64),
}
}
// NonZero checks whether UsageStat has accumulated any usage info
func (u *UsageStat) NonZero() bool {
for i := UsageType(0); i < NumUsage; i++ {
if u.Bytes[i] != 0 {
return true
}
}
return false
}
// AccumOne records the usage of one block, whose size is denoted by change.
// A positive change means the block is newly added, a negative one means the
// block is deleted. The usage type tracks whether the block was written,
// archived or read.
func (u *UsageStat) AccumOne(change int, usage UsageType) {
if change == 0 {
return
}
if usage < UsageWrite || usage > UsageRead {
return
}
u.Bytes[usage] += int64(change)
if change > 0 {
u.Blocks[usage]++
} else {
u.Blocks[usage]--
}
}
// Accum combines changes to the existing UserQuotaInfo object using accumulation function accumF.
func (u *UsageStat) Accum(another *UsageStat, accumF func(int64, int64) int64) {
if another == nil {
return
}
for k, v := range another.Bytes {
u.Bytes[k] = accumF(u.Bytes[k], v)
}
for k, v := range another.Blocks {
u.Blocks[k] = accumF(u.Blocks[k], v)
}
}
// UserQuotaInfo contains a user's quota usage information
type UserQuotaInfo struct {
Folders map[string]*UsageStat
Total *UsageStat
Limit int64
}
// NewUserQuotaInfo returns a newly constructed UserQuotaInfo.
func NewUserQuotaInfo() *UserQuotaInfo {
return &UserQuotaInfo{
Folders: make(map[string]*UsageStat),
Total: NewUsageStat(),
}
}
// AccumOne combines one quota charge to the existing UserQuotaInfo
func (u *UserQuotaInfo) AccumOne(change int, folder string, usage UsageType) {
if _, ok := u.Folders[folder]; !ok {
u.Folders[folder] = NewUsageStat()
}
u.Folders[folder].AccumOne(change, usage)
u.Total.AccumOne(change, usage)
}
// Accum combines changes to the existing UserQuotaInfo object using accumulation function accumF.
func (u *UserQuotaInfo) Accum(another *UserQuotaInfo, accumF func(int64, int64) int64) {
if another == nil {
return
}
if u.Total == nil {
u.Total = NewUsageStat()
}
u.Total.Accum(another.Total, accumF)
for f, change := range another.Folders {
if _, ok := u.Folders[f]; !ok {
u.Folders[f] = NewUsageStat()
}
u.Folders[f].Accum(change, accumF)
}
}
// ToBytes marshals this UserQuotaInfo
func (u *UserQuotaInfo) ToBytes(config Config) ([]byte, error) {
return config.Codec().Encode(u)
}
// UserQuotaInfoDecode decodes b into a UserQuotaInfo
func UserQuotaInfoDecode(b []byte, config Config) (*UserQuotaInfo, error) {
var info UserQuotaInfo
err := config.Codec().Decode(b, &info)
if err != nil {
return nil, err
}
return &info, nil
}
// OpSummary describes the changes performed by a single op, and is
// suitable for encoding directly as JSON.
type OpSummary struct {
Op string
Refs []string
Unrefs []string
Updates map[string]string
}
// UpdateSummary describes the operations done by a single MD revision.
type UpdateSummary struct {
Revision MetadataRevision
Date time.Time
Writer string
LiveBytes uint64 // the "DiskUsage" for the TLF as of this revision
Ops []OpSummary
}
// TLFUpdateHistory gives all the summaries of all updates in a TLF's
// history.
type TLFUpdateHistory struct {
ID string
Name string
Updates []UpdateSummary
}
// writerInfo is the keybase username and device that generated the operation.
type writerInfo struct {
name libkb.NormalizedUsername
uid keybase1.UID
kid keybase1.KID
deviceName string
}
// ErrorModeType indicates what type of operation was being attempted
// when an error was reported.
type ErrorModeType int
const (
// ReadMode indicates that an error happened while trying to read.
ReadMode ErrorModeType = iota
// WriteMode indicates that an error happened while trying to write.
WriteMode
)
// UserInfoFromProtocol returns UserInfo from UserPlusKeys
func UserInfoFromProtocol(upk keybase1.UserPlusKeys) (UserInfo, error) {
verifyingKeys, cryptPublicKeys, kidNames, err := filterKeys(upk.DeviceKeys)
if err != nil {
return UserInfo{}, err
}
revokedVerifyingKeys, revokedCryptPublicKeys, revokedKidNames, err := filterRevokedKeys(upk.RevokedDeviceKeys)
if err != nil {
return UserInfo{}, err
}
for k, v := range revokedKidNames {
kidNames[k] = v
}
return UserInfo{
Name: libkb.NewNormalizedUsername(upk.Username),
UID: upk.Uid,
VerifyingKeys: verifyingKeys,
CryptPublicKeys: cryptPublicKeys,
KIDNames: kidNames,
RevokedVerifyingKeys: revokedVerifyingKeys,
RevokedCryptPublicKeys: revokedCryptPublicKeys,
}, nil
}
// SessionInfoFromProtocol returns SessionInfo from Session
func SessionInfoFromProtocol(session keybase1.Session) (SessionInfo, error) {
// Import the KIDs to validate them.
deviceSubkey, err := libkb.ImportKeypairFromKID(session.DeviceSubkeyKid)
if err != nil {
return SessionInfo{}, err
}
deviceSibkey, err := libkb.ImportKeypairFromKID(session.DeviceSibkeyKid)
if err != nil {
return SessionInfo{}, err
}
cryptPublicKey := MakeCryptPublicKey(deviceSubkey.GetKID())
verifyingKey := MakeVerifyingKey(deviceSibkey.GetKID())
return SessionInfo{
Name: libkb.NewNormalizedUsername(session.Username),
UID: keybase1.UID(session.Uid),
Token: session.Token,
CryptPublicKey: cryptPublicKey,
VerifyingKey: verifyingKey,
}, nil
}
| 1 | 12,788 | Maybe we can just put the entire `DirEntry` in here? I can imagine it might be useful for debugging to get the block ID/refnonce, encrypted size, key gen, data version, etc. What do you think? | keybase-kbfs | go |
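A sketch of the reviewer's suggestion — embedding the full entry in NodeMetadata — assuming libkbfs's existing DirEntry type; whether to embed it or add a named field is an open design choice:

// NodeMetadata has metadata about a node needed for higher level operations.
type NodeMetadata struct {
	LastWriter keybase1.UID
	// Embedding DirEntry exposes the BlockInfo (block ID, ref nonce, encoded
	// size, key gen, data version) and EntryInfo for debugging.
	DirEntry
}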
@@ -49,9 +49,9 @@ ewmh_client_update_hints(lua_State *L)
state[i++] = _NET_WM_STATE_MODAL;
if(c->fullscreen)
state[i++] = _NET_WM_STATE_FULLSCREEN;
- if(c->maximized_vertical)
+ if(c->maximized_vertical || c->maximized)
state[i++] = _NET_WM_STATE_MAXIMIZED_VERT;
- if(c->maximized_horizontal)
+ if(c->maximized_horizontal || c->maximized)
state[i++] = _NET_WM_STATE_MAXIMIZED_HORZ;
if(c->sticky)
state[i++] = _NET_WM_STATE_STICKY; | 1 | /*
* ewmh.c - EWMH support functions
*
* Copyright © 2007-2009 Julien Danjou <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "ewmh.h"
#include "objects/client.h"
#include "objects/tag.h"
#include "common/atoms.h"
#include "xwindow.h"
#include <sys/types.h>
#include <unistd.h>
#include <xcb/xcb.h>
#include <xcb/xcb_atom.h>
#define _NET_WM_STATE_REMOVE 0
#define _NET_WM_STATE_ADD 1
#define _NET_WM_STATE_TOGGLE 2
/** Update client EWMH hints.
* \param L The Lua VM state.
*/
static int
ewmh_client_update_hints(lua_State *L)
{
client_t *c = luaA_checkudata(L, 1, &client_class);
xcb_atom_t state[10]; /* number of defined state atoms */
int i = 0;
if(c->modal)
state[i++] = _NET_WM_STATE_MODAL;
if(c->fullscreen)
state[i++] = _NET_WM_STATE_FULLSCREEN;
if(c->maximized_vertical)
state[i++] = _NET_WM_STATE_MAXIMIZED_VERT;
if(c->maximized_horizontal)
state[i++] = _NET_WM_STATE_MAXIMIZED_HORZ;
if(c->sticky)
state[i++] = _NET_WM_STATE_STICKY;
if(c->skip_taskbar)
state[i++] = _NET_WM_STATE_SKIP_TASKBAR;
if(c->above)
state[i++] = _NET_WM_STATE_ABOVE;
if(c->below)
state[i++] = _NET_WM_STATE_BELOW;
if(c->minimized)
state[i++] = _NET_WM_STATE_HIDDEN;
if(c->urgent)
state[i++] = _NET_WM_STATE_DEMANDS_ATTENTION;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
c->window, _NET_WM_STATE, XCB_ATOM_ATOM, 32, i, state);
return 0;
}
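/** Update the _NET_ACTIVE_WINDOW property on the root window.
 * \param L The Lua VM state.
 */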
static int
ewmh_update_net_active_window(lua_State *L)
{
xcb_window_t win;
if(globalconf.focus.client)
win = globalconf.focus.client->window;
else
win = XCB_NONE;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_ACTIVE_WINDOW, XCB_ATOM_WINDOW, 32, 1, &win);
return 0;
}
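/** Update the _NET_CLIENT_LIST property with all managed clients.
 * \param L The Lua VM state.
 */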
static int
ewmh_update_net_client_list(lua_State *L)
{
xcb_window_t *wins = p_alloca(xcb_window_t, globalconf.clients.len);
int n = 0;
foreach(client, globalconf.clients)
wins[n++] = (*client)->window;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_CLIENT_LIST, XCB_ATOM_WINDOW, 32, n, wins);
return 0;
}
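/** Update a client's _NET_FRAME_EXTENTS property from its border and titlebars.
 * \param L The Lua VM state.
 */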
static int
ewmh_client_update_frame_extents(lua_State *L)
{
    client_t *c = luaA_checkudata(L, 1, &client_class);
uint32_t extents[4];
extents[0] = c->border_width + c->titlebar[CLIENT_TITLEBAR_LEFT].size;
extents[1] = c->border_width + c->titlebar[CLIENT_TITLEBAR_RIGHT].size;
extents[2] = c->border_width + c->titlebar[CLIENT_TITLEBAR_TOP].size;
extents[3] = c->border_width + c->titlebar[CLIENT_TITLEBAR_BOTTOM].size;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
c->window, _NET_FRAME_EXTENTS, XCB_ATOM_CARDINAL, 32, 4, extents);
return 0;
}
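/** Initialize EWMH support: advertise the supported atoms and create the
 * _NET_SUPPORTING_WM_CHECK window.
 */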
void
ewmh_init(void)
{
xcb_window_t father;
xcb_screen_t *xscreen = globalconf.screen;
xcb_atom_t atom[] =
{
_NET_SUPPORTED,
_NET_SUPPORTING_WM_CHECK,
_NET_STARTUP_ID,
_NET_CLIENT_LIST,
_NET_CLIENT_LIST_STACKING,
_NET_NUMBER_OF_DESKTOPS,
_NET_CURRENT_DESKTOP,
_NET_DESKTOP_NAMES,
_NET_ACTIVE_WINDOW,
_NET_CLOSE_WINDOW,
_NET_FRAME_EXTENTS,
_NET_WM_NAME,
_NET_WM_STRUT_PARTIAL,
_NET_WM_ICON_NAME,
_NET_WM_VISIBLE_ICON_NAME,
_NET_WM_DESKTOP,
_NET_WM_WINDOW_TYPE,
_NET_WM_WINDOW_TYPE_DESKTOP,
_NET_WM_WINDOW_TYPE_DOCK,
_NET_WM_WINDOW_TYPE_TOOLBAR,
_NET_WM_WINDOW_TYPE_MENU,
_NET_WM_WINDOW_TYPE_UTILITY,
_NET_WM_WINDOW_TYPE_SPLASH,
_NET_WM_WINDOW_TYPE_DIALOG,
_NET_WM_WINDOW_TYPE_DROPDOWN_MENU,
_NET_WM_WINDOW_TYPE_POPUP_MENU,
_NET_WM_WINDOW_TYPE_TOOLTIP,
_NET_WM_WINDOW_TYPE_NOTIFICATION,
_NET_WM_WINDOW_TYPE_COMBO,
_NET_WM_WINDOW_TYPE_DND,
_NET_WM_WINDOW_TYPE_NORMAL,
_NET_WM_ICON,
_NET_WM_PID,
_NET_WM_STATE,
_NET_WM_STATE_STICKY,
_NET_WM_STATE_SKIP_TASKBAR,
_NET_WM_STATE_FULLSCREEN,
_NET_WM_STATE_MAXIMIZED_HORZ,
_NET_WM_STATE_MAXIMIZED_VERT,
_NET_WM_STATE_ABOVE,
_NET_WM_STATE_BELOW,
_NET_WM_STATE_MODAL,
_NET_WM_STATE_HIDDEN,
_NET_WM_STATE_DEMANDS_ATTENTION
};
int i;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
xscreen->root, _NET_SUPPORTED, XCB_ATOM_ATOM, 32,
countof(atom), atom);
/* create our own window */
father = xcb_generate_id(globalconf.connection);
xcb_create_window(globalconf.connection, xscreen->root_depth,
father, xscreen->root, -1, -1, 1, 1, 0,
XCB_COPY_FROM_PARENT, xscreen->root_visual, 0, NULL);
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
xscreen->root, _NET_SUPPORTING_WM_CHECK, XCB_ATOM_WINDOW, 32,
1, &father);
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
father, _NET_SUPPORTING_WM_CHECK, XCB_ATOM_WINDOW, 32,
1, &father);
/* set the window manager name */
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
father, _NET_WM_NAME, UTF8_STRING, 8, 7, "awesome");
/* Set an instance, just because we can */
xwindow_set_class_instance(father);
/* set the window manager PID */
i = getpid();
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
father, _NET_WM_PID, XCB_ATOM_CARDINAL, 32, 1, &i);
}
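/** Connect the signals needed to keep the EWMH properties up to date.
 */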
void
ewmh_init_lua(void)
{
lua_State *L = globalconf_get_lua_State();
luaA_class_connect_signal(L, &client_class, "focus", ewmh_update_net_active_window);
luaA_class_connect_signal(L, &client_class, "unfocus", ewmh_update_net_active_window);
luaA_class_connect_signal(L, &client_class, "manage", ewmh_update_net_client_list);
luaA_class_connect_signal(L, &client_class, "unmanage", ewmh_update_net_client_list);
luaA_class_connect_signal(L, &client_class, "property::modal" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::fullscreen" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::maximized_horizontal" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::maximized_vertical" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::sticky" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::skip_taskbar" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::above" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::below" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::minimized" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::urgent" , ewmh_client_update_hints);
luaA_class_connect_signal(L, &client_class, "property::titlebar_top" , ewmh_client_update_frame_extents);
luaA_class_connect_signal(L, &client_class, "property::titlebar_bottom" , ewmh_client_update_frame_extents);
luaA_class_connect_signal(L, &client_class, "property::titlebar_right" , ewmh_client_update_frame_extents);
luaA_class_connect_signal(L, &client_class, "property::titlebar_left" , ewmh_client_update_frame_extents);
luaA_class_connect_signal(L, &client_class, "property::border_width" , ewmh_client_update_frame_extents);
luaA_class_connect_signal(L, &client_class, "manage", ewmh_client_update_frame_extents);
/* NET_CURRENT_DESKTOP handling */
luaA_class_connect_signal(L, &client_class, "focus", ewmh_update_net_current_desktop);
luaA_class_connect_signal(L, &client_class, "unfocus", ewmh_update_net_current_desktop);
luaA_class_connect_signal(L, &client_class, "tagged", ewmh_update_net_current_desktop);
luaA_class_connect_signal(L, &client_class, "untagged", ewmh_update_net_current_desktop);
luaA_class_connect_signal(L, &tag_class, "property::selected", ewmh_update_net_current_desktop);
}
/** Set the client list in stacking order, bottom to top.
*/
void
ewmh_update_net_client_list_stacking(void)
{
int n = 0;
xcb_window_t *wins = p_alloca(xcb_window_t, globalconf.stack.len);
foreach(client, globalconf.stack)
wins[n++] = (*client)->window;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_CLIENT_LIST_STACKING, XCB_ATOM_WINDOW, 32, n, wins);
}
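/** Update the _NET_NUMBER_OF_DESKTOPS property with the number of tags.
 */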
void
ewmh_update_net_numbers_of_desktop(void)
{
uint32_t count = globalconf.tags.len;
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_NUMBER_OF_DESKTOPS, XCB_ATOM_CARDINAL, 32, 1, &count);
}
int
ewmh_update_net_current_desktop(lua_State *L)
{
uint32_t idx = tags_get_current_or_first_selected_index();
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_CURRENT_DESKTOP, XCB_ATOM_CARDINAL, 32, 1, &idx);
return 0;
}
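/** Update the _NET_DESKTOP_NAMES property with the tag names.
 */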
void
ewmh_update_net_desktop_names(void)
{
buffer_t buf;
buffer_inita(&buf, BUFSIZ);
foreach(tag, globalconf.tags)
{
buffer_adds(&buf, tag_get_name(*tag));
buffer_addc(&buf, '\0');
}
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
globalconf.screen->root,
_NET_DESKTOP_NAMES, UTF8_STRING, 8, buf.len, buf.s);
buffer_wipe(&buf);
}
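/** Apply a _NET_WM_STATE request to a client.
 * \param c The client.
 * \param state The state atom to apply.
 * \param set The action: _NET_WM_STATE_REMOVE, _NET_WM_STATE_ADD or
 * _NET_WM_STATE_TOGGLE.
 */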
static void
ewmh_process_state_atom(client_t *c, xcb_atom_t state, int set)
{
lua_State *L = globalconf_get_lua_State();
luaA_object_push(L, c);
if(state == _NET_WM_STATE_STICKY)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_sticky(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_sticky(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_sticky(L, -1, !c->sticky);
}
else if(state == _NET_WM_STATE_SKIP_TASKBAR)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_skip_taskbar(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_skip_taskbar(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_skip_taskbar(L, -1, !c->skip_taskbar);
}
else if(state == _NET_WM_STATE_FULLSCREEN)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_fullscreen(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_fullscreen(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_fullscreen(L, -1, !c->fullscreen);
}
else if(state == _NET_WM_STATE_MAXIMIZED_HORZ)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_maximized_horizontal(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_maximized_horizontal(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_maximized_horizontal(L, -1, !c->maximized_horizontal);
}
else if(state == _NET_WM_STATE_MAXIMIZED_VERT)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_maximized_vertical(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_maximized_vertical(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_maximized_vertical(L, -1, !c->maximized_vertical);
}
else if(state == _NET_WM_STATE_ABOVE)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_above(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_above(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_above(L, -1, !c->above);
}
else if(state == _NET_WM_STATE_BELOW)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_below(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_below(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_below(L, -1, !c->below);
}
else if(state == _NET_WM_STATE_MODAL)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_modal(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_modal(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_modal(L, -1, !c->modal);
}
else if(state == _NET_WM_STATE_HIDDEN)
{
if(set == _NET_WM_STATE_REMOVE)
client_set_minimized(L, -1, false);
else if(set == _NET_WM_STATE_ADD)
client_set_minimized(L, -1, true);
else if(set == _NET_WM_STATE_TOGGLE)
client_set_minimized(L, -1, !c->minimized);
}
else if(state == _NET_WM_STATE_DEMANDS_ATTENTION)
{
if(set == _NET_WM_STATE_REMOVE) {
lua_pushboolean(L, false);
luaA_object_emit_signal(L, -2, "request::urgent", 1);
}
else if(set == _NET_WM_STATE_ADD) {
lua_pushboolean(L, true);
luaA_object_emit_signal(L, -2, "request::urgent", 1);
}
else if(set == _NET_WM_STATE_TOGGLE) {
lua_pushboolean(L, !c->urgent);
luaA_object_emit_signal(L, -2, "request::urgent", 1);
}
}
lua_pop(L, 1);
}
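/** Apply a _NET_WM_DESKTOP request to a client by emitting request::tag.
 * \param c The client.
 * \param desktop The desktop index, or 0xffffffff for all tags.
 */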
static void
ewmh_process_desktop(client_t *c, uint32_t desktop)
{
lua_State *L = globalconf_get_lua_State();
int idx = desktop;
if(desktop == 0xffffffff)
{
luaA_object_push(L, c);
lua_pushboolean(L, true);
luaA_object_emit_signal(L, -2, "request::tag", 1);
/* Pop the client, arguments are already popped */
lua_pop(L, 1);
}
else if (idx >= 0 && idx < globalconf.tags.len)
{
luaA_object_push(L, c);
luaA_object_push(L, globalconf.tags.tab[idx]);
luaA_object_emit_signal(L, -2, "request::tag", 1);
/* Pop the client, arguments are already popped */
lua_pop(L, 1);
}
}
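/** Process an EWMH client message event.
 * \param ev The client message event.
 * \return 0 in all cases.
 */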
int
ewmh_process_client_message(xcb_client_message_event_t *ev)
{
client_t *c;
if(ev->type == _NET_CURRENT_DESKTOP)
{
int idx = ev->data.data32[0];
if (idx >= 0 && idx < globalconf.tags.len)
{
lua_State *L = globalconf_get_lua_State();
luaA_object_push(L, globalconf.tags.tab[idx]);
luaA_object_emit_signal(L, -1, "request::select", 0);
lua_pop(L, 1);
}
}
else if(ev->type == _NET_CLOSE_WINDOW)
{
if((c = client_getbywin(ev->window)))
client_kill(c);
}
else if(ev->type == _NET_WM_DESKTOP)
{
if((c = client_getbywin(ev->window)))
{
ewmh_process_desktop(c, ev->data.data32[0]);
}
}
else if(ev->type == _NET_WM_STATE)
{
if((c = client_getbywin(ev->window)))
{
ewmh_process_state_atom(c, (xcb_atom_t) ev->data.data32[1], ev->data.data32[0]);
if(ev->data.data32[2])
ewmh_process_state_atom(c, (xcb_atom_t) ev->data.data32[2],
ev->data.data32[0]);
}
}
else if(ev->type == _NET_ACTIVE_WINDOW)
{
if((c = client_getbywin(ev->window))) {
lua_State *L = globalconf_get_lua_State();
luaA_object_push(L, c);
lua_pushstring(L, "ewmh");
/* Create table argument with raise=true. */
lua_newtable(L);
lua_pushstring(L, "raise");
lua_pushboolean(L, true);
lua_settable(L, -3);
luaA_object_emit_signal(L, -3, "request::activate", 2);
lua_pop(L, 1);
}
}
return 0;
}
/** Update the client active desktop.
* This is "wrong" since it can be on several tags, but EWMH has a strict view
 * of the desktop system, so we just take the first tag.
* \param c The client.
*/
void
ewmh_client_update_desktop(client_t *c)
{
int i;
for(i = 0; i < globalconf.tags.len; i++)
if(is_client_tagged(c, globalconf.tags.tab[i]))
{
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
c->window, _NET_WM_DESKTOP, XCB_ATOM_CARDINAL, 32, 1, &i);
return;
}
/* It doesn't have any tags, remove the property */
xcb_delete_property(globalconf.connection, c->window, _NET_WM_DESKTOP);
}
/** Update the client struts.
* \param window The window to update the struts for.
* \param strut The strut type to update the window with.
*/
void
ewmh_update_strut(xcb_window_t window, strut_t *strut)
{
if(window)
{
const uint32_t state[] =
{
strut->left,
strut->right,
strut->top,
strut->bottom,
strut->left_start_y,
strut->left_end_y,
strut->right_start_y,
strut->right_end_y,
strut->top_start_x,
strut->top_end_x,
strut->bottom_start_x,
strut->bottom_end_x
};
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
window, _NET_WM_STRUT_PARTIAL, XCB_ATOM_CARDINAL, 32, countof(state), state);
}
}
/** Update the window type.
* \param window The window to update.
* \param type The new type to set.
*/
void
ewmh_update_window_type(xcb_window_t window, uint32_t type)
{
xcb_change_property(globalconf.connection, XCB_PROP_MODE_REPLACE,
window, _NET_WM_WINDOW_TYPE, XCB_ATOM_ATOM, 32, 1, &type);
}
void
ewmh_client_check_hints(client_t *c)
{
xcb_atom_t *state;
void *data = NULL;
xcb_get_property_cookie_t c0, c1, c2;
xcb_get_property_reply_t *reply;
/* Send the GetProperty requests which will be processed later */
c0 = xcb_get_property_unchecked(globalconf.connection, false, c->window,
_NET_WM_DESKTOP, XCB_GET_PROPERTY_TYPE_ANY, 0, 1);
c1 = xcb_get_property_unchecked(globalconf.connection, false, c->window,
_NET_WM_STATE, XCB_ATOM_ATOM, 0, UINT32_MAX);
c2 = xcb_get_property_unchecked(globalconf.connection, false, c->window,
_NET_WM_WINDOW_TYPE, XCB_ATOM_ATOM, 0, UINT32_MAX);
reply = xcb_get_property_reply(globalconf.connection, c0, NULL);
if(reply && reply->value_len && (data = xcb_get_property_value(reply)))
{
ewmh_process_desktop(c, *(uint32_t *) data);
}
p_delete(&reply);
reply = xcb_get_property_reply(globalconf.connection, c1, NULL);
if(reply && (data = xcb_get_property_value(reply)))
{
state = (xcb_atom_t *) data;
for(int i = 0; i < xcb_get_property_value_length(reply) / ssizeof(xcb_atom_t); i++)
ewmh_process_state_atom(c, state[i], _NET_WM_STATE_ADD);
}
p_delete(&reply);
reply = xcb_get_property_reply(globalconf.connection, c2, NULL);
if(reply && (data = xcb_get_property_value(reply)))
{
c->has_NET_WM_WINDOW_TYPE = true;
state = (xcb_atom_t *) data;
for(int i = 0; i < xcb_get_property_value_length(reply) / ssizeof(xcb_atom_t); i++)
if(state[i] == _NET_WM_WINDOW_TYPE_DESKTOP)
c->type = MAX(c->type, WINDOW_TYPE_DESKTOP);
else if(state[i] == _NET_WM_WINDOW_TYPE_DIALOG)
c->type = MAX(c->type, WINDOW_TYPE_DIALOG);
else if(state[i] == _NET_WM_WINDOW_TYPE_SPLASH)
c->type = MAX(c->type, WINDOW_TYPE_SPLASH);
else if(state[i] == _NET_WM_WINDOW_TYPE_DOCK)
c->type = MAX(c->type, WINDOW_TYPE_DOCK);
else if(state[i] == _NET_WM_WINDOW_TYPE_MENU)
c->type = MAX(c->type, WINDOW_TYPE_MENU);
else if(state[i] == _NET_WM_WINDOW_TYPE_TOOLBAR)
c->type = MAX(c->type, WINDOW_TYPE_TOOLBAR);
else if(state[i] == _NET_WM_WINDOW_TYPE_UTILITY)
c->type = MAX(c->type, WINDOW_TYPE_UTILITY);
} else
c->has_NET_WM_WINDOW_TYPE = false;
p_delete(&reply);
}
/** Process the WM strut of a client.
* \param c The client.
*/
void
ewmh_process_client_strut(client_t *c)
{
void *data;
xcb_get_property_reply_t *strut_r;
xcb_get_property_cookie_t strut_q = xcb_get_property_unchecked(globalconf.connection, false, c->window,
_NET_WM_STRUT_PARTIAL, XCB_ATOM_CARDINAL, 0, 12);
strut_r = xcb_get_property_reply(globalconf.connection, strut_q, NULL);
if(strut_r
&& strut_r->value_len
&& (data = xcb_get_property_value(strut_r)))
{
uint32_t *strut = data;
if(c->strut.left != strut[0]
|| c->strut.right != strut[1]
|| c->strut.top != strut[2]
|| c->strut.bottom != strut[3]
|| c->strut.left_start_y != strut[4]
|| c->strut.left_end_y != strut[5]
|| c->strut.right_start_y != strut[6]
|| c->strut.right_end_y != strut[7]
|| c->strut.top_start_x != strut[8]
|| c->strut.top_end_x != strut[9]
|| c->strut.bottom_start_x != strut[10]
|| c->strut.bottom_end_x != strut[11])
{
c->strut.left = strut[0];
c->strut.right = strut[1];
c->strut.top = strut[2];
c->strut.bottom = strut[3];
c->strut.left_start_y = strut[4];
c->strut.left_end_y = strut[5];
c->strut.right_start_y = strut[6];
c->strut.right_end_y = strut[7];
c->strut.top_start_x = strut[8];
c->strut.top_end_x = strut[9];
c->strut.bottom_start_x = strut[10];
c->strut.bottom_end_x = strut[11];
lua_State *L = globalconf_get_lua_State();
luaA_object_push(L, c);
luaA_object_emit_signal(L, -1, "property::struts", 0);
lua_pop(L, 1);
}
}
p_delete(&strut_r);
}
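/* Hedged reference sketch (editorial, not part of the original source): per
 * EWMH, _NET_WM_STRUT_PARTIAL carries exactly 12 CARDINALs in the order
 * consumed above:
 *   left, right, top, bottom,
 *   left_start_y, left_end_y, right_start_y, right_end_y,
 *   top_start_x, top_end_x, bottom_start_x, bottom_end_x
 * which is why the request asks for offset 0 and length 12. A client that
 * only sets the older _NET_WM_STRUT property (4 values) appears not to be
 * handled by this function.
 */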
/** Send request to get NET_WM_ICON (EWMH)
* \param w The window.
* \return The cookie associated with the request.
*/
xcb_get_property_cookie_t
ewmh_window_icon_get_unchecked(xcb_window_t w)
{
return xcb_get_property_unchecked(globalconf.connection, false, w,
_NET_WM_ICON, XCB_ATOM_CARDINAL, 0, UINT32_MAX);
}
static cairo_surface_t *
ewmh_window_icon_from_reply(xcb_get_property_reply_t *r, uint32_t preferred_size)
{
uint32_t *data, *end, *found_data = 0;
uint32_t found_size = 0;
if(!r || r->type != XCB_ATOM_CARDINAL || r->format != 32 || r->length < 2)
return 0;
data = (uint32_t *) xcb_get_property_value(r);
if (!data) return 0;
end = data + r->length;
/* Goes over the icon data and picks the icon that best matches the size preference.
* In case the size match is not exact, picks the closest bigger size if present,
* closest smaller size otherwise.
*/
while (data + 1 < end) {
/* check whether the data size specified by width and height fits into the array we got */
uint64_t data_size = (uint64_t) data[0] * data[1];
if (data_size > (uint64_t) (end - data - 2)) break;
/* use the greater of the two dimensions to match against the preferred size */
uint32_t size = MAX(data[0], data[1]);
/* pick the icon if it's a better match than the one we already have */
bool found_icon_too_small = found_size < preferred_size;
bool found_icon_too_large = found_size > preferred_size;
bool icon_empty = data[0] == 0 || data[1] == 0;
bool better_because_bigger = found_icon_too_small && size > found_size;
bool better_because_smaller = found_icon_too_large &&
size >= preferred_size && size < found_size;
if (!icon_empty && (better_because_bigger || better_because_smaller || found_size == 0))
{
found_data = data;
found_size = size;
}
data += data_size + 2;
}
if (!found_data) return 0;
return draw_surface_from_data(found_data[0], found_data[1], found_data + 2);
}
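/* Illustrative sketch (editorial, not part of the original source): the
 * _NET_WM_ICON property is a flat CARDINAL array of one or more icons, each
 * encoded as width, height, then width*height ARGB pixels. A reply holding a
 * 16x16 icon followed by a 32x32 icon looks like:
 *
 *   data = { 16, 16, <256 pixels...>, 32, 32, <1024 pixels...> }
 *
 * With preferred_size = 24, the loop above first records the 16x16 icon
 * (found_size == 0), then replaces it with the 32x32 icon because
 * found_icon_too_small holds and 32 > 16. With preferred_size = 16 the first
 * icon is kept, since neither better_because_bigger nor
 * better_because_smaller holds for the 32x32 entry.
 */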
/** Get NET_WM_ICON.
* \param cookie The cookie.
* \return The number of elements on stack.
*/
cairo_surface_t *
ewmh_window_icon_get_reply(xcb_get_property_cookie_t cookie, uint32_t preferred_size)
{
xcb_get_property_reply_t *r = xcb_get_property_reply(globalconf.connection, cookie, NULL);
cairo_surface_t *surface = ewmh_window_icon_from_reply(r, preferred_size);
p_delete(&r);
return surface;
}
// vim: filetype=c:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
| 1 | 12,025 | For the commit message: The paragraphs seem to be out of order? The `Would not work because` refers to the stuff before, but there is a `This may seem pointless, but` in-between. | awesomeWM-awesome | c |
@@ -88,12 +88,6 @@ var (
prelude = []string{
"universe",
"influxdata/influxdb",
- "math",
- "strings",
- "regexp",
- "date",
- "json",
- "http",
}
preludeScope = &scopeSet{
packages: make([]*interpreter.Package, len(prelude)), | 1 | package flux
import (
"fmt"
"path"
"regexp"
"strings"
"time"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/interpreter"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
"github.com/pkg/errors"
)
const (
TablesParameter = "tables"
tableKindKey = "kind"
tableParentsKey = "parents"
tableSpecKey = "spec"
)
// Parse parses a Flux script and produces an ast.Package.
func Parse(flux string) (*ast.Package, error) {
astPkg := parser.ParseSource(flux)
if ast.Check(astPkg) > 0 {
return nil, ast.GetError(astPkg)
}
return astPkg, nil
}
// Eval accepts a Flux script and evaluates it to produce a set of side effects (as a slice of values) and a scope.
func Eval(flux string, opts ...ScopeMutator) ([]interpreter.SideEffect, interpreter.Scope, error) {
astPkg, err := Parse(flux)
if err != nil {
return nil, nil, err
}
return EvalAST(astPkg, opts...)
}
// EvalAST accepts a Flux AST and evaluates it to produce a set of side effects (as a slice of values) and a scope.
func EvalAST(astPkg *ast.Package, opts ...ScopeMutator) ([]interpreter.SideEffect, interpreter.Scope, error) {
semPkg, err := semantic.New(astPkg)
if err != nil {
return nil, nil, err
}
itrp := interpreter.NewInterpreter()
universe := Prelude()
for _, opt := range opts {
opt(universe)
}
sideEffects, err := itrp.Eval(semPkg, universe, StdLib())
if err != nil {
return nil, nil, err
}
return sideEffects, universe, nil
}
// ScopeMutator is any function that mutates the scope of an identifier.
type ScopeMutator = func(interpreter.Scope)
// SetOption returns a func that adds a var binding to a scope.
func SetOption(name string, v values.Value) ScopeMutator {
return func(scope interpreter.Scope) {
scope.Set(name, v)
}
}
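// Usage sketch (illustrative, not in the original file): a caller can bind an
// option before evaluation, e.g.
//
//	sideEffects, scope, err := Eval(script, SetOption("now", nowValue))
//
// where script (a Flux source string) and nowValue (a values.Value) are
// assumed to be supplied by the caller.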
type CreateOperationSpec func(args Arguments, a *Administration) (OperationSpec, error)
// set of builtins
var (
finalized bool
builtinPackages = make(map[string]*ast.Package)
// list of packages included in the prelude.
// Packages must be listed in import order
prelude = []string{
"universe",
"influxdata/influxdb",
"math",
"strings",
"regexp",
"date",
"json",
"http",
}
preludeScope = &scopeSet{
packages: make([]*interpreter.Package, len(prelude)),
}
stdlib = &importer{make(map[string]*interpreter.Package)}
)
type scopeSet struct {
packages []*interpreter.Package
}
func (s *scopeSet) Lookup(name string) (values.Value, bool) {
for _, pkg := range s.packages {
if v, ok := pkg.Get(name); ok {
return v, ok
}
}
return nil, false
}
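// Editorial note: Lookup scans packages in prelude import order, so a name
// defined in an earlier prelude package shadows the same name in a later one.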
func (s *scopeSet) Set(name string, v values.Value) {
panic("cannot mutate the universe block")
}
func (s *scopeSet) Nest(obj values.Object) interpreter.Scope {
return interpreter.NewNestedScope(s, obj)
}
func (s *scopeSet) Pop() interpreter.Scope {
return nil
}
func (s *scopeSet) Size() int {
var size int
for _, pkg := range s.packages {
size += pkg.Len()
}
return size
}
func (s *scopeSet) Range(f func(k string, v values.Value)) {
for _, pkg := range s.packages {
pkg.Range(f)
}
}
func (s *scopeSet) LocalRange(f func(k string, v values.Value)) {
for _, pkg := range s.packages {
if pkg == nil {
panic(`nil package in scope; try importing "github.com/influxdata/flux/builtin"`)
}
pkg.Range(f)
}
}
func (s *scopeSet) SetReturn(v values.Value) {
panic("cannot set return value on universe block")
}
func (s *scopeSet) Return() values.Value {
return nil
}
func (s *scopeSet) Copy() interpreter.Scope {
packages := make([]*interpreter.Package, len(s.packages))
for i, pkg := range s.packages {
packages[i] = pkg.Copy()
}
return &scopeSet{packages}
}
// StdLib returns an importer for the Flux standard library.
func StdLib() interpreter.Importer {
return stdlib.Copy()
}
// Prelude returns a scope object representing the Flux universe block
func Prelude() interpreter.Scope {
return preludeScope.Nest(nil)
}
// RegisterPackage adds a builtin package
func RegisterPackage(pkg *ast.Package) {
if finalized {
panic(errors.New("already finalized, cannot register builtin package"))
}
if _, ok := builtinPackages[pkg.Path]; ok {
panic(fmt.Errorf("duplicate builtin package %q", pkg.Path))
}
builtinPackages[pkg.Path] = pkg
_, ok := stdlib.pkgs[pkg.Path]
if !ok {
// Lazy creation of interpreter package
// registration order is not known so we must create it lazily
stdlib.pkgs[pkg.Path] = interpreter.NewPackage(path.Base(pkg.Path))
}
}
// RegisterPackageValue adds a value for an identifier in a builtin package
func RegisterPackageValue(pkgpath, name string, value values.Value) {
registerPackageValue(pkgpath, name, value, false)
}
// ReplacePackageValue replaces a value for an identifier in a builtin package
func ReplacePackageValue(pkgpath, name string, value values.Value) {
registerPackageValue(pkgpath, name, value, true)
}
func registerPackageValue(pkgpath, name string, value values.Value, replace bool) {
if finalized {
panic(errors.New("already finalized, cannot register builtin package value"))
}
packg, ok := stdlib.pkgs[pkgpath]
if !ok {
// Lazy creation of interpreter package
// registration order is not known so we must create it lazily
packg = interpreter.NewPackage(path.Base(pkgpath))
stdlib.pkgs[pkgpath] = packg
}
if _, ok := packg.Get(name); ok && !replace {
panic(fmt.Errorf("duplicate builtin package value %q %q", pkgpath, name))
} else if !ok && replace {
panic(fmt.Errorf("missing builtin package value %q %q", pkgpath, name))
}
packg.Set(name, value)
}
// FunctionValue creates a values.Value from the operation spec and signature.
// Name is the name of the function as it would be called.
// c is a function reference of type CreateOperationSpec
// sig is a function signature type that specifies the names and types of each argument for the function.
func FunctionValue(name string, c CreateOperationSpec, sig semantic.FunctionPolySignature) values.Value {
return functionValue(name, c, sig, false)
}
// FunctionValueWithSideEffect creates a values.Value from the operation spec and signature.
// Name is the name of the function as it would be called.
// c is a function reference of type CreateOperationSpec
// sig is a function signature type that specifies the names and types of each argument for the function.
func FunctionValueWithSideEffect(name string, c CreateOperationSpec, sig semantic.FunctionPolySignature) values.Value {
return functionValue(name, c, sig, true)
}
func functionValue(name string, c CreateOperationSpec, sig semantic.FunctionPolySignature, sideEffects bool) values.Value {
if c == nil {
c = func(args Arguments, a *Administration) (OperationSpec, error) {
return nil, fmt.Errorf("function %q is not implemented", name)
}
}
return &function{
t: semantic.NewFunctionPolyType(sig),
name: name,
createOpSpec: c,
hasSideEffect: sideEffects,
}
}
// FinalizeBuiltIns must be called to complete registration.
// Future calls to RegisterFunction or RegisterPackageValue will panic.
func FinalizeBuiltIns() {
if finalized {
panic("already finalized")
}
finalized = true
for i, path := range prelude {
pkg, ok := stdlib.ImportPackageObject(path)
if !ok {
panic(fmt.Sprintf("missing prelude package %q", path))
}
preludeScope.packages[i] = pkg
}
if err := evalBuiltInPackages(); err != nil {
panic(err)
}
}
func evalBuiltInPackages() error {
order, err := packageOrder(prelude, builtinPackages)
if err != nil {
return err
}
for _, astPkg := range order {
if ast.Check(astPkg) > 0 {
err := ast.GetError(astPkg)
return errors.Wrapf(err, "failed to parse builtin package %q", astPkg.Path)
}
semPkg, err := semantic.New(astPkg)
if err != nil {
return errors.Wrapf(err, "failed to create semantic graph for builtin package %q", astPkg.Path)
}
pkg := stdlib.pkgs[astPkg.Path]
if pkg == nil {
return errors.Wrapf(err, "package does not exist %q", astPkg.Path)
}
// Validate packages before evaluating them
if err := validatePackageBuiltins(pkg, astPkg); err != nil {
return errors.Wrapf(err, "package has invalid builtins %q", astPkg.Path)
}
itrp := interpreter.NewInterpreter()
if _, err := itrp.Eval(semPkg, preludeScope.Nest(pkg), stdlib); err != nil {
return errors.Wrapf(err, "failed to evaluate builtin package %q", astPkg.Path)
}
}
return nil
}
// validatePackageBuiltins ensures that all package builtins have both an AST builtin statement and a registered value.
func validatePackageBuiltins(pkg *interpreter.Package, astPkg *ast.Package) error {
builtinStmts := make(map[string]*ast.BuiltinStatement)
ast.Walk(ast.CreateVisitor(func(n ast.Node) {
if bs, ok := n.(*ast.BuiltinStatement); ok {
builtinStmts[bs.ID.Name] = bs
}
}), astPkg)
missing := make([]string, 0, len(builtinStmts))
extra := make([]string, 0, len(builtinStmts))
for n := range builtinStmts {
if _, ok := pkg.Get(n); !ok {
missing = append(missing, n)
continue
}
// TODO(nathanielc): Ensure that the value's type matches the type expression
}
pkg.Range(func(k string, v values.Value) {
if _, ok := builtinStmts[k]; !ok {
extra = append(extra, k)
return
}
})
if len(missing) > 0 || len(extra) > 0 {
return fmt.Errorf("missing builtin values %v, extra builtin values %v", missing, extra)
}
return nil
}
var TableObjectType = semantic.NewObjectPolyType(
//TODO: When values.Value support polytyped values, we can add the commented fields back in
map[string]semantic.PolyType{
tableKindKey: semantic.String,
//tableSpecKey: semantic.Tvar(1),
//tableParentsKey: semantic.Tvar(2),
},
nil,
//semantic.LabelSet{tableKindKey, tableSpecKey, tableParentsKey},
semantic.LabelSet{tableKindKey},
)
var _ = tableSpecKey // So that linter doesn't think tableSpecKey is unused, considering above TODO.
var TableObjectMonoType semantic.Type
func init() {
TableObjectMonoType, _ = TableObjectType.MonoType()
}
// IDer produces the mapping of table Objects to OperationIDs
type IDer interface {
ID(*TableObject) OperationID
}
// IDerOpSpec is the interface any operation spec that needs
// access to OperationIDs in the query spec must implement.
type IDerOpSpec interface {
IDer(ider IDer)
}
// TableObject represents the value returned by a transformation.
// As such, it holds the OperationSpec of the transformation it is associated with,
// and it is a values.Value (and, also, a values.Object).
// It can be compiled and executed as a flux.Program by using a lang.TableObjectCompiler.
type TableObject struct {
// TODO(Josh): Remove args once the
// OperationSpec interface has an Equal method.
args Arguments
Kind OperationKind
Spec OperationSpec
Parents values.Array
}
func (t *TableObject) Operation(ider IDer) *Operation {
if iderOpSpec, ok := t.Spec.(IDerOpSpec); ok {
iderOpSpec.IDer(ider)
}
return &Operation{
ID: ider.ID(t),
Spec: t.Spec,
}
}
func (t *TableObject) IsNull() bool {
return false
}
func (t *TableObject) String() string {
str := new(strings.Builder)
t.str(str, false)
return str.String()
}
func (t *TableObject) str(b *strings.Builder, arrow bool) {
multiParent := t.Parents.Len() > 1
if multiParent {
b.WriteString("( ")
}
t.Parents.Range(func(i int, p values.Value) {
parent := p.Object().(*TableObject)
parent.str(b, !multiParent)
if multiParent {
b.WriteString("; ")
}
})
if multiParent {
b.WriteString(" ) -> ")
}
b.WriteString(string(t.Kind))
if arrow {
b.WriteString(" -> ")
}
}
func (t *TableObject) Type() semantic.Type {
typ, _ := TableObjectType.MonoType()
return typ
}
func (t *TableObject) PolyType() semantic.PolyType {
return TableObjectType
}
func (t *TableObject) Str() string {
panic(values.UnexpectedKind(semantic.Object, semantic.String))
}
func (t *TableObject) Bytes() []byte {
panic(values.UnexpectedKind(semantic.Object, semantic.Bytes))
}
func (t *TableObject) Int() int64 {
panic(values.UnexpectedKind(semantic.Object, semantic.Int))
}
func (t *TableObject) UInt() uint64 {
panic(values.UnexpectedKind(semantic.Object, semantic.UInt))
}
func (t *TableObject) Float() float64 {
panic(values.UnexpectedKind(semantic.Object, semantic.Float))
}
func (t *TableObject) Bool() bool {
panic(values.UnexpectedKind(semantic.Object, semantic.Bool))
}
func (t *TableObject) Time() values.Time {
panic(values.UnexpectedKind(semantic.Object, semantic.Time))
}
func (t *TableObject) Duration() values.Duration {
panic(values.UnexpectedKind(semantic.Object, semantic.Duration))
}
func (t *TableObject) Regexp() *regexp.Regexp {
panic(values.UnexpectedKind(semantic.Object, semantic.Regexp))
}
func (t *TableObject) Array() values.Array {
panic(values.UnexpectedKind(semantic.Object, semantic.Array))
}
func (t *TableObject) Object() values.Object {
return t
}
func (t *TableObject) Equal(rhs values.Value) bool {
if t.Type() != rhs.Type() {
return false
}
r := rhs.Object()
if t.Len() != r.Len() {
return false
}
var isEqual = true
// Range over both TableObjects and
// compare their properties for equality
t.Range(func(k string, v values.Value) {
w, ok := r.Get(k)
isEqual = isEqual && ok && v.Equal(w)
})
return isEqual
}
func (t *TableObject) Function() values.Function {
panic(values.UnexpectedKind(semantic.Object, semantic.Function))
}
func (t *TableObject) Get(name string) (values.Value, bool) {
switch name {
case tableKindKey:
return values.NewString(string(t.Kind)), true
case tableParentsKey:
return t.Parents, true
default:
return t.args.Get(name)
}
}
func (t *TableObject) Set(name string, v values.Value) {
// immutable
}
func (t *TableObject) Len() int {
return len(t.args.GetAll()) + 2
}
func (t *TableObject) Range(f func(name string, v values.Value)) {
for _, arg := range t.args.GetAll() {
val, _ := t.args.Get(arg)
f(arg, val)
}
f(tableKindKey, values.NewString(string(t.Kind)))
f(tableParentsKey, t.Parents)
}
// FunctionSignature returns a standard functions signature which accepts a table piped argument,
// with any additional arguments.
func FunctionSignature(parameters map[string]semantic.PolyType, required []string) semantic.FunctionPolySignature {
if parameters == nil {
parameters = make(map[string]semantic.PolyType)
}
parameters[TablesParameter] = TableObjectType
return semantic.FunctionPolySignature{
Parameters: parameters,
Required: semantic.LabelSet(required),
Return: TableObjectType,
PipeArgument: TablesParameter,
}
}
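// Illustrative sketch (hypothetical builtin, not from this file): a
// transformation taking one required integer argument could declare its
// signature as
//
//	sig := FunctionSignature(
//		map[string]semantic.PolyType{"n": semantic.Int},
//		[]string{"n"},
//	)
//
// after which sig also carries the piped `tables` parameter and the
// TableObjectType return type added above.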
// BuiltIns returns a copy of the prelude's builtin values, keyed by name.
func BuiltIns() map[string]values.Value {
if !finalized {
panic("builtins not finalized")
}
cpy := make(map[string]values.Value, preludeScope.Size())
preludeScope.Range(func(k string, v values.Value) {
cpy[k] = v
})
return cpy
}
type Administration struct {
parents values.Array
}
func newAdministration() *Administration {
return &Administration{
// TODO(nathanielc): Once we can support recursive types change this to,
// interpreter.NewArray(TableObjectType)
parents: values.NewArray(semantic.EmptyObject),
}
}
// AddParentFromArgs reads the args for the `table` argument and adds the value as a parent.
func (a *Administration) AddParentFromArgs(args Arguments) error {
parent, err := args.GetRequiredObject(TablesParameter)
if err != nil {
return err
}
p, ok := parent.(*TableObject)
if !ok {
return fmt.Errorf("argument is not a table object: got %T", parent)
}
a.AddParent(p)
return nil
}
// AddParent instructs the evaluation Context that a new edge should be created from the parent to the current operation.
// Duplicate parents are ignored, so the caller need not concern itself with which parents have already been added.
func (a *Administration) AddParent(np *TableObject) {
// Check for duplicates
found := false
a.parents.Range(func(i int, v values.Value) {
if p, ok := v.(*TableObject); ok && p == np {
found = true
}
})
if !found {
a.parents.Append(np)
}
}
type function struct {
name string
t semantic.PolyType
createOpSpec CreateOperationSpec
hasSideEffect bool
}
func (f *function) Type() semantic.Type {
// TODO(nathanielc): Update values.Value interface to use PolyTypes
t, _ := f.t.MonoType()
return t
}
func (f *function) PolyType() semantic.PolyType {
return f.t
}
func (f *function) IsNull() bool {
return false
}
func (f *function) Str() string {
panic(values.UnexpectedKind(semantic.Function, semantic.String))
}
func (f *function) Bytes() []byte {
panic(values.UnexpectedKind(semantic.Function, semantic.Bytes))
}
func (f *function) Int() int64 {
panic(values.UnexpectedKind(semantic.Function, semantic.Int))
}
func (f *function) UInt() uint64 {
panic(values.UnexpectedKind(semantic.Function, semantic.UInt))
}
func (f *function) Float() float64 {
panic(values.UnexpectedKind(semantic.Function, semantic.Float))
}
func (f *function) Bool() bool {
panic(values.UnexpectedKind(semantic.Function, semantic.Bool))
}
func (f *function) Time() values.Time {
panic(values.UnexpectedKind(semantic.Function, semantic.Time))
}
func (f *function) Duration() values.Duration {
panic(values.UnexpectedKind(semantic.Function, semantic.Duration))
}
func (f *function) Regexp() *regexp.Regexp {
panic(values.UnexpectedKind(semantic.Function, semantic.Regexp))
}
func (f *function) Array() values.Array {
panic(values.UnexpectedKind(semantic.Function, semantic.Array))
}
func (f *function) Object() values.Object {
panic(values.UnexpectedKind(semantic.Function, semantic.Object))
}
func (f *function) Function() values.Function {
return f
}
func (f *function) Equal(rhs values.Value) bool {
if f.Type() != rhs.Type() {
return false
}
v, ok := rhs.(*function)
return ok && (f == v)
}
func (f *function) HasSideEffect() bool {
return f.hasSideEffect
}
func (f *function) Call(argsObj values.Object) (values.Value, error) {
return interpreter.DoFunctionCall(f.call, argsObj)
}
func (f *function) call(args interpreter.Arguments) (values.Value, error) {
a := newAdministration()
arguments := Arguments{Arguments: args}
spec, err := f.createOpSpec(arguments, a)
if err != nil {
return nil, err
}
t := &TableObject{
args: arguments,
Kind: spec.Kind(),
Spec: spec,
Parents: a.parents,
}
return t, nil
}
func (f *function) String() string {
return fmt.Sprintf("%v", f.t)
}
type Arguments struct {
interpreter.Arguments
}
func (a Arguments) GetTime(name string) (Time, bool, error) {
v, ok := a.Get(name)
if !ok {
return Time{}, false, nil
}
qt, err := ToQueryTime(v)
if err != nil {
return Time{}, ok, err
}
return qt, ok, nil
}
func (a Arguments) GetRequiredTime(name string) (Time, error) {
qt, ok, err := a.GetTime(name)
if err != nil {
return Time{}, err
}
if !ok {
return Time{}, fmt.Errorf("missing required keyword argument %q", name)
}
return qt, nil
}
func (a Arguments) GetDuration(name string) (Duration, bool, error) {
v, ok := a.Get(name)
if !ok {
return 0, false, nil
}
return Duration(v.Duration()), true, nil
}
func (a Arguments) GetRequiredDuration(name string) (Duration, error) {
d, ok, err := a.GetDuration(name)
if err != nil {
return 0, err
}
if !ok {
return 0, fmt.Errorf("missing required keyword argument %q", name)
}
return d, nil
}
func ToQueryTime(value values.Value) (Time, error) {
switch value.Type().Nature() {
case semantic.Time:
return Time{
Absolute: value.Time().Time(),
}, nil
case semantic.Duration:
return Time{
Relative: value.Duration().Duration(),
IsRelative: true,
}, nil
case semantic.Int:
return Time{
Absolute: time.Unix(value.Int(), 0),
}, nil
default:
return Time{}, fmt.Errorf("value is not a time, got %v", value.Type())
}
}
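// Worked example (illustrative): an integer value is interpreted as Unix
// seconds, so ToQueryTime(values.NewInt(0)) yields Time{Absolute:
// time.Unix(0, 0)}, while a duration value yields a relative Time with
// IsRelative set, to be resolved against "now" by the caller.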
type importer struct {
pkgs map[string]*interpreter.Package
}
func (imp *importer) Copy() *importer {
packages := make(map[string]*interpreter.Package, len(imp.pkgs))
for k, v := range imp.pkgs {
packages[k] = v.Copy()
}
return &importer{
pkgs: packages,
}
}
func (imp *importer) Import(path string) (semantic.PackageType, bool) {
p, ok := imp.pkgs[path]
if !ok {
return semantic.PackageType{}, false
}
return semantic.PackageType{
Name: p.Name(),
Type: p.PolyType(),
}, true
}
func (imp *importer) ImportPackageObject(path string) (*interpreter.Package, bool) {
p, ok := imp.pkgs[path]
return p, ok
}
// packageOrder determines a safe order to process builtin packages such that all dependent packages are previously processed.
func packageOrder(prelude []string, pkgs map[string]*ast.Package) (order []*ast.Package, err error) {
//TODO(nathanielc): Add import cycle detection; this is not needed until this code is promoted to work with third-party imports
// Always import prelude first so other packages need not explicitly import the prelude packages.
for _, path := range prelude {
pkg := pkgs[path]
order, err = insertPkg(pkg, pkgs, order)
if err != nil {
return
}
}
// Import all other packages
for _, pkg := range pkgs {
order, err = insertPkg(pkg, pkgs, order)
if err != nil {
return
}
}
return
}
func insertPkg(pkg *ast.Package, pkgs map[string]*ast.Package, order []*ast.Package) (_ []*ast.Package, err error) {
imports := findImports(pkg)
for _, path := range imports {
dep, ok := pkgs[path]
if !ok {
return nil, fmt.Errorf("unknown builtin package %q", path)
}
order, err = insertPkg(dep, pkgs, order)
if err != nil {
return nil, err
}
}
return appendPkg(pkg, order), nil
}
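// Worked example (illustrative): given pkgs {"a" imports "b", "b" imports
// nothing} and an empty prelude, insertPkg on "a" first recurses into "b",
// so the returned order is [b, a]; containsPkg guards against appending a
// package twice when it is reachable along several import paths.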
func appendPkg(pkg *ast.Package, pkgs []*ast.Package) []*ast.Package {
if containsPkg(pkg.Path, pkgs) {
return pkgs
}
return append(pkgs, pkg)
}
func containsPkg(path string, pkgs []*ast.Package) bool {
for _, pkg := range pkgs {
if pkg.Path == path {
return true
}
}
return false
}
func findImports(pkg *ast.Package) (imports []string) {
for _, f := range pkg.Files {
for _, i := range f.Imports {
imports = append(imports, i.Path.Value)
}
}
return
}
| 1 | 11,417 | Why did you remove these packages from the prelude, will your new functions not work without this change? | influxdata-flux | go |
@@ -36,10 +36,14 @@ GetProcessor::asyncProcess(PartitionID part,
const std::vector<std::string>& keys) {
folly::Promise<std::pair<PartitionID, kvstore::ResultCode>> promise;
auto future = promise.getFuture();
+ std::vector<std::string> kvKeys;
+ std::transform(keys.begin(), keys.end(), std::back_inserter(kvKeys),
+ [part](const auto& key) { return NebulaKeyUtils::kvKey(part, key); });
- executor_->add([this, p = std::move(promise), part, keys] () mutable {
+ executor_->add([this, p = std::move(promise), part, &keys,
+ kvKeys = std::move(kvKeys)] () mutable {
std::vector<std::string> values;
- auto ret = this->kvstore_->multiGet(space_, part, keys, &values);
+ auto ret = this->kvstore_->multiGet(space_, part, kvKeys, &values);
if (ret == kvstore::ResultCode::SUCCEEDED) {
std::lock_guard<std::mutex> lg(this->lock_);
for (int32_t i = 0; i < static_cast<int32_t>(keys.size()); i++) { | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/GetProcessor.h"
#include "base/NebulaKeyUtils.h"
namespace nebula {
namespace storage {
void GetProcessor::process(const cpp2::GetRequest& req) {
space_ = req.get_space_id();
std::vector<folly::Future<std::pair<PartitionID, kvstore::ResultCode>>> results;
for (auto& part : req.get_parts()) {
results.emplace_back(asyncProcess(part.first, part.second));
}
folly::collectAll(results).via(executor_)
.then([&] (const std::vector<folly::Try<PartCode>>& tries) mutable {
for (const auto& t : tries) {
auto ret = t.value();
auto part = std::get<0>(ret);
auto resultCode = std::get<1>(ret);
this->pushResultCode(this->to(resultCode), part);
}
resp_.set_values(std::move(pairs_));
this->onFinished();
});
}
folly::Future<std::pair<PartitionID, kvstore::ResultCode>>
GetProcessor::asyncProcess(PartitionID part,
const std::vector<std::string>& keys) {
folly::Promise<std::pair<PartitionID, kvstore::ResultCode>> promise;
auto future = promise.getFuture();
executor_->add([this, p = std::move(promise), part, keys] () mutable {
std::vector<std::string> values;
auto ret = this->kvstore_->multiGet(space_, part, keys, &values);
if (ret == kvstore::ResultCode::SUCCEEDED) {
std::lock_guard<std::mutex> lg(this->lock_);
for (int32_t i = 0; i < static_cast<int32_t>(keys.size()); i++) {
pairs_.emplace(keys[i], values[i]);
}
}
p.setValue(std::make_pair(part, ret));
});
return future;
}
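// Editorial sketch: each partition is processed on the executor and the
// per-partition futures are joined in process() via folly::collectAll, so a
// failure in one partition surfaces as its own (part, ResultCode) pair
// rather than failing the whole request.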
} // namespace storage
} // namespace nebula
| 1 | 23,451 | We'd better reserve enough space before using kvKeys to avoid extra malloc. | vesoft-inc-nebula | cpp |
@@ -213,3 +213,12 @@ func GetLocalPVType(pv *v1.PersistentVolume) string {
}
return ""
}
+
+// GetNodeHostname extracts the Hostname from the labels on the Node
+func GetNodeHostname(n *v1.Node) string {
+ hostname, found := n.Labels[KeyNodeHostname]
+ if found {
+ return hostname
+ }
+ return n.Name
+} | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
//"fmt"
//"path/filepath"
"strings"
"github.com/golang/glog"
mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
cast "github.com/openebs/maya/pkg/castemplate/v1alpha1"
hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1"
"github.com/openebs/maya/pkg/util"
//"github.com/pkg/errors"
errors "github.com/openebs/maya/pkg/errors/v1alpha1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//storagev1 "k8s.io/api/storage/v1"
)
const (
//KeyPVStorageType defines if the PV should be backed by
// a hostpath (sub directory or a storage device)
KeyPVStorageType = "StorageType"
//KeyPVBasePath defines base directory for hostpath volumes
// can be configured via the StorageClass annotations.
KeyPVBasePath = "BasePath"
//KeyPVFSType defines filesystem type to be used with devices
// and can be configured via the StorageClass annotations.
KeyPVFSType = "FSType"
//KeyPVRelativePath defines the alternate folder name under the BasePath
// By default, the pv name will be used as the folder name.
// KeyPVRelativePath can be useful for providing the same underlying folder
// name for all replicas in a Statefulset.
// Will be a property of the PVC annotations.
//KeyPVRelativePath = "RelativePath"
//KeyPVAbsolutePath specifies a complete hostpath instead of
// auto-generating using BasePath and RelativePath. This option
// is specified with PVC and is useful for granting shared access
// to underlying hostpaths across multiple pods.
//KeyPVAbsolutePath = "AbsolutePath"
)
const (
// Some of the PVCs launched with older helm charts, still
// refer to the StorageClass via beta annotations.
betaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
)
//GetVolumeConfig creates a new VolumeConfig struct by
// parsing and merging the configuration provided in the PVC
// annotation - cas.openebs.io/config with the
// default configuration of the provisioner.
func (p *Provisioner) GetVolumeConfig(pvName string, pvc *v1.PersistentVolumeClaim) (*VolumeConfig, error) {
pvConfig := p.defaultConfig
//Fetch the SC
scName := GetStorageClassName(pvc)
sc, err := p.kubeClient.StorageV1().StorageClasses().Get(*scName, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrapf(err, "failed to get storageclass: missing sc name {%v}", scName)
}
// extract and merge the cas config from storageclass
scCASConfigStr := sc.ObjectMeta.Annotations[string(mconfig.CASConfigKey)]
glog.Infof("SC %v has config:%v", *scName, scCASConfigStr)
if len(strings.TrimSpace(scCASConfigStr)) != 0 {
scCASConfig, err := cast.UnMarshallToConfig(scCASConfigStr)
if err == nil {
pvConfig = cast.MergeConfig(scCASConfig, pvConfig)
} else {
return nil, errors.Wrapf(err, "failed to get config: invalid sc config {%v}", scCASConfigStr)
}
}
//TODO : extract and merge the cas volume config from pvc
//This block can be added once validation checks are added
// as to the type of config that can be passed via PVC
//pvcCASConfigStr := pvc.ObjectMeta.Annotations[string(mconfig.CASConfigKey)]
//if len(strings.TrimSpace(pvcCASConfigStr)) != 0 {
// pvcCASConfig, err := cast.UnMarshallToConfig(pvcCASConfigStr)
// if err == nil {
// pvConfig = cast.MergeConfig(pvcCASConfig, pvConfig)
// }
//}
pvConfigMap, err := cast.ConfigToMap(pvConfig)
if err != nil {
return nil, errors.Wrapf(err, "unable to read volume config: pvc {%v}", pvc.ObjectMeta.Name)
}
c := &VolumeConfig{
pvName: pvName,
pvcName: pvc.ObjectMeta.Name,
scName: *scName,
options: pvConfigMap,
}
return c, nil
}
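// Illustrative sketch (assumed typical StorageClass, not from this file): the
// cas.openebs.io/config annotation carries the provisioner config as YAML,
// e.g.
//
//	cas.openebs.io/config: |
//	  - name: StorageType
//	    value: "hostpath"
//	  - name: BasePath
//	    value: "/var/openebs/local"
//
// which GetVolumeConfig unmarshals and merges over the provisioner defaults.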
//GetStorageType returns the StorageType value configured
// in StorageClass. Default is hostpath
func (c *VolumeConfig) GetStorageType() string {
stgType := c.getValue(KeyPVStorageType)
if len(strings.TrimSpace(stgType)) == 0 {
return "hostpath"
}
return stgType
}
//GetFSType returns the FSType value configured
// in StorageClass. Default is "", auto-determined
// by Local PV
func (c *VolumeConfig) GetFSType() string {
fsType := c.getValue(KeyPVFSType)
if len(strings.TrimSpace(fsType)) == 0 {
return ""
}
return fsType
}
//GetPath returns a valid PV path based on the configuration
// or an error. The Path is constructed using the following rules:
// If AbsolutePath is specified return it. (Future)
// If PVPath is specified, suffix it with BasePath and return it. (Future)
// If neither of above are specified, suffix the PVName to BasePath
// and return it
// Also before returning the path, validate that path is safe
// and matches the filters specified in StorageClass.
func (c *VolumeConfig) GetPath() (string, error) {
//This feature needs to be supported once some more
// security checks are in place, so that rogue pods
// don't get access to node directories.
//absolutePath := c.getValue(KeyPVAbsolutePath)
//if len(strings.TrimSpace(absolutePath)) != 0 {
// return c.validatePath(absolutePath)
//}
basePath := c.getValue(KeyPVBasePath)
if strings.TrimSpace(basePath) == "" {
return "", errors.Errorf("failed to get path: base path is empty")
}
//This feature needs to be supported after the
// security checks are in place.
//pvRelPath := c.getValue(KeyPVRelativePath)
//if len(strings.TrimSpace(pvRelPath)) == 0 {
// pvRelPath = c.pvName
//}
pvRelPath := c.pvName
//path := filepath.Join(basePath, pvRelPath)
return hostpath.NewBuilder().
WithPathJoin(basePath, pvRelPath).
WithCheckf(hostpath.IsNonRoot(), "path should not be a root directory: %s/%s", basePath, pvRelPath).
ValidateAndBuild()
}
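// Worked example (illustrative): with BasePath "/var/openebs/local" and a PV
// named "pvc-1234", GetPath returns "/var/openebs/local/pvc-1234" once the
// non-root check passes.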
//getValue is a utility function to extract the value
// of the `key` from the ConfigMap object - which is
// map[string]interface{map[string][string]}
// Example:
// {
// key1: {
// value: value1
// enabled: true
// }
// }
// In the above example, if `key1` is passed as input,
// `value1` will be returned.
func (c *VolumeConfig) getValue(key string) string {
if configObj, ok := util.GetNestedField(c.options, key).(map[string]string); ok {
if val, p := configObj[string(mconfig.ValuePTP)]; p {
return val
}
}
return ""
}
// GetStorageClassName extracts the StorageClass name from PVC
func GetStorageClassName(pvc *v1.PersistentVolumeClaim) *string {
// Use beta annotation first
class, found := pvc.Annotations[betaStorageClassAnnotation]
if found {
return &class
}
return pvc.Spec.StorageClassName
}
// GetLocalPVType extracts the Local PV Type from PV
func GetLocalPVType(pv *v1.PersistentVolume) string {
casType, found := pv.Labels[string(mconfig.CASTypeKey)]
if found {
return casType
}
return ""
}
| 1 | 17,252 | would it make sense to return empty or error if label doesn't exists? | openebs-maya | go |
@@ -38,13 +38,15 @@ var PATH_SEPARATOR = process.platform === 'win32' ? ';' : ':';
exports.rmDir = function(path) {
return new promise.Promise(function(fulfill, reject) {
var numAttempts = 0;
+ var maxAttempts = 5;
+ var attemptTimeout = 250;
attemptRm();
function attemptRm() {
numAttempts += 1;
rimraf(path, function(err) {
if (err) {
- if (err.code === 'ENOTEMPTY' && numAttempts < 2) {
- attemptRm();
+ if (err.code === 'ENOTEMPTY' && numAttempts <= maxAttempts) {
+ setTimeout(attemptRm, attemptTimeout);
return;
}
reject(err); | 1 | // Copyright 2013 Selenium committers
// Copyright 2013 Software Freedom Conservancy
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
var fs = require('fs'),
path = require('path'),
rimraf = require('rimraf'),
tmp = require('tmp');
var promise = require('..').promise;
var PATH_SEPARATOR = process.platform === 'win32' ? ';' : ':';
// PUBLIC API
/**
* Recursively removes a directory and all of its contents. This is equivalent
* to {@code rm -rf} on a POSIX system.
* @param {string} path Path to the directory to remove.
* @return {!promise.Promise} A promise to be resolved when the operation has
* completed.
*/
exports.rmDir = function(path) {
return new promise.Promise(function(fulfill, reject) {
var numAttempts = 0;
attemptRm();
function attemptRm() {
numAttempts += 1;
rimraf(path, function(err) {
if (err) {
if (err.code === 'ENOTEMPTY' && numAttempts < 2) {
attemptRm();
return;
}
reject(err);
} else {
fulfill();
}
});
}
});
};
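// Note (editorial): retrying on ENOTEMPTY guards against transient failures
// where another process (e.g. a virus scanner or file indexer) briefly holds
// a handle inside the directory while it is being removed.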
/**
* Copies one file to another.
* @param {string} src The source file.
* @param {string} dst The destination file.
* @return {!promise.Promise.<string>} A promise for the copied file's path.
*/
exports.copy = function(src, dst) {
var copied = promise.defer();
var rs = fs.createReadStream(src);
rs.on('error', copied.reject);
rs.on('end', function() {
copied.fulfill(dst);
});
var ws = fs.createWriteStream(dst);
ws.on('error', copied.reject);
rs.pipe(ws);
return copied.promise;
};
/**
* Recursively copies the contents of one directory to another.
* @param {string} src The source directory to copy.
* @param {string} dst The directory to copy into.
 * @param {(RegExp|function(string): boolean)=} opt_exclude An exclusion filter
* as either a regex or predicate function. All files matching this filter
* will not be copied.
* @return {!promise.Promise.<string>} A promise for the destination
* directory's path once all files have been copied.
*/
exports.copyDir = function(src, dst, opt_exclude) {
var predicate = opt_exclude;
if (opt_exclude && typeof opt_exclude !== 'function') {
predicate = function(p) {
return !opt_exclude.test(p);
};
}
// TODO(jleyba): Make this function completely async.
if (!fs.existsSync(dst)) {
fs.mkdirSync(dst);
}
var files = fs.readdirSync(src);
files = files.map(function(file) {
return path.join(src, file);
});
if (predicate) {
files = files.filter(predicate);
}
var results = [];
files.forEach(function(file) {
var stats = fs.statSync(file);
var target = path.join(dst, path.basename(file));
if (stats.isDirectory()) {
if (!fs.existsSync(target)) {
fs.mkdirSync(target, stats.mode);
}
results.push(exports.copyDir(file, target, predicate));
} else {
results.push(exports.copy(file, target));
}
});
return promise.all(results).then(function() {
return dst;
});
};
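// Example (illustrative, not part of the original API docs): copy a tree
// while skipping VCS metadata. Because a regex filter is inverted into a
// predicate, files whose paths match the pattern are excluded:
//
//   exports.copyDir('/tmp/src', '/tmp/dst', /(^|\/)\.git(\/|$)/)
//       .then(function(dst) { console.log('copied into ' + dst); });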
/**
* Tests if a file path exists.
* @param {string} path The path to test.
* @return {!promise.Promise.<boolean>} A promise for whether the file exists.
*/
exports.exists = function(path) {
var result = promise.defer();
fs.exists(path, result.fulfill);
return result.promise;
};
/**
* Deletes a name from the filesystem and possibly the file it refers to. Has
* no effect if the file does not exist.
* @param {string} path The path to remove.
* @return {!promise.Promise} A promise for when the file has been removed.
*/
exports.unlink = function(path) {
return new promise.Promise(function(fulfill, reject) {
fs.exists(path, function(exists) {
if (exists) {
fs.unlink(path, function(err) {
err && reject(err) || fulfill();
});
} else {
fulfill();
}
});
});
};
/**
* @return {!promise.Promise.<string>} A promise for the path to a temporary
* directory.
* @see https://www.npmjs.org/package/tmp
*/
exports.tmpDir = function() {
return promise.checkedNodeCall(tmp.dir);
};
/**
* @param {{postfix: string}=} opt_options Temporary file options.
* @return {!promise.Promise.<string>} A promise for the path to a temporary
* file.
* @see https://www.npmjs.org/package/tmp
*/
exports.tmpFile = function(opt_options) {
// |tmp.file| checks arguments length to detect options rather than doing a
// truthy check, so we must only pass options if there are some to pass.
return opt_options ?
promise.checkedNodeCall(tmp.file, opt_options) :
promise.checkedNodeCall(tmp.file);
};
/**
* Searches the {@code PATH} environment variable for the given file.
* @param {string} file The file to locate on the PATH.
* @param {boolean=} opt_checkCwd Whether to always start with the search with
* the current working directory, regardless of whether it is explicitly
* listed on the PATH.
* @return {?string} Path to the located file, or {@code null} if it could
* not be found.
*/
exports.findInPath = function(file, opt_checkCwd) {
if (opt_checkCwd) {
var tmp = path.join(process.cwd(), file);
if (fs.existsSync(tmp)) {
return tmp;
}
}
var dirs = process.env['PATH'].split(PATH_SEPARATOR);
var found = null;
dirs.forEach(function(dir) {
var tmp = path.join(dir, file);
if (!found && fs.existsSync(tmp)) {
found = tmp;
}
});
return found;
};
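// Usage sketch (illustrative): locate a driver binary, checking the current
// working directory first:
//
//   var driver = exports.findInPath(
//       process.platform === 'win32' ? 'chromedriver.exe' : 'chromedriver',
//       true);
//   if (!driver) throw new Error('chromedriver not found on PATH');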
| 1 | 11,567 | Don't penalize everyone with 250ms delay b/c some machines have problems. | SeleniumHQ-selenium | py |
@@ -119,6 +119,15 @@ func (s SortedCidSet) Equals(s2 SortedCidSet) bool {
return true
}
+// String returns a string listing the cids in the set.
+func (s SortedCidSet) String() string {
+ out := "{"
+ for it := s.Iter(); !it.Complete(); it.Next() {
+ out = fmt.Sprintf("%s %s", out, it.Value().String())
+ }
+ return out + " }"
+}
+
// MarshalJSON serializes the set to JSON.
func (s SortedCidSet) MarshalJSON() ([]byte, error) {
return json.Marshal(s.s) | 1 | package types
import (
"bytes"
"encoding/json"
"fmt"
"sort"
cbor "gx/ipfs/QmRiRJhn427YVuufBEHofLreKWNw7P7BWNq86Sb9kzqdbd/go-ipld-cbor"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
"gx/ipfs/QmcrriCMhjb5ZWzmPNxmP53px47tSPcXBNaMtLdgcKFJYk/refmt/obj/atlas"
)
func init() {
cbor.RegisterCborType(atlas.BuildEntry(SortedCidSet{}).Transform().
TransformMarshal(atlas.MakeMarshalTransformFunc(
func(s SortedCidSet) ([]*cid.Cid, error) {
return s.s, nil
})).
TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
func(s []*cid.Cid) (SortedCidSet, error) {
for i := 0; i < len(s)-1; i++ {
// Note that this will also catch duplicates.
if !cidLess(s[i], s[i+1]) {
return SortedCidSet{}, fmt.Errorf(
"invalid serialization of SortedCidSet - %s not less than %s", s[i].String(), s[i+1].String())
}
}
return SortedCidSet{s: s}, nil
})).
Complete())
}
// SortedCidSet is a set of Cids that is maintained sorted. The externally visible effect as
// compared to cid.Set is that iteration is cheap and always in-order.
// Sort order is lexicographic ascending, by serialization of the cid.
// TODO: This should probably go into go-cid package - see https://github.com/ipfs/go-cid/issues/45.
type SortedCidSet struct {
s []*cid.Cid // should be maintained sorted
}
// NewSortedCidSet returns a SortedCidSet with the specified items.
func NewSortedCidSet(ids ...*cid.Cid) (res SortedCidSet) {
for _, id := range ids {
res.Add(id)
}
return
}
// Add adds a cid to the set. Returns true if the item was added (didn't already exist), false
// otherwise.
func (s *SortedCidSet) Add(id *cid.Cid) bool {
idx := s.search(id)
if idx < len(s.s) && s.s[idx].Equals(id) {
return false
}
s.s = append(s.s, nil)
copy(s.s[idx+1:], s.s[idx:])
s.s[idx] = id
return true
}
// Has returns true if the set contains the specified cid.
func (s SortedCidSet) Has(id *cid.Cid) bool {
idx := s.search(id)
return idx < len(s.s) && s.s[idx].Equals(id)
}
// Len returns the number of items in the set.
func (s SortedCidSet) Len() int {
return len(s.s)
}
// Empty returns true if the set is empty.
func (s SortedCidSet) Empty() bool {
return s.Len() == 0
}
// Remove removes a cid from the set. Returns true if the item was removed (did in fact exist in
// the set), false otherwise.
func (s *SortedCidSet) Remove(id *cid.Cid) bool {
idx := s.search(id)
if idx < len(s.s) && s.s[idx].Equals(id) {
copy(s.s[idx:], s.s[idx+1:])
s.s = s.s[0 : len(s.s)-1]
return true
}
return false
}
// Clear removes all entries from the set.
func (s *SortedCidSet) Clear() {
s.s = s.s[:0]
}
// Iter returns an iterator that allows the caller to iterate the set in its sort order.
func (s SortedCidSet) Iter() sortedCidSetIterator { // nolint
return sortedCidSetIterator{
s: s.s,
i: 0,
}
}
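// Iteration sketch (illustrative, not in the original file):
//
//	for it := set.Iter(); !it.Complete(); it.Next() {
//		fmt.Println(it.Value())
//	}
//
// visits the cids in ascending serialization order; `set` is an assumed
// SortedCidSet value.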
// Equals returns true if the set contains the same items as another set.
func (s SortedCidSet) Equals(s2 SortedCidSet) bool {
if s.Len() != s2.Len() {
return false
}
i1 := s.Iter()
i2 := s2.Iter()
for i := 0; i < s.Len(); i++ {
if !i1.Value().Equals(i2.Value()) {
return false
}
// advance both iterators in lockstep; Value alone does not move them
i1.Next()
i2.Next()
}
return true
}
// MarshalJSON serializes the set to JSON.
func (s SortedCidSet) MarshalJSON() ([]byte, error) {
return json.Marshal(s.s)
}
// UnmarshalJSON parses JSON into the set.
func (s *SortedCidSet) UnmarshalJSON(b []byte) error {
var ts []*cid.Cid
if err := json.Unmarshal(b, &ts); err != nil {
return err
}
for i := 0; i < len(ts)-1; i++ {
if !cidLess(ts[i], ts[i+1]) {
return fmt.Errorf("invalid input - cids not sorted")
}
}
s.s = ts
return nil
}
func (s SortedCidSet) search(id *cid.Cid) int {
return sort.Search(len(s.s), func(i int) bool {
return !cidLess(s.s[i], id)
})
}
type sortedCidSetIterator struct {
s []*cid.Cid
i int
}
// Complete returns true if the iterator has reached the end of the set.
func (si *sortedCidSetIterator) Complete() bool {
return si.i >= len(si.s)
}
// Next advances the iterator to the next item and returns true if there is such an item.
func (si *sortedCidSetIterator) Next() bool {
switch {
case si.i < len(si.s):
si.i++
return si.i < len(si.s)
case si.i == len(si.s):
return false
default:
panic("unreached")
}
}
// Value returns the current item for the iterator
func (si sortedCidSetIterator) Value() *cid.Cid {
switch {
case si.i < len(si.s):
return si.s[si.i]
case si.i == len(si.s):
return nil
default:
panic("unreached")
}
}
// Note: this relies on knowledge of internal layout of Cid.
// TODO: ideally cid would just implement this. See: https://github.com/ipfs/go-cid/issues/46
func cidLess(c1, c2 *cid.Cid) bool {
p1 := c1.Prefix()
p2 := c2.Prefix()
return p1.Version < p2.Version || p1.Codec < p2.Codec || bytes.Compare(c1.Hash(), c2.Hash()) < 0
}
| 1 | 12,739 | Probably want a space after the second %s? | filecoin-project-venus | go |
@@ -206,7 +206,7 @@ func (h *historyArchiver) Get(
dirPath := URI.Path()
exists, err := directoryExists(dirPath)
if err != nil {
- return nil, serviceerror.NewInternal(err.Error())
+ return nil, serviceerror.NewUnavailable(err.Error())
}
if !exists {
return nil, serviceerror.NewInvalidArgument(archiver.ErrHistoryNotExist.Error()) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Filestore History Archiver will archive workflow histories to local disk.
// Each Archive() request results in a file named in the format of
// hash(namespaceID, workflowID, runID)_version.history being created in the specified
// directory. Workflow histories stored in that file are encoded in JSON format.
// The Get() method retrieves the archived histories from the directory specified in the
// URI. It optionally takes in a NextPageToken which specifies the workflow close failover
// version and the index of the first history batch that should be returned. Instead of
// NextPageToken, the caller can also provide a close failover version, in which case the Get() method
// will return history batches starting from the beginning of that history version. If neither
// NextPageToken nor a close failover version is specified, the highest close failover version
// will be picked.
package filestore
import (
"context"
"errors"
"os"
"path"
"strconv"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
archiverspb "go.temporal.io/server/api/archiver/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/archiver"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/codec"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/log/tag"
)
const (
// URIScheme is the scheme for the filestore implementation
URIScheme = "file"
errEncodeHistory = "failed to encode history batches"
errMakeDirectory = "failed to make directory"
errWriteFile = "failed to write history to file"
targetHistoryBlobSize = 2 * 1024 * 1024 // 2MB
)
var (
errInvalidFileMode = errors.New("invalid file mode")
errInvalidDirMode = errors.New("invalid directory mode")
)
type (
historyArchiver struct {
container *archiver.HistoryBootstrapContainer
fileMode os.FileMode
dirMode os.FileMode
// only set in test code
historyIterator archiver.HistoryIterator
}
getHistoryToken struct {
CloseFailoverVersion int64
NextBatchIdx int
}
)
// NewHistoryArchiver creates a new archiver.HistoryArchiver based on filestore
func NewHistoryArchiver(
container *archiver.HistoryBootstrapContainer,
config *config.FilestoreArchiver,
) (archiver.HistoryArchiver, error) {
return newHistoryArchiver(container, config, nil)
}
func newHistoryArchiver(
container *archiver.HistoryBootstrapContainer,
config *config.FilestoreArchiver,
historyIterator archiver.HistoryIterator,
) (*historyArchiver, error) {
fileMode, err := strconv.ParseUint(config.FileMode, 0, 32)
if err != nil {
return nil, errInvalidFileMode
}
dirMode, err := strconv.ParseUint(config.DirMode, 0, 32)
if err != nil {
return nil, errInvalidDirMode
}
return &historyArchiver{
container: container,
fileMode: os.FileMode(fileMode),
dirMode: os.FileMode(dirMode),
historyIterator: historyIterator,
}, nil
}
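// Illustrative sketch (assumed config values, not from this file): because
// the modes are parsed with strconv.ParseUint(s, 0, 32), octal strings behave
// as expected, e.g.
//
//	cfg := &config.FilestoreArchiver{FileMode: "0666", DirMode: "0766"}
//
// yields os.FileMode(0666) for files and os.FileMode(0766) for directories.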
func (h *historyArchiver) Archive(
ctx context.Context,
URI archiver.URI,
request *archiver.ArchiveHistoryRequest,
opts ...archiver.ArchiveOption,
) (err error) {
featureCatalog := archiver.GetFeatureCatalog(opts...)
defer func() {
if err != nil && !common.IsPersistenceTransientError(err) && featureCatalog.NonRetryableError != nil {
err = featureCatalog.NonRetryableError()
}
}()
logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String())
if err := h.ValidateURI(URI); err != nil {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err))
return err
}
if err := archiver.ValidateHistoryArchiveRequest(request); err != nil {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidArchiveRequest), tag.Error(err))
return err
}
historyIterator := h.historyIterator
if historyIterator == nil { // will only be set by testing code
historyIterator = archiver.NewHistoryIterator(request, h.container.ExecutionManager, targetHistoryBlobSize)
}
var historyBatches []*historypb.History
for historyIterator.HasNext() {
historyBlob, err := getNextHistoryBlob(ctx, historyIterator)
if err != nil {
if !common.IsPersistenceTransientError(err) {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err))
} else {
logger.Error(archiver.ArchiveTransientErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonReadHistory), tag.Error(err))
}
return err
}
if historyMutated(request, historyBlob.Body, historyBlob.Header.IsLast) {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonHistoryMutated))
return archiver.ErrHistoryMutated
}
historyBatches = append(historyBatches, historyBlob.Body...)
}
encoder := codec.NewJSONPBEncoder()
encodedHistoryBatches, err := encoder.EncodeHistories(historyBatches)
if err != nil {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errEncodeHistory), tag.Error(err))
return err
}
dirPath := URI.Path()
if err = mkdirAll(dirPath, h.dirMode); err != nil {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err))
return err
}
filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, request.CloseFailoverVersion)
if err := writeFile(path.Join(dirPath, filename), encodedHistoryBatches, h.fileMode); err != nil {
logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err))
return err
}
return nil
}
func (h *historyArchiver) Get(
ctx context.Context,
URI archiver.URI,
request *archiver.GetHistoryRequest,
) (*archiver.GetHistoryResponse, error) {
if err := h.ValidateURI(URI); err != nil {
return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidURI.Error())
}
if err := archiver.ValidateGetRequest(request); err != nil {
return nil, serviceerror.NewInvalidArgument(archiver.ErrInvalidGetHistoryRequest.Error())
}
dirPath := URI.Path()
exists, err := directoryExists(dirPath)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
if !exists {
return nil, serviceerror.NewInvalidArgument(archiver.ErrHistoryNotExist.Error())
}
var token *getHistoryToken
if request.NextPageToken != nil {
token, err = deserializeGetHistoryToken(request.NextPageToken)
if err != nil {
return nil, serviceerror.NewInvalidArgument(archiver.ErrNextPageTokenCorrupted.Error())
}
} else if request.CloseFailoverVersion != nil {
token = &getHistoryToken{
CloseFailoverVersion: *request.CloseFailoverVersion,
NextBatchIdx: 0,
}
} else {
highestVersion, err := getHighestVersion(dirPath, request)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
token = &getHistoryToken{
CloseFailoverVersion: *highestVersion,
NextBatchIdx: 0,
}
}
filename := constructHistoryFilename(request.NamespaceID, request.WorkflowID, request.RunID, token.CloseFailoverVersion)
filepath := path.Join(dirPath, filename)
exists, err = fileExists(filepath)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
if !exists {
return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error())
}
encodedHistoryBatches, err := readFile(filepath)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
encoder := codec.NewJSONPBEncoder()
historyBatches, err := encoder.DecodeHistories(encodedHistoryBatches)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
historyBatches = historyBatches[token.NextBatchIdx:]
response := &archiver.GetHistoryResponse{}
numOfEvents := 0
numOfBatches := 0
for _, batch := range historyBatches {
response.HistoryBatches = append(response.HistoryBatches, batch)
numOfBatches++
numOfEvents += len(batch.Events)
if numOfEvents >= request.PageSize {
break
}
}
if numOfBatches < len(historyBatches) {
token.NextBatchIdx += numOfBatches
nextToken, err := serializeToken(token)
if err != nil {
return nil, serviceerror.NewInternal(err.Error())
}
response.NextPageToken = nextToken
}
return response, nil
}
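// Paging sketch (illustrative, not part of the original file): a caller can
// drain all archived batches by feeding NextPageToken back in:
//
//	req := &archiver.GetHistoryRequest{NamespaceID: nsID, WorkflowID: wfID,
//		RunID: runID, PageSize: 100}
//	for {
//		resp, err := h.Get(ctx, uri, req)
//		if err != nil {
//			return err
//		}
//		batches = append(batches, resp.HistoryBatches...)
//		if len(resp.NextPageToken) == 0 {
//			break
//		}
//		req.NextPageToken = resp.NextPageToken
//	}
//
// nsID, wfID, runID, uri, and batches are assumed caller-supplied names.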
func (h *historyArchiver) ValidateURI(URI archiver.URI) error {
if URI.Scheme() != URIScheme {
return archiver.ErrURISchemeMismatch
}
return validateDirPath(URI.Path())
}
func getNextHistoryBlob(ctx context.Context, historyIterator archiver.HistoryIterator) (*archiverspb.HistoryBlob, error) {
historyBlob, err := historyIterator.Next()
op := func() error {
historyBlob, err = historyIterator.Next()
return err
}
for err != nil {
if !common.IsPersistenceTransientError(err) {
return nil, err
}
if contextExpired(ctx) {
return nil, archiver.ErrContextTimeout
}
err = backoff.Retry(op, common.CreatePersistanceRetryPolicy(), common.IsPersistenceTransientError)
}
return historyBlob, nil
}
func getHighestVersion(dirPath string, request *archiver.GetHistoryRequest) (*int64, error) {
filenames, err := listFilesByPrefix(dirPath, constructHistoryFilenamePrefix(request.NamespaceID, request.WorkflowID, request.RunID))
if err != nil {
return nil, err
}
var highestVersion *int64
for _, filename := range filenames {
version, err := extractCloseFailoverVersion(filename)
if err != nil {
continue
}
if highestVersion == nil || version > *highestVersion {
highestVersion = &version
}
}
if highestVersion == nil {
return nil, archiver.ErrHistoryNotExist
}
return highestVersion, nil
}
| 1 | 12,570 | This seems like an internal error still? | temporalio-temporal | go |
@@ -85,7 +85,7 @@ describe PurchasesController do
post :create, purchase: customer_params(stripe_token), product_id: product.to_param
- FakeStripe.should have_charged(1500).to('[email protected]').with_token(stripe_token)
+ expect(FakeStripe).to have_charged(1500).to('[email protected]').with_token(stripe_token)
end
end
| 1 | require 'spec_helper'
include StubCurrentUserHelper
describe PurchasesController do
describe '#new when purchasing a screencast as a user with an active subscription' do
it 'renders a subscriber-specific layout' do
user = create(:subscriber)
product = create(:screencast)
stub_current_user_with(user)
get :new, product_id: product
expect(response).to(
redirect_to(new_subscriber_screencast_purchase_path(product))
)
end
end
describe '#new when purchasing a plan as a user with an active subscription' do
context 'when purchasing an individual plan' do
it 'renders a subscriber-specific layout' do
user = create(:subscriber)
stub_current_user_with(user)
get :new, individual_plan_id: user.subscription.plan
expect(response).to redirect_to dashboard_path
end
end
context 'when purchasing a team plan' do
it 'renders a subscriber-specific layout' do
user = create(:subscriber)
stub_current_user_with(user)
get :new, teams_team_plan_id: user.subscription.plan
expect(response).to redirect_to dashboard_path
end
end
end
describe '#new with no variant specified' do
it 'defaults purchase to individual' do
user = create(:user)
product = create(:screencast)
stub_current_user_with(user)
get :new, product_id: product
expect(assigns(:purchase).variant).to eq 'individual'
end
end
describe '#new with company variant specified' do
it 'defaults purchase to company' do
user = create(:user)
product = create(:screencast)
stub_current_user_with(user)
get :new, product_id: product, variant: 'company'
expect(assigns(:purchase).variant).to eq 'company'
end
end
describe '#new when attempting to purchase a workshop' do
it 'redirects to the subscription page' do
user = create(:user)
workshop = create(:workshop)
stub_current_user_with(user)
get :new, workshop_id: workshop.id
expect(response).to redirect_to new_subscription_path
end
end
describe 'processing on stripe' do
it 'creates and saves a stripe customer and charges it for the product' do
stub_current_user_with(create(:user))
product = create(:product)
stripe_token = 'token'
post :create, purchase: customer_params(stripe_token), product_id: product.to_param
FakeStripe.should have_charged(1500).to('[email protected]').with_token(stripe_token)
end
end
it 'sets flash[:purchase_paid_price]' do
stub_current_user_with(create(:user))
product = create(:product)
post :create, purchase: customer_params, product_id: product
flash[:purchase_amount].should eq product.individual_price
end
describe "processing on paypal" do
it "starts a paypal transaction and saves a purchase for the product" do
stub_current_user_with(create(:user))
product = create(:product)
post :create, purchase: { variant: "individual", name: "User", email: "[email protected]", payment_method: "paypal" }, product_id: product.to_param
response.status.should == 302
response.location.should == FakePaypal.outgoing_uri
assigns(:purchase).should_not be_paid
end
end
describe "product is not paid" do
let(:product) { create(:book, individual_price: 15) }
let(:purchase) { create(:purchase, purchaseable: product) }
it "redirects from show to the product page" do
purchase.paid = false
purchase.save
stub_current_user_with(create(:user))
get :show, id: purchase.to_param
response.should redirect_to(book_path(product))
end
end
describe 'purchasing a team plan when there is more than one' do
it 'uses the requested plan' do
user = create(:user)
stub_current_user_with(user)
create(:team_plan, sku: 'sku1')
desired_plan = create(:team_plan, sku: 'sku2')
get :new, teams_team_plan_id: desired_plan.sku
expect(assigns(:purchase).purchaseable).to eq desired_plan
end
end
describe '#show' do
it 'should respond with 404 if no purchase found' do
expect{
get :show, id: 'robots.txt'
}.to raise_error(ActiveRecord::RecordNotFound)
end
end
describe '#index' do
it 'assigns paid purchases belonging to the current user' do
user = create(:user, :with_github)
purchase_two = create(
:paid_purchase,
user: user,
created_at: 5.minutes.ago
)
purchase_one = create(
:paid_purchase,
user: user,
created_at: 1.minute.ago
)
create(:unpaid_purchase, user: user)
create(:plan_purchase, user: user)
create(:paid_purchase, user: create(:user))
stub_current_user_with(user)
get :index
expect(assigns(:purchases)).to eq([purchase_one, purchase_two])
end
end
def customer_params(token='stripe token')
{
name: 'User',
email: '[email protected]',
variant: "individual",
stripe_token: token,
payment_method: "stripe"
}
end
end
| 1 | 9,607 | Line is too long. [94/80] | thoughtbot-upcase | rb |
@@ -125,10 +125,16 @@ func (a *ClusterIdentityAllocator) Run(stopCh <-chan struct{}) {
}
}
+type ClusterIdentity struct {
+ UUID uuid.UUID
+}
+
// ClusterIdentityProvider is an interface used to retrieve the cluster identity information (UUID),
-// as provided by the user or generated by the Antrea Controller.
+// as provided by the user or generated by the Antrea Controller. It also returns the time at which
+// the antrea-cluster-identity was created, which can typically be considered as the time at which
+// Antrea was deployed to the cluster.
type ClusterIdentityProvider interface {
- Get() (uuid.UUID, error)
+ Get() (ClusterIdentity, time.Time, error)
}
type clusterIdentityProvider struct { | 1 | // Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clusteridentity
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
const (
DefaultClusterIdentityConfigMapName = "antrea-cluster-identity"
uuidConfigMapKey = "uuid"
)
// ClusterIdentityAllocator ensures that the antrea-cluster-identity ConfigMap is populated
// correctly, with a valid UUID. It is meant to be used by the Antrea Controller.
type ClusterIdentityAllocator struct {
clusterIdentityConfigMapNamespace string
clusterIdentityConfigMapName string
k8sClient clientset.Interface
}
// NewClusterIdentityAllocator creates a ClusterIdentityAllocator object
func NewClusterIdentityAllocator(
clusterIdentityConfigMapNamespace string,
clusterIdentityConfigMapName string,
k8sClient clientset.Interface,
) *ClusterIdentityAllocator {
return &ClusterIdentityAllocator{
clusterIdentityConfigMapNamespace: clusterIdentityConfigMapNamespace,
clusterIdentityConfigMapName: clusterIdentityConfigMapName,
k8sClient: k8sClient,
}
}
func (a *ClusterIdentityAllocator) updateConfigMapIfNeeded() error {
configMap, err := a.k8sClient.CoreV1().ConfigMaps(a.clusterIdentityConfigMapNamespace).Get(context.TODO(), a.clusterIdentityConfigMapName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error when getting '%s/%s' ConfigMap: %v", a.clusterIdentityConfigMapNamespace, a.clusterIdentityConfigMapName, err)
}
// returns a triplet consisting of the cluster UUID, a boolean indicating if the UUID needs
// to be written to the ConfigMap, and an error if applicable
inspectUUID := func() (uuid.UUID, bool, error) {
clusterUUIDStr, ok := configMap.Data[uuidConfigMapKey]
if ok && clusterUUIDStr != "" {
clusterUUID, err := uuid.Parse(clusterUUIDStr)
if err != nil {
return uuid.Nil, false, fmt.Errorf("cluster already has UUID '%s' but it is not valid: %v", clusterUUIDStr, err)
}
return clusterUUID, false, nil
}
// generate a new random UUID
clusterUUID := uuid.New()
return clusterUUID, true, nil
}
clusterUUID, clusterUUIDNeedsUpdate, err := inspectUUID()
if err != nil {
return err
}
if !clusterUUIDNeedsUpdate {
klog.Infof("Existing cluster UUID: %v", clusterUUID)
return nil
}
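// Persist the freshly generated UUID. Note that Data is replaced wholesale, so
// any other keys previously stored in the ConfigMap are dropped.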
configMap.Data = map[string]string{
uuidConfigMapKey: clusterUUID.String(),
}
if _, err := a.k8sClient.CoreV1().ConfigMaps(a.clusterIdentityConfigMapNamespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("error when updating '%s/%s' ConfigMap with new cluster identity: %v", a.clusterIdentityConfigMapNamespace, a.clusterIdentityConfigMapName, err)
}
klog.Infof("New cluster UUID: %v", clusterUUID)
return nil
}
// Run will ensure that the antrea-cluster-identity ConfigMap is up-to-date. It is meant to be
// called asynchronously in its own goroutine, and will keep retrying in case of error, using an
// exponential backoff mechanism.
func (a *ClusterIdentityAllocator) Run(stopCh <-chan struct{}) {
// Exponential backoff, starting at 100ms with a factor of 2. A "steps" value of 8 means we
// will increase the backoff duration at most 8 times, so the max duration is
// (100ms * 2^8), which is about 25s.
retry := wait.Backoff{
Steps: 8,
Duration: 100 * time.Millisecond,
Factor: 2.0,
Jitter: 0.0,
}
for {
err := a.updateConfigMapIfNeeded()
if err == nil {
return
}
sleepDuration := retry.Step()
klog.Errorf("Cannot validate or update cluster UUID because of the following error, will retry in %v: %v", sleepDuration, err)
select {
case <-stopCh:
return
case <-time.After(sleepDuration):
continue
}
}
}
// ClusterIdentityProvider is an interface used to retrieve the cluster identity information (UUID),
// as provided by the user or generated by the Antrea Controller.
type ClusterIdentityProvider interface {
Get() (uuid.UUID, error)
}
type clusterIdentityProvider struct {
clusterIdentityConfigMapNamespace string
clusterIdentityConfigMapName string
k8sClient clientset.Interface
}
// NewClusterIdentityProvider returns a new object implementing the ClusterIdentityProvider
// interface.
func NewClusterIdentityProvider(
clusterIdentityConfigMapNamespace string,
clusterIdentityConfigMapName string,
k8sClient clientset.Interface,
) *clusterIdentityProvider {
return &clusterIdentityProvider{
clusterIdentityConfigMapNamespace: clusterIdentityConfigMapNamespace,
clusterIdentityConfigMapName: clusterIdentityConfigMapName,
k8sClient: k8sClient,
}
}
// Get will retrieve the cluster identity (UUID) stored in the antrea-cluster-identity ConfigMap. In
// case of error, clients are invited to retry as the information may not be available yet.
func (p *clusterIdentityProvider) Get() (uuid.UUID, error) {
configMap, err := p.k8sClient.CoreV1().ConfigMaps(p.clusterIdentityConfigMapNamespace).Get(context.TODO(), p.clusterIdentityConfigMapName, metav1.GetOptions{})
if err != nil {
return uuid.Nil, fmt.Errorf("error when getting '%s/%s' ConfigMap: %v", p.clusterIdentityConfigMapNamespace, p.clusterIdentityConfigMapName, err)
}
getUUID := func() (uuid.UUID, error) {
clusterUUIDStr, ok := configMap.Data[uuidConfigMapKey]
if !ok || clusterUUIDStr == "" {
return uuid.Nil, fmt.Errorf("cluster UUID has not been set yet")
}
clusterUUID, err := uuid.Parse(clusterUUIDStr)
if err != nil {
return uuid.Nil, fmt.Errorf("cluster UUID cannot be parsed")
}
return clusterUUID, nil
}
return getUUID()
}
| 1 | 34,102 | out of curiosity, why creating another struct to wrap it? | antrea-io-antrea | go |
@@ -9549,8 +9549,15 @@ bool Bot::UseDiscipline(uint32 spell_id, uint32 target) {
if(spells[spell_id].timer_id > 0 && spells[spell_id].timer_id < MAX_DISCIPLINE_TIMERS)
SetDisciplineRecastTimer(spells[spell_id].timer_id, spell.recast_time);
} else {
- uint32 remain = (GetDisciplineRemainingTime(this, spells[spell_id].timer_id) / 1000);
- GetOwner()->Message(Chat::White, "%s can use this discipline in %d minutes %d seconds.", GetCleanName(), (remain / 60), (remain % 60));
+ uint32 remaining_time = (GetDisciplineRemainingTime(this, spells[spell_id].timer_id) / 1000);
+ GetOwner()->Message(
+ Chat::White,
+ fmt::format(
+ "{} can use this Discipline in {}.",
+ GetCleanName(),
+ ConvertSecondsToTime(remaining_time)
+ ).c_str()
+ );
return false;
}
} | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.org)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef BOTS
#include "bot.h"
#include "object.h"
#include "doors.h"
#include "quest_parser_collection.h"
#include "lua_parser.h"
#include "../common/string_util.h"
#include "../common/say_link.h"
extern volatile bool is_zone_loaded;
// This constructor is used during the bot create command
Bot::Bot(NPCType *npcTypeData, Client* botOwner) : NPC(npcTypeData, nullptr, glm::vec4(), Ground, false), rest_timer(1), ping_timer(1) {
GiveNPCTypeData(npcTypeData);
if(botOwner) {
this->SetBotOwner(botOwner);
this->_botOwnerCharacterID = botOwner->CharacterID();
} else {
this->SetBotOwner(0);
this->_botOwnerCharacterID = 0;
}
m_inv.SetInventoryVersion(EQ::versions::MobVersion::Bot);
m_inv.SetGMInventory(false); // bot expansions are not currently implemented (defaults to static)
_guildRank = 0;
_guildId = 0;
_lastTotalPlayTime = 0;
_startTotalPlayTime = time(&_startTotalPlayTime);
_lastZoneId = 0;
_baseMR = npcTypeData->MR;
_baseCR = npcTypeData->CR;
_baseDR = npcTypeData->DR;
_baseFR = npcTypeData->FR;
_basePR = npcTypeData->PR;
_baseCorrup = npcTypeData->Corrup;
_baseAC = npcTypeData->AC;
_baseSTR = npcTypeData->STR;
_baseSTA = npcTypeData->STA;
_baseDEX = npcTypeData->DEX;
_baseAGI = npcTypeData->AGI;
_baseINT = npcTypeData->INT;
_baseWIS = npcTypeData->WIS;
_baseCHA = npcTypeData->CHA;
_baseATK = npcTypeData->ATK;
_baseRace = npcTypeData->race;
_baseGender = npcTypeData->gender;
RestRegenHP = 0;
RestRegenMana = 0;
RestRegenEndurance = 0;
SetBotID(0);
SetBotSpellID(0);
SetSpawnStatus(false);
SetBotArcher(false);
SetBotCharmer(false);
SetPetChooser(false);
SetRangerAutoWeaponSelect(false);
SetTaunting(GetClass() == WARRIOR);
SetDefaultBotStance();
SetAltOutOfCombatBehavior(GetClass() == BARD); // will need to be updated if more classes make use of this flag
SetShowHelm(true);
SetPauseAI(false);
m_alt_combat_hate_timer.Start(250);
m_auto_defend_timer.Disable();
//m_combat_jitter_timer.Disable();
//SetCombatJitterFlag(false);
SetGuardFlag(false);
SetHoldFlag(false);
SetAttackFlag(false);
SetAttackingFlag(false);
SetPullFlag(false);
SetPullingFlag(false);
SetReturningFlag(false);
m_previous_pet_order = SPO_Guard;
rest_timer.Disable();
ping_timer.Disable();
SetFollowDistance(BOT_FOLLOW_DISTANCE_DEFAULT);
if (IsCasterClass(GetClass()))
SetStopMeleeLevel((uint8)RuleI(Bots, CasterStopMeleeLevel));
else
SetStopMeleeLevel(255);
// Do this once and only in this constructor
GenerateAppearance();
GenerateBaseStats();
// Calculate HitPoints Last As It Uses Base Stats
current_hp = GenerateBaseHitPoints();
current_mana = GenerateBaseManaPoints();
cur_end = CalcBaseEndurance();
hp_regen = CalcHPRegen();
mana_regen = CalcManaRegen();
end_regen = CalcEnduranceRegen();
for (int i = 0; i < MaxTimer; i++)
timers[i] = 0;
strcpy(this->name, this->GetCleanName());
memset(&_botInspectMessage, 0, sizeof(InspectMessage_Struct));
}
// This constructor is used when the bot is loaded out of the database
Bot::Bot(uint32 botID, uint32 botOwnerCharacterID, uint32 botSpellsID, double totalPlayTime, uint32 lastZoneId, NPCType *npcTypeData)
: NPC(npcTypeData, nullptr, glm::vec4(), Ground, false), rest_timer(1), ping_timer(1)
{
GiveNPCTypeData(npcTypeData);
this->_botOwnerCharacterID = botOwnerCharacterID;
if(this->_botOwnerCharacterID > 0)
this->SetBotOwner(entity_list.GetClientByCharID(this->_botOwnerCharacterID));
auto bot_owner = GetBotOwner();
m_inv.SetInventoryVersion(EQ::versions::MobVersion::Bot);
m_inv.SetGMInventory(false); // bot expansions are not currently implemented (defaults to static)
_guildRank = 0;
_guildId = 0;
_lastTotalPlayTime = totalPlayTime;
_startTotalPlayTime = time(&_startTotalPlayTime);
_lastZoneId = lastZoneId;
berserk = false;
_baseMR = npcTypeData->MR;
_baseCR = npcTypeData->CR;
_baseDR = npcTypeData->DR;
_baseFR = npcTypeData->FR;
_basePR = npcTypeData->PR;
_baseCorrup = npcTypeData->Corrup;
_baseAC = npcTypeData->AC;
_baseSTR = npcTypeData->STR;
_baseSTA = npcTypeData->STA;
_baseDEX = npcTypeData->DEX;
_baseAGI = npcTypeData->AGI;
_baseINT = npcTypeData->INT;
_baseWIS = npcTypeData->WIS;
_baseCHA = npcTypeData->CHA;
_baseATK = npcTypeData->ATK;
_baseRace = npcTypeData->race;
_baseGender = npcTypeData->gender;
current_hp = npcTypeData->current_hp;
current_mana = npcTypeData->Mana;
RestRegenHP = 0;
RestRegenMana = 0;
RestRegenEndurance = 0;
SetBotID(botID);
SetBotSpellID(botSpellsID);
SetSpawnStatus(false);
SetBotArcher(false);
SetBotCharmer(false);
SetPetChooser(false);
SetRangerAutoWeaponSelect(false);
bool stance_flag = false;
if (!database.botdb.LoadStance(this, stance_flag) && bot_owner)
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadStance(), GetCleanName());
if (!stance_flag && bot_owner)
bot_owner->Message(Chat::Red, "Could not locate stance for '%s'", GetCleanName());
SetTaunting((GetClass() == WARRIOR || GetClass() == PALADIN || GetClass() == SHADOWKNIGHT) && (GetBotStance() == EQ::constants::stanceAggressive));
SetPauseAI(false);
m_alt_combat_hate_timer.Start(250);
m_auto_defend_timer.Disable();
//m_combat_jitter_timer.Disable();
//SetCombatJitterFlag(false);
SetGuardFlag(false);
SetHoldFlag(false);
SetAttackFlag(false);
SetAttackingFlag(false);
SetPullFlag(false);
SetPullingFlag(false);
SetReturningFlag(false);
m_previous_pet_order = SPO_Guard;
rest_timer.Disable();
ping_timer.Disable();
SetFollowDistance(BOT_FOLLOW_DISTANCE_DEFAULT);
if (IsCasterClass(GetClass()))
SetStopMeleeLevel((uint8)RuleI(Bots, CasterStopMeleeLevel));
else
SetStopMeleeLevel(255);
strcpy(this->name, this->GetCleanName());
memset(&_botInspectMessage, 0, sizeof(InspectMessage_Struct));
if (!database.botdb.LoadInspectMessage(GetBotID(), _botInspectMessage) && bot_owner)
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadInspectMessage(), GetCleanName());
if (!database.botdb.LoadGuildMembership(GetBotID(), _guildId, _guildRank, _guildName) && bot_owner)
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadGuildMembership(), GetCleanName());
std::string error_message;
EquipBot(&error_message);
if(!error_message.empty()) {
if(bot_owner)
bot_owner->Message(Chat::Red, error_message.c_str());
error_message.clear();
}
for (int i = 0; i < MaxTimer; i++)
timers[i] = 0;
if (GetClass() == ROGUE) {
m_evade_timer.Start();
}
m_CastingRoles.GroupHealer = false;
m_CastingRoles.GroupSlower = false;
m_CastingRoles.GroupNuker = false;
m_CastingRoles.GroupDoter = false;
GenerateBaseStats();
if (!database.botdb.LoadTimers(this) && bot_owner)
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadTimers(), GetCleanName());
LoadAAs();
// copied from client CompleteConnect() handler - watch for problems
// (may have to move to post-spawn location if certain buffs still don't process correctly)
if (database.botdb.LoadBuffs(this) && bot_owner) {
//reapply some buffs
uint32 buff_count = GetMaxTotalSlots();
for (uint32 j1 = 0; j1 < buff_count; j1++) {
if (!IsValidSpell(buffs[j1].spellid))
continue;
const SPDat_Spell_Struct& spell = spells[buffs[j1].spellid];
int NimbusEffect = GetNimbusEffect(buffs[j1].spellid);
if (NimbusEffect) {
if (!IsNimbusEffectActive(NimbusEffect))
SendSpellEffect(NimbusEffect, 500, 0, 1, 3000, true);
}
for (int x1 = 0; x1 < EFFECT_COUNT; x1++) {
switch (spell.effect_id[x1]) {
case SE_IllusionCopy:
case SE_Illusion: {
if (spell.base_value[x1] == -1) {
if (gender == 1)
gender = 0;
else if (gender == 0)
gender = 1;
SendIllusionPacket(GetRace(), gender, 0xFF, 0xFF);
}
else if (spell.base_value[x1] == -2) // WTF IS THIS
{
if (GetRace() == 128 || GetRace() == 130 || GetRace() <= 12)
SendIllusionPacket(GetRace(), GetGender(), spell.limit_value[x1], spell.max_value[x1]);
}
else if (spell.max_value[x1] > 0)
{
SendIllusionPacket(spell.base_value[x1], 0xFF, spell.limit_value[x1], spell.max_value[x1]);
}
else
{
SendIllusionPacket(spell.base_value[x1], 0xFF, 0xFF, 0xFF);
}
switch (spell.base_value[x1]) {
case OGRE:
SendAppearancePacket(AT_Size, 9);
break;
case TROLL:
SendAppearancePacket(AT_Size, 8);
break;
case VAHSHIR:
case BARBARIAN:
SendAppearancePacket(AT_Size, 7);
break;
case HALF_ELF:
case WOOD_ELF:
case DARK_ELF:
case FROGLOK:
SendAppearancePacket(AT_Size, 5);
break;
case DWARF:
SendAppearancePacket(AT_Size, 4);
break;
case HALFLING:
case GNOME:
SendAppearancePacket(AT_Size, 3);
break;
default:
SendAppearancePacket(AT_Size, 6);
break;
}
break;
}
//case SE_SummonHorse: {
// SummonHorse(buffs[j1].spellid);
// //hasmount = true; //this was false, is that the correct thing?
// break;
//}
case SE_Silence:
{
Silence(true);
break;
}
case SE_Amnesia:
{
Amnesia(true);
break;
}
case SE_DivineAura:
{
invulnerable = true;
break;
}
case SE_Invisibility2:
case SE_Invisibility:
{
invisible = true;
SendAppearancePacket(AT_Invis, 1);
break;
}
case SE_Levitate:
{
if (!zone->CanLevitate())
{
//if (!GetGM())
//{
SendAppearancePacket(AT_Levitate, 0);
BuffFadeByEffect(SE_Levitate);
//Message(Chat::Red, "You can't levitate in this zone.");
//}
}
else {
SendAppearancePacket(AT_Levitate, 2);
}
break;
}
case SE_InvisVsUndead2:
case SE_InvisVsUndead:
{
invisible_undead = true;
break;
}
case SE_InvisVsAnimals:
{
invisible_animals = true;
break;
}
case SE_AddMeleeProc:
case SE_WeaponProc:
{
AddProcToWeapon(GetProcID(buffs[j1].spellid, x1), false, 100 + spells[buffs[j1].spellid].limit_value[x1], buffs[j1].spellid, buffs[j1].casterlevel);
break;
}
case SE_DefensiveProc:
{
AddDefensiveProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].limit_value[x1], buffs[j1].spellid);
break;
}
case SE_RangedProc:
{
AddRangedProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].limit_value[x1], buffs[j1].spellid);
break;
}
}
}
}
}
else if (bot_owner) {
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadBuffs(), GetCleanName());
}
CalcBotStats(false);
hp_regen = CalcHPRegen();
mana_regen = CalcManaRegen();
end_regen = CalcEnduranceRegen();
if(current_hp > max_hp)
current_hp = max_hp;
if(current_hp <= 0) {
BuffFadeNonPersistDeath();
if (RuleB(Bots, ResurrectionSickness)) {
int resurrection_sickness_spell_id = (
RuleB(Bots, OldRaceRezEffects) &&
(
GetRace() == BARBARIAN ||
GetRace() == DWARF ||
GetRace() == TROLL ||
GetRace() == OGRE
) ?
RuleI(Bots, OldResurrectionSicknessSpell) :
RuleI(Bots, ResurrectionSicknessSpell)
);
SetHP(max_hp / 5);
SetMana(0);
SpellOnTarget(resurrection_sickness_spell_id, this); // Rezz effects
} else {
SetHP(GetMaxHP());
SetMana(GetMaxMana());
}
}
if(current_mana > max_mana)
current_mana = max_mana;
cur_end = max_end;
}
Bot::~Bot() {
AI_Stop();
LeaveHealRotationMemberPool();
if(HasGroup())
Bot::RemoveBotFromGroup(this, GetGroup());
if(HasPet())
GetPet()->Depop();
entity_list.RemoveBot(GetID());
}
void Bot::SetBotID(uint32 botID) {
this->_botID = botID;
this->npctype_id = botID;
}
void Bot::SetBotSpellID(uint32 newSpellID) {
this->npc_spells_id = newSpellID;
}
void Bot::SetSurname(std::string bot_surname) {
_surname = bot_surname.substr(0, 31);
if (spawned) {
auto outapp = new EQApplicationPacket(OP_GMLastName, sizeof(GMLastName_Struct));
GMLastName_Struct* gmn = (GMLastName_Struct*)outapp->pBuffer;
strcpy(gmn->name, GetCleanName());
strcpy(gmn->gmname, GetCleanName());
strcpy(gmn->lastname, GetSurname().c_str());
gmn->unknown[0] = 1;
gmn->unknown[1] = 1;
gmn->unknown[2] = 1;
gmn->unknown[3] = 1;
entity_list.QueueClients(this, outapp);
safe_delete(outapp);
}
}
void Bot::SetTitle(std::string bot_title) {
_title = bot_title.substr(0, 31);
if (spawned) {
auto outapp = new EQApplicationPacket(OP_SetTitleReply, sizeof(SetTitleReply_Struct));
SetTitleReply_Struct* strs = (SetTitleReply_Struct*)outapp->pBuffer;
strs->is_suffix = 0;
strn0cpy(strs->title, _title.c_str(), sizeof(strs->title));
strs->entity_id = GetID();
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
}
}
void Bot::SetSuffix(std::string bot_suffix) {
_suffix = bot_suffix.substr(0, 31);
if (spawned) {
auto outapp = new EQApplicationPacket(OP_SetTitleReply, sizeof(SetTitleReply_Struct));
SetTitleReply_Struct* strs = (SetTitleReply_Struct*)outapp->pBuffer;
strs->is_suffix = 1;
strn0cpy(strs->title, _suffix.c_str(), sizeof(strs->title));
strs->entity_id = GetID();
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
}
}
uint32 Bot::GetBotArcheryRange() {
const EQ::ItemInstance *range_inst = GetBotItem(EQ::invslot::slotRange);
const EQ::ItemInstance *ammo_inst = GetBotItem(EQ::invslot::slotAmmo);
if (!range_inst || !ammo_inst)
return 0;
const EQ::ItemData *range_item = range_inst->GetItem();
const EQ::ItemData *ammo_item = ammo_inst->GetItem();
if (!range_item || !ammo_item || range_item->ItemType != EQ::item::ItemTypeBow || ammo_item->ItemType != EQ::item::ItemTypeArrow)
return 0;
// everything is good!
return (range_item->Range + ammo_item->Range);
}
void Bot::ChangeBotArcherWeapons(bool isArcher) {
if((GetClass()==WARRIOR) || (GetClass()==PALADIN) || (GetClass()==RANGER) || (GetClass()==SHADOWKNIGHT) || (GetClass()==ROGUE)) {
if(!isArcher) {
BotAddEquipItem(EQ::invslot::slotPrimary, GetBotItemBySlot(EQ::invslot::slotPrimary));
BotAddEquipItem(EQ::invslot::slotSecondary, GetBotItemBySlot(EQ::invslot::slotSecondary));
SetAttackTimer();
BotGroupSay(this, "My blade is ready");
} else {
BotRemoveEquipItem(EQ::invslot::slotPrimary);
BotRemoveEquipItem(EQ::invslot::slotSecondary);
BotAddEquipItem(EQ::invslot::slotAmmo, GetBotItemBySlot(EQ::invslot::slotAmmo));
BotAddEquipItem(EQ::invslot::slotSecondary, GetBotItemBySlot(EQ::invslot::slotRange));
SetAttackTimer();
BotGroupSay(this, "My bow is true and ready");
}
}
else
BotGroupSay(this, "I don't know how to use a bow");
}
void Bot::Sit() {
if(IsMoving()) {
moved = false;
StopNavigation();
}
SetAppearance(eaSitting);
}
void Bot::Stand() {
SetAppearance(eaStanding);
}
bool Bot::IsSitting() {
bool result = false;
if(GetAppearance() == eaSitting && !IsMoving())
result = true;
return result;
}
bool Bot::IsStanding() {
bool result = false;
if(GetAppearance() == eaStanding)
result = true;
return result;
}
NPCType *Bot::FillNPCTypeStruct(uint32 botSpellsID, std::string botName, std::string botLastName, uint8 botLevel, uint16 botRace, uint8 botClass, uint8 gender, float size, uint32 face, uint32 hairStyle, uint32 hairColor, uint32 eyeColor, uint32 eyeColor2, uint32 beardColor, uint32 beard, uint32 drakkinHeritage, uint32 drakkinTattoo, uint32 drakkinDetails, int32 hp, int32 mana, int32 mr, int32 cr, int32 dr, int32 fr, int32 pr, int32 corrup, int32 ac, uint32 str, uint32 sta, uint32 dex, uint32 agi, uint32 _int, uint32 wis, uint32 cha, uint32 attack) {
auto bot_npc_type = new NPCType{ 0 };
int copy_length = 0;
copy_length = botName.copy(bot_npc_type->name, 63);
bot_npc_type->name[copy_length] = '\0';
copy_length = 0;
copy_length = botLastName.copy(bot_npc_type->lastname, 69);
bot_npc_type->lastname[copy_length] = '\0';
copy_length = 0;
bot_npc_type->current_hp = hp;
bot_npc_type->max_hp = hp;
bot_npc_type->size = size;
bot_npc_type->runspeed = 0.7f;
bot_npc_type->gender = gender;
bot_npc_type->race = botRace;
bot_npc_type->class_ = botClass;
bot_npc_type->bodytype = 1;
bot_npc_type->deity = EQ::deity::DeityAgnostic;
bot_npc_type->level = botLevel;
//bot_npc_type->npc_id = 0;
//bot_npc_type->texture = 0;
//bot_npc_type->helmtexture = 0;
//bot_npc_type->herosforgemodel = 0;
//bot_npc_type->loottable_id = 0;
bot_npc_type->npc_spells_id = botSpellsID;
//bot_npc_type->npc_spells_effects_id = 0;
//bot_npc_type->npc_faction_id = 0;
//bot_npc_type->merchanttype = 0;
//bot_npc_type->alt_currency_type = 0;
//bot_npc_type->adventure_template = 0;
//bot_npc_type->trap_template = 0;
//bot_npc_type->light = 0;
bot_npc_type->AC = ac;
bot_npc_type->Mana = mana;
bot_npc_type->ATK = attack;
bot_npc_type->STR = str;
bot_npc_type->STA = sta;
bot_npc_type->DEX = dex;
bot_npc_type->AGI = agi;
bot_npc_type->INT = _int;
bot_npc_type->WIS = wis;
bot_npc_type->CHA = cha;
bot_npc_type->MR = mr;
bot_npc_type->FR = fr;
bot_npc_type->CR = cr;
bot_npc_type->PR = pr;
bot_npc_type->DR = dr;
bot_npc_type->Corrup = corrup;
//bot_npc_type->PhR = 0;
bot_npc_type->haircolor = hairColor;
bot_npc_type->beardcolor = beardColor;
bot_npc_type->eyecolor1 = eyeColor;
bot_npc_type->eyecolor2 = eyeColor2;
bot_npc_type->hairstyle = hairStyle;
bot_npc_type->luclinface = face;
bot_npc_type->beard = beard;
bot_npc_type->drakkin_heritage = drakkinHeritage;
bot_npc_type->drakkin_tattoo = drakkinTattoo;
bot_npc_type->drakkin_details = drakkinDetails;
//bot_npc_type->armor_tint = { 0 };
//bot_npc_type->min_dmg = 0;
//bot_npc_type->max_dmg = 0;
//bot_npc_type->charm_ac = 0;
//bot_npc_type->charm_min_dmg = 0;
//bot_npc_type->charm_max_dmg = 0;
//bot_npc_type->charm_attack_delay = 0;
//bot_npc_type->charm_accuracy_rating = 0;
//bot_npc_type->charm_avoidance_rating = 0;
//bot_npc_type->charm_atk = 0;
//bot_npc_type->attack_count = 0;
//*bot_npc_type->special_abilities = { 0 };
//bot_npc_type->d_melee_texture1 = 0;
//bot_npc_type->d_melee_texture2 = 0;
//*bot_npc_type->ammo_idfile = { 0 };
//bot_npc_type->prim_melee_type = 0;
//bot_npc_type->sec_melee_type = 0;
//bot_npc_type->ranged_type = 0;
bot_npc_type->hp_regen = 1;
bot_npc_type->mana_regen = 1;
//bot_npc_type->aggroradius = 0;
//bot_npc_type->assistradius = 0;
//bot_npc_type->see_invis = 0;
//bot_npc_type->see_invis_undead = false;
//bot_npc_type->see_hide = false;
//bot_npc_type->see_improved_hide = false;
//bot_npc_type->qglobal = false;
//bot_npc_type->npc_aggro = false;
//bot_npc_type->spawn_limit = 0;
//bot_npc_type->mount_color = 0;
//bot_npc_type->attack_speed = 0.0f;
//bot_npc_type->attack_delay = 0;
//bot_npc_type->accuracy_rating = 0;
//bot_npc_type->avoidance_rating = 0;
//bot_npc_type->findable = false;
bot_npc_type->trackable = true;
//bot_npc_type->slow_mitigation = 0;
bot_npc_type->maxlevel = botLevel;
//bot_npc_type->scalerate = 0;
//bot_npc_type->private_corpse = false;
//bot_npc_type->unique_spawn_by_name = false;
//bot_npc_type->underwater = false;
//bot_npc_type->emoteid = 0;
//bot_npc_type->spellscale = 0.0f;
//bot_npc_type->healscale = 0.0f;
//bot_npc_type->no_target_hotkey = false;
//bot_npc_type->raid_target = false;
//bot_npc_type->armtexture = 0;
//bot_npc_type->bracertexture = 0;
//bot_npc_type->handtexture = 0;
//bot_npc_type->legtexture = 0;
//bot_npc_type->feettexture = 0;
//bot_npc_type->ignore_despawn = false;
bot_npc_type->show_name = true;
//bot_npc_type->untargetable = false;
bot_npc_type->skip_global_loot = true;
//bot_npc_type->rare_spawn = false;
bot_npc_type->stuck_behavior = Ground;
bot_npc_type->skip_auto_scale = true;
return bot_npc_type;
}
NPCType *Bot::CreateDefaultNPCTypeStructForBot(std::string botName, std::string botLastName, uint8 botLevel, uint16 botRace, uint8 botClass, uint8 gender) {
auto bot_npc_type = new NPCType{ 0 };
int copy_length = 0;
copy_length = botName.copy(bot_npc_type->name, 63);
bot_npc_type->name[copy_length] = '\0';
copy_length = 0;
copy_length = botLastName.copy(bot_npc_type->lastname, 69);
bot_npc_type->lastname[copy_length] = '\0';
copy_length = 0;
//bot_npc_type->current_hp = 0;
//bot_npc_type->max_hp = 0;
bot_npc_type->size = 6.0f;
bot_npc_type->runspeed = 0.7f;
bot_npc_type->gender = gender;
bot_npc_type->race = botRace;
bot_npc_type->class_ = botClass;
bot_npc_type->bodytype = 1;
bot_npc_type->deity = EQ::deity::DeityAgnostic;
bot_npc_type->level = botLevel;
//bot_npc_type->npc_id = 0;
//bot_npc_type->texture = 0;
//bot_npc_type->helmtexture = 0;
//bot_npc_type->herosforgemodel = 0;
//bot_npc_type->loottable_id = 0;
//bot_npc_type->npc_spells_id = 0;
//bot_npc_type->npc_spells_effects_id = 0;
//bot_npc_type->npc_faction_id = 0;
//bot_npc_type->merchanttype = 0;
//bot_npc_type->alt_currency_type = 0;
//bot_npc_type->adventure_template = 0;
//bot_npc_type->trap_template = 0;
//bot_npc_type->light = 0;
bot_npc_type->AC = 12;
//bot_npc_type->Mana = 0;
bot_npc_type->ATK = 75;
bot_npc_type->STR = 75;
bot_npc_type->STA = 75;
bot_npc_type->DEX = 75;
bot_npc_type->AGI = 75;
bot_npc_type->INT = 75;
bot_npc_type->WIS = 75;
bot_npc_type->CHA = 75;
bot_npc_type->MR = 25;
bot_npc_type->FR = 25;
bot_npc_type->CR = 25;
bot_npc_type->PR = 15;
bot_npc_type->DR = 15;
bot_npc_type->Corrup = 15;
//bot_npc_type->PhR = 0;
//bot_npc_type->haircolor = 0;
//bot_npc_type->beardcolor = 0;
//bot_npc_type->eyecolor1 = 0;
//bot_npc_type->eyecolor2 = 0;
//bot_npc_type->hairstyle = 0;
//bot_npc_type->luclinface = 0;
//bot_npc_type->beard = 0;
//bot_npc_type->drakkin_heritage = 0;
//bot_npc_type->drakkin_tattoo = 0;
//bot_npc_type->drakkin_details = 0;
//bot_npc_type->armor_tint = { 0 };
//bot_npc_type->min_dmg = 0;
//bot_npc_type->max_dmg = 0;
//bot_npc_type->charm_ac = 0;
//bot_npc_type->charm_min_dmg = 0;
//bot_npc_type->charm_max_dmg = 0;
//bot_npc_type->charm_attack_delay = 0;
//bot_npc_type->charm_accuracy_rating = 0;
//bot_npc_type->charm_avoidance_rating = 0;
//bot_npc_type->charm_atk = 0;
//bot_npc_type->attack_count = 0;
//*bot_npc_type->special_abilities = { 0 };
//bot_npc_type->d_melee_texture1 = 0;
//bot_npc_type->d_melee_texture2 = 0;
//*bot_npc_type->ammo_idfile = { 0 };
//bot_npc_type->prim_melee_type = 0;
//bot_npc_type->sec_melee_type = 0;
//bot_npc_type->ranged_type = 0;
bot_npc_type->hp_regen = 1;
bot_npc_type->mana_regen = 1;
//bot_npc_type->aggroradius = 0;
//bot_npc_type->assistradius = 0;
//bot_npc_type->see_invis = 0;
//bot_npc_type->see_invis_undead = false;
//bot_npc_type->see_hide = false;
//bot_npc_type->see_improved_hide = false;
//bot_npc_type->qglobal = false;
//bot_npc_type->npc_aggro = false;
//bot_npc_type->spawn_limit = 0;
//bot_npc_type->mount_color = 0;
//bot_npc_type->attack_speed = 0.0f;
//bot_npc_type->attack_delay = 0;
//bot_npc_type->accuracy_rating = 0;
//bot_npc_type->avoidance_rating = 0;
//bot_npc_type->findable = false;
bot_npc_type->trackable = true;
//bot_npc_type->slow_mitigation = 0;
bot_npc_type->maxlevel = botLevel;
//bot_npc_type->scalerate = 0;
//bot_npc_type->private_corpse = false;
//bot_npc_type->unique_spawn_by_name = false;
//bot_npc_type->underwater = false;
//bot_npc_type->emoteid = 0;
//bot_npc_type->spellscale = 0.0f;
//bot_npc_type->healscale = 0.0f;
//bot_npc_type->no_target_hotkey = false;
//bot_npc_type->raid_target = false;
//bot_npc_type->armtexture = 0;
//bot_npc_type->bracertexture = 0;
//bot_npc_type->handtexture = 0;
//bot_npc_type->legtexture = 0;
//bot_npc_type->feettexture = 0;
//bot_npc_type->ignore_despawn = false;
bot_npc_type->show_name = true;
//bot_npc_type->untargetable = false;
bot_npc_type->skip_global_loot = true;
//bot_npc_type->rare_spawn = false;
bot_npc_type->stuck_behavior = Ground;
return bot_npc_type;
}
void Bot::GenerateBaseStats()
{
int BotSpellID = 0;
// base stats
uint32 Strength = _baseSTR;
uint32 Stamina = _baseSTA;
uint32 Dexterity = _baseDEX;
uint32 Agility = _baseAGI;
uint32 Wisdom = _baseWIS;
uint32 Intelligence = _baseINT;
uint32 Charisma = _baseCHA;
uint32 Attack = _baseATK;
int32 MagicResist = _baseMR;
int32 FireResist = _baseFR;
int32 DiseaseResist = _baseDR;
int32 PoisonResist = _basePR;
int32 ColdResist = _baseCR;
int32 CorruptionResist = _baseCorrup;
// pulling fixed values from an auto-increment field is dangerous...
switch(this->GetClass()) {
case WARRIOR:
BotSpellID = 3001;
Strength += 10;
Stamina += 20;
Agility += 10;
Dexterity += 10;
Attack += 12;
break;
case CLERIC:
BotSpellID = 3002;
Strength += 5;
Stamina += 5;
Agility += 10;
Wisdom += 30;
Attack += 8;
break;
case PALADIN:
BotSpellID = 3003;
Strength += 15;
Stamina += 5;
Wisdom += 15;
Charisma += 10;
Dexterity += 5;
Attack += 17;
break;
case RANGER:
BotSpellID = 3004;
Strength += 15;
Stamina += 10;
Agility += 10;
Wisdom += 15;
Attack += 17;
break;
case SHADOWKNIGHT:
BotSpellID = 3005;
Strength += 10;
Stamina += 15;
Intelligence += 20;
Charisma += 5;
Attack += 17;
break;
case DRUID:
BotSpellID = 3006;
Stamina += 15;
Wisdom += 35;
Attack += 5;
break;
case MONK:
BotSpellID = 3007;
Strength += 5;
Stamina += 15;
Agility += 15;
Dexterity += 15;
Attack += 17;
break;
case BARD:
BotSpellID = 3008;
Strength += 15;
Dexterity += 10;
Charisma += 15;
Intelligence += 10;
Attack += 17;
break;
case ROGUE:
BotSpellID = 3009;
Strength += 10;
Stamina += 20;
Agility += 10;
Dexterity += 10;
Attack += 12;
break;
case SHAMAN:
BotSpellID = 3010;
Stamina += 10;
Wisdom += 30;
Charisma += 10;
Attack += 28;
break;
case NECROMANCER:
BotSpellID = 3011;
Dexterity += 10;
Agility += 10;
Intelligence += 30;
Attack += 5;
break;
case WIZARD:
BotSpellID = 3012;
Stamina += 20;
Intelligence += 30;
Attack += 5;
break;
case MAGICIAN:
BotSpellID = 3013;
Stamina += 20;
Intelligence += 30;
Attack += 5;
break;
case ENCHANTER:
BotSpellID = 3014;
Intelligence += 25;
Charisma += 25;
Attack += 5;
break;
case BEASTLORD:
BotSpellID = 3015;
Stamina += 10;
Agility += 10;
Dexterity += 5;
Wisdom += 20;
Charisma += 5;
Attack += 31;
break;
case BERSERKER:
BotSpellID = 3016;
Strength += 10;
Stamina += 15;
Dexterity += 15;
Agility += 10;
Attack += 25;
break;
default:
break;
}
float BotSize = GetSize();
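// Apply racial adjustments to stats, resists, and model size on top of the class baseline.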
switch(this->GetRace()) {
case HUMAN: // Humans have no race bonus
break;
case BARBARIAN:
Strength += 28;
Stamina += 20;
Agility += 7;
Dexterity -= 5;
Wisdom -= 5;
Intelligence -= 10;
Charisma -= 20;
BotSize = 7.0;
ColdResist += 10;
break;
case ERUDITE:
Strength -= 15;
Stamina -= 5;
Agility -= 5;
Dexterity -= 5;
Wisdom += 8;
Intelligence += 32;
Charisma -= 5;
MagicResist += 5;
DiseaseResist -= 5;
break;
case WOOD_ELF:
Strength -= 10;
Stamina -= 10;
Agility += 20;
Dexterity += 5;
Wisdom += 5;
BotSize = 5.0;
break;
case HIGH_ELF:
Strength -= 20;
Stamina -= 10;
Agility += 10;
Dexterity -= 5;
Wisdom += 20;
Intelligence += 12;
Charisma += 5;
break;
case DARK_ELF:
Strength -= 15;
Stamina -= 10;
Agility += 15;
Wisdom += 8;
Intelligence += 24;
Charisma -= 15;
BotSize = 5.0;
break;
case HALF_ELF:
Strength -= 5;
Stamina -= 5;
Agility += 15;
Dexterity += 10;
Wisdom -= 15;
BotSize = 5.5;
break;
case DWARF:
Strength += 15;
Stamina += 15;
Agility -= 5;
Dexterity += 15;
Wisdom += 8;
Intelligence -= 15;
Charisma -= 30;
BotSize = 4.0;
MagicResist -= 5;
PoisonResist += 5;
break;
case TROLL:
Strength += 33;
Stamina += 34;
Agility += 8;
Wisdom -= 15;
Intelligence -= 23;
Charisma -= 35;
BotSize = 8.0;
FireResist -= 20;
break;
case OGRE:
Strength += 55;
Stamina += 77;
Agility -= 5;
Dexterity -= 5;
Wisdom -= 8;
Intelligence -= 15;
Charisma -= 38;
BotSize = 9.0;
break;
case HALFLING:
Strength -= 5;
Agility += 20;
Dexterity += 15;
Wisdom += 5;
Intelligence -= 8;
Charisma -= 25;
BotSize = 3.5;
PoisonResist += 5;
DiseaseResist += 5;
break;
case GNOME:
Strength -= 15;
Stamina -= 5;
Agility += 10;
Dexterity += 10;
Wisdom -= 8;
Intelligence += 23;
Charisma -= 15;
BotSize = 3.0;
break;
case IKSAR:
Strength -= 5;
Stamina -= 5;
Agility += 15;
Dexterity += 10;
Wisdom += 5;
Charisma -= 20;
MagicResist -= 5;
FireResist -= 5;
break;
case VAHSHIR:
Strength += 15;
Agility += 15;
Dexterity -= 5;
Wisdom -= 5;
Intelligence -= 10;
Charisma -= 10;
BotSize = 7.0;
MagicResist -= 5;
FireResist -= 5;
break;
case FROGLOK:
Strength -= 5;
Stamina += 5;
Agility += 25;
Dexterity += 25;
Charisma -= 25;
BotSize = 5.0;
MagicResist -= 5;
FireResist -= 5;
break;
case DRAKKIN:
Strength -= 5;
Stamina += 5;
Agility += 10;
Intelligence += 10;
Wisdom += 5;
BotSize = 5.0;
PoisonResist += 2;
DiseaseResist += 2;
MagicResist += 2;
FireResist += 2;
ColdResist += 2;
break;
default:
break;
}
this->STR = Strength;
this->STA = Stamina;
this->DEX = Dexterity;
this->AGI = Agility;
this->WIS = Wisdom;
this->INT = Intelligence;
this->CHA = Charisma;
this->ATK = Attack;
this->MR = MagicResist;
this->FR = FireResist;
this->DR = DiseaseResist;
this->PR = PoisonResist;
this->CR = ColdResist;
this->PhR = 0;
this->Corrup = CorruptionResist;
SetBotSpellID(BotSpellID);
this->size = BotSize;
this->pAggroRange = 0;
this->pAssistRange = 0;
this->raid_target = false;
this->deity = 396;
}
void Bot::GenerateAppearance() {
// Randomize facial appearance
int iFace = 0;
if(this->GetRace() == 2) // Barbarian w/Tatoo
iFace = zone->random.Int(0, 79);
else
iFace = zone->random.Int(0, 7);
int iHair = 0;
int iBeard = 0;
int iBeardColor = 1;
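// Race 522 is Drakkin, which uses its own hair, beard, eye, heritage, tattoo, and detail ranges.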
if(this->GetRace() == 522) {
iHair = zone->random.Int(0, 8);
iBeard = zone->random.Int(0, 11);
iBeardColor = zone->random.Int(0, 3);
} else if(this->GetGender()) {
iHair = zone->random.Int(0, 2);
if(this->GetRace() == 8) { // Dwarven Females can have a beard
if(zone->random.Int(1, 100) < 50)
iFace += 10;
}
} else {
iHair = zone->random.Int(0, 3);
iBeard = zone->random.Int(0, 5);
iBeardColor = zone->random.Int(0, 19);
}
int iHairColor = 0;
if(this->GetRace() == 522)
iHairColor = zone->random.Int(0, 3);
else
iHairColor = zone->random.Int(0, 19);
uint8 iEyeColor1 = (uint8)zone->random.Int(0, 9);
uint8 iEyeColor2 = 0;
if(this->GetRace() == 522)
iEyeColor1 = iEyeColor2 = (uint8)zone->random.Int(0, 11);
else if(zone->random.Int(1, 100) > 96)
iEyeColor2 = zone->random.Int(0, 9);
else
iEyeColor2 = iEyeColor1;
int iHeritage = 0;
int iTattoo = 0;
int iDetails = 0;
if(this->GetRace() == 522) {
iHeritage = zone->random.Int(0, 6);
iTattoo = zone->random.Int(0, 7);
iDetails = zone->random.Int(0, 7);
}
this->luclinface = iFace;
this->hairstyle = iHair;
this->beard = iBeard;
this->beardcolor = iBeardColor;
this->haircolor = iHairColor;
this->eyecolor1 = iEyeColor1;
this->eyecolor2 = iEyeColor2;
this->drakkin_heritage = iHeritage;
this->drakkin_tattoo = iTattoo;
this->drakkin_details = iDetails;
}
int32 Bot::acmod() {
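// AC modifier derived from agility and level via hard-coded breakpoint tables.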
int agility = GetAGI();
int level = GetLevel();
if(agility < 1 || level < 1)
return 0;
if(agility <= 74) {
if(agility == 1)
return -24;
else if(agility <= 3)
return -23;
else if(agility == 4)
return -22;
else if(agility <= 6)
return -21;
else if(agility <= 8)
return -20;
else if(agility == 9)
return -19;
else if(agility <= 11)
return -18;
else if(agility == 12)
return -17;
else if(agility <= 14)
return -16;
else if(agility <= 16)
return -15;
else if(agility == 17)
return -14;
else if(agility <= 19)
return -13;
else if(agility == 20)
return -12;
else if(agility <= 22)
return -11;
else if(agility <= 24)
return -10;
else if(agility == 25)
return -9;
else if(agility <= 27)
return -8;
else if(agility == 28)
return -7;
else if(agility <= 30)
return -6;
else if(agility <= 32)
return -5;
else if(agility == 33)
return -4;
else if(agility <= 35)
return -3;
else if(agility == 36)
return -2;
else if(agility <= 38)
return -1;
else if(agility <= 65)
return 0;
else if(agility <= 70)
return 1;
else if(agility <= 74)
return 5;
} else if(agility <= 137) {
if(agility == 75) {
if(level <= 6)
return 9;
else if(level <= 19)
return 23;
else if(level <= 39)
return 33;
else
return 39;
} else if(agility >= 76 && agility <= 79) {
if(level <= 6)
return 10;
else if(level <= 19)
return 23;
else if(level <= 39)
return 33;
else
return 40;
} else if(agility == 80) {
if(level <= 6)
return 11;
else if(level <= 19)
return 24;
else if(level <= 39)
return 34;
else
return 41;
} else if(agility >= 81 && agility <= 85) {
if(level <= 6)
return 12;
else if(level <= 19)
return 25;
else if(level <= 39)
return 35;
else
return 42;
} else if(agility >= 86 && agility <= 90) {
if(level <= 6)
return 12;
else if(level <= 19)
return 26;
else if(level <= 39)
return 36;
else
return 42;
} else if(agility >= 91 && agility <= 95) {
if(level <= 6)
return 13;
else if(level <= 19)
return 26;
else if(level <= 39)
return 36;
else
return 43;
} else if(agility >= 96 && agility <= 99) {
if(level <= 6)
return 14;
else if(level <= 19)
return 27;
else if(level <= 39)
return 37;
else
return 44;
} else if(agility == 100 && level >= 7) {
if(level <= 19)
return 28;
else if (level <= 39)
return 38;
else
return 45;
}
else if(level <= 6)
return 15;
//level is >6
else if(agility >= 101 && agility <= 105) {
if(level <= 19)
return 29;
else if(level <= 39)
return 39;// not verified
else
return 45;
} else if(agility >= 106 && agility <= 110) {
if(level <= 19)
return 29;
else if(level <= 39)
return 39;// not verified
else
return 46;
} else if(agility >= 111 && agility <= 115) {
if(level <= 19)
return 30;
else if(level <= 39)
return 40;// not verified
else
return 47;
} else if(agility >= 116 && agility <= 119) {
if(level <= 19)
return 31;
else if(level <= 39)
return 41;
else
return 47;
}
else if(level <= 19)
return 32;
//level is > 19
else if(agility == 120) {
if(level <= 39)
return 42;
else
return 48;
} else if(agility <= 125) {
if(level <= 39)
return 42;
else
return 49;
} else if(agility <= 135) {
if(level <= 39)
return 42;
else
return 50;
} else {
if(level <= 39)
return 42;
else
return 51;
}
} else if(agility <= 300) {
if(level <= 6) {
if(agility <= 139)
return 21;
else if(agility == 140)
return 22;
else if(agility <= 145)
return 23;
else if(agility <= 150)
return 23;
else if(agility <= 155)
return 24;
else if(agility <= 159)
return 25;
else if(agility == 160)
return 26;
else if(agility <= 165)
return 26;
else if(agility <= 170)
return 27;
else if(agility <= 175)
return 28;
else if(agility <= 179)
return 28;
else if(agility == 180)
return 29;
else if(agility <= 185)
return 30;
else if(agility <= 190)
return 31;
else if(agility <= 195)
return 31;
else if(agility <= 199)
return 32;
else if(agility <= 219)
return 33;
else if(agility <= 239)
return 34;
else
return 35;
} else if(level <= 19) {
if(agility <= 139)
return 34;
else if(agility == 140)
return 35;
else if(agility <= 145)
return 36;
else if(agility <= 150)
return 37;
else if(agility <= 155)
return 37;
else if(agility <= 159)
return 38;
else if(agility == 160)
return 39;
else if(agility <= 165)
return 40;
else if(agility <= 170)
return 40;
else if(agility <= 175)
return 41;
else if(agility <= 179)
return 42;
else if(agility == 180)
return 43;
else if(agility <= 185)
return 43;
else if(agility <= 190)
return 44;
else if(agility <= 195)
return 45;
else if(agility <= 199)
return 45;
else if(agility <= 219)
return 46;
else if(agility <= 239)
return 47;
else
return 48;
} else if(level <= 39) {
if(agility <= 139)
return 44;
else if(agility == 140)
return 45;
else if(agility <= 145)
return 46;
else if(agility <= 150)
return 47;
else if(agility <= 155)
return 47;
else if(agility <= 159)
return 48;
else if(agility == 160)
return 49;
else if(agility <= 165)
return 50;
else if(agility <= 170)
return 50;
else if(agility <= 175)
return 51;
else if(agility <= 179)
return 52;
else if(agility == 180)
return 53;
else if(agility <= 185)
return 53;
else if(agility <= 190)
return 54;
else if(agility <= 195)
return 55;
else if(agility <= 199)
return 55;
else if(agility <= 219)
return 56;
else if(agility <= 239)
return 57;
else
return 58;
} else { //lvl >= 40
if(agility <= 139)
return 51;
else if(agility == 140)
return 52;
else if(agility <= 145)
return 53;
else if(agility <= 150)
return 53;
else if(agility <= 155)
return 54;
else if(agility <= 159)
return 55;
else if(agility == 160)
return 56;
else if(agility <= 165)
return 56;
else if(agility <= 170)
return 57;
else if(agility <= 175)
return 58;
else if(agility <= 179)
return 58;
else if(agility == 180)
return 59;
else if(agility <= 185)
return 60;
else if(agility <= 190)
return 61;
else if(agility <= 195)
return 61;
else if(agility <= 199)
return 62;
else if(agility <= 219)
return 63;
else if(agility <= 239)
return 64;
else
return 65;
}
}
else
return (65 + ((agility - 300) / 21));
#if EQDEBUG >= 11
LogError("Error in Bot::acmod(): Agility: [{}], Level: [{}]",agility,level);
#endif
return 0;
}
uint16 Bot::GetPrimarySkillValue() {
EQ::skills::SkillType skill = EQ::skills::HIGHEST_SKILL; //because nullptr == 0, which is 1H Slashing, & we want it to return 0 from GetSkill
bool equipped = m_inv.GetItem(EQ::invslot::slotPrimary);
if(!equipped)
skill = EQ::skills::SkillHandtoHand;
else {
uint8 type = m_inv.GetItem(EQ::invslot::slotPrimary)->GetItem()->ItemType; //is this the best way to do this?
switch(type) {
case EQ::item::ItemType1HSlash:
skill = EQ::skills::Skill1HSlashing;
break;
case EQ::item::ItemType2HSlash:
skill = EQ::skills::Skill2HSlashing;
break;
case EQ::item::ItemType1HPiercing:
skill = EQ::skills::Skill1HPiercing;
break;
case EQ::item::ItemType1HBlunt:
skill = EQ::skills::Skill1HBlunt;
break;
case EQ::item::ItemType2HBlunt:
skill = EQ::skills::Skill2HBlunt;
break;
case EQ::item::ItemType2HPiercing:
skill = EQ::skills::Skill2HPiercing;
break;
case EQ::item::ItemTypeMartial:
skill = EQ::skills::SkillHandtoHand;
break;
default:
skill = EQ::skills::SkillHandtoHand;
break;
}
}
return GetSkill(skill);
}
uint16 Bot::MaxSkill(EQ::skills::SkillType skillid, uint16 class_, uint16 level) const {
return(content_db.GetSkillCap(class_, skillid, level));
}
uint32 Bot::GetTotalATK() {
uint32 AttackRating = 0;
uint32 WornCap = itembonuses.ATK;
if(IsBot()) {
AttackRating = ((WornCap * 1.342) + (GetSkill(EQ::skills::SkillOffense) * 1.345) + ((GetSTR() - 66) * 0.9) + (GetPrimarySkillValue() * 2.69));
AttackRating += aabonuses.ATK + GroupLeadershipAAOffenseEnhancement();
if (AttackRating < 10)
AttackRating = 10;
}
else
AttackRating = GetATK();
AttackRating += spellbonuses.ATK;
return AttackRating;
}
uint32 Bot::GetATKRating() {
uint32 AttackRating = 0;
if(IsBot()) {
AttackRating = (GetSkill(EQ::skills::SkillOffense) * 1.345) + ((GetSTR() - 66) * 0.9) + (GetPrimarySkillValue() * 2.69);
if (AttackRating < 10)
AttackRating = 10;
}
return AttackRating;
}
int32 Bot::GenerateBaseHitPoints() {
// Calc Base Hit Points
int new_base_hp = 0;
uint32 lm = GetClassLevelFactor();
int32 Post255;
int32 NormalSTA = GetSTA();
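// Bots owned by SoD+ clients (when the SoD HP rule is enabled) use the class HP
// factor formula; otherwise fall back to the legacy class level factor formula.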
if (GetOwner() && GetOwner()->CastToClient() && GetOwner()->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoD && RuleB(Character, SoDClientUseSoDHPManaEnd)) {
float SoDPost255;
if(((NormalSTA - 255) / 2) > 0)
SoDPost255 = ((NormalSTA - 255) / 2);
else
SoDPost255 = 0;
int hp_factor = GetClassHPFactor();
if(level < 41)
new_base_hp = (5 + (GetLevel() * hp_factor / 12) + ((NormalSTA - SoDPost255) * GetLevel() * hp_factor / 3600));
else if(level < 81)
new_base_hp = (5 + (40 * hp_factor / 12) + ((GetLevel() - 40) * hp_factor / 6) + ((NormalSTA - SoDPost255) * hp_factor / 90) + ((NormalSTA - SoDPost255) * (GetLevel() - 40) * hp_factor / 1800));
else
new_base_hp = (5 + (80 * hp_factor / 8) + ((GetLevel() - 80) * hp_factor / 10) + ((NormalSTA - SoDPost255) * hp_factor / 90) + ((NormalSTA - SoDPost255) * hp_factor / 45));
} else {
if(((NormalSTA - 255) / 2) > 0)
Post255 = ((NormalSTA - 255) / 2);
else
Post255 = 0;
new_base_hp = (5) + (GetLevel() * lm / 10) + (((NormalSTA - Post255) * GetLevel() * lm / 3000)) + ((Post255 * 1) * lm / 6000);
}
this->base_hp = new_base_hp;
return new_base_hp;
}
void Bot::LoadAAs() {
aa_ranks.clear();
int id = 0;
int points = 0;
auto iter = zone->aa_abilities.begin();
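// Auto-grant every rank of each non-expendable AA line that the bot's level and prerequisites allow.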
while(iter != zone->aa_abilities.end()) {
AA::Ability *ability = (*iter).second.get();
//skip expendables
if(!ability->first || ability->charges > 0) {
++iter;
continue;
}
id = ability->first->id;
points = 0;
AA::Rank *current = ability->first;
if (current->level_req > GetLevel()) {
++iter;
continue;
}
while(current) {
if(!CanUseAlternateAdvancementRank(current)) {
current = nullptr;
} else {
current = current->next;
points++;
}
}
if(points > 0) {
SetAA(id, points);
}
++iter;
}
}
bool Bot::IsValidRaceClassCombo()
{
return Bot::IsValidRaceClassCombo(GetRace(), GetClass());
}
bool Bot::IsValidRaceClassCombo(uint16 bot_race, uint8 bot_class)
{
bool is_valid = false;
auto classes = database.botdb.GetRaceClassBitmask(bot_race);
auto bot_class_bitmask = GetPlayerClassBit(bot_class);
if (classes & bot_class_bitmask) {
is_valid = true;
}
return is_valid;
}
bool Bot::IsValidName()
{
std::string name = this->GetCleanName();
return Bot::IsValidName(name);
}
bool Bot::IsValidName(std::string& name)
{
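// Names need at least four characters, an uppercase first letter, and only
// lowercase letters or underscores afterward (unless camel-case names are allowed by rule).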
if (name.length() < 4)
return false;
if (!isupper(name[0]))
return false;
for (int i = 1; i < name.length(); ++i) {
if ((!RuleB(Bots, AllowCamelCaseNames) && !islower(name[i])) && name[i] != '_') {
return false;
}
}
return true;
}
bool Bot::Save()
{
auto bot_owner = GetBotOwner();
if (!bot_owner)
return false;
std::string error_message;
if(!GetBotID()) { // New bot record
uint32 bot_id = 0;
if (!database.botdb.SaveNewBot(this, bot_id) || !bot_id) {
bot_owner->Message(Chat::Red, "%s '%s'", BotDatabase::fail::SaveNewBot(), GetCleanName());
return false;
}
SetBotID(bot_id);
}
else { // Update existing bot record
if (!database.botdb.SaveBot(this)) {
bot_owner->Message(Chat::Red, "%s '%s'", BotDatabase::fail::SaveBot(), GetCleanName());
return false;
}
}
// All of these continue to process if any fail
if (!database.botdb.SaveBuffs(this))
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::SaveBuffs(), GetCleanName());
if (!database.botdb.SaveTimers(this))
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::SaveTimers(), GetCleanName());
if (!database.botdb.SaveStance(this))
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::SaveStance(), GetCleanName());
if (!SavePet())
bot_owner->Message(Chat::Red, "Failed to save pet for '%s'", GetCleanName());
return true;
}
bool Bot::DeleteBot()
{
auto bot_owner = GetBotOwner();
if (!bot_owner)
return false;
if (!database.botdb.DeleteHealRotation(GetBotID())) {
bot_owner->Message(Chat::Red, "%s", BotDatabase::fail::DeleteHealRotation());
return false;
}
std::string query = StringFormat("DELETE FROM `bot_heal_rotation_members` WHERE `bot_id` = '%u'", GetBotID());
auto results = database.QueryDatabase(query);
if (!results.Success()) {
bot_owner->Message(Chat::Red, "Failed to delete heal rotation member '%s'", GetCleanName());
return false;
}
query = StringFormat("DELETE FROM `bot_heal_rotation_targets` WHERE `target_name` LIKE '%s'", GetCleanName());
results = database.QueryDatabase(query);
if (!results.Success()) {
bot_owner->Message(Chat::Red, "Failed to delete heal rotation target '%s'", GetCleanName());
return false;
}
if (!DeletePet()) {
bot_owner->Message(Chat::Red, "Failed to delete pet for '%s'", GetCleanName());
return false;
}
if (GetGroup())
RemoveBotFromGroup(this, GetGroup());
std::string error_message;
if (!database.botdb.RemoveMemberFromBotGroup(GetBotID())) {
bot_owner->Message(Chat::Red, "%s - '%s'", BotDatabase::fail::RemoveMemberFromBotGroup(), GetCleanName());
return false;
}
if (!database.botdb.DeleteItems(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::DeleteItems(), GetCleanName());
return false;
}
if (!database.botdb.DeleteTimers(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::DeleteTimers(), GetCleanName());
return false;
}
if (!database.botdb.DeleteBuffs(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::DeleteBuffs(), GetCleanName());
return false;
}
if (!database.botdb.DeleteStance(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::DeleteStance(), GetCleanName());
return false;
}
if (!database.botdb.DeleteBot(GetBotID())) {
bot_owner->Message(Chat::Red, "%s '%s'", BotDatabase::fail::DeleteBot(), GetCleanName());
return false;
}
return true;
}
// Returns the current total play time for the bot
uint32 Bot::GetTotalPlayTime() {
uint32 Result = 0;
double TempTotalPlayTime = 0;
time_t currentTime = time(&currentTime);
TempTotalPlayTime = difftime(currentTime, _startTotalPlayTime);
TempTotalPlayTime += _lastTotalPlayTime;
Result = (uint32)TempTotalPlayTime;
return Result;
}
bool Bot::LoadPet()
{
if (GetPet())
return true;
auto bot_owner = GetBotOwner();
if (!bot_owner)
return false;
if (GetClass() == WIZARD) {
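// Wizard familiars live on as buffs rather than saved pet records; resummon
// from the active familiar buff if one exists.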
auto buffs_max = GetMaxBuffSlots();
auto my_buffs = GetBuffs();
if (buffs_max && my_buffs) {
for (int index = 0; index < buffs_max; ++index) {
if (IsEffectInSpell(my_buffs[index].spellid, SE_Familiar)) {
MakePet(my_buffs[index].spellid, spells[my_buffs[index].spellid].teleport_zone);
return true;
}
}
}
}
std::string error_message;
uint32 pet_index = 0;
if (!database.botdb.LoadPetIndex(GetBotID(), pet_index)) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::LoadPetIndex(), GetCleanName());
return false;
}
if (!pet_index)
return true;
uint32 saved_pet_spell_id = 0;
if (!database.botdb.LoadPetSpellID(GetBotID(), saved_pet_spell_id)) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::LoadPetSpellID(), GetCleanName());
}
if (!IsValidSpell(saved_pet_spell_id)) {
bot_owner->Message(Chat::Red, "Invalid spell id for %s's pet", GetCleanName());
DeletePet();
return false;
}
std::string pet_name;
uint32 pet_mana = 0;
uint32 pet_hp = 0;
uint32 pet_spell_id = 0;
if (!database.botdb.LoadPetStats(GetBotID(), pet_name, pet_mana, pet_hp, pet_spell_id)) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::LoadPetStats(), GetCleanName());
return false;
}
MakePet(pet_spell_id, spells[pet_spell_id].teleport_zone, pet_name.c_str());
if (!GetPet() || !GetPet()->IsNPC()) {
DeletePet();
return false;
}
NPC *pet_inst = GetPet()->CastToNPC();
SpellBuff_Struct pet_buffs[PET_BUFF_COUNT];
memset(pet_buffs, 0, (sizeof(SpellBuff_Struct) * PET_BUFF_COUNT));
if (!database.botdb.LoadPetBuffs(GetBotID(), pet_buffs))
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::LoadPetBuffs(), GetCleanName());
uint32 pet_items[EQ::invslot::EQUIPMENT_COUNT];
memset(pet_items, 0, (sizeof(uint32) * EQ::invslot::EQUIPMENT_COUNT));
if (!database.botdb.LoadPetItems(GetBotID(), pet_items))
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::LoadPetItems(), GetCleanName());
pet_inst->SetPetState(pet_buffs, pet_items);
pet_inst->CalcBonuses();
pet_inst->SetHP(pet_hp);
pet_inst->SetMana(pet_mana);
return true;
}
bool Bot::SavePet()
{
if (!GetPet() || GetPet()->IsFamiliar()) // dead?
return true;
NPC *pet_inst = GetPet()->CastToNPC();
if (!pet_inst->GetPetSpellID() || !IsValidSpell(pet_inst->GetPetSpellID()))
return false;
auto bot_owner = GetBotOwner();
if (!bot_owner)
return false;
char* pet_name = new char[64];
SpellBuff_Struct pet_buffs[PET_BUFF_COUNT];
uint32 pet_items[EQ::invslot::EQUIPMENT_COUNT];
memset(pet_name, 0, 64);
memset(pet_buffs, 0, (sizeof(SpellBuff_Struct) * PET_BUFF_COUNT));
memset(pet_items, 0, (sizeof(uint32) * EQ::invslot::EQUIPMENT_COUNT));
pet_inst->GetPetState(pet_buffs, pet_items, pet_name);
std::string pet_name_str = pet_name;
safe_delete_array(pet_name);
std::string error_message;
if (!database.botdb.SavePetStats(GetBotID(), pet_name_str, pet_inst->GetMana(), pet_inst->GetHP(), pet_inst->GetPetSpellID())) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::SavePetStats(), GetCleanName());
return false;
}
if (!database.botdb.SavePetBuffs(GetBotID(), pet_buffs))
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::SavePetBuffs(), GetCleanName());
if (!database.botdb.SavePetItems(GetBotID(), pet_items))
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::SavePetItems(), GetCleanName());
return true;
}
bool Bot::DeletePet()
{
auto bot_owner = GetBotOwner();
if (!bot_owner)
return false;
std::string error_message;
if (!database.botdb.DeletePetItems(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::DeletePetItems(), GetCleanName());
return false;
}
if (!database.botdb.DeletePetBuffs(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::DeletePetBuffs(), GetCleanName());
return false;
}
if (!database.botdb.DeletePetStats(GetBotID())) {
bot_owner->Message(Chat::Red, "%s for %s's pet", BotDatabase::fail::DeletePetStats(), GetCleanName());
return false;
}
if (!GetPet() || !GetPet()->IsNPC())
return true;
NPC* pet_inst = GetPet()->CastToNPC();
pet_inst->SetOwnerID(0);
SetPet(nullptr);
return true;
}
bool Bot::Process()
{
if (IsStunned() && stunned_timer.Check()) {
Mob::UnStun();
}
if (!GetBotOwner()) {
return false;
}
if (GetDepop()) {
_botOwner = 0;
_botOwnerCharacterID = 0;
_previousTarget = 0;
return false;
}
if (mob_close_scan_timer.Check()) {
LogAIScanClose(
"is_moving [{}] bot [{}] timer [{}]",
moving ? "true" : "false",
GetCleanName(),
mob_close_scan_timer.GetDuration()
);
entity_list.ScanCloseClientMobs(close_mobs, this);
}
SpellProcess();
if(tic_timer.Check()) {
// 6 seconds (or whatever the rule is set to) have passed; send this position to everyone to avoid ghosting
if(!IsMoving() && !IsEngaged()) {
if(IsSitting()) {
if (!rest_timer.Enabled()) {
rest_timer.Start(RuleI(Character, RestRegenTimeToActivate) * 1000);
}
}
}
BuffProcess();
CalcRestState();
if (currently_fleeing) {
ProcessFlee();
}
if (GetHP() < GetMaxHP()) {
SetHP(GetHP() + CalcHPRegen() + RestRegenHP);
}
if (GetMana() < GetMaxMana()) {
SetMana(GetMana() + CalcManaRegen() + RestRegenMana);
}
CalcATK();
if (GetEndurance() < GetMaxEndurance()) {
SetEndurance(GetEndurance() + CalcEnduranceRegen() + RestRegenEndurance);
}
}
if (send_hp_update_timer.Check(false)) {
SendHPUpdate();
if (HasPet()) {
GetPet()->SendHPUpdate();
}
// hack fix until percentage changes can be implemented
auto g = GetGroup();
if (g) {
g->SendManaPacketFrom(this);
g->SendEndurancePacketFrom(this);
}
}
if (GetAppearance() == eaDead && GetHP() > 0) {
SetAppearance(eaStanding);
}
if (IsMoving()) {
ping_timer.Disable();
}
else {
if (!ping_timer.Enabled()) {
ping_timer.Start(BOT_KEEP_ALIVE_INTERVAL);
}
if (ping_timer.Check()) {
SentPositionPacket(0.0f, 0.0f, 0.0f, 0.0f, 0);
}
}
if (IsStunned() || IsMezzed()) {
return true;
}
// Bot AI
AI_Process();
return true;
}
void Bot::SpellProcess() {
if(spellend_timer.Check(false)) {
NPC::SpellProcess();
if(GetClass() == BARD) {
if (casting_spell_id != 0)
casting_spell_id = 0;
}
}
}
void Bot::BotMeditate(bool isSitting) {
if(isSitting) {
if(GetManaRatio() < 99.0f || GetHPRatio() < 99.0f) {
if (!IsEngaged() && !IsSitting())
Sit();
} else {
if(IsSitting())
Stand();
}
} else {
if(IsSitting())
Stand();
}
if(IsSitting()) {
if(!rest_timer.Enabled())
rest_timer.Start(RuleI(Character, RestRegenTimeToActivate) * 1000);
}
else
rest_timer.Disable();
}
void Bot::BotRangedAttack(Mob* other) {
//make sure the attack and ranged timers are up
//if the ranged timer is disabled, then they have no ranged weapon and shouldn't be attacking anyhow
if((attack_timer.Enabled() && !attack_timer.Check(false)) || (ranged_timer.Enabled() && !ranged_timer.Check())) {
LogCombat("Bot Archery attack canceled. Timer not up. Attack [{}], ranged [{}]", attack_timer.GetRemainingTime(), ranged_timer.GetRemainingTime());
Message(0, "Error: Timer not up. Attack %d, ranged %d", attack_timer.GetRemainingTime(), ranged_timer.GetRemainingTime());
return;
}
EQ::ItemInstance* rangedItem = GetBotItem(EQ::invslot::slotRange);
const EQ::ItemData* RangeWeapon = nullptr;
if(rangedItem)
RangeWeapon = rangedItem->GetItem();
EQ::ItemInstance* ammoItem = GetBotItem(EQ::invslot::slotAmmo);
const EQ::ItemData* Ammo = nullptr;
if(ammoItem)
Ammo = ammoItem->GetItem();
if(!RangeWeapon || !Ammo)
return;
LogCombat("Shooting [{}] with bow [{}] ([{}]) and arrow [{}] ([{}])", other->GetCleanName(), RangeWeapon->Name, RangeWeapon->ID, Ammo->Name, Ammo->ID);
if(!IsAttackAllowed(other) || IsCasting() || DivineAura() || IsStunned() || IsMezzed() || (GetAppearance() == eaDead))
return;
SendItemAnimation(other, Ammo, EQ::skills::SkillArchery);
//DoArcheryAttackDmg(GetTarget(), rangedItem, ammoItem);
DoArcheryAttackDmg(other, rangedItem, ammoItem); // watch
//break invis when you attack
if(invisible) {
LogCombat("Removing invisibility due to melee attack");
BuffFadeByEffect(SE_Invisibility);
BuffFadeByEffect(SE_Invisibility2);
invisible = false;
}
if(invisible_undead) {
LogCombat("Removing invisibility vs. undead due to melee attack");
BuffFadeByEffect(SE_InvisVsUndead);
BuffFadeByEffect(SE_InvisVsUndead2);
invisible_undead = false;
}
if(invisible_animals) {
LogCombat("Removing invisibility vs. animals due to melee attack");
BuffFadeByEffect(SE_InvisVsAnimals);
invisible_animals = false;
}
if (spellbonuses.NegateIfCombat)
BuffFadeByEffect(SE_NegateIfCombat);
if(hidden || improved_hidden){
hidden = false;
improved_hidden = false;
EQApplicationPacket* outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
}
}
bool Bot::CheckBotDoubleAttack(bool tripleAttack) {
//Check for bonuses that give you a double attack chance regardless of skill (ie Bestial Frenzy/Harmonious Attack AA)
uint32 bonusGiveDA = (aabonuses.GiveDoubleAttack + spellbonuses.GiveDoubleAttack + itembonuses.GiveDoubleAttack);
// If you don't have the double attack skill, return
if (!GetSkill(EQ::skills::SkillDoubleAttack) && !(GetClass() == BARD || GetClass() == BEASTLORD))
return false;
// You start with no chance of double attacking
float chance = 0.0f;
uint16 skill = GetSkill(EQ::skills::SkillDoubleAttack);
int32 bonusDA = (aabonuses.DoubleAttackChance + spellbonuses.DoubleAttackChance + itembonuses.DoubleAttackChance);
//Use skill calculations otherwise, if you only have AA applied GiveDoubleAttack chance then use that value as the base.
if (skill)
chance = ((float(skill + GetLevel()) * (float(100.0f + bonusDA + bonusGiveDA) / 100.0f)) / 500.0f);
else
chance = ((float(bonusGiveDA) * (float(100.0f + bonusDA) / 100.0f)) / 100.0f);
//Live now uses a static Triple Attack skill (lv 46 = 2% lv 60 = 20%) - We do not have this skill on EMU ATM.
//A reasonable formula would then be TA = 20% * chance
//AA's can also give triple attack skill over cap. (ie Burst of Power) NOTE: Skill ID in spell data is 76 (Triple Attack)
//Kayen: Need to decide if we can implement triple attack skill before working in over the cap effect.
if(tripleAttack) {
// Only some Double Attack classes get Triple Attack [This is already checked in client_processes.cpp]
int32 triple_bonus = (spellbonuses.TripleAttackChance + itembonuses.TripleAttackChance);
chance *= 0.2f; //Baseline chance is 20% of your double attack chance.
chance *= (float(100.0f + triple_bonus) / 100.0f); //Apply modifiers.
}
if((zone->random.Real(0, 1) < chance))
return true;
return false;
}
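// Worked example of the skill-based branch above: with 245 Double Attack skill at level 60
// and no bonuses, chance = ((245 + 60) * (100 / 100)) / 500 = 0.61, i.e. a 61% double attack.
// A triple attack check then rolls at 0.61 * 0.2 = 0.122 (12.2%) before triple-attack bonuses.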
void Bot::ApplySpecialAttackMod(EQ::skills::SkillType skill, int32 &dmg, int32 &mindmg) {
int item_slot = -1;
// 1: Apply bonus from AC (boots/shield/hands); est. 40 AC = 6 dmg
switch (skill) {
case EQ::skills::SkillFlyingKick:
case EQ::skills::SkillRoundKick:
case EQ::skills::SkillKick:
item_slot = EQ::invslot::slotFeet;
break;
case EQ::skills::SkillBash:
item_slot = EQ::invslot::slotSecondary;
break;
case EQ::skills::SkillDragonPunch:
case EQ::skills::SkillEagleStrike:
case EQ::skills::SkillTigerClaw:
item_slot = EQ::invslot::slotHands;
break;
}
if (item_slot >= EQ::invslot::EQUIPMENT_BEGIN){
const EQ::ItemInstance* inst = GetBotItem(item_slot);
const EQ::ItemData* botweapon = nullptr;
if(inst)
botweapon = inst->GetItem();
if(botweapon)
dmg += botweapon->AC * (RuleI(Combat, SpecialAttackACBonus))/100;
}
}
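// Illustration of the "40 AC = 6 dmg" estimate above: assuming RuleI(Combat, SpecialAttackACBonus)
// is set to 15, a 40 AC item adds 40 * 15 / 100 = 6 damage (the rule value is server-configurable,
// so 15 is only an example here).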
bool Bot::CanDoSpecialAttack(Mob *other) {
//Make sure everything is valid before doing any attacks.
if (!other) {
SetTarget(nullptr);
return false;
}
if(!GetTarget())
SetTarget(other);
if ((GetAppearance() == eaDead) || (other->IsClient() && other->CastToClient()->IsDead()) || HasDied() || !IsAttackAllowed(other) || other->GetInvul() || other->GetSpecialAbility(IMMUNE_MELEE)) // 'other' is already known to be non-null here
return false;
return true;
}
void Bot::SetTarget(Mob* mob) {
if(mob != this) {
if(mob != GetTarget())
_previousTarget = GetTarget();
NPC::SetTarget(mob);
}
}
void Bot::SetStopMeleeLevel(uint8 level) {
if (IsCasterClass(GetClass()) || IsHybridClass(GetClass()))
_stopMeleeLevel = level;
else
_stopMeleeLevel = 255;
}
void Bot::SetGuardMode() {
StopMoving();
m_GuardPoint = GetPosition();
SetGuardFlag();
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->StopMoving();
}
}
void Bot::SetHoldMode() {
SetHoldFlag();
}
// AI Processing for the Bot object
constexpr float MAX_CASTER_DISTANCE[PLAYER_CLASS_COUNT] = {
0, (34 * 34), (24 * 24), (28 * 28), (26 * 26), (42 * 42), 0, (30 * 30), 0, (38 * 38), (54 * 54), (48 * 48), (52 * 52), (50 * 50), (32 * 32), 0
// W C P R S D M B R S N W M E B B
// A L A N H R N R O H E I A N S E
// R R L G D U K D G M C Z G C T R
};
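// Note: the entries above are pre-squared so they compare directly against DistanceSquared()
// results; e.g., the wizard entry (48 * 48) caps a casting wizard's stand-off range at 48 units,
// and a 0 entry disables caster-distance positioning for that class.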
void Bot::AI_Process()
{
#define TEST_COMBATANTS() if (!GetTarget() || GetAppearance() == eaDead) { return; }
#define PULLING_BOT (GetPullingFlag() || GetReturningFlag())
#define NOT_PULLING_BOT (!GetPullingFlag() && !GetReturningFlag())
#define GUARDING (GetGuardFlag())
#define NOT_GUARDING (!GetGuardFlag())
#define HOLDING (GetHoldFlag())
#define NOT_HOLDING (!GetHoldFlag())
#define PASSIVE (GetBotStance() == EQ::constants::stancePassive)
#define NOT_PASSIVE (GetBotStance() != EQ::constants::stancePassive)
Client* bot_owner = (GetBotOwner() && GetBotOwner()->IsClient() ? GetBotOwner()->CastToClient() : nullptr);
Group* bot_group = GetGroup();
//#pragma region PRIMARY AI SKIP CHECKS
// Primary reasons for not processing AI
if (!bot_owner || !bot_group || !IsAIControlled()) {
return;
}
if (bot_owner->IsDead()) {
SetTarget(nullptr);
SetBotOwner(nullptr);
return;
}
// We also need a leash owner and follow mob (subset of primary AI criteria)
Client* leash_owner = (bot_group->GetLeader() && bot_group->GetLeader()->IsClient() ? bot_group->GetLeader()->CastToClient() : bot_owner);
if (!leash_owner) {
return;
}
//#pragma endregion
Mob* follow_mob = entity_list.GetMob(GetFollowID());
if (!follow_mob) {
follow_mob = leash_owner;
SetFollowID(leash_owner->GetID());
}
// Berserk updates should occur if primary AI criteria are met
if (GetClass() == WARRIOR || GetClass() == BERSERKER) {
if (!berserk && GetHP() > 0 && GetHPRatio() < 30.0f) {
entity_list.MessageCloseString(this, false, 200, 0, BERSERK_START, GetName());
berserk = true;
}
if (berserk && GetHPRatio() >= 30.0f) {
entity_list.MessageCloseString(this, false, 200, 0, BERSERK_END, GetName());
berserk = false;
}
}
//#pragma region SECONDARY AI SKIP CHECKS
// Secondary reasons for not processing AI
if (GetPauseAI() || IsStunned() || IsMezzed() || (GetAppearance() == eaDead)) {
if (IsCasting()) {
InterruptSpell();
}
if (IsMyHealRotationSet() || (AmICastingForHealRotation() && m_member_of_heal_rotation->CastingMember() == this)) {
AdvanceHealRotation(false);
m_member_of_heal_rotation->SetMemberIsCasting(this, false);
}
return;
}
//#pragma endregion
float fm_distance = DistanceSquared(m_Position, follow_mob->GetPosition());
float lo_distance = DistanceSquared(m_Position, leash_owner->GetPosition());
float leash_distance = RuleR(Bots, LeashDistance);
//#pragma region CURRENTLY CASTING CHECKS
if (IsCasting()) {
if (IsHealRotationMember() &&
m_member_of_heal_rotation->CastingOverride() &&
m_member_of_heal_rotation->CastingTarget() != nullptr &&
m_member_of_heal_rotation->CastingReady() &&
m_member_of_heal_rotation->CastingMember() == this &&
!m_member_of_heal_rotation->MemberIsCasting(this))
{
InterruptSpell();
}
else if (AmICastingForHealRotation() && m_member_of_heal_rotation->CastingMember() == this) {
AdvanceHealRotation(false);
return;
}
else if (GetClass() != BARD) {
if (IsEngaged()) {
return;
}
if (
(NOT_GUARDING && fm_distance > GetFollowDistance()) || // Cancel out-of-combat casting if movement to follow mob is required
(GUARDING && DistanceSquared(GetPosition(), GetGuardPoint()) > GetFollowDistance()) // Cancel out-of-combat casting if movement to guard point is required
) {
InterruptSpell();
}
return;
}
}
else if (IsHealRotationMember()) {
m_member_of_heal_rotation->SetMemberIsCasting(this, false);
}
//#pragma endregion
// Can't move if rooted...
if (IsRooted() && IsMoving()) {
StopMoving();
return;
}
//#pragma region HEAL ROTATION CASTING CHECKS
if (IsMyHealRotationSet()) {
if (AIHealRotation(HealRotationTarget(), UseHealRotationFastHeals())) {
m_member_of_heal_rotation->SetMemberIsCasting(this);
m_member_of_heal_rotation->UpdateTargetHealingStats(HealRotationTarget());
AdvanceHealRotation();
}
else {
m_member_of_heal_rotation->SetMemberIsCasting(this, false);
AdvanceHealRotation(false);
}
}
//#pragma endregion
bool bo_alt_combat = (RuleB(Bots, AllowOwnerOptionAltCombat) && bot_owner->GetBotOption(Client::booAltCombat));
//#pragma region ATTACK FLAG
if (GetAttackFlag()) { // Push owner's target onto our hate list
if (GetPet() && PULLING_BOT) {
GetPet()->SetPetOrder(m_previous_pet_order);
}
SetAttackFlag(false);
SetAttackingFlag(false);
SetPullFlag(false);
SetPullingFlag(false);
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (NOT_HOLDING && NOT_PASSIVE) {
auto attack_target = bot_owner->GetTarget();
if (attack_target) {
InterruptSpell();
WipeHateList();
AddToHateList(attack_target, 1);
SetTarget(attack_target);
SetAttackingFlag();
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->WipeHateList();
GetPet()->AddToHateList(attack_target, 1);
GetPet()->SetTarget(attack_target);
}
}
}
}
//#pragma endregion
//#pragma region PULL FLAG
else if (GetPullFlag()) { // Push owner's target onto our hate list and set flags so other bots do not aggro
SetAttackFlag(false);
SetAttackingFlag(false);
SetPullFlag(false);
SetPullingFlag(false);
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (NOT_HOLDING && NOT_PASSIVE) {
auto pull_target = bot_owner->GetTarget();
if (pull_target) {
Bot::BotGroupSay(this, "Pulling %s to the group..", pull_target->GetCleanName());
InterruptSpell();
WipeHateList();
AddToHateList(pull_target, 1);
SetTarget(pull_target);
SetPullingFlag();
bot_owner->SetBotPulling();
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 1)) {
GetPet()->WipeHateList();
GetPet()->SetTarget(nullptr);
m_previous_pet_order = GetPet()->GetPetOrder();
GetPet()->SetPetOrder(SPO_Guard);
}
}
}
}
//#pragma endregion
//#pragma region ALT COMBAT (ACQUIRE HATE)
else if (bo_alt_combat && m_alt_combat_hate_timer.Check(false)) { // 'Alt Combat' gives some more 'control' options on how bots process aggro
// Empty hate list - let's find some aggro
if (!IsEngaged() && NOT_HOLDING && NOT_PASSIVE && (!bot_owner->GetBotPulling() || NOT_PULLING_BOT)) {
Mob* lo_target = leash_owner->GetTarget();
if (lo_target &&
lo_target->IsNPC() &&
!lo_target->IsMezzed() &&
((bot_owner->GetBotOption(Client::booAutoDefend) && lo_target->GetHateAmount(leash_owner)) || leash_owner->AutoAttackEnabled()) &&
lo_distance <= leash_distance &&
DistanceSquared(m_Position, lo_target->GetPosition()) <= leash_distance &&
(CheckLosFN(lo_target) || leash_owner->CheckLosFN(lo_target)) &&
IsAttackAllowed(lo_target))
{
AddToHateList(lo_target, 1);
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->AddToHateList(lo_target, 1);
GetPet()->SetTarget(lo_target);
}
}
else {
for (int counter = 0; counter < bot_group->GroupCount(); counter++) {
Mob* bg_member = bot_group->members[counter];
if (!bg_member) {
continue;
}
Mob* bgm_target = bg_member->GetTarget();
if (!bgm_target || !bgm_target->IsNPC()) {
continue;
}
if (!bgm_target->IsMezzed() &&
((bot_owner->GetBotOption(Client::booAutoDefend) && bgm_target->GetHateAmount(bg_member)) || leash_owner->AutoAttackEnabled()) &&
lo_distance <= leash_distance &&
DistanceSquared(m_Position, bgm_target->GetPosition()) <= leash_distance &&
(CheckLosFN(bgm_target) || leash_owner->CheckLosFN(bgm_target)) &&
IsAttackAllowed(bgm_target))
{
AddToHateList(bgm_target, 1);
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->AddToHateList(bgm_target, 1);
GetPet()->SetTarget(bgm_target);
}
break;
}
}
}
}
}
//#pragma endregion
glm::vec3 Goal(0, 0, 0);
// We have aggro to choose from
if (IsEngaged()) {
if (rest_timer.Enabled()) {
rest_timer.Disable();
}
//#pragma region PULLING FLAG (TARGET VALIDATION)
if (GetPullingFlag()) {
if (!GetTarget()) {
WipeHateList();
SetTarget(nullptr);
SetPullingFlag(false);
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (GetPet()) {
GetPet()->SetPetOrder(m_previous_pet_order);
}
return;
}
else if (GetTarget()->GetHateList().size()) {
WipeHateList();
SetTarget(nullptr);
SetPullingFlag(false);
SetReturningFlag();
return;
}
else {
// Default action is to aggress towards enemy
}
}
//#pragma endregion
//#pragma region RETURNING FLAG
else if (GetReturningFlag()) {
// Need to make it back to group before clearing return flag
if (fm_distance <= GetFollowDistance()) {
// Once we're back, clear blocking flags so everyone else can join in
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (GetPet()) {
GetPet()->SetPetOrder(m_previous_pet_order);
}
}
// Need to keep puller out of combat until they reach their 'return to' destination
if (HasTargetReflection()) {
SetTarget(nullptr);
WipeHateList();
return;
}
}
//#pragma endregion
//#pragma region ALT COMBAT (ACQUIRE TARGET)
else if (bo_alt_combat && m_alt_combat_hate_timer.Check()) { // Find a mob from hate list to target
// Group roles can be expounded upon in the future
auto assist_mob = entity_list.GetMob(bot_group->GetMainAssistName());
bool find_target = true;
if (assist_mob) {
if (assist_mob->GetTarget()) {
if (assist_mob != this) {
SetTarget(assist_mob->GetTarget());
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
// This artificially inflates pet's target aggro..but, less expensive than checking hate each AI process
GetPet()->AddToHateList(assist_mob->GetTarget(), 1);
GetPet()->SetTarget(assist_mob->GetTarget());
}
}
find_target = false;
}
else if (assist_mob != this) {
SetTarget(nullptr);
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 1)) {
GetPet()->WipeHateList();
GetPet()->SetTarget(nullptr);
}
find_target = false;
}
}
if (find_target) {
if (IsRooted()) {
SetTarget(hate_list.GetClosestEntOnHateList(this, true));
}
else {
// This will keep bots on target for now..but, future updates will allow for rooting/stunning
SetTarget(hate_list.GetEscapingEntOnHateList(leash_owner, leash_distance));
if (!GetTarget()) {
SetTarget(hate_list.GetEntWithMostHateOnList(this, nullptr, true));
}
}
}
}
//#pragma endregion
//#pragma region DEFAULT (ACQUIRE TARGET)
else {
// Default behavior doesn't have a means of acquiring a target from the bot's hate list..
// ..that action occurs through commands or out-of-combat checks
// (Use current target, if already in combat)
}
//#pragma endregion
//#pragma region VERIFY TARGET AND STANCE
Mob* tar = GetTarget(); // We should have a target..if not, we're awaiting new orders
if (!tar || PASSIVE) {
SetTarget(nullptr);
WipeHateList();
SetAttackFlag(false);
SetAttackingFlag(false);
if (PULLING_BOT) {
// 'Flags' should only be set on the bot that is pulling
SetPullingFlag(false);
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (GetPet()) {
GetPet()->SetPetOrder(m_previous_pet_order);
}
}
if (GetArchetype() == ARCHETYPE_CASTER) {
BotMeditate(true);
}
return;
}
//#pragma endregion
//#pragma region ATTACKING FLAG (HATE VALIDATION)
if (GetAttackingFlag() && tar->CheckAggro(this)) {
SetAttackingFlag(false);
}
//#pragma endregion
float tar_distance = DistanceSquared(m_Position, tar->GetPosition());
//#pragma region TARGET VALIDATION
// DOUBLE-CHECK THIS CRITERIA
// Verify that our target has attackable criteria
if (HOLDING ||
!tar->IsNPC() ||
tar->IsMezzed() ||
lo_distance > leash_distance ||
tar_distance > leash_distance ||
(!GetAttackingFlag() && !CheckLosFN(tar) && !leash_owner->CheckLosFN(tar)) || // This is supposed to keep bots from attacking things behind walls
!IsAttackAllowed(tar) ||
(bo_alt_combat &&
(!GetAttackingFlag() && NOT_PULLING_BOT && !leash_owner->AutoAttackEnabled() && !tar->GetHateAmount(this) && !tar->GetHateAmount(leash_owner))
)
)
{
// Normally, we wouldn't want to do this without class checks..but, too many issues can arise if we let enchanter animation pets run rampant
if (HasPet()) {
GetPet()->RemoveFromHateList(tar);
GetPet()->SetTarget(nullptr);
}
RemoveFromHateList(tar);
SetTarget(nullptr);
SetAttackFlag(false);
SetAttackingFlag(false);
if (PULLING_BOT) {
SetPullingFlag(false);
SetReturningFlag(false);
bot_owner->SetBotPulling(false);
if (GetPet()) {
GetPet()->SetPetOrder(m_previous_pet_order);
}
}
if (IsMoving()) {
StopMoving();
}
return;
}
//#pragma endregion
// This causes conflicts with default pet handler (bounces between targets)
if (NOT_PULLING_BOT && HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
// We don't add to hate list here because it's assumed to already be on the list
GetPet()->SetTarget(tar);
}
if (DivineAura()) {
return;
}
if (!(m_PlayerState & static_cast<uint32>(PlayerState::Aggressive))) {
SendAddPlayerState(PlayerState::Aggressive);
}
//#pragma region PULLING FLAG (ACTIONABLE RANGE)
if (GetPullingFlag()) {
constexpr size_t PULL_AGGRO = 5225; // spells[5225]: 'Throw Stone' - 0 cast time
if (tar_distance <= (spells[PULL_AGGRO].range * spells[PULL_AGGRO].range)) {
StopMoving();
CastSpell(PULL_AGGRO, tar->GetID());
return;
}
}
//#pragma endregion
//#pragma region COMBAT RANGE CALCS
bool atCombatRange = false;
const auto* p_item = GetBotItem(EQ::invslot::slotPrimary);
const auto* s_item = GetBotItem(EQ::invslot::slotSecondary);
bool behind_mob = false;
bool backstab_weapon = false;
if (GetClass() == ROGUE) {
behind_mob = BehindMob(tar, GetX(), GetY()); // Can be separated for other future use
backstab_weapon = p_item && p_item->GetItemBackstabDamage();
}
// Calculate melee distances
float melee_distance_max = 0.0f;
float melee_distance = 0.0f;
{
float size_mod = GetSize();
float other_size_mod = tar->GetSize();
if (GetRace() == RT_DRAGON || GetRace() == RT_WURM || GetRace() == RT_DRAGON_7) { // For races with a fixed size
size_mod = 60.0f;
}
else if (size_mod < 6.0f) {
size_mod = 8.0f;
}
if (tar->GetRace() == RT_DRAGON || tar->GetRace() == RT_WURM || tar->GetRace() == RT_DRAGON_7) { // For races with a fixed size
other_size_mod = 60.0f;
}
else if (other_size_mod < 6.0f) {
other_size_mod = 8.0f;
}
if (other_size_mod > size_mod) {
size_mod = other_size_mod;
}
if (size_mod > 29.0f) {
size_mod *= size_mod;
}
else if (size_mod > 19.0f) {
size_mod *= (size_mod * 2.0f);
}
else {
size_mod *= (size_mod * 4.0f);
}
// Prevention of ridiculously sized hit boxes
if (size_mod > 10000.0f) {
size_mod = (size_mod / 7.0f);
}
melee_distance_max = size_mod;
switch (GetClass()) {
case WARRIOR:
case PALADIN:
case SHADOWKNIGHT:
if (p_item && p_item->GetItem()->IsType2HWeapon()) {
melee_distance = melee_distance_max * 0.45f;
}
else if ((s_item && s_item->GetItem()->IsTypeShield()) || (!p_item && !s_item)) {
melee_distance = melee_distance_max * 0.35f;
}
else {
melee_distance = melee_distance_max * 0.40f;
}
break;
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
if (p_item && p_item->GetItem()->IsType2HWeapon()) {
melee_distance = melee_distance_max * 0.95f;
}
else {
melee_distance = melee_distance_max * 0.75f;
}
break;
case ROGUE:
if (behind_mob && backstab_weapon) {
if (p_item->GetItem()->IsType2HWeapon()) { // 'p_item' tested in 'backstab_weapon' check above
melee_distance = melee_distance_max * 0.30f;
}
else {
melee_distance = melee_distance_max * 0.25f;
}
break;
}
// Fall-through
default:
if (p_item && p_item->GetItem()->IsType2HWeapon()) {
melee_distance = melee_distance_max * 0.70f;
}
else {
melee_distance = melee_distance_max * 0.50f;
}
break;
}
}
float melee_distance_min = melee_distance / 2.0f;
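// Worked example of the hit-box math above: two size-6 combatants keep size_mod = 6, which
// falls in the <= 19 branch, so size_mod = 6 * (6 * 4) = 144 squared units (a 12-unit reach).
// A warrior with a 2H weapon then closes to 144 * 0.45 = 64.8 squared units (~8 actual units),
// and melee_distance_min becomes half of that.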
// Calculate caster distances
float caster_distance_max = 0.0f;
float caster_distance_min = 0.0f;
float caster_distance = 0.0f;
{
if (GetLevel() >= GetStopMeleeLevel() && GetClass() >= WARRIOR && GetClass() <= BERSERKER) {
caster_distance_max = MAX_CASTER_DISTANCE[(GetClass() - 1)];
}
if (caster_distance_max) {
caster_distance_min = melee_distance_max;
if (caster_distance_max <= caster_distance_min) {
caster_distance_max = caster_distance_min * 1.25f;
}
caster_distance = ((caster_distance_max + caster_distance_min) / 2);
}
}
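// Example: a wizard past its stop-melee level uses MAX_CASTER_DISTANCE[WIZARD - 1] = (48 * 48) = 2304.
// With a melee_distance_max of 144 (see the example above), caster_distance_min = 144 and the
// preferred midpoint becomes (2304 + 144) / 2 = 1224 squared units (~35 actual units).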
bool atArcheryRange = IsArcheryRange(tar);
if (GetRangerAutoWeaponSelect()) {
bool changeWeapons = false;
if (atArcheryRange && !IsBotArcher()) {
SetBotArcher(true);
changeWeapons = true;
}
else if (!atArcheryRange && IsBotArcher()) {
SetBotArcher(false);
changeWeapons = true;
}
if (changeWeapons) {
ChangeBotArcherWeapons(IsBotArcher());
}
}
if (IsBotArcher() && atArcheryRange) {
atCombatRange = true;
}
else if (caster_distance_max && tar_distance <= caster_distance_max) {
atCombatRange = true;
}
else if (tar_distance <= melee_distance) {
atCombatRange = true;
}
//#pragma endregion
//#pragma region ENGAGED AT COMBAT RANGE
// We can fight
if (atCombatRange) {
//if (IsMoving() || GetCombatJitterFlag()) { // StopMoving() needs to be called so that the jitter timer can be reset
if (IsMoving()) {
// Since we're using a pseudo-shadowstep for jitter, disregard the combat jitter flag
//if (!GetCombatJitterFlag()) {
StopMoving(CalculateHeadingToTarget(tar->GetX(), tar->GetY()));
//}
return;
}
// Combat 'jitter' code
// Note: Combat Jitter is disabled until a working movement solution can be found
if (AI_movement_timer->Check() && (!spellend_timer.Enabled() || GetClass() == BARD)) {
if (!IsRooted()) {
if (HasTargetReflection()) {
if (!tar->IsFeared() && !tar->IsStunned()) {
if (GetClass() == ROGUE) {
if (m_evade_timer.Check(false)) { // Attempt to evade
int timer_duration = (HideReuseTime - GetSkillReuseTime(EQ::skills::SkillHide)) * 1000;
if (timer_duration < 0) {
timer_duration = 0;
}
m_evade_timer.Start(timer_duration);
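// Roll against the Hide skill: e.g., a rogue with 200 Hide evades on roughly 200/261 (~77%) of attempts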
if (zone->random.Int(0, 260) < (int)GetSkill(EQ::skills::SkillHide)) {
RogueEvade(tar);
}
return;
}
}
//if (tar->IsRooted()) { // Move caster/rogue back from rooted mob - out of combat range, if necessary
// if (GetArchetype() == ARCHETYPE_CASTER || GetClass() == ROGUE) {
// if (tar_distance <= melee_distance_max) {
// if (PlotPositionAroundTarget(this, Goal.x, Goal.y, Goal.z)) {
// //if (PlotPositionBehindMeFacingTarget(tar, Goal.x, Goal.y, Goal.z)) {
// Teleport(Goal);
// //WalkTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// }
//}
}
}
//else {
// if (caster_distance_min && tar_distance < caster_distance_min && !tar->IsFeared()) { // Caster back-off adjustment
// if (PlotPositionAroundTarget(this, Goal.x, Goal.y, Goal.z)) {
// //if (PlotPositionBehindMeFacingTarget(tar, Goal.x, Goal.y, Goal.z)) {
// if (DistanceSquared(Goal, tar->GetPosition()) <= caster_distance_max) {
// Teleport(Goal);
// //WalkTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// }
// else if (tar_distance < melee_distance_min) { // Melee back-off adjustment
// if (PlotPositionAroundTarget(this, Goal.x, Goal.y, Goal.z)) {
// //if (PlotPositionBehindMeFacingTarget(tar, Goal.x, Goal.y, Goal.z)) {
// if (DistanceSquared(Goal, tar->GetPosition()) <= melee_distance_max) {
// Teleport(Goal);
// //WalkTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// }
// else if (backstab_weapon && !behind_mob) { // Move the rogue to behind the mob
// if (PlotPositionAroundTarget(tar, Goal.x, Goal.y, Goal.z)) {
// //if (PlotPositionOnArcBehindTarget(tar, Goal.x, Goal.y, Goal.z, melee_distance)) {
// float distance_squared = DistanceSquared(Goal, tar->GetPosition());
// if (/*distance_squared >= melee_distance_min && */distance_squared <= melee_distance_max) {
// Teleport(Goal);
// //RunTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// }
// else if (m_combat_jitter_timer.Check()) {
// if (!caster_distance && PlotPositionAroundTarget(tar, Goal.x, Goal.y, Goal.z)) {
// //if (!caster_distance && PlotPositionOnArcInFrontOfTarget(tar, Goal.x, Goal.y, Goal.z, melee_distance)) {
// float distance_squared = DistanceSquared(Goal, tar->GetPosition());
// if (/*distance_squared >= melee_distance_min && */distance_squared <= melee_distance_max) {
// Teleport(Goal);
// //WalkTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// else if (caster_distance && PlotPositionAroundTarget(tar, Goal.x, Goal.y, Goal.z)) {
// //else if (caster_distance && PlotPositionOnArcInFrontOfTarget(tar, Goal.x, Goal.y, Goal.z, caster_distance)) {
// float distance_squared = DistanceSquared(Goal, tar->GetPosition());
// if (/*distance_squared >= caster_distance_min && */distance_squared <= caster_distance_max) {
// Teleport(Goal);
// //WalkTo(Goal.x, Goal.y, Goal.z);
// SetCombatJitterFlag();
// return;
// }
// }
// }
// if (!IsFacingMob(tar)) {
// FaceTarget(tar);
// return;
// }
//}
}
else {
if (!IsSitting() && !IsFacingMob(tar)) {
FaceTarget(tar);
return;
}
}
}
if (!IsBotNonSpellFighter() && AI_EngagedCastCheck()) {
return;
}
// Up to this point, GetTarget() has been safe to dereference since the initial
// TEST_COMBATANTS() call. Due to the chance of the target dying and our pointer
// being nullified, we need to test it before dereferencing to avoid crashes
if (IsBotArcher() && ranged_timer.Check(false)) { // Can shoot mezzed, stunned and dead!?
TEST_COMBATANTS();
if (GetTarget()->GetHPRatio() <= 99.0f) {
BotRangedAttack(tar);
}
}
else if (!IsBotArcher() && GetLevel() < GetStopMeleeLevel()) {
// We can't fight if we don't have a target, are stun/mezzed or dead..
// Stop attacking if the target is enraged
TEST_COMBATANTS();
if (tar->IsEnraged() && !BehindMob(tar, GetX(), GetY())) {
return;
}
// First, special attack per class (kick, backstab etc..)
TEST_COMBATANTS();
DoClassAttacks(tar);
TEST_COMBATANTS();
if (attack_timer.Check()) { // Process primary weapon attacks
Attack(tar, EQ::invslot::slotPrimary);
TEST_COMBATANTS();
TriggerDefensiveProcs(tar, EQ::invslot::slotPrimary, false);
TEST_COMBATANTS();
TryWeaponProc(p_item, tar, EQ::invslot::slotPrimary);
// bool tripleSuccess = false;
TEST_COMBATANTS();
if (CanThisClassDoubleAttack()) {
if (CheckBotDoubleAttack()) {
Attack(tar, EQ::invslot::slotPrimary, true);
}
TEST_COMBATANTS();
if (GetSpecialAbility(SPECATK_TRIPLE) && CheckBotDoubleAttack(true)) {
// tripleSuccess = true;
Attack(tar, EQ::invslot::slotPrimary, true);
}
TEST_COMBATANTS();
// quad attack, does this belong here??
if (GetSpecialAbility(SPECATK_QUAD) && CheckBotDoubleAttack(true)) {
Attack(tar, EQ::invslot::slotPrimary, true);
}
}
TEST_COMBATANTS();
// Live AA - Flurry, Rapid Strikes etc. (Flurry does not require Triple Attack).
int32 flurrychance = (aabonuses.FlurryChance + spellbonuses.FlurryChance + itembonuses.FlurryChance);
if (flurrychance) {
if (zone->random.Int(0, 100) < flurrychance) {
MessageString(Chat::NPCFlurry, YOU_FLURRY);
Attack(tar, EQ::invslot::slotPrimary, false);
TEST_COMBATANTS();
Attack(tar, EQ::invslot::slotPrimary, false);
}
}
TEST_COMBATANTS();
auto ExtraAttackChanceBonus =
(spellbonuses.ExtraAttackChance[0] + itembonuses.ExtraAttackChance[0] +
aabonuses.ExtraAttackChance[0]);
if (ExtraAttackChanceBonus) {
if (p_item && p_item->GetItem()->IsType2HWeapon()) {
if (zone->random.Int(0, 100) < ExtraAttackChanceBonus) {
Attack(tar, EQ::invslot::slotPrimary, false);
}
}
}
}
TEST_COMBATANTS();
if (attack_dw_timer.Check() && CanThisClassDualWield()) { // Process secondary weapon attacks
const EQ::ItemData* s_itemdata = nullptr;
// Can only dual wield without a weapon if you're a monk
if (s_item || (GetClass() == MONK)) {
if (s_item) {
s_itemdata = s_item->GetItem();
}
int weapon_type = 0; // No weapon type.
bool use_fist = true;
if (s_itemdata) {
weapon_type = s_itemdata->ItemType;
use_fist = false;
}
if (use_fist || !s_itemdata->IsType2HWeapon()) {
float DualWieldProbability = 0.0f;
int32 Ambidexterity = (aabonuses.Ambidexterity + spellbonuses.Ambidexterity + itembonuses.Ambidexterity);
DualWieldProbability = ((GetSkill(EQ::skills::SkillDualWield) + GetLevel() + Ambidexterity) / 400.0f); // 78.0 max
int32 DWBonus = (spellbonuses.DualWieldChance + itembonuses.DualWieldChance);
DualWieldProbability += (DualWieldProbability * float(DWBonus) / 100.0f);
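// Example: 252 Dual Wield skill at level 60 with no Ambidexterity gives (252 + 60) / 400 = 0.78
// (the 78% ceiling noted above) before the DWBonus modifier is applied.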
float random = zone->random.Real(0, 1);
if (random < DualWieldProbability) { // Max 78% of DW
Attack(tar, EQ::invslot::slotSecondary); // Single attack with offhand
TEST_COMBATANTS();
TryWeaponProc(s_item, tar, EQ::invslot::slotSecondary);
TEST_COMBATANTS();
if (CanThisClassDoubleAttack() && CheckBotDoubleAttack()) {
if (tar->GetHP() > -10) {
Attack(tar, EQ::invslot::slotSecondary); // Single attack with offhand
}
}
}
}
}
}
}
if (GetAppearance() == eaDead) {
return;
}
}
//#pragma endregion
//#pragma region ENGAGED NOT AT COMBAT RANGE
else { // Too far away to fight (GetTarget() validity can be iffy below this point - including outer scopes)
// This code actually gets processed when we are too far away from target and have not engaged yet, too
if (/*!GetCombatJitterFlag() && */AI_movement_timer->Check() && (!spellend_timer.Enabled() || GetClass() == BARD)) { // Pursue processing
if (GetTarget() && !IsRooted()) {
LogAI("Pursuing [{}] while engaged", GetTarget()->GetCleanName());
Goal = GetTarget()->GetPosition();
if (DistanceSquared(m_Position, Goal) <= leash_distance) {
RunTo(Goal.x, Goal.y, Goal.z);
}
else {
WipeHateList();
SetTarget(nullptr);
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->WipeHateList();
GetPet()->SetTarget(nullptr);
}
}
return;
}
else {
if (IsMoving()) {
StopMoving();
}
return;
}
}
if (GetTarget() && GetTarget()->IsFeared() && !spellend_timer.Enabled() && AI_think_timer->Check()) {
if (!IsFacingMob(GetTarget())) {
FaceTarget(GetTarget());
}
// This is a mob that is fleeing either because it has been feared or is low on hitpoints
AI_PursueCastCheck(); // This appears to always return true..can't trust for success/fail
return;
}
} // End not in combat range
//#pragma endregion
if (!IsMoving() && !spellend_timer.Enabled()) { // This may actually need work...
if (GetTarget() && AI_EngagedCastCheck()) {
BotMeditate(false);
}
else if (GetArchetype() == ARCHETYPE_CASTER) {
BotMeditate(true);
}
return;
}
}
else { // Out-of-combat behavior
SetAttackFlag(false);
SetAttackingFlag(false);
if (!bot_owner->GetBotPulling()) {
SetPullingFlag(false);
SetReturningFlag(false);
}
//#pragma region AUTO DEFEND
// This is as close as I could get without modifying the aggro mechanics and making it an expensive process...
// 'class Client' doesn't make use of hate_list...
if (RuleB(Bots, AllowOwnerOptionAutoDefend) && bot_owner->GetBotOption(Client::booAutoDefend)) {
if (!m_auto_defend_timer.Enabled()) {
m_auto_defend_timer.Start(zone->random.Int(250, 1250)); // random timer to simulate 'awareness' (cuts down on scanning overhead)
return;
}
if (m_auto_defend_timer.Check() && bot_owner->GetAggroCount()) {
if (NOT_HOLDING && NOT_PASSIVE) {
auto xhaters = bot_owner->GetXTargetAutoMgr();
if (xhaters && !xhaters->empty()) {
for (auto hater_iter : xhaters->get_list()) {
if (!hater_iter.spawn_id) {
continue;
}
if (bot_owner->GetBotPulling() && bot_owner->GetTarget() && hater_iter.spawn_id == bot_owner->GetTarget()->GetID()) {
continue;
}
auto hater = entity_list.GetMob(hater_iter.spawn_id);
if (hater && !hater->IsMezzed() && DistanceSquared(hater->GetPosition(), bot_owner->GetPosition()) <= leash_distance) {
// This is roughly equivalent to an NPC attacking a client pet owner
AddToHateList(hater, 1);
SetTarget(hater);
SetAttackingFlag();
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 2)) {
GetPet()->AddToHateList(hater, 1);
GetPet()->SetTarget(hater);
}
m_auto_defend_timer.Disable();
return;
}
}
}
}
}
}
//#pragma endregion
SetTarget(nullptr);
if (HasPet() && (GetClass() != ENCHANTER || GetPet()->GetPetType() != petAnimation || GetAA(aaAnimationEmpathy) >= 1)) {
GetPet()->WipeHateList();
GetPet()->SetTarget(nullptr);
}
if (m_PlayerState & static_cast<uint32>(PlayerState::Aggressive)) {
SendRemovePlayerState(PlayerState::Aggressive);
}
//#pragma region OK TO IDLE
// Ok to idle
if ((NOT_GUARDING && fm_distance <= GetFollowDistance()) || (GUARDING && DistanceSquared(GetPosition(), GetGuardPoint()) <= GetFollowDistance())) {
if (!IsMoving() && AI_think_timer->Check() && !spellend_timer.Enabled()) {
if (NOT_PASSIVE) {
if (!AI_IdleCastCheck() && !IsCasting() && GetClass() != BARD) {
BotMeditate(true);
}
}
else {
if (GetClass() != BARD) {
BotMeditate(true);
}
}
return;
}
}
// Non-engaged movement checks
if (AI_movement_timer->Check() && (!IsCasting() || GetClass() == BARD)) {
if (GUARDING) {
Goal = GetGuardPoint();
}
else {
Goal = follow_mob->GetPosition();
}
float destination_distance = DistanceSquared(GetPosition(), Goal);
if ((!bot_owner->GetBotPulling() || PULLING_BOT) && (destination_distance > GetFollowDistance())) {
if (!IsRooted()) {
if (rest_timer.Enabled()) {
rest_timer.Disable();
}
bool running = true;
if (destination_distance < GetFollowDistance() + BOT_FOLLOW_DISTANCE_WALK) {
running = false;
}
if (running) {
RunTo(Goal.x, Goal.y, Goal.z);
}
else {
WalkTo(Goal.x, Goal.y, Goal.z);
}
return;
}
}
else {
if (IsMoving()) {
StopMoving();
return;
}
}
}
// Basically, bard bots get a chance to cast idle spells while moving
if (GetClass() == BARD && IsMoving() && NOT_PASSIVE) {
if (!spellend_timer.Enabled() && AI_think_timer->Check()) {
AI_IdleCastCheck();
return;
}
}
//#pragma endregion
}
#undef TEST_COMBATANTS
#undef PULLING_BOT
#undef NOT_PULLING_BOT
#undef GUARDING
#undef NOT_GUARDING
#undef HOLDING
#undef NOT_HOLDING
#undef PASSIVE
#undef NOT_PASSIVE
}
// AI Processing for a Bot object's pet
void Bot::PetAIProcess() {
if( !HasPet() || !GetPet() || !GetPet()->IsNPC())
return;
Mob* BotOwner = this->GetBotOwner();
NPC* botPet = this->GetPet()->CastToNPC();
if(!botPet->GetOwner() || !botPet->GetID() || !botPet->GetOwnerID()) {
Kill();
return;
}
if (!botPet->IsAIControlled() || botPet->GetAttackTimer().Check(false) || botPet->IsCasting() || !botPet->GetOwner()->IsBot())
return;
if (IsEngaged()) {
if (botPet->IsRooted())
botPet->SetTarget(hate_list.GetClosestEntOnHateList(botPet));
else
botPet->SetTarget(hate_list.GetEntWithMostHateOnList(botPet));
// Check whether we have line of sight to our target.
// If we don't, our hate_list is wiped.
// This costs some CPU, but without it the bot/pet would aggro targets behind walls, floors, etc...
if(!botPet->CheckLosFN(botPet->GetTarget()) || botPet->GetTarget()->IsMezzed() || !botPet->IsAttackAllowed(GetTarget())) {
botPet->WipeHateList();
botPet->SetTarget(botPet->GetOwner());
return;
}
botPet->FaceTarget(botPet->GetTarget());
bool is_combat_range = botPet->CombatRange(botPet->GetTarget());
// Ok, we're engaged; each class type has its own special AI.
// Only melee classes will close to melee range. Casters and healers stay behind, following the leader by default.
// The casters should probably stay in place so they can cast..
// Melee handling follows, and it also applies to any class under level 12 - beyond that, staying in melee gets hard for casters.. even for bots..
if(is_combat_range) {
botPet->GetAIMovementTimer()->Check();
if(botPet->IsMoving()) {
botPet->SetHeading(botPet->GetTarget()->GetHeading());
if(moved) {
moved = false;
botPet->SetRunAnimSpeed(0);
}
}
if(!botPet->IsMoving()) {
float newX = 0;
float newY = 0;
float newZ = 0;
bool petHasAggro = false;
if(botPet->GetTarget() && botPet->GetTarget()->GetHateTop() && botPet->GetTarget()->GetHateTop() == botPet)
petHasAggro = true;
if(botPet->GetClass() == ROGUE && !petHasAggro && !botPet->BehindMob(botPet->GetTarget(), botPet->GetX(), botPet->GetY())) {
// Move the rogue to behind the mob
if(botPet->PlotPositionAroundTarget(botPet->GetTarget(), newX, newY, newZ)) {
botPet->RunTo(newX, newY, newZ);
return;
}
}
else if(GetTarget() == botPet->GetTarget() && !petHasAggro && !botPet->BehindMob(botPet->GetTarget(), botPet->GetX(), botPet->GetY())) {
// If the bot owner and the bot are fighting the same mob, then move the pet to the rear arc of the mob
if(botPet->PlotPositionAroundTarget(botPet->GetTarget(), newX, newY, newZ)) {
botPet->RunTo(newX, newY, newZ);
return;
}
}
else if(DistanceSquaredNoZ(botPet->GetPosition(), botPet->GetTarget()->GetPosition()) < botPet->GetTarget()->GetSize()) {
// Let's try to adjust our melee range so we don't appear to be bunched up
bool isBehindMob = false;
bool moveBehindMob = false;
if(botPet->BehindMob(botPet->GetTarget(), botPet->GetX(), botPet->GetY()))
isBehindMob = true;
if (!isBehindMob && !petHasAggro)
moveBehindMob = true;
if(botPet->PlotPositionAroundTarget(botPet->GetTarget(), newX, newY, newZ, moveBehindMob)) {
botPet->RunTo(newX, newY, newZ);
return;
}
}
}
// we can't fight if we don't have a target, are stun/mezzed or dead..
if(botPet->GetTarget() && !botPet->IsStunned() && !botPet->IsMezzed() && (botPet->GetAppearance() != eaDead)) {
// check the delay on the attack
if(botPet->GetAttackTimer().Check()) {
// Stop attacking while we are on a front arc and the target is enraged
if(!botPet->BehindMob(botPet->GetTarget(), botPet->GetX(), botPet->GetY()) && botPet->GetTarget()->IsEnraged())
return;
if (botPet->Attack(GetTarget(), EQ::invslot::slotPrimary)) // try the main hand
if (botPet->GetTarget()) {
// We're a pet so we're able to dual attack
int32 RandRoll = zone->random.Int(0, 99);
if (botPet->CanThisClassDoubleAttack() && (RandRoll < (botPet->GetLevel() + NPCDualAttackModifier))) {
if (botPet->Attack(botPet->GetTarget(), EQ::invslot::slotPrimary)) {}
}
}
if (botPet->GetOwner()->IsBot()) {
int aa_chance = 0;
int aa_skill = 0;
// Magician AA
aa_skill += botPet->GetOwner()->GetAA(aaElementalAlacrity);
// Necromancer AA
aa_skill += botPet->GetOwner()->GetAA(aaQuickeningofDeath);
// Beastlord AA
aa_skill += botPet->GetOwner()->GetAA(aaWardersAlacrity);
if(aa_skill >= 1)
aa_chance += ((aa_skill > 5 ? 5 : aa_skill) * 4);
if(aa_skill >= 6)
aa_chance += ((aa_skill - 5 > 3 ? 3 : aa_skill - 5) * 7);
if(aa_skill >= 9)
aa_chance += ((aa_skill - 8 > 3 ? 3 : aa_skill - 8) * 3);
if(aa_skill >= 12)
aa_chance += ((aa_skill - 11) * 1);
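// Example: a combined AA skill of 12 yields (5 * 4) + (3 * 7) + (3 * 3) + (1 * 1) = 51,
// the percentage threshold rolled against below for Flurry().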
//aa_chance += botPet->GetOwner()->GetAA(aaCompanionsAlacrity) * 3;
if (zone->random.Int(1, 100) < aa_chance)
Flurry(nullptr);
}
// Ok now, let's check pet's offhand.
if (botPet->GetAttackDWTimer().Check() && botPet->GetOwnerID() && botPet->GetOwner() && ((botPet->GetOwner()->GetClass() == MAGICIAN) || (botPet->GetOwner()->GetClass() == NECROMANCER) || (botPet->GetOwner()->GetClass() == SHADOWKNIGHT) || (botPet->GetOwner()->GetClass() == BEASTLORD))) {
if(botPet->GetOwner()->GetLevel() >= 24) {
float DualWieldProbability = ((botPet->GetSkill(EQ::skills::SkillDualWield) + botPet->GetLevel()) / 400.0f);
DualWieldProbability -= zone->random.Real(0, 1);
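// Note: as written, this succeeds when the random roll exceeds the probability - e.g., a level-24
// pet with 100 Dual Wield skill goes negative on 1 - ((100 + 24) / 400) = 69% of rolls - which is
// inverted relative to the bot's own dual-wield check above.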
if(DualWieldProbability < 0) {
botPet->Attack(botPet->GetTarget(), EQ::invslot::slotSecondary);
if (botPet->CanThisClassDoubleAttack()) {
int32 RandRoll = zone->random.Int(0, 99);
if (RandRoll < (botPet->GetLevel() + 20))
botPet->Attack(botPet->GetTarget(), EQ::invslot::slotSecondary);
}
}
}
}
if(!botPet->GetOwner())
return;
// Special attack
botPet->DoClassAttacks(botPet->GetTarget());
}
// See if the pet can cast any spell
botPet->AI_EngagedCastCheck();
}
} else {
// Now, if we cannot reach our target
if (!botPet->HateSummon()) {
if(botPet->GetTarget() && botPet->AI_PursueCastCheck()) {}
else if (botPet->GetTarget() && botPet->GetAIMovementTimer()->Check()) {
botPet->SetRunAnimSpeed(0);
if(!botPet->IsRooted()) {
LogAI("Pursuing [{}] while engaged", botPet->GetTarget()->GetCleanName());
botPet->RunTo(botPet->GetTarget()->GetX(), botPet->GetTarget()->GetY(), botPet->GetTarget()->GetZ());
return;
} else {
botPet->SetHeading(botPet->GetTarget()->GetHeading());
if(moved) {
moved = false;
StopNavigation();
botPet->StopNavigation();
}
}
}
}
}
} else {
// Ok, if we're not engaged, handle our standing orders..
if(botPet->GetTarget() != botPet->GetOwner())
botPet->SetTarget(botPet->GetOwner());
if(!IsMoving())
botPet->AI_IdleCastCheck();
if(botPet->GetAIMovementTimer()->Check()) {
switch(pStandingPetOrder) {
case SPO_Follow: {
float dist = DistanceSquared(botPet->GetPosition(), botPet->GetTarget()->GetPosition());
botPet->SetRunAnimSpeed(0);
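// 184 is a squared distance: the pet starts running back once it is ~13.6 units from its follow target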
if(dist > 184) {
botPet->RunTo(botPet->GetTarget()->GetX(), botPet->GetTarget()->GetY(), botPet->GetTarget()->GetZ());
return;
} else {
botPet->SetHeading(botPet->GetTarget()->GetHeading());
if(moved) {
moved = false;
StopNavigation();
botPet->StopNavigation();
}
}
break;
}
case SPO_Sit:
botPet->SetAppearance(eaSitting);
break;
case SPO_Guard:
botPet->NextGuardPosition();
break;
}
}
}
}
void Bot::Depop() {
WipeHateList();
entity_list.RemoveFromHateLists(this);
if(HasGroup())
Bot::RemoveBotFromGroup(this, GetGroup());
if(HasPet())
GetPet()->Depop();
_botOwner = 0;
_botOwnerCharacterID = 0;
_previousTarget = 0;
NPC::Depop(false);
}
bool Bot::Spawn(Client* botCharacterOwner) {
if(GetBotID() > 0 && _botOwnerCharacterID > 0 && botCharacterOwner && botCharacterOwner->CharacterID() == _botOwnerCharacterID) {
// Rename the bot to make sure that Mob::GetName() matches Mob::GetCleanName() so we don't have a bot named "Jesuschrist001"
strcpy(name, GetCleanName());
// Get the zone id this bot spawned in
_lastZoneId = GetZoneID();
// this change propagates to Bot::FillSpawnStruct()
this->helmtexture = 0; //0xFF;
this->texture = 0; //0xFF;
if(this->Save())
this->GetBotOwner()->CastToClient()->Message(Chat::White, "%s saved.", this->GetCleanName());
else
this->GetBotOwner()->CastToClient()->Message(Chat::Red, "%s save failed!", this->GetCleanName());
// Spawn the bot at the bot owner's loc
this->m_Position.x = botCharacterOwner->GetX();
this->m_Position.y = botCharacterOwner->GetY();
this->m_Position.z = botCharacterOwner->GetZ();
// Make the bot look at the bot owner
FaceTarget(botCharacterOwner);
UpdateEquipmentLight();
UpdateActiveLight();
this->m_targetable = true;
entity_list.AddBot(this, true, true);
// Load pet
LoadPet();
SentPositionPacket(0.0f, 0.0f, 0.0f, 0.0f, 0);
ping_timer.Start(8000);
// there is something askew with spawn struct appearance fields...
// I re-enabled this until I can sort it out
uint32 itemID = 0;
uint8 materialFromSlot = 0xFF;
for (int i = EQ::invslot::EQUIPMENT_BEGIN; i <= EQ::invslot::EQUIPMENT_END; ++i) {
itemID = GetBotItemBySlot(i);
if(itemID != 0) {
materialFromSlot = EQ::InventoryProfile::CalcMaterialFromSlot(i);
if(materialFromSlot != 0xFF)
this->SendWearChange(materialFromSlot);
}
}
return true;
}
return false;
}
// Deletes the inventory record for the specified item from the database for this bot.
void Bot::RemoveBotItemBySlot(uint32 slotID, std::string *errorMessage)
{
if(!GetBotID())
return;
if(!database.botdb.DeleteItemBySlot(GetBotID(), slotID))
*errorMessage = BotDatabase::fail::DeleteItemBySlot();
m_inv.DeleteItem(slotID);
UpdateEquipmentLight();
}
// Retrieves all the inventory records from the database for this bot.
void Bot::GetBotItems(EQ::InventoryProfile &inv, std::string* errorMessage)
{
if(!GetBotID())
return;
if (!database.botdb.LoadItems(GetBotID(), inv)) {
*errorMessage = BotDatabase::fail::LoadItems();
return;
}
UpdateEquipmentLight();
}
// Returns the item id from the database for the specified equipment slot on this bot.
uint32 Bot::GetBotItemBySlot(uint32 slotID)
{
uint32 item_id = 0;
if(!GetBotID())
return item_id;
if (!database.botdb.LoadItemBySlot(GetBotID(), slotID, item_id)) {
if (GetBotOwner() && GetBotOwner()->IsClient())
GetBotOwner()->CastToClient()->Message(Chat::Red, "%s", BotDatabase::fail::LoadItemBySlot());
}
return item_id;
}
void Bot::SetLevel(uint8 in_level, bool command) {
if(in_level > 0)
Mob::SetLevel(in_level, command);
}
void Bot::FillSpawnStruct(NewSpawn_Struct* ns, Mob* ForWho) {
if(ns) {
Mob::FillSpawnStruct(ns, ForWho);
ns->spawn.afk = 0;
ns->spawn.lfg = 0;
ns->spawn.anon = 0;
ns->spawn.gm = 0;
if(IsInAGuild())
ns->spawn.guildID = GuildID();
else
ns->spawn.guildID = 0xFFFFFFFF; // 0xFFFFFFFF = NO GUILD, 0 = Unknown Guild
ns->spawn.is_npc = 0; // 0=no, 1=yes
ns->spawn.is_pet = 0;
ns->spawn.guildrank = 0;
ns->spawn.showhelm = GetShowHelm() ? 1 : 0;
ns->spawn.flymode = 0;
ns->spawn.size = 0;
ns->spawn.NPC = 0; // 0=player,1=npc,2=pc corpse,3=npc corpse
UpdateActiveLight();
ns->spawn.light = m_Light.Type[EQ::lightsource::LightActive];
ns->spawn.helm = helmtexture; //(GetShowHelm() ? helmtexture : 0); //0xFF;
ns->spawn.equip_chest2 = texture; //0xFF;
ns->spawn.show_name = true;
strcpy(ns->spawn.lastName, GetSurname().c_str());
strcpy(ns->spawn.title, GetTitle().c_str());
strcpy(ns->spawn.suffix, GetSuffix().c_str());
const EQ::ItemData* item = nullptr;
const EQ::ItemInstance* inst = nullptr;
uint32 spawnedbotid = 0;
spawnedbotid = this->GetBotID();
for (int i = EQ::textures::textureBegin; i < EQ::textures::weaponPrimary; i++) {
inst = GetBotItem(i);
if (inst) {
item = inst->GetItem();
if (item != 0) {
ns->spawn.equipment.Slot[i].Material = item->Material;
ns->spawn.equipment.Slot[i].EliteModel = item->EliteMaterial;
ns->spawn.equipment.Slot[i].HerosForgeModel = item->HerosForgeModel;
if (armor_tint.Slot[i].Color)
ns->spawn.equipment_tint.Slot[i].Color = armor_tint.Slot[i].Color;
else
ns->spawn.equipment_tint.Slot[i].Color = item->Color;
} else {
if (armor_tint.Slot[i].Color)
ns->spawn.equipment_tint.Slot[i].Color = armor_tint.Slot[i].Color;
}
}
}
inst = GetBotItem(EQ::invslot::slotPrimary);
if(inst) {
item = inst->GetItem();
if(item) {
if(strlen(item->IDFile) > 2)
ns->spawn.equipment.Primary.Material = atoi(&item->IDFile[2]);
ns->spawn.equipment_tint.Primary.Color = GetEquipmentColor(EQ::textures::weaponPrimary);
}
}
inst = GetBotItem(EQ::invslot::slotSecondary);
if(inst) {
item = inst->GetItem();
if(item) {
if(strlen(item->IDFile) > 2)
ns->spawn.equipment.Secondary.Material = atoi(&item->IDFile[2]);
ns->spawn.equipment_tint.Secondary.Color = GetEquipmentColor(EQ::textures::weaponSecondary);
}
}
}
}
Bot* Bot::LoadBot(uint32 botID)
{
Bot* loaded_bot = nullptr;
if (!botID)
return loaded_bot;
if (!database.botdb.LoadBot(botID, loaded_bot)) // TODO: Consider update to message handler
return loaded_bot;
return loaded_bot;
}
// Load and spawn all zoned bots by bot owner character
void Bot::LoadAndSpawnAllZonedBots(Client* botOwner) {
if(botOwner) {
if(botOwner->HasGroup()) {
Group* g = botOwner->GetGroup();
if(g) {
uint32 TempGroupId = g->GetID();
std::list<uint32> ActiveBots;
// Modified LoadGroupedBotsByGroupID to require a CharacterID
if (!database.botdb.LoadGroupedBotsByGroupID(botOwner->CharacterID(), TempGroupId, ActiveBots)) {
botOwner->Message(Chat::Red, "%s", BotDatabase::fail::LoadGroupedBotsByGroupID());
return;
}
if(!ActiveBots.empty()) {
for(std::list<uint32>::iterator itr = ActiveBots.begin(); itr != ActiveBots.end(); ++itr) {
Bot* activeBot = Bot::LoadBot(*itr);
if (!activeBot)
continue;
if (!activeBot->Spawn(botOwner)) {
safe_delete(activeBot);
continue;
}
g->UpdatePlayer(activeBot);
// Follow the bot owner, not the group leader, since we just zoned with our owner.
if (g->IsGroupMember(botOwner) && g->IsGroupMember(activeBot))
activeBot->SetFollowID(botOwner->GetID());
if(!botOwner->HasGroup())
database.SetGroupID(activeBot->GetCleanName(), 0, activeBot->GetBotID());
}
}
}
}
}
}
// Returns TRUE if there is at least 1 bot in the specified group
bool Bot::GroupHasBot(Group* group) {
bool Result = false;
if(group) {
for(int Counter = 0; Counter < MAX_GROUP_MEMBERS; Counter++) {
if (group->members[Counter] == nullptr)
continue;
if(group->members[Counter]->IsBot()) {
Result = true;
break;
}
}
}
return Result;
}
uint32 Bot::SpawnedBotCount(uint32 botOwnerCharacterID) {
uint32 Result = 0;
if(botOwnerCharacterID > 0) {
std::list<Bot*> SpawnedBots = entity_list.GetBotsByBotOwnerCharacterID(botOwnerCharacterID);
Result = SpawnedBots.size();
}
return Result;
}
void Bot::LevelBotWithClient(Client* client, uint8 level, bool sendlvlapp) {
// This essentially performs a '#bot update,' with appearance packets, based on the current methods.
// This should not be called outside of Client::SetEXP() due to its lack of rule checks.
if(client) {
std::list<Bot*> blist = entity_list.GetBotsByBotOwnerCharacterID(client->CharacterID());
for(std::list<Bot*>::iterator biter = blist.begin(); biter != blist.end(); ++biter) {
Bot* bot = *biter;
if(bot && (bot->GetLevel() != client->GetLevel())) {
bot->SetPetChooser(false); // not sure what this does, but was in bot 'update' code
bot->CalcBotStats(client->GetBotOption(Client::booStatsUpdate));
if(sendlvlapp)
bot->SendLevelAppearance();
// modified from Client::SetLevel()
bot->SendAppearancePacket(AT_WhoLevel, level, true, true); // who level change
}
}
blist.clear();
}
}
void Bot::SendBotArcheryWearChange(uint8 material_slot, uint32 material, uint32 color) {
EQApplicationPacket* outapp = new EQApplicationPacket(OP_WearChange, sizeof(WearChange_Struct));
WearChange_Struct* wc = (WearChange_Struct*)outapp->pBuffer;
wc->spawn_id = GetID();
wc->material = material;
wc->color.Color = color;
wc->wear_slot_id = material_slot;
entity_list.QueueClients(this, outapp);
safe_delete(outapp);
}
// Returns the item instance at the specified slot in the bot inventory collection, or nullptr if the slot is empty.
EQ::ItemInstance* Bot::GetBotItem(uint32 slotID) {
return m_inv.GetItem(slotID);
}
// Adds the specified item to the bot's NPC equipment array and to the bot inventory collection.
void Bot::BotAddEquipItem(int slot, uint32 id) {
// this can be called before the bot is assigned an entity id,
// causing packets to be sent out to the zone with an id of '0'
if(slot > 0 && id > 0) {
uint8 materialFromSlot = EQ::InventoryProfile::CalcMaterialFromSlot(slot);
if (materialFromSlot != EQ::textures::materialInvalid) {
equipment[slot] = id; // npc has more than just material slots. Valid material should mean valid inventory index
if (GetID()) // temp hack fix
SendWearChange(materialFromSlot);
}
UpdateEquipmentLight();
if (UpdateActiveLight())
if (GetID()) // temp hack fix
SendAppearancePacket(AT_Light, GetActiveLightType());
}
}
// Erases the specified item from the bot's NPC equipment array and from the bot inventory collection.
void Bot::BotRemoveEquipItem(int16 slot)
{
uint8 material_slot = EQ::InventoryProfile::CalcMaterialFromSlot(slot);
if (material_slot != EQ::textures::materialInvalid) {
equipment[slot] = 0; // npc has more than just material slots. Valid material should mean valid inventory index
SendWearChange(material_slot);
if (material_slot == EQ::textures::armorChest)
SendWearChange(EQ::textures::armorArms);
}
UpdateEquipmentLight();
if (UpdateActiveLight())
SendAppearancePacket(AT_Light, GetActiveLightType());
}
void Bot::BotTradeSwapItem(Client* client, int16 lootSlot, const EQ::ItemInstance* inst, const EQ::ItemInstance* inst_swap, uint32 equipableSlots, std::string* errorMessage, bool swap) {
if(!errorMessage->empty())
return;
client->PushItemOnCursor(*inst_swap, true);
// Remove the item from the bot and from the bot's database records
RemoveBotItemBySlot(lootSlot, errorMessage);
if(!errorMessage->empty())
return;
this->BotRemoveEquipItem(lootSlot);
if(swap) {
BotTradeAddItem(inst->GetItem()->ID, inst, inst->GetCharges(), equipableSlots, lootSlot, errorMessage);
if(!errorMessage->empty())
return;
}
}
void Bot::BotTradeAddItem(uint32 id, const EQ::ItemInstance* inst, int16 charges, uint32 equipableSlots, uint16 lootSlot, std::string* errorMessage, bool addToDb)
{
if(addToDb) {
if (!database.botdb.SaveItemBySlot(this, lootSlot, inst)) {
*errorMessage = BotDatabase::fail::SaveItemBySlot();
return;
}
m_inv.PutItem(lootSlot, *inst);
}
this->BotAddEquipItem(lootSlot, id);
}
bool Bot::RemoveBotFromGroup(Bot* bot, Group* group) {
bool Result = false;
if(bot && group) {
if(bot->HasGroup()) {
if(!group->IsLeader(bot)) {
bot->SetFollowID(0);
if(group->DelMember(bot))
database.SetGroupID(bot->GetCleanName(), 0, bot->GetBotID());
} else {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(!group->members[i])
continue;
group->members[i]->SetFollowID(0);
}
group->DisbandGroup();
database.SetGroupID(bot->GetCleanName(), 0, bot->GetBotID());
}
Result = true;
}
}
return Result;
}
bool Bot::AddBotToGroup(Bot* bot, Group* group) {
bool Result = false;
if(bot && group) {
if(!bot->HasGroup()) {
// Add bot to this group
if(group->AddMember(bot)) {
if(group->GetLeader()) {
bot->SetFollowID(group->GetLeader()->GetID());
// Need to send this only once when a group is formed with a bot so the client knows it is also the group leader
if(group->GroupCount() == 2 && group->GetLeader()->IsClient()) {
group->UpdateGroupAAs();
Mob *TempLeader = group->GetLeader();
group->SendUpdate(groupActUpdate, TempLeader);
}
}
Result = true;
}
}
}
return Result;
}
// Completes a trade with a client bot owner
void Bot::FinishTrade(Client* client, BotTradeType tradeType)
{
if (!client || (GetOwner() != client) || client->GetTradeskillObject() || client->trade->state == Trading) {
if (client)
client->ResetTrade();
return;
}
// the following notes are either incorrect or obsolete
if (tradeType == BotTradeClientNormal) {
// Items being traded are found in the normal trade window used to trade between a Client and a Client or NPC
// Items in this mode are found in slot ids 3000 thru 3003 - thought bots used the full 8-slot window..?
PerformTradeWithClient(EQ::invslot::TRADE_BEGIN, EQ::invslot::TRADE_END, client); // {3000..3007}
}
else if (tradeType == BotTradeClientNoDropNoTrade) {
// Items being traded are found on the Client's cursor slot, slot id 30. This item can be either a single item or it can be a bag.
// If it is a bag, then we have to search for items in slots 331 thru 340
PerformTradeWithClient(EQ::invslot::slotCursor, EQ::invslot::slotCursor, client);
// TODO: Add logic here to test if the item in SLOT_CURSOR is a container type, if it is then we need to call the following:
// PerformTradeWithClient(331, 340, client);
}
}
// Performs the actual trade action with a client bot owner
void Bot::PerformTradeWithClient(int16 beginSlotID, int16 endSlotID, Client* client)
{
using namespace EQ;
struct ClientTrade {
const ItemInstance* tradeItemInstance;
int16 fromClientSlot;
int16 toBotSlot;
int adjustStackSize;
std::string acceptedItemName;
ClientTrade(const ItemInstance* item, int16 from, const char* name = "") : tradeItemInstance(item), fromClientSlot(from), toBotSlot(invslot::SLOT_INVALID), adjustStackSize(0), acceptedItemName(name) { }
};
struct ClientReturn {
const ItemInstance* returnItemInstance;
int16 fromBotSlot;
int16 toClientSlot;
int adjustStackSize;
std::string failedItemName;
ClientReturn(const ItemInstance* item, int16 from, const char* name = "") : returnItemInstance(item), fromBotSlot(from), toClientSlot(invslot::SLOT_INVALID), adjustStackSize(0), failedItemName(name) { }
};
static const int16 bot_equip_order[invslot::EQUIPMENT_COUNT] = {
invslot::slotCharm, invslot::slotEar1, invslot::slotHead, invslot::slotFace,
invslot::slotEar2, invslot::slotNeck, invslot::slotShoulders, invslot::slotArms,
invslot::slotBack, invslot::slotWrist1, invslot::slotWrist2, invslot::slotRange,
invslot::slotHands, invslot::slotPrimary, invslot::slotSecondary, invslot::slotFinger1,
invslot::slotFinger2, invslot::slotChest, invslot::slotLegs, invslot::slotFeet,
invslot::slotWaist, invslot::slotPowerSource, invslot::slotAmmo
};
enum { stageStackable = 0, stageEmpty, stageReplaceable };
if (!client) {
Emote("NO CLIENT");
return;
}
if (client != GetOwner()) {
client->Message(Chat::Red, "You are not the owner of this bot - Trade Canceled.");
client->ResetTrade();
return;
}
if ((beginSlotID != invslot::TRADE_BEGIN) && (beginSlotID != invslot::slotCursor)) {
client->Message(Chat::Red, "Trade request processing from illegal 'begin' slot - Trade Canceled.");
client->ResetTrade();
return;
}
if ((endSlotID != invslot::TRADE_END) && (endSlotID != invslot::slotCursor)) {
client->Message(Chat::Red, "Trade request processing from illegal 'end' slot - Trade Canceled.");
client->ResetTrade();
return;
}
if (((beginSlotID == invslot::slotCursor) && (endSlotID != invslot::slotCursor)) || ((beginSlotID != invslot::slotCursor) && (endSlotID == invslot::slotCursor))) {
client->Message(Chat::Red, "Trade request processing illegal slot range - Trade Canceled.");
client->ResetTrade();
return;
}
if (endSlotID < beginSlotID) {
client->Message(Chat::Red, "Trade request processing in reverse slot order - Trade Canceled.");
client->ResetTrade();
return;
}
if (client->IsEngaged() || IsEngaged()) {
client->Message(Chat::Yellow, "You may not perform a trade while engaged - Trade Canceled!");
client->ResetTrade();
return;
}
std::list<ClientTrade> client_trade;
std::list<ClientReturn> client_return;
// pre-checks for incoming illegal transfers
for (int16 trade_index = beginSlotID; trade_index <= endSlotID; ++trade_index) {
auto trade_instance = client->GetInv()[trade_index];
if (!trade_instance)
continue;
if (!trade_instance->GetItem()) {
// TODO: add logging
client->Message(Chat::Red, "A server error was encountered while processing client slot %i - Trade Canceled.", trade_index);
client->ResetTrade();
return;
}
if ((trade_index != invslot::slotCursor) && !trade_instance->IsDroppable()) {
// TODO: add logging
client->Message(Chat::Red, "Trade hack detected - Trade Canceled.");
client->ResetTrade();
return;
}
if (trade_instance->IsStackable() && (trade_instance->GetCharges() < trade_instance->GetItem()->StackSize)) { // temp until partial stacks are implemented
client->Message(Chat::Yellow, "'%s' is only a partially stacked item - Trade Canceled!", trade_instance->GetItem()->Name);
client->ResetTrade();
return;
}
if (CheckLoreConflict(trade_instance->GetItem())) {
client->Message(Chat::Yellow, "This bot already has lore equipment matching the item '%s' - Trade Canceled!", trade_instance->GetItem()->Name);
client->ResetTrade();
return;
}
if (!trade_instance->IsType(item::ItemClassCommon)) {
client_return.push_back(ClientReturn(trade_instance, trade_index, trade_instance->GetItem()->Name));
continue;
}
if (!trade_instance->IsEquipable(GetBaseRace(), GetClass()) || (GetLevel() < trade_instance->GetItem()->ReqLevel)) { // deity checks will be handled within IsEquipable()
client_return.push_back(ClientReturn(trade_instance, trade_index, trade_instance->GetItem()->Name));
continue;
}
client_trade.push_back(ClientTrade(trade_instance, trade_index, trade_instance->GetItem()->Name));
}
// check for incoming lore hacks
for (auto& trade_iterator : client_trade) {
if (!trade_iterator.tradeItemInstance->GetItem()->LoreFlag)
continue;
for (const auto& check_iterator : client_trade) {
if (check_iterator.fromClientSlot == trade_iterator.fromClientSlot)
continue;
if (!check_iterator.tradeItemInstance->GetItem()->LoreFlag)
continue;
if ((trade_iterator.tradeItemInstance->GetItem()->LoreGroup == -1) && (check_iterator.tradeItemInstance->GetItem()->ID == trade_iterator.tradeItemInstance->GetItem()->ID)) {
// TODO: add logging
client->Message(Chat::Red, "Trade hack detected - Trade Canceled.");
client->ResetTrade();
return;
}
if ((trade_iterator.tradeItemInstance->GetItem()->LoreGroup > 0) && (check_iterator.tradeItemInstance->GetItem()->LoreGroup == trade_iterator.tradeItemInstance->GetItem()->LoreGroup)) {
// TODO: add logging
client->Message(Chat::Red, "Trade hack detected - Trade Canceled.");
client->ResetTrade();
return;
}
}
}
// find equipment slots
const bool can_dual_wield = (GetSkill(EQ::skills::SkillDualWield) > 0);
bool melee_2h_weapon = false;
bool melee_secondary = false;
//for (unsigned stage_loop = stageStackable; stage_loop <= stageReplaceable; ++stage_loop) { // awaiting implementation
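// two assignment passes: stageEmpty fills only vacant bot slots; stageReplaceable may displace an equipped item
// (the displaced item is queued to client_return). stageStackable is reserved for future partial-stack support.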
for (unsigned stage_loop = stageEmpty; stage_loop <= stageReplaceable; ++stage_loop) {
for (auto& trade_iterator : client_trade) {
if (trade_iterator.toBotSlot != invslot::SLOT_INVALID)
continue;
auto trade_instance = trade_iterator.tradeItemInstance;
//if ((stage_loop == stageStackable) && !trade_instance->IsStackable())
// continue;
for (auto index : bot_equip_order) {
if (!(trade_instance->GetItem()->Slots & (1 << index)))
continue;
//if (stage_loop == stageStackable) {
// // TODO: implement
// continue;
//}
if (stage_loop != stageReplaceable) {
if (m_inv[index])
continue;
}
bool slot_taken = false;
for (const auto& check_iterator : client_trade) {
if (check_iterator.fromClientSlot == trade_iterator.fromClientSlot)
continue;
if (check_iterator.toBotSlot == index) {
slot_taken = true;
break;
}
}
if (slot_taken)
continue;
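// weapon pairing rules: a 2h weapon in the primary slot requires the secondary to be vacated, and a filled secondary blocks 2h primaries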
if (index == invslot::slotPrimary) {
if (trade_instance->GetItem()->IsType2HWeapon()) {
if (!melee_secondary) {
melee_2h_weapon = true;
auto equipped_secondary_weapon = m_inv[invslot::slotSecondary];
if (equipped_secondary_weapon)
client_return.push_back(ClientReturn(equipped_secondary_weapon, invslot::slotSecondary));
}
else {
continue;
}
}
}
if (index == invslot::slotSecondary) {
if (!melee_2h_weapon) {
if ((can_dual_wield && trade_instance->GetItem()->IsType1HWeapon()) || trade_instance->GetItem()->IsTypeShield() || !trade_instance->IsWeapon()) {
melee_secondary = true;
auto equipped_primary_weapon = m_inv[invslot::slotPrimary];
if (equipped_primary_weapon && equipped_primary_weapon->GetItem()->IsType2HWeapon())
client_return.push_back(ClientReturn(equipped_primary_weapon, invslot::slotPrimary));
}
else {
continue;
}
}
else {
continue;
}
}
trade_iterator.toBotSlot = index;
if (m_inv[index])
client_return.push_back(ClientReturn(m_inv[index], index));
break;
}
}
}
// move unassignable items from trade list to return list
for (std::list<ClientTrade>::iterator trade_iterator = client_trade.begin(); trade_iterator != client_trade.end();) {
if (trade_iterator->toBotSlot == invslot::SLOT_INVALID) {
client_return.push_back(ClientReturn(trade_iterator->tradeItemInstance, trade_iterator->fromClientSlot, trade_iterator->tradeItemInstance->GetItem()->Name));
trade_iterator = client_trade.erase(trade_iterator);
continue;
}
++trade_iterator;
}
// out-going return checks for client
for (auto& return_iterator : client_return) {
auto return_instance = return_iterator.returnItemInstance;
if (!return_instance)
continue;
if (!return_instance->GetItem()) {
// TODO: add logging
client->Message(Chat::Red, "A server error was encountered while processing bot slot %i - Trade Canceled.", return_iterator.fromBotSlot);
client->ResetTrade();
return;
}
// non-failing checks above are causing this to trigger (i.e., !ItemClassCommon and !IsEquipable{race, class, min_level})
// this process is hindered by not having bots use the inventory trade method (TODO: implement bot inventory use)
if (client->CheckLoreConflict(return_instance->GetItem())) {
client->Message(Chat::Yellow, "You already have lore equipment matching the item '%s' - Trade Canceled!", return_instance->GetItem()->Name);
client->ResetTrade();
return;
}
if (return_iterator.fromBotSlot == invslot::slotCursor) {
return_iterator.toClientSlot = invslot::slotCursor;
}
else {
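// search the client's general inventory (and bag contents) for a free slot for this return item,
// skipping candidate slots already claimed by other returns in this trade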
int16 client_search_general = invslot::GENERAL_BEGIN;
uint8 client_search_bag = invbag::SLOT_BEGIN;
bool run_search = true;
while (run_search) {
int16 client_test_slot = client->GetInv().FindFreeSlotForTradeItem(return_instance, client_search_general, client_search_bag);
if (client_test_slot == invslot::SLOT_INVALID) {
run_search = false;
continue;
}
bool slot_taken = false;
for (const auto& check_iterator : client_return) {
if (check_iterator.fromBotSlot == return_iterator.fromBotSlot)
continue;
if ((check_iterator.toClientSlot == client_test_slot) && (client_test_slot != invslot::slotCursor)) {
slot_taken = true;
break;
}
}
if (slot_taken) {
if ((client_test_slot >= invslot::GENERAL_BEGIN) && (client_test_slot <= invslot::GENERAL_END)) {
++client_search_general;
client_search_bag = invbag::SLOT_BEGIN;
}
else {
client_search_general = InventoryProfile::CalcSlotId(client_test_slot);
client_search_bag = InventoryProfile::CalcBagIdx(client_test_slot);
++client_search_bag;
if (client_search_bag >= invbag::SLOT_COUNT) {
// incrementing this past invslot::GENERAL_END triggers the (client_test_slot == invslot::SLOT_INVALID) check at the beginning of the search loop
// ideally, this will never occur because we always start fresh with each loop iteration and should receive SLOT_CURSOR as a return value
++client_search_general;
client_search_bag = invbag::SLOT_BEGIN;
}
}
continue;
}
return_iterator.toClientSlot = client_test_slot;
run_search = false;
}
}
if (return_iterator.toClientSlot == invslot::SLOT_INVALID) {
client->Message(Chat::Yellow, "You do not have room to complete this trade - Trade Canceled!");
client->ResetTrade();
return;
}
}
// perform actual trades
// returns first since clients have trade slots and bots do not
for (auto& return_iterator : client_return) {
// TODO: code for stackables
if (return_iterator.fromBotSlot == invslot::slotCursor) { // failed trade return
// no movement action required
}
else if ((return_iterator.fromBotSlot >= invslot::TRADE_BEGIN) && (return_iterator.fromBotSlot <= invslot::TRADE_END)) { // failed trade returns
client->PutItemInInventory(return_iterator.toClientSlot, *return_iterator.returnItemInstance);
client->SendItemPacket(return_iterator.toClientSlot, return_iterator.returnItemInstance, ItemPacketTrade);
client->DeleteItemInInventory(return_iterator.fromBotSlot);
}
else { // successful trade returns
auto return_instance = m_inv.PopItem(return_iterator.fromBotSlot);
//if (*return_instance != *return_iterator.returnItemInstance) {
// // TODO: add logging
//}
if (!database.botdb.DeleteItemBySlot(GetBotID(), return_iterator.fromBotSlot))
client->Message(Chat::Red, "%s (slot: %i, name: '%s')", BotDatabase::fail::DeleteItemBySlot(), return_iterator.fromBotSlot, (return_instance ? return_instance->GetItem()->Name : "nullptr"));
BotRemoveEquipItem(return_iterator.fromBotSlot);
if (return_instance)
client->PutItemInInventory(return_iterator.toClientSlot, *return_instance, true);
InventoryProfile::MarkDirty(return_instance);
}
return_iterator.returnItemInstance = nullptr;
}
// trades can now go in as empty slot inserts
for (auto& trade_iterator : client_trade) {
// TODO: code for stackables
if (!database.botdb.SaveItemBySlot(this, trade_iterator.toBotSlot, trade_iterator.tradeItemInstance))
client->Message(Chat::Red, "%s (slot: %i, name: '%s')", BotDatabase::fail::SaveItemBySlot(), trade_iterator.toBotSlot, (trade_iterator.tradeItemInstance ? trade_iterator.tradeItemInstance->GetItem()->Name : "nullptr"));
m_inv.PutItem(trade_iterator.toBotSlot, *trade_iterator.tradeItemInstance);
this->BotAddEquipItem(trade_iterator.toBotSlot, (trade_iterator.tradeItemInstance ? trade_iterator.tradeItemInstance->GetID() : 0));
trade_iterator.tradeItemInstance = nullptr; // actual deletion occurs in client delete below
client->DeleteItemInInventory(trade_iterator.fromClientSlot, 0, (trade_iterator.fromClientSlot == EQ::invslot::slotCursor));
// the database currently has the unattuned item saved in inventory; it will be attuned on the next bot load
// this prevents unattuned item returns in the meantime (TODO: re-work process)
if (trade_iterator.toBotSlot >= invslot::EQUIPMENT_BEGIN && trade_iterator.toBotSlot <= invslot::EQUIPMENT_END) {
auto attune_item = m_inv.GetItem(trade_iterator.toBotSlot);
if (attune_item && attune_item->GetItem()->Attuneable)
attune_item->SetAttuned(true);
}
}
// trade messages
for (const auto& return_iterator : client_return) {
if (return_iterator.failedItemName.size())
client->Message(Chat::Tell, "%s tells you, \"%s, I can't use this '%s.'\"", GetCleanName(), client->GetName(), return_iterator.failedItemName.c_str());
}
for (const auto& trade_iterator : client_trade) {
if (trade_iterator.acceptedItemName.size())
client->Message(Chat::Tell, "%s tells you, \"Thank you for the '%s,' %s!\"", GetCleanName(), trade_iterator.acceptedItemName.c_str(), client->GetName());
}
size_t accepted_count = client_trade.size();
size_t returned_count = client_return.size();
client->Message(Chat::Lime, "Trade with '%s' resulted in %i accepted item%s, %i returned item%s.", GetCleanName(), (int)accepted_count, ((accepted_count == 1) ? "" : "s"), (int)returned_count, ((returned_count == 1) ? "" : "s"));
if (accepted_count)
CalcBotStats(client->GetBotOption(Client::booStatsUpdate));
}
bool Bot::Death(Mob *killerMob, int32 damage, uint16 spell_id, EQ::skills::SkillType attack_skill) {
if(!NPC::Death(killerMob, damage, spell_id, attack_skill))
return false;
Save();
Mob *my_owner = GetBotOwner();
if (my_owner && my_owner->IsClient() && my_owner->CastToClient()->GetBotOption(Client::booDeathMarquee)) {
if (killerMob)
my_owner->CastToClient()->SendMarqueeMessage(Chat::Red, 510, 0, 1000, 3000, StringFormat("%s has been slain by %s", GetCleanName(), killerMob->GetCleanName()));
else
my_owner->CastToClient()->SendMarqueeMessage(Chat::Red, 510, 0, 1000, 3000, StringFormat("%s has been slain", GetCleanName()));
}
Mob *give_exp = hate_list.GetDamageTopOnHateList(this);
Client *give_exp_client = nullptr;
if(give_exp && give_exp->IsClient())
give_exp_client = give_exp->CastToClient();
bool IsLdonTreasure = (this->GetClass() == LDON_TREASURE);
if(entity_list.GetCorpseByID(GetID()))
entity_list.GetCorpseByID(GetID())->Depop();
Group *g = GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i]) {
if(g->members[i] == this) {
// If the leader dies, make the next bot the leader
// and reset all bots followid
if(g->IsLeader(g->members[i])) {
if((i + 1) < MAX_GROUP_MEMBERS && g->members[i + 1]) {
g->SetLeader(g->members[i + 1]);
g->members[i + 1]->SetFollowID(g->members[i]->GetFollowID());
for(int j = 0; j < MAX_GROUP_MEMBERS; j++) {
if(g->members[j] && (g->members[j] != g->members[i + 1]))
g->members[j]->SetFollowID(g->members[i + 1]->GetID());
}
}
}
// delete from group data
RemoveBotFromGroup(this, g);
//Make sure group still exists if it doesnt they were already updated in RemoveBotFromGroup
g = GetGroup();
if (!g)
break;
// if group members exist below this one, move
// them all up one slot in the group list
int j = (i + 1);
for(; j < MAX_GROUP_MEMBERS; j++) {
if(g->members[j]) {
g->members[j-1] = g->members[j];
strcpy(g->membername[j-1], g->members[j]->GetCleanName());
memset(g->membername[j], 0, 64);
g->members[j] = nullptr;
}
}
// update the client group
EQApplicationPacket* outapp = new EQApplicationPacket(OP_GroupUpdate, sizeof(GroupJoin_Struct));
GroupJoin_Struct* gu = (GroupJoin_Struct*)outapp->pBuffer;
gu->action = groupActLeave;
strcpy(gu->membername, GetCleanName());
if(g) {
for(int k = 0; k < MAX_GROUP_MEMBERS; k++) {
if(g->members[k] && g->members[k]->IsClient())
g->members[k]->CastToClient()->QueuePacket(outapp);
}
}
safe_delete(outapp);
}
}
}
}
LeaveHealRotationMemberPool();
if ((GetPullingFlag() || GetReturningFlag()) && my_owner && my_owner->IsClient()) {
my_owner->CastToClient()->SetBotPulling(false);
}
entity_list.RemoveBot(this->GetID());
return true;
}
void Bot::Damage(Mob *from, int32 damage, uint16 spell_id, EQ::skills::SkillType attack_skill, bool avoidable, int8 buffslot, bool iBuffTic, eSpecialAttacks special) {
if(spell_id == 0)
spell_id = SPELL_UNKNOWN;
//handle EVENT_ATTACK. Resets after we have not been attacked for 12 seconds
if(attacked_timer.Check()) {
LogCombat("Triggering EVENT_ATTACK due to attack by [{}]", from->GetName());
parse->EventNPC(EVENT_ATTACK, this, from, "", 0);
}
attacked_timer.Start(CombatEventTimer_expire);
// if spell is lifetap add hp to the caster
if (spell_id != SPELL_UNKNOWN && IsLifetapSpell(spell_id)) {
int healed = GetActSpellHealing(spell_id, damage);
LogCombat("Applying lifetap heal of [{}] to [{}]", healed, GetCleanName());
HealDamage(healed);
entity_list.MessageClose(this, true, 300, Chat::Spells, "%s beams a smile at %s", GetCleanName(), from->GetCleanName() );
}
CommonDamage(from, damage, spell_id, attack_skill, avoidable, buffslot, iBuffTic, special);
if(GetHP() < 0) {
if(IsCasting())
InterruptSpell();
SetAppearance(eaDead);
}
SendHPUpdate();
if(this == from)
return;
// Aggro the bot's group members
if(IsGrouped()) {
Group *g = GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && g->members[i]->IsBot() && from && !g->members[i]->CheckAggro(from) && g->members[i]->IsAttackAllowed(from))
g->members[i]->AddToHateList(from, 1);
}
}
}
}
//void Bot::AddToHateList(Mob* other, uint32 hate = 0, int32 damage = 0, bool iYellForHelp = true, bool bFrenzy = false, bool iBuffTic = false)
void Bot::AddToHateList(Mob* other, uint32 hate, int32 damage, bool iYellForHelp, bool bFrenzy, bool iBuffTic, bool pet_command) {
Mob::AddToHateList(other, hate, damage, iYellForHelp, bFrenzy, iBuffTic, pet_command);
}
bool Bot::Attack(Mob* other, int Hand, bool FromRiposte, bool IsStrikethrough, bool IsFromSpell, ExtraAttackOptions *opts) {
if (!other) {
SetTarget(nullptr);
LogError("A null Mob object was passed to Bot::Attack for evaluation!");
return false;
}
if ((GetHP() <= 0) || (GetAppearance() == eaDead)) {
SetTarget(nullptr);
LogCombat("Attempted to attack [{}] while unconscious or, otherwise, appearing dead", other->GetCleanName());
return false;
}
//if(!GetTarget() || GetTarget() != other) // NPC::Attack() doesn't do this
// SetTarget(other);
// apparently, we always want our target to be 'other'; why not just set it?
SetTarget(other);
// unconditionally assigning the target is also cheaper than first comparing the current target and then assigning it.
LogCombat("Attacking [{}] with hand [{}] [{}]", other->GetCleanName(), Hand, (FromRiposte ? "(this is a riposte)" : ""));
if ((IsCasting() && (GetClass() != BARD) && !IsFromSpell) || (!IsAttackAllowed(other))) {
if(this->GetOwnerID())
entity_list.MessageClose(this, 1, 200, 10, "%s says, '%s is not a legal target master.'", this->GetCleanName(), this->GetTarget()->GetCleanName());
if(other) {
RemoveFromHateList(other);
LogCombat("I am not allowed to attack [{}]", other->GetCleanName());
}
return false;
}
if(DivineAura()) { // can't attack while invulnerable
LogCombat("Attack canceled, Divine Aura is in effect");
return false;
}
FaceTarget(GetTarget());
EQ::ItemInstance* weapon = nullptr;
if (Hand == EQ::invslot::slotPrimary) {
weapon = GetBotItem(EQ::invslot::slotPrimary);
OffHandAtk(false);
}
if (Hand == EQ::invslot::slotSecondary) {
weapon = GetBotItem(EQ::invslot::slotSecondary);
OffHandAtk(true);
}
if(weapon != nullptr) {
if (!weapon->IsWeapon()) {
LogCombat("Attack canceled, Item [{}] ([{}]) is not a weapon", weapon->GetItem()->Name, weapon->GetID());
return false;
}
LogCombat("Attacking with weapon: [{}] ([{}])", weapon->GetItem()->Name, weapon->GetID());
}
else
LogCombat("Attacking without a weapon");
// calculate attack_skill and skillinuse depending on hand and weapon
// also send Packet to near clients
DamageHitInfo my_hit;
my_hit.skill = AttackAnimation(Hand, weapon);
LogCombat("Attacking with [{}] in slot [{}] using skill [{}]", weapon?weapon->GetItem()->Name:"Fist", Hand, my_hit.skill);
// Now figure out damage
my_hit.damage_done = 1;
my_hit.min_damage = 0;
uint8 mylevel = GetLevel() ? GetLevel() : 1;
uint32 hate = 0;
if (weapon)
hate = (weapon->GetItem()->Damage + weapon->GetItem()->ElemDmgAmt);
my_hit.base_damage = GetWeaponDamage(other, weapon, &hate);
if (hate == 0 && my_hit.base_damage > 1)
hate = my_hit.base_damage;
// if weapon damage > 0 then we know we can hit the target with this weapon;
// otherwise we cannot, and damage is set to DMG_INVULNERABLE below
if (my_hit.base_damage > 0) {
my_hit.min_damage = 0;
// ***************************************************************
// *** Calculate the damage bonus, if applicable, for this hit ***
// ***************************************************************
int ucDamageBonus = 0; // declared outside the #ifndef below so the Sinister Strikes block still compiles when the bonus is disabled
#ifndef EQEMU_NO_WEAPON_DAMAGE_BONUS
// If you include the preprocessor directive "#define EQEMU_NO_WEAPON_DAMAGE_BONUS", that indicates that you do not
// want damage bonuses added to weapon damage at all. This feature was requested by ChaosSlayer on the EQEmu Forums.
//
// This is not recommended for normal usage, as the damage bonus represents a non-trivial component of the DPS output
// of weapons wielded by higher-level melee characters (especially for two-handed weapons).
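// for example, a build could disable the bonus either from the compiler command line (assuming a gcc/clang-style
// invocation, e.g. -DEQEMU_NO_WEAPON_DAMAGE_BONUS) or by defining the macro before this block is compiled.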
if (Hand == EQ::invslot::slotPrimary && GetLevel() >= 28 && IsWarriorClass()) {
// Damage bonuses apply only to hits from the main hand (Hand == MainPrimary) by characters level 28 and above
// who belong to a melee class. If we're here, then all of these conditions apply.
ucDamageBonus = GetWeaponDamageBonus(weapon ? weapon->GetItem() : (const EQ::ItemData*) nullptr);
my_hit.min_damage = ucDamageBonus;
hate += ucDamageBonus;
}
#endif
//Live AA - Sinister Strikes *Adds weapon damage bonus to offhand weapon.
if (Hand == EQ::invslot::slotSecondary) {
if (aabonuses.SecondaryDmgInc || itembonuses.SecondaryDmgInc || spellbonuses.SecondaryDmgInc){
ucDamageBonus = GetWeaponDamageBonus(weapon ? weapon->GetItem() : (const EQ::ItemData*) nullptr);
my_hit.min_damage = ucDamageBonus;
hate += ucDamageBonus;
}
}
LogCombat("Damage calculated: base [{}] min damage [{}] skill [{}]", my_hit.base_damage, my_hit.min_damage, my_hit.skill);
int hit_chance_bonus = 0;
my_hit.offense = offense(my_hit.skill);
my_hit.hand = Hand;
if (opts) {
my_hit.base_damage *= opts->damage_percent;
my_hit.base_damage += opts->damage_flat;
hate *= opts->hate_percent;
hate += opts->hate_flat;
hit_chance_bonus += opts->hit_chance;
}
my_hit.tohit = GetTotalToHit(my_hit.skill, hit_chance_bonus);
DoAttack(other, my_hit, opts);
LogCombat("Final damage after all reductions: [{}]", my_hit.damage_done);
} else {
my_hit.damage_done = DMG_INVULNERABLE;
}
// Hate generation is on a per-swing basis; regardless of a hit, miss, or block, it's always the same.
// If we are this far, it means we are at least making a swing.
other->AddToHateList(this, hate);
///////////////////////////////////////////////////////////
////// Send Attack Damage
///////////////////////////////////////////////////////////
other->Damage(this, my_hit.damage_done, SPELL_UNKNOWN, my_hit.skill);
if (GetHP() < 0)
return false;
MeleeLifeTap(my_hit.damage_done);
if (my_hit.damage_done > 0)
CheckNumHitsRemaining(NumHit::OutgoingHitSuccess);
CommonBreakInvisibleFromCombat();
if (spellbonuses.NegateIfCombat)
BuffFadeByEffect(SE_NegateIfCombat);
if(GetTarget())
TriggerDefensiveProcs(other, Hand, true, my_hit.damage_done);
if (my_hit.damage_done > 0)
return true;
else
return false;
}
int32 Bot::CalcBotAAFocus(focusType type, uint32 aa_ID, uint32 points, uint16 spell_id)
{
const SPDat_Spell_Struct &spell = spells[spell_id];
int32 value = 0;
int lvlModifier = 100;
int spell_level = 0;
int lvldiff = 0;
bool LimitSpellSkill = false;
bool SpellSkill_Found = false;
uint32 effect = 0;
int32 base_value = 0;
int32 limit_value = 0;
uint32 slot = 0;
bool LimitFound = false;
int FocusCount = 0;
auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa_ID, points);
auto ability = ability_rank.first;
auto rank = ability_rank.second;
if(!ability) {
return 0;
}
for(auto &eff : rank->effects) {
effect = eff.effect_id;
base_value = eff.base_value;
limit_value = eff.limit_value;
slot = eff.slot;
//AA foci can contain multiple focus effects within the same AA.
//To handle this we will not automatically return zero if a limit is found.
//Instead if limit is found and multiple effects, we will reset the limit check
//when the next valid focus effect is found.
if (IsFocusEffect(0, 0, true,effect) || (effect == SE_TriggerOnCast)) {
FocusCount++;
//If limit found on prior check next, else end loop.
if (FocusCount > 1) {
if (LimitFound) {
value = 0;
LimitFound = false;
}
else
break;
}
}
switch (effect) {
case SE_Blank:
break;
case SE_LimitResist:
if(base_value) {
if(spell.resist_type != base_value)
LimitFound = true;
}
break;
case SE_LimitInstant:
if(spell.buff_duration)
LimitFound = true;
break;
case SE_LimitMaxLevel:
spell_level = spell.classes[(GetClass() % 17) - 1];
lvldiff = spell_level - base_value;
//every level over the cap reduces the effect by limit_value percent, unless from a clicky when ItemCastsUseFocus is true
if(lvldiff > 0 && (spell_level <= RuleI(Character, MaxLevel) || RuleB(Character, ItemCastsUseFocus) == false)) {
if(limit_value > 0) {
lvlModifier -= (limit_value * lvldiff);
if(lvlModifier < 1)
LimitFound = true;
}
else
LimitFound = true;
}
break;
case SE_LimitMinLevel:
if((spell.classes[(GetClass() % 17) - 1]) < base_value)
LimitFound = true;
break;
case SE_LimitCastTimeMin:
if (spell.cast_time < base_value)
LimitFound = true;
break;
case SE_LimitSpell:
if(base_value < 0) {
if (spell_id == (base_value*-1))
LimitFound = true;
} else {
if (spell_id != base_value)
LimitFound = true;
}
break;
case SE_LimitMinDur:
if (base_value > CalcBuffDuration_formula(GetLevel(), spell.buff_duration_formula, spell.buff_duration))
LimitFound = true;
break;
case SE_LimitEffect:
if(base_value < 0) {
if(IsEffectInSpell(spell_id,(base_value*-1)))
LimitFound = true;
} else {
if(!IsEffectInSpell(spell_id,base_value))
LimitFound = true;
}
break;
case SE_LimitSpellType:
switch(base_value) {
case 0:
if (!IsDetrimentalSpell(spell_id))
LimitFound = true;
break;
case 1:
if (!IsBeneficialSpell(spell_id))
LimitFound = true;
break;
}
break;
case SE_LimitManaMin:
if(spell.mana < base_value)
LimitFound = true;
break;
case SE_LimitTarget:
if(base_value < 0) {
if(-base_value == spell.target_type)
LimitFound = true;
} else {
if(base_value != spell.target_type)
LimitFound = true;
}
break;
case SE_LimitCombatSkills:
if((base_value == 1 && !IsDiscipline(spell_id)) || (base_value == 0 && IsDiscipline(spell_id)))
LimitFound = true;
break;
case SE_LimitSpellGroup:
if((base_value > 0 && base_value != spell.spell_group) || (base_value < 0 && base_value == spell.spell_group))
LimitFound = true;
break;
case SE_LimitCastingSkill:
LimitSpellSkill = true;
if(base_value == spell.skill)
SpellSkill_Found = true;
break;
case SE_LimitClass:
//Do not use this limit more than once per spell. If multiple classes, treat the value as items would.
if (!PassLimitClass(base_value, GetClass()))
LimitFound = true;
break;
//Handle Focus Effects
case SE_ImprovedDamage:
if (type == focusImprovedDamage && base_value > value)
value = base_value;
break;
case SE_ImprovedDamage2:
if (type == focusImprovedDamage2 && base_value > value)
value = base_value;
break;
case SE_ImprovedHeal:
if (type == focusImprovedHeal && base_value > value)
value = base_value;
break;
case SE_ReduceManaCost:
if (type == focusManaCost)
value = base_value;
break;
case SE_IncreaseSpellHaste:
if (type == focusSpellHaste && base_value > value)
value = base_value;
break;
case SE_IncreaseSpellDuration:
if (type == focusSpellDuration && base_value > value)
value = base_value;
break;
case SE_SpellDurationIncByTic:
if (type == focusSpellDurByTic && base_value > value)
value = base_value;
break;
case SE_SwarmPetDuration:
if (type == focusSwarmPetDuration && base_value > value)
value = base_value;
break;
case SE_IncreaseRange:
if (type == focusRange && base_value > value)
value = base_value;
break;
case SE_ReduceReagentCost:
if (type == focusReagentCost && base_value > value)
value = base_value;
break;
case SE_PetPowerIncrease:
if (type == focusPetPower && base_value > value)
value = base_value;
break;
case SE_SpellResistReduction:
if (type == focusResistRate && base_value > value)
value = base_value;
break;
case SE_SpellHateMod:
if (type == focusSpellHateMod) {
if(value != 0) {
if(value > 0) {
if(base_value > value)
value = base_value;
} else {
if(base_value < value)
value = base_value;
}
}
else
value = base_value;
}
break;
case SE_ReduceReuseTimer: {
if(type == focusReduceRecastTime)
value = (base_value / 1000);
break;
}
case SE_TriggerOnCast: {
if(type == focusTriggerOnCast) {
if(zone->random.Int(0, 100) <= base_value)
value = limit_value;
else {
value = 0;
LimitFound = true;
}
}
break;
}
case SE_FcSpellVulnerability: {
if(type == focusSpellVulnerability)
value = base_value;
break;
}
case SE_BlockNextSpellFocus: {
if(type == focusBlockNextSpell) {
if(zone->random.Int(1, 100) <= base_value)
value = 1;
}
break;
}
case SE_FcTwincast: {
if(type == focusTwincast)
value = base_value;
break;
}
//case SE_SympatheticProc:
//{
// if(type == focusSympatheticProc)
// {
// float ProcChance, ProcBonus;
// int16 ProcRateMod = base1; //Baseline is 100 for most Sympathetic foci
// int32 cast_time = GetActSpellCasttime(spell_id, spells[spell_id].cast_time);
// GetSympatheticProcChances(ProcBonus, ProcChance, cast_time, ProcRateMod);
// if(zone->random.Real(0, 1) <= ProcChance)
// value = focus_id;
// else
// value = 0;
// }
// break;
//}
case SE_FcDamageAmt: {
if(type == focusFcDamageAmt)
value = base_value;
break;
}
case SE_FcDamageAmt2: {
if(type == focusFcDamageAmt2)
value = base_value;
break;
}
case SE_FcDamageAmtCrit: {
if(type == focusFcDamageAmtCrit)
value = base_value;
break;
}
case SE_FcDamageAmtIncoming: {
if(type == focusFcDamageAmtIncoming)
value = base_value;
break;
}
case SE_FcHealAmtIncoming:
if(type == focusFcHealAmtIncoming)
value = base_value;
break;
case SE_FcHealPctCritIncoming:
if (type == focusFcHealPctCritIncoming)
value = base_value;
break;
case SE_FcHealAmtCrit:
if(type == focusFcHealAmtCrit)
value = base_value;
break;
case SE_FcHealAmt:
if(type == focusFcHealAmt)
value = base_value;
break;
case SE_FcHealPctIncoming:
if(type == focusFcHealPctIncoming)
value = base_value;
break;
case SE_FcBaseEffects: {
if (type == focusFcBaseEffects)
value = base_value;
break;
}
case SE_FcDamagePctCrit: {
if(type == focusFcDamagePctCrit)
value = base_value;
break;
}
case SE_FcIncreaseNumHits: {
if(type == focusIncreaseNumHits)
value = base_value;
break;
}
}
}
//Check for spell skill limits.
if ((LimitSpellSkill) && (!SpellSkill_Found))
return 0;
if (LimitFound)
return 0;
return (value * lvlModifier / 100);
}
int32 Bot::GetBotFocusEffect(focusType bottype, uint16 spell_id) {
if (IsBardSong(spell_id) && bottype != focusFcBaseEffects)
return 0;
int32 realTotal = 0;
int32 realTotal2 = 0;
int32 realTotal3 = 0;
bool rand_effectiveness = false;
//Improved Healing, Damage & Mana Reduction are handled differently in that some are random percentages
//In these cases we need to find the most powerful effect, so that each piece of gear won't get its own chance
if(RuleB(Spells, LiveLikeFocusEffects) && (bottype == focusManaCost || bottype == focusImprovedHeal || bottype == focusImprovedDamage || bottype == focusImprovedDamage2 || bottype == focusResistRate))
rand_effectiveness = true;
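// with rand_effectiveness, each candidate focus is first scored at its maximum (best_focus = true);
// only the single strongest item/buff is then re-evaluated normally (rolled) further below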
//Check if an item focus effect exists for the bot.
if (itembonuses.FocusEffects[bottype]) {
const EQ::ItemData* TempItem = nullptr;
const EQ::ItemData* UsedItem = nullptr;
const EQ::ItemInstance* TempInst = nullptr;
uint16 UsedFocusID = 0;
int32 Total = 0;
int32 focus_max = 0;
int32 focus_max_real = 0;
//item focus
// are focus effects the same as bonus? (slotAmmo-excluded)
for (int x = EQ::invslot::EQUIPMENT_BEGIN; x <= EQ::invslot::EQUIPMENT_END; x++) {
TempItem = nullptr;
EQ::ItemInstance* ins = GetBotItem(x);
if (!ins)
continue;
TempItem = ins->GetItem();
if (TempItem && TempItem->Focus.Effect > 0 && TempItem->Focus.Effect != SPELL_UNKNOWN) {
if(rand_effectiveness) {
focus_max = CalcBotFocusEffect(bottype, TempItem->Focus.Effect, spell_id, true);
if ((focus_max > 0 && focus_max_real >= 0 && focus_max > focus_max_real) || (focus_max < 0 && focus_max < focus_max_real)) {
focus_max_real = focus_max;
UsedItem = TempItem;
UsedFocusID = TempItem->Focus.Effect;
}
} else {
Total = CalcBotFocusEffect(bottype, TempItem->Focus.Effect, spell_id);
if ((Total > 0 && realTotal >= 0 && Total > realTotal) || (Total < 0 && Total < realTotal)) {
realTotal = Total;
UsedItem = TempItem;
UsedFocusID = TempItem->Focus.Effect;
}
}
}
for (int y = EQ::invaug::SOCKET_BEGIN; y <= EQ::invaug::SOCKET_END; ++y) {
EQ::ItemInstance *aug = nullptr;
aug = ins->GetAugment(y);
if(aug) {
const EQ::ItemData* TempItemAug = aug->GetItem();
if (TempItemAug && TempItemAug->Focus.Effect > 0 && TempItemAug->Focus.Effect != SPELL_UNKNOWN) {
if(rand_effectiveness) {
focus_max = CalcBotFocusEffect(bottype, TempItemAug->Focus.Effect, spell_id, true);
if ((focus_max > 0 && focus_max_real >= 0 && focus_max > focus_max_real) || (focus_max < 0 && focus_max < focus_max_real)) {
focus_max_real = focus_max;
UsedItem = TempItem;
UsedFocusID = TempItemAug->Focus.Effect;
}
} else {
Total = CalcBotFocusEffect(bottype, TempItemAug->Focus.Effect, spell_id);
if ((Total > 0 && realTotal >= 0 && Total > realTotal) || (Total < 0 && Total < realTotal)) {
realTotal = Total;
UsedItem = TempItem;
UsedFocusID = TempItemAug->Focus.Effect;
}
}
}
}
}
}
if(UsedItem && rand_effectiveness && focus_max_real != 0)
realTotal = CalcBotFocusEffect(bottype, UsedFocusID, spell_id);
}
//Check if a spell focus effect exists for the bot.
if (spellbonuses.FocusEffects[bottype]) {
//Spell Focus
int32 Total2 = 0;
int32 focus_max2 = 0;
int32 focus_max_real2 = 0;
int buff_tracker = -1;
int buff_slot = 0;
uint32 focusspellid = 0;
uint32 focusspell_tracker = 0;
uint32 buff_max = GetMaxTotalSlots();
for (buff_slot = 0; buff_slot < buff_max; buff_slot++) {
focusspellid = buffs[buff_slot].spellid;
if (focusspellid == 0 || focusspellid >= SPDAT_RECORDS)
continue;
if(rand_effectiveness) {
focus_max2 = CalcBotFocusEffect(bottype, focusspellid, spell_id, true);
if ((focus_max2 > 0 && focus_max_real2 >= 0 && focus_max2 > focus_max_real2) || (focus_max2 < 0 && focus_max2 < focus_max_real2)) {
focus_max_real2 = focus_max2;
buff_tracker = buff_slot;
focusspell_tracker = focusspellid;
}
} else {
Total2 = CalcBotFocusEffect(bottype, focusspellid, spell_id);
if ((Total2 > 0 && realTotal2 >= 0 && Total2 > realTotal2) || (Total2 < 0 && Total2 < realTotal2)) {
realTotal2 = Total2;
buff_tracker = buff_slot;
focusspell_tracker = focusspellid;
}
}
}
if(focusspell_tracker && rand_effectiveness && focus_max_real2 != 0)
realTotal2 = CalcBotFocusEffect(bottype, focusspell_tracker, spell_id);
// For effects like gift of mana that only fire once, save the spellid into an array that consists of all available buff slots.
if(buff_tracker >= 0 && buffs[buff_tracker].hit_number > 0)
m_spellHitsLeft[buff_tracker] = focusspell_tracker;
}
// AA Focus
if (aabonuses.FocusEffects[bottype]) {
int32 Total3 = 0;
uint32 slots = 0;
uint32 aa_AA = 0;
uint32 aa_value = 0;
for(auto &aa : aa_ranks) {
auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa.first, aa.second.first);
auto ability = ability_rank.first;
auto rank = ability_rank.second;
if(!ability) {
continue;
}
aa_AA = ability->id;
aa_value = aa.second.first;
if (aa_AA < 1 || aa_value < 1)
continue;
Total3 = CalcBotAAFocus(bottype, aa_AA, aa_value, spell_id);
if (Total3 > 0 && realTotal3 >= 0 && Total3 > realTotal3) {
realTotal3 = Total3;
}
else if (Total3 < 0 && Total3 < realTotal3) {
realTotal3 = Total3;
}
}
}
if(bottype == focusReagentCost && IsSummonPetSpell(spell_id) && GetAA(aaElementalPact))
return 100;
if(bottype == focusReagentCost && (IsEffectInSpell(spell_id, SE_SummonItem) || IsSacrificeSpell(spell_id)))
return 0;
return (realTotal + realTotal2 + realTotal3); // include the AA focus total computed above
}
int32 Bot::CalcBotFocusEffect(focusType bottype, uint16 focus_id, uint16 spell_id, bool best_focus) {
if(!IsValidSpell(focus_id) || !IsValidSpell(spell_id))
return 0;
const SPDat_Spell_Struct &focus_spell = spells[focus_id];
const SPDat_Spell_Struct &spell = spells[spell_id];
int32 value = 0;
int lvlModifier = 100;
int spell_level = 0;
int lvldiff = 0;
bool LimitSpellSkill = false;
bool SpellSkill_Found = false;
for (int i = 0; i < EFFECT_COUNT; i++) {
switch (focus_spell.effect_id[i]) {
case SE_Blank:
break;
case SE_LimitResist:{
if(focus_spell.base_value[i]) {
if(spell.resist_type != focus_spell.base_value[i])
return 0;
}
break;
}
case SE_LimitInstant: {
if(spell.buff_duration)
return 0;
break;
}
case SE_LimitMaxLevel:{
if (IsNPC())
break;
spell_level = spell.classes[(GetClass() % 17) - 1];
lvldiff = (spell_level - focus_spell.base_value[i]);
if(lvldiff > 0 && (spell_level <= RuleI(Character, MaxLevel) || RuleB(Character, ItemCastsUseFocus) == false)) {
if(focus_spell.limit_value[i] > 0) {
lvlModifier -= (focus_spell.limit_value[i] * lvldiff);
if(lvlModifier < 1)
return 0;
}
else
return 0;
}
break;
}
case SE_LimitMinLevel:
if (IsNPC())
break;
if (spell.classes[(GetClass() % 17) - 1] < focus_spell.base_value[i])
return 0;
break;
case SE_LimitCastTimeMin:
if (spells[spell_id].cast_time < (uint32)focus_spell.base_value[i])
return 0;
break;
case SE_LimitSpell:
if(focus_spell.base_value[i] < 0) {
if (spell_id == (focus_spell.base_value[i] * -1))
return 0;
} else {
if (spell_id != focus_spell.base_value[i])
return 0;
}
break;
case SE_LimitMinDur:
if (focus_spell.base_value[i] > CalcBuffDuration_formula(GetLevel(), spell.buff_duration_formula, spell.buff_duration))
return 0;
break;
case SE_LimitEffect:
if(focus_spell.base_value[i] < 0) {
if(IsEffectInSpell(spell_id, (focus_spell.base_value[i] * -1))) // negative values exclude the listed effect
return 0;
} else {
if(focus_spell.base_value[i] == SE_SummonPet) {
if(!IsEffectInSpell(spell_id, SE_SummonPet) && !IsEffectInSpell(spell_id, SE_NecPet) && !IsEffectInSpell(spell_id, SE_SummonBSTPet)) {
return 0;
}
} else if(!IsEffectInSpell(spell_id,focus_spell.base_value[i]))
return 0;
}
break;
case SE_LimitSpellType:
switch(focus_spell.base_value[i]) {
case 0:
if (!IsDetrimentalSpell(spell_id))
return 0;
break;
case 1:
if (!IsBeneficialSpell(spell_id))
return 0;
break;
default:
LogInfo("CalcFocusEffect: unknown limit spelltype [{}]", focus_spell.base_value[i]);
}
break;
case SE_LimitManaMin:
if(spell.mana < focus_spell.base_value[i])
return 0;
break;
case SE_LimitTarget:
if((focus_spell.base_value[i] < 0) && -focus_spell.base_value[i] == spell.target_type)
return 0;
else if (focus_spell.base_value[i] > 0 && focus_spell.base_value[i] != spell.target_type)
return 0;
break;
case SE_LimitCombatSkills:
if(focus_spell.base_value[i] == 1 && !IsDiscipline(spell_id))
return 0;
else if(focus_spell.base_value[i] == 0 && IsDiscipline(spell_id))
return 0;
break;
case SE_LimitSpellGroup:
if(focus_spell.base_value[i] > 0 && focus_spell.base_value[i] != spell.spell_group)
return 0;
else if(focus_spell.base_value[i] < 0 && focus_spell.base_value[i] == spell.spell_group)
return 0;
break;
case SE_LimitCastingSkill:
LimitSpellSkill = true;
if(focus_spell.base_value[i] == spell.skill)
SpellSkill_Found = true;
break;
case SE_LimitClass:
if (!PassLimitClass(focus_spell.base_value[i], GetClass()))
return 0;
break;
case SE_ImprovedDamage:
if (bottype == focusImprovedDamage) {
if(best_focus) {
if (focus_spell.limit_value[i] != 0)
value = focus_spell.limit_value[i];
else
value = focus_spell.base_value[i];
}
else if (focus_spell.limit_value[i] == 0 || focus_spell.base_value[i] == focus_spell.limit_value[i])
value = focus_spell.base_value[i];
else
value = zone->random.Int(focus_spell.base_value[i], focus_spell.limit_value[i]);
}
break;
case SE_ImprovedDamage2:
if (bottype == focusImprovedDamage2) {
if(best_focus) {
if (focus_spell.limit_value[i] != 0)
value = focus_spell.limit_value[i];
else
value = focus_spell.base_value[i];
}
else if (focus_spell.limit_value[i] == 0 || focus_spell.base_value[i] == focus_spell.limit_value[i])
value = focus_spell.base_value[i];
else
value = zone->random.Int(focus_spell.base_value[i], focus_spell.limit_value[i]);
}
break;
case SE_ImprovedHeal:
if (bottype == focusImprovedHeal) {
if(best_focus) {
if (focus_spell.limit_value[i] != 0)
value = focus_spell.limit_value[i];
else
value = focus_spell.base_value[i];
}
else if (focus_spell.limit_value[i] == 0 || focus_spell.base_value[i] == focus_spell.limit_value[i])
value = focus_spell.base_value[i];
else
value = zone->random.Int(focus_spell.base_value[i], focus_spell.limit_value[i]);
}
break;
case SE_ReduceManaCost:
if (bottype == focusManaCost) {
if(best_focus) {
if (focus_spell.limit_value[i] != 0)
value = focus_spell.limit_value[i];
else
value = focus_spell.base_value[i];
}
else if (focus_spell.limit_value[i] == 0 || focus_spell.base_value[i] == focus_spell.limit_value[i])
value = focus_spell.base_value[i];
else
value = zone->random.Int(focus_spell.base_value[i], focus_spell.limit_value[i]);
}
break;
case SE_IncreaseSpellHaste:
if (bottype == focusSpellHaste && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_IncreaseSpellDuration:
if (bottype == focusSpellDuration && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_SpellDurationIncByTic:
if (bottype == focusSpellDurByTic && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_SwarmPetDuration:
if (bottype == focusSwarmPetDuration && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_IncreaseRange:
if (bottype == focusRange && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_ReduceReagentCost:
if (bottype == focusReagentCost && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_PetPowerIncrease:
if (bottype == focusPetPower && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_SpellResistReduction:
if (bottype == focusResistRate && focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
break;
case SE_SpellHateMod:
if (bottype == focusSpellHateMod) {
if(value != 0) {
if(value > 0) {
if(focus_spell.base_value[i] > value)
value = focus_spell.base_value[i];
}
else {
if(focus_spell.base_value[i] < value)
value = focus_spell.base_value[i];
}
} else
value = focus_spell.base_value[i];
}
break;
case SE_ReduceReuseTimer: {
if(bottype == focusReduceRecastTime)
value = (focus_spell.base_value[i] / 1000);
break;
}
case SE_TriggerOnCast: {
if(bottype == focusTriggerOnCast) {
if(zone->random.Int(0, 100) <= focus_spell.base_value[i])
value = focus_spell.limit_value[i];
else
value = 0;
}
break;
}
case SE_FcSpellVulnerability: {
if(bottype == focusSpellVulnerability)
value = focus_spell.base_value[i];
break;
}
case SE_BlockNextSpellFocus: {
if(bottype == focusBlockNextSpell) {
if(zone->random.Int(1, 100) <= focus_spell.base_value[i])
value = 1;
}
break;
}
case SE_FcTwincast: {
if(bottype == focusTwincast)
value = focus_spell.base_value[i];
break;
}
case SE_SympatheticProc: {
if(bottype == focusSympatheticProc) {
float ProcChance = GetSympatheticProcChances(spell_id, focus_spell.base_value[i]);
if(zone->random.Real(0, 1) <= ProcChance)
value = focus_id;
else
value = 0;
}
break;
}
case SE_FcDamageAmt: {
if(bottype == focusFcDamageAmt)
value = focus_spell.base_value[i];
break;
}
case SE_FcDamageAmt2: {
if(bottype == focusFcDamageAmt2)
value = focus_spell.base_value[i];
break;
}
case SE_FcDamageAmtCrit: {
if(bottype == focusFcDamageAmtCrit)
value = focus_spell.base_value[i];
break;
}
case SE_FcHealAmtIncoming:
if(bottype == focusFcHealAmtIncoming)
value = focus_spell.base_value[i];
break;
case SE_FcHealPctCritIncoming:
if (bottype == focusFcHealPctCritIncoming)
value = focus_spell.base_value[i];
break;
case SE_FcHealAmtCrit:
if(bottype == focusFcHealAmtCrit)
value = focus_spell.base_value[i];
break;
case SE_FcHealAmt:
if(bottype == focusFcHealAmt)
value = focus_spell.base_value[i];
break;
case SE_FcHealPctIncoming:
if(bottype == focusFcHealPctIncoming)
value = focus_spell.base_value[i];
break;
case SE_FcBaseEffects: {
if (bottype == focusFcBaseEffects)
value = focus_spell.base_value[i];
break;
}
case SE_FcDamagePctCrit: {
if(bottype == focusFcDamagePctCrit)
value = focus_spell.base_value[i];
break;
}
case SE_FcIncreaseNumHits: {
if(bottype == focusIncreaseNumHits)
value = focus_spell.base_value[i];
break;
}
default:
LogSpells("CalcFocusEffect: unknown effectid [{}]", focus_spell.effect_id[i]);
break;
}
}
//Check for spell skill limits.
if ((LimitSpellSkill) && (!SpellSkill_Found))
return 0;
return(value * lvlModifier / 100);
}
//proc chance includes proc bonus
float Bot::GetProcChances(float ProcBonus, uint16 hand) {
int mydex = GetDEX();
float ProcChance = 0.0f;
uint32 weapon_speed = 0;
switch (hand) {
case EQ::invslot::slotPrimary:
weapon_speed = attack_timer.GetDuration();
break;
case EQ::invslot::slotSecondary:
weapon_speed = attack_dw_timer.GetDuration();
break;
case EQ::invslot::slotRange:
weapon_speed = ranged_timer.GetDuration();
break;
}
if (weapon_speed < RuleI(Combat, MinHastedDelay))
weapon_speed = RuleI(Combat, MinHastedDelay);
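// PPM mode scales proc chance with weapon delay so slower weapons keep the same procs-per-minute rate;
// e.g., assuming a 3000 ms delay and AvgProcsPerMinute = 2.0: 3000 * 2.0 / 60000 = 0.1 proc chance per swing (before the DEX contribution)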
if (RuleB(Combat, AdjustProcPerMinute)) {
ProcChance = (static_cast<float>(weapon_speed) * RuleR(Combat, AvgProcsPerMinute) / 60000.0f);
ProcBonus += static_cast<float>(mydex) * RuleR(Combat, ProcPerMinDexContrib);
ProcChance += (ProcChance * ProcBonus / 100.0f);
} else {
ProcChance = (RuleR(Combat, BaseProcChance) + static_cast<float>(mydex) / RuleR(Combat, ProcDexDivideBy));
ProcChance += (ProcChance * ProcBonus / 100.0f);
}
LogCombat("Proc chance [{}] ([{}] from bonuses)", ProcChance, ProcBonus);
return ProcChance;
}
int Bot::GetHandToHandDamage(void) {
if (RuleB(Combat, UseRevampHandToHand)) {
// everyone uses this in the revamp!
int skill = GetSkill(EQ::skills::SkillHandtoHand);
int epic = 0;
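// item 10652 is assumed to be the monk epic fists (Celestial Fists); past level 46 it counts as 280 effective skill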
if (CastToNPC()->GetEquippedItemFromTextureSlot(EQ::textures::armorHands) == 10652 && GetLevel() > 46)
epic = 280;
if (epic > skill)
skill = epic;
return skill / 15 + 3;
}
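// legacy per-level hand-to-hand damage tables, indexed directly by level (index 0 is an unused sentinel)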
static uint8 mnk_dmg[] = {99,
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, // 1-10
6, 6, 6, 6, 7, 7, 7, 7, 7, 8, // 11-20
8, 8, 8, 8, 9, 9, 9, 9, 9, 10, // 21-30
10, 10, 10, 10, 11, 11, 11, 11, 11, 12, // 31-40
12, 12, 12, 12, 13, 13, 13, 13, 13, 14, // 41-50
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, // 51-60
14, 14}; // 61-62
static uint8 bst_dmg[] = {99,
4, 4, 4, 4, 4, 5, 5, 5, 5, 5, // 1-10
5, 6, 6, 6, 6, 6, 6, 7, 7, 7, // 11-20
7, 7, 7, 8, 8, 8, 8, 8, 8, 9, // 21-30
9, 9, 9, 9, 9, 10, 10, 10, 10, 10, // 31-40
10, 11, 11, 11, 11, 11, 11, 12, 12}; // 41-49
if (GetClass() == MONK) {
if (CastToNPC()->GetEquippedItemFromTextureSlot(EQ::textures::armorHands) == 10652 && GetLevel() > 50)
return 9;
if (level > 62)
return 15;
return mnk_dmg[level];
} else if (GetClass() == BEASTLORD) {
if (level > 49)
return 13;
return bst_dmg[level];
}
return 2;
}
bool Bot::TryFinishingBlow(Mob *defender, int &damage)
{
if (!defender)
return false;
if (aabonuses.FinishingBlow[1] && !defender->IsClient() && defender->GetHPRatio() < 10) {
int chance = aabonuses.FinishingBlow[0];
int fb_damage = aabonuses.FinishingBlow[1];
int levelreq = aabonuses.FinishingBlowLvl[0];
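// the chance is per mille: aabonuses.FinishingBlow[0] is compared against a 1-1000 roll below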
if (defender->GetLevel() <= levelreq && (chance >= zone->random.Int(1, 1000))) {
LogCombat("Landed a finishing blow: levelreq at [{}], other level [{}]",
levelreq, defender->GetLevel());
entity_list.MessageCloseString(this, false, 200, Chat::MeleeCrit, FINISHING_BLOW, GetName());
damage = fb_damage;
return true;
} else {
LogCombat("failed a finishing blow: levelreq at [{}], other level [{}]",
levelreq, defender->GetLevel());
return false;
}
}
return false;
}
void Bot::DoRiposte(Mob* defender) {
LogCombat("Preforming a riposte");
if (!defender)
return;
defender->Attack(this, EQ::invslot::slotPrimary, true);
int32 DoubleRipChance = (defender->GetAABonuses().GiveDoubleRiposte[0] + defender->GetSpellBonuses().GiveDoubleRiposte[0] + defender->GetItemBonuses().GiveDoubleRiposte[0]);
if(DoubleRipChance && (DoubleRipChance >= zone->random.Int(0, 100))) {
LogCombat("Preforming a double riposte ([{}] percent chance)", DoubleRipChance);
defender->Attack(this, EQ::invslot::slotPrimary, true);
}
DoubleRipChance = defender->GetAABonuses().GiveDoubleRiposte[1];
if(DoubleRipChance && (DoubleRipChance >= zone->random.Int(0, 100))) {
if (defender->GetClass() == MONK)
defender->MonkSpecialAttack(this, defender->GetAABonuses().GiveDoubleRiposte[2]);
else if (defender->IsBot())
defender->CastToBot()->DoClassAttacks(this, defender->GetAABonuses().GiveDoubleRiposte[2], true); // a bot must be cast to Bot, not Client
}
}
int Bot::GetBaseSkillDamage(EQ::skills::SkillType skill, Mob *target)
{
int base = EQ::skills::GetBaseDamage(skill);
auto skill_level = GetSkill(skill);
switch (skill) {
case EQ::skills::SkillDragonPunch:
case EQ::skills::SkillEagleStrike:
case EQ::skills::SkillTigerClaw:
if (skill_level >= 25)
base++;
if (skill_level >= 75)
base++;
if (skill_level >= 125)
base++;
if (skill_level >= 175)
base++;
return base;
case EQ::skills::SkillFrenzy:
if (GetBotItem(EQ::invslot::slotPrimary)) {
if (GetLevel() > 15)
base += GetLevel() - 15;
if (base > 23)
base = 23;
if (GetLevel() > 50)
base += 2;
if (GetLevel() > 54)
base++;
if (GetLevel() > 59)
base++;
}
return base;
case EQ::skills::SkillFlyingKick: {
float skill_bonus = skill_level / 9.0f;
float ac_bonus = 0.0f;
auto inst = GetBotItem(EQ::invslot::slotFeet);
if (inst)
ac_bonus = inst->GetItemArmorClass(true) / 25.0f;
if (ac_bonus > skill_bonus)
ac_bonus = skill_bonus;
return static_cast<int>(ac_bonus + skill_bonus);
}
case EQ::skills::SkillKick: {
float skill_bonus = skill_level / 10.0f;
float ac_bonus = 0.0f;
auto inst = GetBotItem(EQ::invslot::slotFeet);
if (inst)
ac_bonus = inst->GetItemArmorClass(true) / 25.0f;
if (ac_bonus > skill_bonus)
ac_bonus = skill_bonus;
return static_cast<int>(ac_bonus + skill_bonus);
}
case EQ::skills::SkillBash: {
float skill_bonus = skill_level / 10.0f;
float ac_bonus = 0.0f;
const EQ::ItemInstance *inst = nullptr;
if (HasShieldEquiped())
inst = GetBotItem(EQ::invslot::slotSecondary);
else if (HasTwoHanderEquipped())
inst = GetBotItem(EQ::invslot::slotPrimary);
if (inst)
ac_bonus = inst->GetItemArmorClass(true) / 25.0f;
if (ac_bonus > skill_bonus)
ac_bonus = skill_bonus;
return static_cast<int>(ac_bonus + skill_bonus);
}
case EQ::skills::SkillBackstab: {
float skill_bonus = static_cast<float>(skill_level) * 0.02f;
auto inst = GetBotItem(EQ::invslot::slotPrimary);
if (inst && inst->GetItem() && inst->GetItem()->ItemType == EQ::item::ItemType1HPiercing) {
base = inst->GetItemBackstabDamage(true);
if (!inst->GetItemBackstabDamage())
base += inst->GetItemWeaponDamage(true);
if (target) {
if (inst->GetItemElementalFlag(true) && inst->GetItemElementalDamage(true))
base += target->ResistElementalWeaponDmg(inst);
if (inst->GetItemBaneDamageBody(true) || inst->GetItemBaneDamageRace(true))
base += target->CheckBaneDamage(inst);
}
}
return static_cast<int>(static_cast<float>(base) * (skill_bonus + 2.0f));
}
default:
return 0;
}
}
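// Applies one special-attack hit. A hate_override of -1 (or less) means "generate hate equal to
// max_damage"; bash additionally adds shield AC to hate and scales it by the Furious Bash focus.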
void Bot::DoSpecialAttackDamage(Mob *who, EQ::skills::SkillType skill, int32 max_damage, int32 min_damage, int32 hate_override, int ReuseTime, bool HitChance) {
int32 hate = max_damage;
if(hate_override > -1)
hate = hate_override;
if (skill == EQ::skills::SkillBash) {
const EQ::ItemInstance* inst = GetBotItem(EQ::invslot::slotSecondary);
const EQ::ItemData* botweapon = nullptr;
if(inst)
botweapon = inst->GetItem();
if(botweapon) {
if (botweapon->ItemType == EQ::item::ItemTypeShield)
hate += botweapon->AC;
hate = (hate * (100 + GetFuriousBash(botweapon->Focus.Effect)) / 100);
}
}
DamageHitInfo my_hit;
my_hit.base_damage = max_damage;
my_hit.min_damage = min_damage;
my_hit.damage_done = 1;
my_hit.skill = skill;
my_hit.offense = offense(my_hit.skill);
my_hit.tohit = GetTotalToHit(my_hit.skill, 0);
my_hit.hand = EQ::invslot::slotPrimary;
if (skill == EQ::skills::SkillThrowing || skill == EQ::skills::SkillArchery)
my_hit.hand = EQ::invslot::slotRange;
DoAttack(who, my_hit);
who->AddToHateList(this, hate);
who->Damage(this, my_hit.damage_done, SPELL_UNKNOWN, skill, false);
if(!GetTarget() || HasDied())
return;
if (my_hit.damage_done > 0)
CheckNumHitsRemaining(NumHit::OutgoingHitSuccess);
//[AA Dragon Punch] value[0] = 100 for 25%, chance value[1] = skill
//if(aabonuses.SpecialAttackKBProc[0] && aabonuses.SpecialAttackKBProc[1] == skill){
// int kb_chance = 25;
// kb_chance += (kb_chance * (100 - aabonuses.SpecialAttackKBProc[0]) / 100);
// if (zone->random.Int(0, 99) < kb_chance)
// SpellFinished(904, who, 10, 0, -1, spells[904].ResistDiff);
// //who->Stun(100); Kayen: This effect does not stun on live, it only moves the NPC.
//}
if (HasSkillProcs())
TrySkillProc(who, skill, (ReuseTime * 1000));
if (my_hit.damage_done > 0 && HasSkillProcSuccess())
TrySkillProc(who, skill, (ReuseTime * 1000), true);
}
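// Backstab decision tree: requires a 1H piercer in the primary slot. From behind, a high-level
// bot may assassinate a low-level, unengaged NPC outright; otherwise it backstabs with possible
// double/triple follow-ups. From the front it falls back to Chaotic Stab (minimum damage) when
// an ability grants it, else performs a normal attack.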
void Bot::TryBackstab(Mob *other, int ReuseTime) {
if(!other)
return;
bool bIsBehind = false;
bool bCanFrontalBS = false;
const EQ::ItemInstance* inst = GetBotItem(EQ::invslot::slotPrimary);
const EQ::ItemData* botpiercer = nullptr;
if(inst)
botpiercer = inst->GetItem();
if (!botpiercer || (botpiercer->ItemType != EQ::item::ItemType1HPiercing)) {
BotGroupSay(this, "I can't backstab with this weapon!");
return;
}
int tripleChance = (itembonuses.TripleBackstab + spellbonuses.TripleBackstab + aabonuses.TripleBackstab);
if (BehindMob(other, GetX(), GetY()))
bIsBehind = true;
else {
int FrontalBSChance = (itembonuses.FrontalBackstabChance + spellbonuses.FrontalBackstabChance + aabonuses.FrontalBackstabChance);
if (FrontalBSChance && (FrontalBSChance > zone->random.Int(0, 100)))
bCanFrontalBS = true;
}
if (bIsBehind || bCanFrontalBS) {
int chance = (10 + (GetDEX() / 10) + (itembonuses.HeroicDEX / 10));
if(level >= 60 && other->GetLevel() <= 45 && other->IsNPC() && !other->CastToNPC()->IsEngaged() && other->GetHP() <= 32000 && zone->random.Real(0, 99) < chance) { // check IsNPC() before the CastToNPC() call
entity_list.MessageCloseString(this, false, 200, Chat::MeleeCrit, ASSASSINATES, GetName());
RogueAssassinate(other);
} else {
RogueBackstab(other);
if (level > 54) {
float DoubleAttackProbability = ((GetSkill(EQ::skills::SkillDoubleAttack) + GetLevel()) / 500.0f);
if(zone->random.Real(0, 1) < DoubleAttackProbability) {
if(other->GetHP() > 0)
RogueBackstab(other,false,ReuseTime);
if (tripleChance && other->GetHP() > 0 && tripleChance > zone->random.Int(0, 100))
RogueBackstab(other,false,ReuseTime);
}
}
}
} else if(aabonuses.FrontalBackstabMinDmg || itembonuses.FrontalBackstabMinDmg || spellbonuses.FrontalBackstabMinDmg) {
m_specialattacks = eSpecialAttacks::ChaoticStab;
RogueBackstab(other, true);
m_specialattacks = eSpecialAttacks::None;
}
else
Attack(other, EQ::invslot::slotPrimary);
}
void Bot::RogueBackstab(Mob *other, bool min_damage, int ReuseTime)
{
if (!other)
return;
EQ::ItemInstance *botweaponInst = GetBotItem(EQ::invslot::slotPrimary);
if (botweaponInst) {
if (!GetWeaponDamage(other, botweaponInst))
return;
} else if (!GetWeaponDamage(other, (const EQ::ItemData *)nullptr)) {
return;
}
int base_damage = GetBaseSkillDamage(EQ::skills::SkillBackstab, other);
uint32 hate = base_damage;
DoSpecialAttackDamage(other, EQ::skills::SkillBackstab, base_damage, 0, hate, ReuseTime);
DoAnim(anim1HPiercing);
}
void Bot::RogueAssassinate(Mob* other) {
EQ::ItemInstance* botweaponInst = GetBotItem(EQ::invslot::slotPrimary);
if(botweaponInst) {
if(GetWeaponDamage(other, botweaponInst))
other->Damage(this, 32000, SPELL_UNKNOWN, EQ::skills::SkillBackstab);
else
other->Damage(this, -5, SPELL_UNKNOWN, EQ::skills::SkillBackstab);
}
DoAnim(anim1HPiercing);
}
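// Drives the class-specific combat abilities: harm touch / lay on hands on the knight timer,
// taunt on its own timer, monk strikes on the monk timer, and kick/bash/frenzy/backstab on the
// class attack timer, so the ability families cannot starve each other.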
void Bot::DoClassAttacks(Mob *target, bool IsRiposte) {
if(!target || spellend_timer.Enabled() || IsFeared() || IsStunned() || IsMezzed() || DivineAura() || GetHP() < 0 || !IsAttackAllowed(target))
return;
bool taunt_time = taunt_timer.Check();
bool ca_time = classattack_timer.Check(false);
bool ma_time = monkattack_timer.Check(false);
bool ka_time = knightattack_timer.Check(false);
if (taunt_time) {
// Bots without this skill shouldn't be 'checking' on this timer..let's just disable it and avoid the extra IsAttackAllowed() checks
// Note: this is done here instead of NPC::ctor() because taunt skill can be acquired during level ups (the timer is re-enabled in CalcBotStats())
if (!GetSkill(EQ::skills::SkillTaunt)) {
taunt_timer.Disable();
return;
}
if (!IsAttackAllowed(target)) {
return;
}
}
if ((ca_time || ma_time || ka_time) && !IsAttackAllowed(target)) {
return;
}
if(ka_time){
switch(GetClass()){
case SHADOWKNIGHT: {
CastSpell(SPELL_NPC_HARM_TOUCH, target->GetID());
knightattack_timer.Start(HarmTouchReuseTime * 1000);
break;
}
case PALADIN: {
if(GetHPRatio() < 20) {
CastSpell(SPELL_LAY_ON_HANDS, GetID());
knightattack_timer.Start(LayOnHandsReuseTime * 1000);
}
else {
knightattack_timer.Start(2000);
}
break;
}
default: {
break;
}
}
}
if(taunting && target && target->IsNPC() && taunt_time) {
if(GetTarget() && GetTarget()->GetHateTop() && GetTarget()->GetHateTop() != this) {
BotGroupSay(this, "Taunting %s", target->GetCleanName());
Taunt(target->CastToNPC(), false);
taunt_timer.Start(TauntReuseTime * 1000);
}
}
if (ma_time) {
switch (GetClass()) {
case MONK: {
int reuse = (MonkSpecialAttack(target, EQ::skills::SkillTigerClaw) - 1);
// Live AA - Technique of Master Wu
int wuchance = itembonuses.DoubleSpecialAttack + spellbonuses.DoubleSpecialAttack + aabonuses.DoubleSpecialAttack;
if (wuchance) {
const int MonkSPA[5] = {
EQ::skills::SkillFlyingKick,
EQ::skills::SkillDragonPunch,
EQ::skills::SkillEagleStrike,
EQ::skills::SkillTigerClaw,
EQ::skills::SkillRoundKick
};
int extra = 0;
// always 1/4 of the double attack chance, 25% at rank 5 (100/4)
while (wuchance > 0) {
if (zone->random.Roll(wuchance)) {
++extra;
}
else {
break;
}
wuchance /= 4;
}
Mob* bo = GetBotOwner();
if (bo && bo->IsClient() && bo->CastToClient()->GetBotOption(Client::booMonkWuMessage)) {
bo->Message(
GENERIC_EMOTE,
"The spirit of Master Wu fills %s! %s gains %d additional attack(s).",
GetCleanName(),
GetCleanName(),
extra
);
}
auto classic = RuleB(Combat, ClassicMasterWu);
while (extra) {
MonkSpecialAttack(GetTarget(), (classic ? MonkSPA[zone->random.Int(0, 4)] : EQ::skills::SkillTigerClaw));
--extra;
}
}
float HasteModifier = (GetHaste() * 0.01f);
monkattack_timer.Start((reuse * 1000) / HasteModifier);
break;
}
default:
break;
}
}
if (!ca_time) {
return;
}
float HasteModifier = (GetHaste() * 0.01f);
int skill_to_use = -1; // must be signed: with uint16 the (skill_to_use == -1) sentinel check below could never match
int level = GetLevel();
int reuse = (TauntReuseTime * 1000);
bool did_attack = false;
switch(GetClass()) {
case WARRIOR:
if(level >= RuleI(Combat, NPCBashKickLevel)){
bool canBash = false;
if ((GetRace() == OGRE || GetRace() == TROLL || GetRace() == BARBARIAN) ||
(m_inv.GetItem(EQ::invslot::slotSecondary) && m_inv.GetItem(EQ::invslot::slotSecondary)->GetItem()->ItemType == EQ::item::ItemTypeShield) ||
(m_inv.GetItem(EQ::invslot::slotPrimary) && m_inv.GetItem(EQ::invslot::slotPrimary)->GetItem()->IsType2HWeapon() && GetAA(aa2HandBash) >= 1))
canBash = true;
if(!canBash || zone->random.Int(0, 100) > 25)
skill_to_use = EQ::skills::SkillKick;
else
skill_to_use = EQ::skills::SkillBash;
break; // without this break the kick/bash choice was always overwritten by the fall-through below
}
// low-level warriors fall through to the plain kick
case RANGER:
case BEASTLORD:
skill_to_use = EQ::skills::SkillKick;
break;
case BERSERKER:
skill_to_use = EQ::skills::SkillFrenzy;
break;
case CLERIC:
case SHADOWKNIGHT:
case PALADIN:
if(level >= RuleI(Combat, NPCBashKickLevel)){
if ((GetRace() == OGRE || GetRace() == TROLL || GetRace() == BARBARIAN) ||
(m_inv.GetItem(EQ::invslot::slotSecondary) && m_inv.GetItem(EQ::invslot::slotSecondary)->GetItem()->ItemType == EQ::item::ItemTypeShield) ||
(m_inv.GetItem(EQ::invslot::slotPrimary) && m_inv.GetItem(EQ::invslot::slotPrimary)->GetItem()->IsType2HWeapon() && GetAA(aa2HandBash) >= 1))
skill_to_use = EQ::skills::SkillBash;
}
break;
case MONK:
if (GetLevel() >= 30) {
skill_to_use = EQ::skills::SkillFlyingKick;
}
else if (GetLevel() >= 25) {
skill_to_use = EQ::skills::SkillDragonPunch;
}
else if (GetLevel() >= 20) {
skill_to_use = EQ::skills::SkillEagleStrike;
}
else if (GetLevel() >= 5) {
skill_to_use = EQ::skills::SkillRoundKick;
}
else {
skill_to_use = EQ::skills::SkillKick;
}
break;
case ROGUE:
skill_to_use = EQ::skills::SkillBackstab;
break;
}
if(skill_to_use == -1)
return;
int dmg = GetBaseSkillDamage(static_cast<EQ::skills::SkillType>(skill_to_use), GetTarget());
if (skill_to_use == EQ::skills::SkillBash) {
if (target != this) {
DoAnim(animTailRake);
if (GetWeaponDamage(target, GetBotItem(EQ::invslot::slotSecondary)) <= 0 && GetWeaponDamage(target, GetBotItem(EQ::invslot::slotShoulders)) <= 0)
dmg = DMG_INVULNERABLE;
reuse = (BashReuseTime * 1000);
DoSpecialAttackDamage(target, EQ::skills::SkillBash, dmg, 0, -1, reuse);
did_attack = true;
}
}
if (skill_to_use == EQ::skills::SkillFrenzy) {
int AtkRounds = 3;
DoAnim(anim2HSlashing);
reuse = (FrenzyReuseTime * 1000);
did_attack = true;
while(AtkRounds > 0) {
if (GetTarget() && (AtkRounds == 1 || zone->random.Int(0, 100) < 75)) {
DoSpecialAttackDamage(GetTarget(), EQ::skills::SkillFrenzy, dmg, 0, dmg, reuse, true);
}
AtkRounds--;
}
}
if (skill_to_use == EQ::skills::SkillKick) {
if(target != this) {
DoAnim(animKick);
if (GetWeaponDamage(target, GetBotItem(EQ::invslot::slotFeet)) <= 0)
dmg = DMG_INVULNERABLE;
reuse = (KickReuseTime * 1000);
DoSpecialAttackDamage(target, EQ::skills::SkillKick, dmg, 0, -1, reuse);
did_attack = true;
}
}
if (
skill_to_use == EQ::skills::SkillFlyingKick ||
skill_to_use == EQ::skills::SkillDragonPunch ||
skill_to_use == EQ::skills::SkillEagleStrike ||
skill_to_use == EQ::skills::SkillRoundKick
) {
reuse = (MonkSpecialAttack(target, skill_to_use) - 1);
// Live AA - Technique of Master Wu
int wuchance = itembonuses.DoubleSpecialAttack + spellbonuses.DoubleSpecialAttack + aabonuses.DoubleSpecialAttack;
if (wuchance) {
const int MonkSPA[5] = {
EQ::skills::SkillFlyingKick,
EQ::skills::SkillDragonPunch,
EQ::skills::SkillEagleStrike,
EQ::skills::SkillTigerClaw,
EQ::skills::SkillRoundKick
};
int extra = 0;
// always 1/4 of the double attack chance, 25% at rank 5 (100/4)
while (wuchance > 0) {
if (zone->random.Roll(wuchance)) {
++extra;
}
else {
break;
}
wuchance /= 4;
}
Mob* bo = GetBotOwner();
if (bo && bo->IsClient() && bo->CastToClient()->GetBotOption(Client::booMonkWuMessage)) {
bo->Message(
GENERIC_EMOTE,
"The spirit of Master Wu fills %s! %s gains %d additional attack(s).",
GetCleanName(),
GetCleanName(),
extra
);
}
auto classic = RuleB(Combat, ClassicMasterWu);
while (extra) {
MonkSpecialAttack(GetTarget(), (classic ? MonkSPA[zone->random.Int(0, 4)] : skill_to_use));
--extra;
}
}
reuse *= 1000;
did_attack = true;
}
if (skill_to_use == EQ::skills::SkillBackstab) {
reuse = (BackstabReuseTime * 1000);
did_attack = true;
if (IsRiposte)
reuse = 0;
TryBackstab(target,reuse);
}
classattack_timer.Start(reuse / HasteModifier);
}
int32 Bot::CheckAggroAmount(uint16 spellid) {
int32 AggroAmount = Mob::CheckAggroAmount(spellid, nullptr);
int32 focusAggro = GetBotFocusEffect(focusSpellHateMod, spellid);
AggroAmount = (AggroAmount * (100 + focusAggro) / 100);
return AggroAmount;
}
int32 Bot::CheckHealAggroAmount(uint16 spellid, Mob *target, uint32 heal_possible) {
int32 AggroAmount = Mob::CheckHealAggroAmount(spellid, target, heal_possible);
int32 focusAggro = GetBotFocusEffect(focusSpellHateMod, spellid);
AggroAmount = (AggroAmount * (100 + focusAggro) / 100);
return AggroAmount;
}
void Bot::MakePet(uint16 spell_id, const char* pettype, const char *petname) {
Mob::MakePet(spell_id, pettype, petname);
}
void Bot::AI_Stop() {
NPC::AI_Stop();
Mob::AI_Stop();
}
FACTION_VALUE Bot::GetReverseFactionCon(Mob* iOther) {
if(iOther->IsBot())
return FACTION_ALLY;
return NPC::GetReverseFactionCon(iOther);
}
Mob* Bot::GetOwnerOrSelf() {
Mob* Result = nullptr;
if(this->GetBotOwner())
Result = GetBotOwner();
else
Result = this;
return Result;
}
Mob* Bot::GetOwner() {
Mob* Result = nullptr;
Result = GetBotOwner();
if(!Result)
this->SetBotOwner(0);
return Result;
}
bool Bot::IsBotAttackAllowed(Mob* attacker, Mob* target, bool& hasRuleDefined) {
bool Result = false;
if(attacker && target) {
if(attacker == target) {
hasRuleDefined = true;
Result = false;
} else if(attacker->IsClient() && target->IsBot() && attacker->CastToClient()->GetPVP() && target->CastToBot()->GetBotOwner()->CastToClient()->GetPVP()) {
hasRuleDefined = true;
Result = true;
} else if(attacker->IsClient() && target->IsBot()) {
hasRuleDefined = true;
Result = false;
} else if(attacker->IsBot() && target->IsNPC()) {
hasRuleDefined = true;
Result = true;
} else if(attacker->IsBot() && !target->IsNPC()) {
hasRuleDefined = true;
Result = false;
} else if(attacker->IsPet() && attacker->IsFamiliar()) {
hasRuleDefined = true;
Result = false;
} else if(attacker->IsBot() && attacker->CastToBot()->GetBotOwner() && attacker->CastToBot()->GetBotOwner()->CastToClient()->GetPVP()) {
if(target->IsBot() && target->GetOwner() && target->GetOwner()->CastToClient()->GetPVP()) {
hasRuleDefined = true;
if(target->GetOwner() == attacker->GetOwner())
Result = false;
else
Result = true;
} else if(target->IsClient() && target->CastToClient()->GetPVP()) {
hasRuleDefined = true;
if(target == attacker->GetOwner())
Result = false;
else
Result = true;
} else if(target->IsNPC()) {
hasRuleDefined = true;
Result = true;
} else if(!target->IsNPC()) {
hasRuleDefined = true;
Result = false;
}
}
}
return Result;
}
void Bot::EquipBot(std::string* errorMessage) {
GetBotItems(m_inv, errorMessage);
const EQ::ItemInstance* inst = nullptr;
const EQ::ItemData* item = nullptr;
for (int i = EQ::invslot::EQUIPMENT_BEGIN; i <= EQ::invslot::EQUIPMENT_END; ++i) {
inst = GetBotItem(i);
if(inst) {
item = inst->GetItem();
BotTradeAddItem(inst->GetID(), inst, inst->GetCharges(), item->Slots, i, errorMessage, false);
if(!errorMessage->empty())
return;
}
}
UpdateEquipmentLight();
}
void Bot::BotOrderCampAll(Client* c) {
if(c) {
std::list<Bot*> BotList = entity_list.GetBotsByBotOwnerCharacterID(c->CharacterID());
for(std::list<Bot*>::iterator botListItr = BotList.begin(); botListItr != BotList.end(); ++botListItr)
(*botListItr)->Camp();
}
}
void Bot::ProcessBotOwnerRefDelete(Mob* botOwner) {
if(botOwner) {
if(botOwner->IsClient()) {
std::list<Bot*> BotList = entity_list.GetBotsByBotOwnerCharacterID(botOwner->CastToClient()->CharacterID());
if(!BotList.empty()) {
for(std::list<Bot*>::iterator botListItr = BotList.begin(); botListItr != BotList.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if(tempBot) {
tempBot->SetTarget(0);
tempBot->SetBotOwner(0);
}
}
}
}
}
}
void Bot::ProcessGuildInvite(Client* guildOfficer, Bot* botToGuild) {
if(guildOfficer && botToGuild) {
if(!botToGuild->IsInAGuild()) {
if (!guild_mgr.CheckPermission(guildOfficer->GuildID(), guildOfficer->GuildRank(), GUILD_INVITE)) {
guildOfficer->Message(Chat::Red, "You dont have permission to invite.");
return;
}
if (!database.botdb.SaveGuildMembership(botToGuild->GetBotID(), guildOfficer->GuildID(), GUILD_MEMBER)) {
guildOfficer->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::SaveGuildMembership(), botToGuild->GetCleanName());
return;
}
ServerPacket* pack = new ServerPacket(ServerOP_GuildCharRefresh, sizeof(ServerGuildCharRefresh_Struct));
ServerGuildCharRefresh_Struct *s = (ServerGuildCharRefresh_Struct *) pack->pBuffer;
s->guild_id = guildOfficer->GuildID();
s->old_guild_id = GUILD_NONE;
s->char_id = botToGuild->GetBotID();
worldserver.SendPacket(pack);
safe_delete(pack);
} else {
guildOfficer->Message(Chat::Red, "Bot is in a guild.");
return;
}
}
}
bool Bot::ProcessGuildRemoval(Client* guildOfficer, std::string botName) {
bool Result = false;
if(guildOfficer && !botName.empty()) {
Bot* botToUnGuild = entity_list.GetBotByBotName(botName);
if(botToUnGuild) {
if (database.botdb.SaveGuildMembership(botToUnGuild->GetBotID(), 0, 0))
Result = true;
} else {
uint32 ownerId = 0;
if (!database.botdb.LoadOwnerID(botName, ownerId))
guildOfficer->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadOwnerID(), botName.c_str());
uint32 botId = 0;
if (!database.botdb.LoadBotID(ownerId, botName, botId))
guildOfficer->Message(Chat::Red, "%s for '%s'", BotDatabase::fail::LoadBotID(), botName.c_str());
if (botId && database.botdb.SaveGuildMembership(botId, 0, 0))
Result = true;
}
if(Result) {
EQApplicationPacket* outapp = new EQApplicationPacket(OP_GuildManageRemove, sizeof(GuildManageRemove_Struct));
GuildManageRemove_Struct* gm = (GuildManageRemove_Struct*) outapp->pBuffer;
gm->guildeqid = guildOfficer->GuildID();
strcpy(gm->member, botName.c_str());
guildOfficer->Message(Chat::White, "%s successfully removed from your guild.", botName.c_str());
entity_list.QueueClientsGuild(guildOfficer, outapp, false, gm->guildeqid);
safe_delete(outapp);
}
}
return Result;
}
int32 Bot::CalcMaxMana() {
switch(GetCasterClass()) {
case 'I':
case 'W': {
max_mana = (GenerateBaseManaPoints() + itembonuses.Mana + spellbonuses.Mana + GroupLeadershipAAManaEnhancement());
break;
}
case 'N': {
max_mana = 0;
break;
}
default: {
LogDebug("Invalid Class [{}] in CalcMaxMana", GetCasterClass());
max_mana = 0;
break;
}
}
if(current_mana > max_mana)
current_mana = max_mana;
else if(max_mana < 0)
max_mana = 0;
return max_mana;
}
void Bot::SetAttackTimer() {
float haste_mod = (GetHaste() * 0.01f);
attack_timer.SetAtTrigger(4000, true);
Timer* TimerToUse = nullptr;
const EQ::ItemData* PrimaryWeapon = nullptr;
for (int i = EQ::invslot::slotRange; i <= EQ::invslot::slotSecondary; i++) {
if (i == EQ::invslot::slotPrimary)
TimerToUse = &attack_timer;
else if (i == EQ::invslot::slotRange)
TimerToUse = &ranged_timer;
else if (i == EQ::invslot::slotSecondary)
TimerToUse = &attack_dw_timer;
else
continue;
const EQ::ItemData* ItemToUse = nullptr;
EQ::ItemInstance* ci = GetBotItem(i);
if (ci)
ItemToUse = ci->GetItem();
if (i == EQ::invslot::slotSecondary) {
if (PrimaryWeapon != nullptr) {
if (PrimaryWeapon->IsClassCommon() && PrimaryWeapon->IsType2HWeapon()) {
attack_dw_timer.Disable();
continue;
}
}
if (!GetSkill(EQ::skills::SkillDualWield)) {
attack_dw_timer.Disable();
continue;
}
}
if (ItemToUse != nullptr) {
if (!ItemToUse->IsClassCommon() || ItemToUse->Damage == 0 || ItemToUse->Delay == 0 || ((ItemToUse->ItemType > EQ::item::ItemTypeLargeThrowing) && (ItemToUse->ItemType != EQ::item::ItemTypeMartial) && (ItemToUse->ItemType != EQ::item::ItemType2HPiercing)))
ItemToUse = nullptr;
}
int hhe = (itembonuses.HundredHands + spellbonuses.HundredHands);
int speed = 0;
int delay = 36;
if (ItemToUse == nullptr) {
delay = GetHandToHandDelay();
} else {
delay = ItemToUse->Delay;
}
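// Worked example (pre-revamp path, an illustration): weapon delay 30 with 25% haste
// (haste_mod 1.25) and no Hundred Hands effect gives ((30 / 1.25) + 0) * 100 = 2400,
// i.e. a 2.4 second swing timer before the MinHastedDelay floor is applied.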
speed = (RuleB(Spells, Jun182014HundredHandsRevamp) ?
static_cast<int>(((delay / haste_mod) + ((hhe / 1000.0f) * (delay / haste_mod))) * 100) :
static_cast<int>(((delay / haste_mod) + ((hhe / 100.0f) * delay)) * 100));
TimerToUse->SetAtTrigger(std::max(RuleI(Combat, MinHastedDelay), speed), true, true);
if (i == EQ::invslot::slotPrimary)
PrimaryWeapon = ItemToUse;
}
}
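// Focus/crit pipeline for bot nukes: base-effect foci scale the raw value first, crit chance is
// rolled from item/spell/AA bonuses (wizards also get an innate crit roll), and on a crit the
// ratio multiplies the focused value before the flat damage foci are applied.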
int32 Bot::GetActSpellDamage(uint16 spell_id, int32 value, Mob* target) {
if (spells[spell_id].target_type == ST_Self)
return value;
bool Critical = false;
int32 value_BaseEffect = 0;
value_BaseEffect = (value + (value*GetBotFocusEffect(focusFcBaseEffects, spell_id) / 100));
// Harm Touch scales differently past level 40: the spell file only adds 10 damage per level, but on live it gains 30 per level, so add the missing 20 per level over 40 (value is negative, hence the subtraction).
if ( (spell_id == SPELL_HARM_TOUCH || spell_id == SPELL_HARM_TOUCH2 || spell_id == SPELL_IMP_HARM_TOUCH ) && GetLevel() > 40)
value -= ((GetLevel() - 40) * 20);
//This adds the extra damage from the AA Unholy Touch, 450 per rank, to Improved Harm Touch.
if (spell_id == SPELL_IMP_HARM_TOUCH) //Improved Harm Touch
value -= (GetAA(aaUnholyTouch) * 450); //Unholy Touch
int chance = RuleI(Spells, BaseCritChance);
chance += (itembonuses.CriticalSpellChance + spellbonuses.CriticalSpellChance + aabonuses.CriticalSpellChance);
if (chance > 0) {
int32 ratio = RuleI(Spells, BaseCritRatio);
if (spell_id == SPELL_IMP_HARM_TOUCH && (GetAA(aaSpellCastingFury) > 0) && (GetAA(aaUnholyTouch) > 0))
chance = 100;
if (zone->random.Int(1, 100) <= chance){
Critical = true;
ratio += (itembonuses.SpellCritDmgIncrease + spellbonuses.SpellCritDmgIncrease + aabonuses.SpellCritDmgIncrease);
ratio += (itembonuses.SpellCritDmgIncNoStack + spellbonuses.SpellCritDmgIncNoStack + aabonuses.SpellCritDmgIncNoStack);
} else if (GetClass() == WIZARD && (GetLevel() >= RuleI(Spells, WizCritLevel)) && (zone->random.Int(1, 100) <= RuleI(Spells, WizCritChance))) {
ratio = zone->random.Int(1, 100);
Critical = true;
}
ratio += RuleI(Spells, WizCritRatio);
if (Critical) {
value = (value_BaseEffect * ratio / 100);
value += (value_BaseEffect * GetBotFocusEffect(focusImprovedDamage, spell_id) / 100);
value += (value_BaseEffect * GetBotFocusEffect(focusImprovedDamage2, spell_id) / 100);
value += (int(value_BaseEffect * GetBotFocusEffect(focusFcDamagePctCrit, spell_id) / 100) * ratio / 100);
if (target) {
value += (int(value_BaseEffect * target->GetVulnerability(this, spell_id, 0) / 100) * ratio / 100);
value -= target->GetFcDamageAmtIncoming(this, spell_id);
}
value -= (GetBotFocusEffect(focusFcDamageAmtCrit, spell_id) * ratio / 100);
value -= GetBotFocusEffect(focusFcDamageAmt, spell_id);
value -= GetBotFocusEffect(focusFcDamageAmt2, spell_id);
if(itembonuses.SpellDmg && spells[spell_id].classes[(GetClass() % 17) - 1] >= GetLevel() - 5)
value += (GetExtraSpellAmt(spell_id, itembonuses.SpellDmg, value) * ratio / 100);
entity_list.MessageClose(this, false, 100, Chat::SpellCrit, "%s delivers a critical blast! (%d)", GetName(), -value);
return value;
}
}
value = value_BaseEffect;
value += (value_BaseEffect * GetBotFocusEffect(focusImprovedDamage, spell_id) / 100);
value += (value_BaseEffect * GetBotFocusEffect(focusImprovedDamage2, spell_id) / 100);
value += (value_BaseEffect * GetBotFocusEffect(focusFcDamagePctCrit, spell_id) / 100);
if (target) {
value += (value_BaseEffect * target->GetVulnerability(this, spell_id, 0) / 100);
value -= target->GetFcDamageAmtIncoming(this, spell_id);
}
value -= GetBotFocusEffect(focusFcDamageAmtCrit, spell_id);
value -= GetBotFocusEffect(focusFcDamageAmt, spell_id);
value -= GetBotFocusEffect(focusFcDamageAmt2, spell_id);
if(itembonuses.SpellDmg && spells[spell_id].classes[(GetClass() % 17) - 1] >= GetLevel() - 5)
value += GetExtraSpellAmt(spell_id, itembonuses.SpellDmg, value);
return value;
}
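// Instant heals (duration under one tick) can crit for double and receive the flat heal-amount
// foci; heal-over-time ticks only roll the separate HoT crit chance and simply double on success.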
int32 Bot::GetActSpellHealing(uint16 spell_id, int32 value, Mob* target) {
if (target == nullptr)
target = this;
int32 value_BaseEffect = 0;
int32 chance = 0;
int8 modifier = 1;
bool Critical = false;
value_BaseEffect = (value + (value*GetBotFocusEffect(focusFcBaseEffects, spell_id) / 100));
value = value_BaseEffect;
value += int(value_BaseEffect*GetBotFocusEffect(focusImprovedHeal, spell_id) / 100);
if(spells[spell_id].buff_duration < 1) {
chance += (itembonuses.CriticalHealChance + spellbonuses.CriticalHealChance + aabonuses.CriticalHealChance);
chance += target->GetFocusIncoming(focusFcHealPctCritIncoming, SE_FcHealPctCritIncoming, this, spell_id);
if (spellbonuses.CriticalHealDecay)
chance += GetDecayEffectValue(spell_id, SE_CriticalHealDecay);
if(chance && (zone->random.Int(0, 99) < chance)) {
Critical = true;
modifier = 2;
}
value *= modifier;
value += (GetBotFocusEffect(focusFcHealAmtCrit, spell_id) * modifier);
value += GetBotFocusEffect(focusFcHealAmt, spell_id);
value += target->GetFocusIncoming(focusFcHealAmtIncoming, SE_FcHealAmtIncoming, this, spell_id);
if(itembonuses.HealAmt && spells[spell_id].classes[(GetClass() % 17) - 1] >= GetLevel() - 5)
value += (GetExtraSpellAmt(spell_id, itembonuses.HealAmt, value) * modifier);
value += (value * target->GetHealRate() / 100);
if (Critical)
entity_list.MessageClose(this, false, 100, Chat::SpellCrit, "%s performs an exceptional heal! (%d)", GetName(), value);
return value;
} else {
chance = (itembonuses.CriticalHealOverTime + spellbonuses.CriticalHealOverTime + aabonuses.CriticalHealOverTime);
chance += target->GetFocusIncoming(focusFcHealPctCritIncoming, SE_FcHealPctCritIncoming, this, spell_id);
if (spellbonuses.CriticalRegenDecay)
chance += GetDecayEffectValue(spell_id, SE_CriticalRegenDecay);
if(chance && (zone->random.Int(0,99) < chance))
return (value * 2);
}
return value;
}
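// Cast-time reduction from the spell-haste focus plus hybrid level scaling and the quick-cast AA
// lines; the result is floored at half the unmodified cast time (min_cap).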
int32 Bot::GetActSpellCasttime(uint16 spell_id, int32 casttime) {
int32 cast_reducer = GetBotFocusEffect(focusSpellHaste, spell_id);
auto min_cap = casttime / 2;
uint8 botlevel = GetLevel();
uint8 botclass = GetClass();
if (botlevel >= 51 && casttime >= 3000 && !spells[spell_id].good_effect &&
(botclass == SHADOWKNIGHT || botclass == RANGER || botclass == PALADIN || botclass == BEASTLORD)) {
int level_mod = std::min(15, botlevel - 50);
cast_reducer += level_mod * 3;
}
if((casttime >= 4000) && BeneficialSpell(spell_id) && IsBuffSpell(spell_id)) {
switch (GetAA(aaSpellCastingDeftness)) {
case 1:
cast_reducer += 5;
break;
case 2:
cast_reducer += 10;
break;
case 3:
cast_reducer += 25;
break;
}
switch (GetAA(aaQuickBuff)) {
case 1:
cast_reducer += 10;
break;
case 2:
cast_reducer += 25;
break;
case 3:
cast_reducer += 50;
break;
}
}
if(IsSummonSpell(spell_id)) {
switch (GetAA(aaQuickSummoning)) {
case 1:
cast_reducer += 10;
break;
case 2:
cast_reducer += 25;
break;
case 3:
cast_reducer += 50;
break;
}
}
if(IsEvacSpell(spell_id)) {
switch (GetAA(aaQuickEvacuation)) {
case 1:
cast_reducer += 10;
break;
case 2:
cast_reducer += 25;
break;
case 3:
cast_reducer += 50;
break;
}
}
if(IsDamageSpell(spell_id) && spells[spell_id].cast_time >= 4000) {
switch (GetAA(aaQuickDamage)) {
case 1:
cast_reducer += 2;
break;
case 2:
cast_reducer += 5;
break;
case 3:
cast_reducer += 10;
break;
}
}
casttime = casttime * (100 - cast_reducer) / 100;
return std::max(casttime, min_cap);
}
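// Mana cost reduction: Clairvoyance can refund part of the cost, specialization skill plus the
// casting mastery AAs can shave off a percentage, and a 100%+ mana-cost focus reduces the cost to 1.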
int32 Bot::GetActSpellCost(uint16 spell_id, int32 cost) {
if(this->itembonuses.Clairvoyance && spells[spell_id].classes[(GetClass()%17) - 1] >= GetLevel() - 5) {
int32 mana_back = (this->itembonuses.Clairvoyance * zone->random.Int(1, 100) / 100);
if(mana_back > cost)
mana_back = cost;
cost -= mana_back;
}
float PercentManaReduction = 0;
float SpecializeSkill = GetSpecializeSkillValue(spell_id);
int SuccessChance = zone->random.Int(0, 100);
float bonus = 1.0;
switch(GetAA(aaSpellCastingMastery)) {
case 1:
bonus += 0.05;
break;
case 2:
bonus += 0.15;
break;
case 3:
bonus += 0.30;
break;
}
bonus += (0.05 * GetAA(aaAdvancedSpellCastingMastery));
if(SuccessChance <= (SpecializeSkill * 0.3 * bonus)) {
PercentManaReduction = (1 + 0.05 * SpecializeSkill);
switch(GetAA(aaSpellCastingMastery)) {
case 1:
PercentManaReduction += 2.5;
break;
case 2:
PercentManaReduction += 5.0;
break;
case 3:
PercentManaReduction += 10.0;
break;
}
switch(GetAA(aaAdvancedSpellCastingMastery)) {
case 1:
PercentManaReduction += 2.5;
break;
case 2:
PercentManaReduction += 5.0;
break;
case 3:
PercentManaReduction += 10.0;
break;
}
}
int32 focus_redux = GetBotFocusEffect(focusManaCost, spell_id);
if(focus_redux > 0)
PercentManaReduction += zone->random.Real(1, (double)focus_redux);
cost -= (cost * (PercentManaReduction / 100));
if(focus_redux >= 100) {
uint32 buff_max = GetMaxTotalSlots();
for (int buffSlot = 0; buffSlot < buff_max; buffSlot++) {
if (buffs[buffSlot].spellid == 0 || buffs[buffSlot].spellid >= SPDAT_RECORDS)
continue;
if(IsEffectInSpell(buffs[buffSlot].spellid, SE_ReduceManaCost)) {
if(CalcFocusEffect(focusManaCost, buffs[buffSlot].spellid, spell_id) == 100)
cost = 1;
}
}
}
if(cost < 0)
cost = 0;
return cost;
}
float Bot::GetActSpellRange(uint16 spell_id, float range) {
float extrange = 100;
extrange += GetBotFocusEffect(focusRange, spell_id);
return ((range * extrange) / 100);
}
int32 Bot::GetActSpellDuration(uint16 spell_id, int32 duration) {
int increase = 100;
increase += GetBotFocusEffect(focusSpellDuration, spell_id);
int tic_inc = GetBotFocusEffect(focusSpellDurByTic, spell_id);
if(IsBeneficialSpell(spell_id)) {
switch (GetAA(aaSpellCastingReinforcement)) {
case 1:
increase += 5;
break;
case 2:
increase += 15;
break;
case 3:
increase += 30;
if (GetAA(aaSpellCastingReinforcementMastery) == 1)
increase += 20;
break;
}
}
if(IsMezSpell(spell_id))
tic_inc += GetAA(aaMesmerizationMastery);
return (((duration * increase) / 100) + tic_inc);
}
float Bot::GetAOERange(uint16 spell_id) {
float range;
range = spells[spell_id].aoe_range;
if(range == 0)
range = spells[spell_id].range;
if(range == 0)
range = 10;
if(IsBardSong(spell_id) && IsBeneficialSpell(spell_id)) {
float song_bonus = (aabonuses.SongRange + spellbonuses.SongRange + itembonuses.SongRange);
range += (range * song_bonus / 100.0f);
}
range = GetActSpellRange(spell_id, range);
return range;
}
bool Bot::SpellEffect(Mob* caster, uint16 spell_id, float partial) {
bool Result = false;
Result = Mob::SpellEffect(caster, spell_id, partial);
if(IsGrouped()) {
Group *g = GetGroup();
if(g) {
EQApplicationPacket hp_app;
CreateHPPacket(&hp_app);
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && g->members[i]->IsClient())
g->members[i]->CastToClient()->QueuePacket(&hp_app);
}
}
}
return Result;
}
void Bot::DoBuffTic(const Buffs_Struct &buff, int slot, Mob* caster) {
Mob::DoBuffTic(buff, slot, caster);
}
bool Bot::CastSpell(uint16 spell_id, uint16 target_id, EQ::spells::CastingSlot slot, int32 cast_time, int32 mana_cost,
uint32* oSpellWillFinish, uint32 item_slot, int16 *resist_adjust, uint32 aa_id) {
bool Result = false;
if(zone && !zone->IsSpellBlocked(spell_id, glm::vec3(GetPosition()))) {
// LogSpells("CastSpell called for spell [{}] ([{}]) on entity [{}], slot [{}], time [{}], mana [{}], from item slot [{}]", spells[spell_id].name, spell_id, target_id, slot, cast_time, mana_cost, (item_slot==0xFFFFFFFF)?999:item_slot);
if(casting_spell_id == spell_id)
ZeroCastingVars();
if(GetClass() != BARD) {
if(!IsValidSpell(spell_id) || casting_spell_id || delaytimer || spellend_timer.Enabled() || IsStunned() || IsFeared() || IsMezzed() || (IsSilenced() && !IsDiscipline(spell_id)) || (IsAmnesiad() && IsDiscipline(spell_id))) {
LogSpells("Spell casting canceled: not able to cast now. Valid? [{}], casting [{}], waiting? [{}], spellend? [{}], stunned? [{}], feared? [{}], mezed? [{}], silenced? [{}]", IsValidSpell(spell_id), casting_spell_id, delaytimer, spellend_timer.Enabled(), IsStunned(), IsFeared(), IsMezzed(), IsSilenced() );
if(IsSilenced() && !IsDiscipline(spell_id))
MessageString(Chat::Red, SILENCED_STRING);
if(IsAmnesiad() && IsDiscipline(spell_id))
MessageString(Chat::Red, MELEE_SILENCE);
if(casting_spell_id)
AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
return false;
}
}
if(IsDetrimentalSpell(spell_id) && !zone->CanDoCombat()){
MessageString(Chat::Red, SPELL_WOULDNT_HOLD);
if(casting_spell_id)
AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
return false;
}
if(DivineAura()) {
LogSpells("Spell casting canceled: cannot cast while Divine Aura is in effect");
InterruptSpell(173, 0x121, false);
return false;
}
if(slot < EQ::spells::CastingSlot::MaxGems && !CheckFizzle(spell_id)) {
int fizzle_msg = IsBardSong(spell_id) ? MISS_NOTE : SPELL_FIZZLE;
InterruptSpell(fizzle_msg, 0x121, spell_id);
uint32 use_mana = ((spells[spell_id].mana) / 4);
LogSpells("Spell casting canceled: fizzled. [{}] mana has been consumed", use_mana);
SetMana(GetMana() - use_mana);
return false;
}
if (HasActiveSong()) {
LogSpells("Casting a new spell/song while singing a song. Killing old song [{}]", bardsong);
bardsong = 0;
bardsong_target_id = 0;
bardsong_slot = EQ::spells::CastingSlot::Gem1;
bardsong_timer.Disable();
}
Result = DoCastSpell(spell_id, target_id, slot, cast_time, mana_cost, oSpellWillFinish, item_slot, aa_id);
}
return Result;
}
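// Bots only accept ST_GroupTeleport-targeted spells from the whitelist below; detrimental spells
// are never cast on bots or clients here unless they carry resurrection effects.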
bool Bot::SpellOnTarget(uint16 spell_id, Mob* spelltar) {
bool Result = false;
if(!IsValidSpell(spell_id))
return false;
if(spelltar) {
if(spelltar->IsBot() && (spells[spell_id].target_type == ST_GroupTeleport)) {
switch(spell_id) {
// Paladin
case 3577: // Wave of Life
case 4065: // Blessing of Austerity
case 1455: // Wave of Healing
case 2589: // Healing Wave of Prexus
case 3427: // Wave of Marr
case 3683: // Ethereal Cleansing
case 1283: // Celestial Cleansing
case 3485: // Supernal Cleansing
case 5293: // Pious Cleansing
case 4893: // Wave of Trushar
case 5295: // Jeron's Mark
case 5296: // Wave of Piety
// Bard
case 4085: // Forpar's Aria of Affliction
case 4083: // Rizlona's Embers
case 4086: // Forpar's Psalm of Pain
case 4084: // Rizlona's Fire
case 6734: // Song of the Storm
case 3651: // Wind of Marr
case 4087: // Forpar's Verse of Venom
case 3362: // Rizlona's Call of Flame
case 4112: // Call of the Muse
case 4872: // Echo of the Trusik
case 4873: // Dark Echo
case 5377: // Cantata of Life
case 5380: // Yelhun's Mystic Call
case 5382: // Eriki's Psalm of Power
case 6666: // Storm Blade
case 5388: // Ancient Call of Power
// Cleric
case 134: // Word of Health
case 136: // Word of Healing
case 1520: // Word of Vigor
case 1521: // Word of Restoration
case 1523: // Word of Redemption
case 3471: // Word of Replenishment
case 5270: // Word of Vivification
case 2502: // Celestial Remedy
case 2175: // Celestial Health
case 1444: // Celestial Healing
case 1522: // Celestial Elixir
case 2180: // Etherial Elixir
case 3047: // Kazad's Mark
case 3475: // Supernal Elixir
case 4053: // Blessing of Temperance
case 4108: // Aura of Reverence
case 4882: // Holy Elixir
case 5259: // Pious Elixir
case 5272: // Aura of Devotion
case 5277: // Balikor's Mark
// Enchanter
case 5517: // Circle of Alendar
case 6671: // Rune of Rikkukin
case 6739: // Rune of the Scale
// Shaman
case 2521: // Talisman of the Beast
case 4055: // Pack Shrew
case 3842: // Blood of Nadox
case 5417: // Champion
// Druid
case 4058: // Feral Pack
case 2520: // Natures Recovery
break;
default:
return false;
}
}
if(IsDetrimentalSpell(spell_id) && (spelltar->IsBot() || spelltar->IsClient()) && !IsResurrectionEffects(spell_id))
return false;
if(spelltar->IsPet()) {
for(int i= 0; i < EFFECT_COUNT; ++i) {
if(spells[spell_id].effect_id[i] == SE_Illusion)
return false;
}
}
Result = Mob::SpellOnTarget(spell_id, spelltar);
}
return Result;
}
bool Bot::IsImmuneToSpell(uint16 spell_id, Mob *caster) {
bool Result = false;
if(!caster)
return false;
if(!IsSacrificeSpell(spell_id) && (zone->GetZoneID() != 202) && (this != caster)) {
Result = Mob::IsImmuneToSpell(spell_id, caster);
if(!Result) {
if(caster->IsBot()) {
if(spells[spell_id].target_type == ST_Undead) {
if((GetBodyType() != BT_SummonedUndead) && (GetBodyType() != BT_Undead) && (GetBodyType() != BT_Vampire)) {
LogSpells("Bot's target is not an undead");
return true;
}
}
if(spells[spell_id].target_type == ST_Summoned) {
if((GetBodyType() != BT_SummonedUndead) && (GetBodyType() != BT_Summoned) && (GetBodyType() != BT_Summoned2) && (GetBodyType() != BT_Summoned3)) {
LogSpells("Bot's target is not a summoned creature");
return true;
}
}
}
LogSpells("No bot immunities to spell [{}] found", spell_id);
}
}
return Result;
}
bool Bot::DetermineSpellTargets(uint16 spell_id, Mob *&spell_target, Mob *&ae_center, CastAction_type &CastAction, EQ::spells::CastingSlot slot) {
bool Result = false;
SpellTargetType targetType = spells[spell_id].target_type;
if(targetType == ST_GroupClientAndPet) {
if((spell_id == 1768 && zone->GetZoneID() == 202) || (!IsDetrimentalSpell(spell_id))) {
CastAction = SingleTarget;
return true;
}
}
Result = Mob::DetermineSpellTargets(spell_id, spell_target, ae_center, CastAction, slot);
return Result;
}
bool Bot::DoCastSpell(uint16 spell_id, uint16 target_id, EQ::spells::CastingSlot slot, int32 cast_time, int32 mana_cost, uint32* oSpellWillFinish, uint32 item_slot, uint32 aa_id) {
bool Result = false;
if(GetClass() == BARD)
cast_time = 0;
Result = Mob::DoCastSpell(spell_id, target_id, slot, cast_time, mana_cost, oSpellWillFinish, item_slot, aa_id);
if(oSpellWillFinish) {
const SPDat_Spell_Struct &spell = spells[spell_id];
*oSpellWillFinish = Timer::GetCurrentTime() + ((spell.recast_time > 20000) ? 10000 : spell.recast_time);
}
return Result;
}
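// Base mana from WIS/INT. On SoD+ clients the stat is first converted with diminishing returns:
// e.g. WisInt 200 -> ((200 - 100) * 5 / 2) + 100 = 350, and points above 201 are clawed back at
// 5/4 before the level-banded mana formula is applied.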
int32 Bot::GenerateBaseManaPoints() {
int32 bot_mana = 0;
int32 WisInt = 0;
int32 MindLesserFactor, MindFactor;
int wisint_mana = 0;
int base_mana = 0;
int ConvertedWisInt = 0;
switch(GetCasterClass()) {
case 'I':
WisInt = INT;
if (GetOwner() && GetOwner()->CastToClient() && GetOwner()->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoD && RuleB(Character, SoDClientUseSoDHPManaEnd)) {
if(WisInt > 100) {
ConvertedWisInt = (((WisInt - 100) * 5 / 2) + 100);
if(WisInt > 201)
ConvertedWisInt -= ((WisInt - 201) * 5 / 4);
}
else
ConvertedWisInt = WisInt;
if(GetLevel() < 41) {
wisint_mana = (GetLevel() * 75 * ConvertedWisInt / 1000);
base_mana = (GetLevel() * 15);
} else if(GetLevel() < 81) {
wisint_mana = ((3 * ConvertedWisInt) + ((GetLevel() - 40) * 15 * ConvertedWisInt / 100));
base_mana = (600 + ((GetLevel() - 40) * 30));
} else {
wisint_mana = (9 * ConvertedWisInt);
base_mana = (1800 + ((GetLevel() - 80) * 18));
}
bot_mana = (base_mana + wisint_mana);
} else {
if(((WisInt - 199) / 2) > 0)
MindLesserFactor = ((WisInt - 199) / 2);
else
MindLesserFactor = 0;
MindFactor = WisInt - MindLesserFactor;
if(WisInt > 100)
bot_mana = (((5 * (MindFactor + 20)) / 2) * 3 * GetLevel() / 40);
else
bot_mana = (((5 * (MindFactor + 200)) / 2) * 3 * GetLevel() / 100);
}
break;
case 'W':
WisInt = WIS;
if (GetOwner() && GetOwner()->CastToClient() && GetOwner()->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoD && RuleB(Character, SoDClientUseSoDHPManaEnd)) {
if(WisInt > 100) {
ConvertedWisInt = (((WisInt - 100) * 5 / 2) + 100);
if(WisInt > 201)
ConvertedWisInt -= ((WisInt - 201) * 5 / 4);
} else
ConvertedWisInt = WisInt;
if(GetLevel() < 41) {
wisint_mana = (GetLevel() * 75 * ConvertedWisInt / 1000);
base_mana = (GetLevel() * 15);
} else if(GetLevel() < 81) {
wisint_mana = ((3 * ConvertedWisInt) + ((GetLevel() - 40) * 15 * ConvertedWisInt / 100));
base_mana = (600 + ((GetLevel() - 40) * 30));
} else {
wisint_mana = (9 * ConvertedWisInt);
base_mana = (1800 + ((GetLevel() - 80) * 18));
}
bot_mana = (base_mana + wisint_mana);
} else {
if(((WisInt - 199) / 2) > 0)
MindLesserFactor = ((WisInt - 199) / 2);
else
MindLesserFactor = 0;
MindFactor = (WisInt - MindLesserFactor);
if(WisInt > 100)
bot_mana = (((5 * (MindFactor + 20)) / 2) * 3 * GetLevel() / 40);
else
bot_mana = (((5 * (MindFactor + 200)) / 2) * 3 * GetLevel() / 100);
}
break;
default:
bot_mana = 0;
break;
}
max_mana = bot_mana;
return bot_mana;
}
void Bot::GenerateSpecialAttacks() {
if(((GetClass() == MONK) || (GetClass() == WARRIOR) || (GetClass() == RANGER) || (GetClass() == BERSERKER)) && (GetLevel() >= 60))
SetSpecialAbility(SPECATK_TRIPLE, 1);
}
bool Bot::DoFinishedSpellAETarget(uint16 spell_id, Mob* spellTarget, EQ::spells::CastingSlot slot, bool& stopLogic) {
if(GetClass() == BARD) {
if(!ApplyNextBardPulse(bardsong, this, bardsong_slot))
InterruptSpell(SONG_ENDS_ABRUPTLY, 0x121, bardsong);
stopLogic = true;
}
return true;
}
bool Bot::DoFinishedSpellSingleTarget(uint16 spell_id, Mob* spellTarget, EQ::spells::CastingSlot slot, bool& stopLogic) {
if(spellTarget) {
if(IsGrouped() && (spellTarget->IsBot() || spellTarget->IsClient()) && RuleB(Bots, GroupBuffing)) {
bool noGroupSpell = false;
uint16 thespell = spell_id;
for(int i = 0; i < AIspells.size(); i++) {
int j = BotGetSpells(i);
int spelltype = BotGetSpellType(i);
bool spellequal = (j == thespell);
bool spelltypeequal = ((spelltype == 2) || (spelltype == 16) || (spelltype == 32));
bool spelltypetargetequal = ((spelltype == 8) && (spells[thespell].target_type == ST_Self));
bool spelltypeclassequal = ((spelltype == 1024) && (GetClass() == SHAMAN));
bool slotequal = (slot == EQ::spells::CastingSlot::Item);
if(spellequal || slotequal) {
if((spelltypeequal || spelltypetargetequal) || spelltypeclassequal || slotequal) {
if(((spells[thespell].effect_id[0] == 0) && (spells[thespell].base_value[0] < 0)) &&
(spellTarget->GetHP() < ((spells[thespell].base_value[0] * (-1)) + 100))) {
LogSpells("Bot::DoFinishedSpellSingleTarget - GroupBuffing failure");
return false;
}
SpellOnTarget(thespell, spellTarget);
noGroupSpell = true;
stopLogic = true;
}
}
}
if(!noGroupSpell) {
Group *g = GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i]) {
if((g->members[i]->GetClass() == NECROMANCER) && (IsEffectInSpell(thespell, SE_AbsorbMagicAtt) || IsEffectInSpell(thespell, SE_Rune))) {
// intentionally skip rune-type buffs on necromancer group members
}
else
SpellOnTarget(thespell, g->members[i]);
if(g->members[i] && g->members[i]->GetPetID())
SpellOnTarget(thespell, g->members[i]->GetPet());
}
}
SetMana(GetMana() - (GetActSpellCost(thespell, spells[thespell].mana) * (g->GroupCount() - 1)));
}
}
stopLogic = true;
}
}
return true;
}
bool Bot::DoFinishedSpellGroupTarget(uint16 spell_id, Mob* spellTarget, EQ::spells::CastingSlot slot, bool& stopLogic) {
bool isMainGroupMGB = false; // placeholder: bot Mass Group Buff support is not implemented, so this branch never runs
if(isMainGroupMGB && (GetClass() != BARD)) {
BotGroupSay(this, "MGB %s", spells[spell_id].name);
SpellOnTarget(spell_id, this);
entity_list.AESpell(this, this, spell_id, true);
} else {
Group *g = GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; ++i) {
if(g->members[i]) {
SpellOnTarget(spell_id, g->members[i]);
if(g->members[i] && g->members[i]->GetPetID())
SpellOnTarget(spell_id, g->members[i]->GetPet());
}
}
}
}
stopLogic = true;
return true;
}
void Bot::CalcBonuses() {
memset(&itembonuses, 0, sizeof(StatBonuses));
GenerateBaseStats();
CalcItemBonuses(&itembonuses);
CalcSpellBonuses(&spellbonuses);
CalcAABonuses(&aabonuses);
SetAttackTimer();
CalcATK();
CalcSTR();
CalcSTA();
CalcDEX();
CalcAGI();
CalcINT();
CalcWIS();
CalcCHA();
CalcMR();
CalcFR();
CalcDR();
CalcPR();
CalcCR();
CalcCorrup();
CalcAC();
CalcMaxHP();
CalcMaxMana();
CalcMaxEndurance();
hp_regen = CalcHPRegen();
mana_regen = CalcManaRegen();
end_regen = CalcEnduranceRegen();
}
int32 Bot::CalcHPRegenCap(){
int32 hpregen_cap = 0;
hpregen_cap = (RuleI(Character, ItemHealthRegenCap) + itembonuses.HeroicSTA / 25);
hpregen_cap += (aabonuses.ItemHPRegenCap + spellbonuses.ItemHPRegenCap + itembonuses.ItemHPRegenCap);
return (hpregen_cap * RuleI(Character, HPRegenMultiplier) / 100);
}
int32 Bot::CalcManaRegenCap(){
int32 cap = RuleI(Character, ItemManaRegenCap) + aabonuses.ItemManaRegenCap;
switch(GetCasterClass()) {
case 'I':
cap += (itembonuses.HeroicINT / 25);
break;
case 'W':
cap += (itembonuses.HeroicWIS / 25);
break;
}
return (cap * RuleI(Character, ManaRegenMultiplier) / 100);
}
int32 Bot::GetMaxStat() {
int level = GetLevel();
int32 base = 0;
if (level < 61)
base = 255;
else if (GetOwner() && GetOwner()->CastToClient() && GetOwner()->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoF)
base = (255 + 5 * (level - 60));
else if (level < 71)
base = (255 + 5 * (level - 60));
else
base = 330;
return base;
}
int32 Bot::GetMaxResist() {
int level = GetLevel();
int32 base = 500;
if(level > 60)
base += ((level - 60) * 5);
return base;
}
int32 Bot::GetMaxSTR() {
return (GetMaxStat() + itembonuses.STRCapMod + spellbonuses.STRCapMod + aabonuses.STRCapMod);
}
int32 Bot::GetMaxSTA() {
return (GetMaxStat() + itembonuses.STACapMod + spellbonuses.STACapMod + aabonuses.STACapMod);
}
int32 Bot::GetMaxDEX() {
return (GetMaxStat() + itembonuses.DEXCapMod + spellbonuses.DEXCapMod + aabonuses.DEXCapMod);
}
int32 Bot::GetMaxAGI() {
return (GetMaxStat() + itembonuses.AGICapMod + spellbonuses.AGICapMod + aabonuses.AGICapMod);
}
int32 Bot::GetMaxINT() {
return (GetMaxStat() + itembonuses.INTCapMod + spellbonuses.INTCapMod + aabonuses.INTCapMod);
}
int32 Bot::GetMaxWIS() {
return (GetMaxStat() + itembonuses.WISCapMod + spellbonuses.WISCapMod + aabonuses.WISCapMod);
}
int32 Bot::GetMaxCHA() {
return (GetMaxStat() + itembonuses.CHACapMod + spellbonuses.CHACapMod + aabonuses.CHACapMod);
}
int32 Bot::GetMaxMR() {
return (GetMaxResist() + itembonuses.MRCapMod + spellbonuses.MRCapMod + aabonuses.MRCapMod);
}
int32 Bot::GetMaxPR() {
return (GetMaxResist() + itembonuses.PRCapMod + spellbonuses.PRCapMod + aabonuses.PRCapMod);
}
int32 Bot::GetMaxDR() {
return (GetMaxResist() + itembonuses.DRCapMod + spellbonuses.DRCapMod + aabonuses.DRCapMod);
}
int32 Bot::GetMaxCR() {
return (GetMaxResist() + itembonuses.CRCapMod + spellbonuses.CRCapMod + aabonuses.CRCapMod);
}
int32 Bot::GetMaxFR() {
return (GetMaxResist() + itembonuses.FRCapMod + spellbonuses.FRCapMod + aabonuses.FRCapMod);
}
int32 Bot::GetMaxCorrup() {
return (GetMaxResist() + itembonuses.CorrupCapMod + spellbonuses.CorrupCapMod + aabonuses.CorrupCapMod);
}
int32 Bot::CalcSTR() {
int32 val = (STR + itembonuses.STR + spellbonuses.STR);
int32 mod = aabonuses.STR;
if(val > 255 && GetLevel() <= 60)
val = 255;
STR = (val + mod);
if(STR < 1)
STR = 1;
int m = GetMaxSTR();
if(STR > m)
STR = m;
return STR;
}
int32 Bot::CalcSTA() {
int32 val = (STA + itembonuses.STA + spellbonuses.STA);
int32 mod = aabonuses.STA;
if(val > 255 && GetLevel() <= 60)
val = 255;
STA = (val + mod);
if(STA < 1)
STA = 1;
int m = GetMaxSTA();
if(STA > m)
STA = m;
return STA;
}
int32 Bot::CalcAGI() {
int32 val = (AGI + itembonuses.AGI + spellbonuses.AGI);
int32 mod = aabonuses.AGI;
if(val > 255 && GetLevel() <= 60)
val = 255;
AGI = (val + mod);
if(AGI < 1)
AGI = 1;
int m = GetMaxAGI();
if(AGI > m)
AGI = m;
return AGI;
}
int32 Bot::CalcDEX() {
int32 val = (DEX + itembonuses.DEX + spellbonuses.DEX);
int32 mod = aabonuses.DEX;
if(val > 255 && GetLevel() <= 60)
val = 255;
DEX = (val + mod);
if(DEX < 1)
DEX = 1;
int m = GetMaxDEX();
if(DEX > m)
DEX = m;
return DEX;
}
int32 Bot::CalcINT() {
int32 val = (INT + itembonuses.INT + spellbonuses.INT);
int32 mod = aabonuses.INT;
if(val > 255 && GetLevel() <= 60)
val = 255;
INT = (val + mod);
if(INT < 1)
INT = 1;
int m = GetMaxINT();
if(INT > m)
INT = m;
return INT;
}
int32 Bot::CalcWIS() {
int32 val = (WIS + itembonuses.WIS + spellbonuses.WIS);
int32 mod = aabonuses.WIS;
if(val > 255 && GetLevel() <= 60)
val = 255;
WIS = (val + mod);
if(WIS < 1)
WIS = 1;
int m = GetMaxWIS();
if(WIS > m)
WIS = m;
return WIS;
}
int32 Bot::CalcCHA() {
int32 val = (CHA + itembonuses.CHA + spellbonuses.CHA);
int32 mod = aabonuses.CHA;
if(val > 255 && GetLevel() <= 60)
val = 255;
CHA = (val + mod);
if(CHA < 1)
CHA = 1;
int m = GetMaxCHA();
if(CHA > m)
CHA = m;
return CHA;
}
int32 Bot::CalcMR() {
MR += (itembonuses.MR + spellbonuses.MR + aabonuses.MR);
if(GetClass() == WARRIOR)
MR += (GetLevel() / 2);
if(MR < 1)
MR = 1;
if(MR > GetMaxMR())
MR = GetMaxMR();
return MR;
}
int32 Bot::CalcFR() {
int c = GetClass();
if(c == RANGER) {
FR += 4;
int l = GetLevel();
if(l > 49)
FR += (l - 49);
}
FR += (itembonuses.FR + spellbonuses.FR + aabonuses.FR);
if(FR < 1)
FR = 1;
if(FR > GetMaxFR())
FR = GetMaxFR();
return FR;
}
int32 Bot::CalcDR() {
int c = GetClass();
if(c == PALADIN) {
DR += 8;
int l = GetLevel();
if(l > 49)
DR += (l - 49);
} else if(c == SHADOWKNIGHT) {
DR += 4;
int l = GetLevel();
if(l > 49)
DR += (l - 49);
}
DR += (itembonuses.DR + spellbonuses.DR + aabonuses.DR);
if(DR < 1)
DR = 1;
if(DR > GetMaxDR())
DR = GetMaxDR();
return DR;
}
int32 Bot::CalcPR() {
int c = GetClass();
if(c == ROGUE) {
PR += 8;
int l = GetLevel();
if(l > 49)
PR += (l - 49);
} else if(c == SHADOWKNIGHT) {
PR += 4;
int l = GetLevel();
if(l > 49)
PR += (l - 49);
}
PR += (itembonuses.PR + spellbonuses.PR + aabonuses.PR);
if(PR < 1)
PR = 1;
if(PR > GetMaxPR())
PR = GetMaxPR();
return PR;
}
int32 Bot::CalcCR() {
int c = GetClass();
if(c == RANGER) {
CR += 4;
int l = GetLevel();
if(l > 49)
CR += (l - 49);
}
CR += (itembonuses.CR + spellbonuses.CR + aabonuses.CR);
if(CR < 1)
CR = 1;
if(CR > GetMaxCR())
CR = GetMaxCR();
return CR;
}
int32 Bot::CalcCorrup() {
Corrup = (Corrup + itembonuses.Corrup + spellbonuses.Corrup + aabonuses.Corrup);
if(Corrup > GetMaxCorrup())
Corrup = GetMaxCorrup();
return Corrup;
}
int32 Bot::CalcATK() {
ATK = (itembonuses.ATK + spellbonuses.ATK + aabonuses.ATK + GroupLeadershipAAOffenseEnhancement());
return ATK;
}
void Bot::CalcRestState() {
if(!RuleB(Character, RestRegenEnabled))
return;
RestRegenHP = RestRegenMana = RestRegenEndurance = 0;
if(IsEngaged() || !IsSitting() || !rest_timer.Check(false))
return;
uint32 buff_count = GetMaxTotalSlots();
for (unsigned int j = 0; j < buff_count; j++) {
if(buffs[j].spellid != SPELL_UNKNOWN) {
if(IsDetrimentalSpell(buffs[j].spellid) && (buffs[j].ticsremaining > 0))
if(!DetrimentalSpellAllowsRest(buffs[j].spellid))
return;
}
}
RestRegenHP = 6 * (GetMaxHP() / zone->newzone_data.FastRegenHP);
RestRegenMana = 6 * (GetMaxMana() / zone->newzone_data.FastRegenMana);
RestRegenEndurance = 6 * (GetMaxEndurance() / zone->newzone_data.FastRegenEndurance);
}
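// Classic level-banded HP regen: below 51 sitting regenerates 2-4 points (doubled for the races
// in BaseHPRegenBonusRaces) versus 1 standing; from 51 up a per-band base is scaled by the race
// bonus multiplier.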
int32 Bot::LevelRegen() {
int level = GetLevel();
bool bonus = GetPlayerRaceBit(_baseRace) & RuleI(Character, BaseHPRegenBonusRaces);
uint8 multiplier1 = bonus ? 2 : 1;
int32 hp = 0;
if (level < 51) {
if (IsSitting()) {
if (level < 20)
hp += (2 * multiplier1);
else if (level < 50)
hp += (3 * multiplier1);
else
hp += (4 * multiplier1);
} else
hp += (1 * multiplier1);
} else {
int32 tmp = 0;
float multiplier2 = 1;
if (level < 56) {
tmp = 2;
if (bonus)
multiplier2 = 3;
} else if (level < 60) {
tmp = 3;
if (bonus)
multiplier2 = 3.34;
}
else if (level < 61) {
tmp = 4;
if (bonus)
multiplier2 = 3;
} else if (level < 63) {
tmp = 5;
if (bonus)
multiplier2 = 2.8;
} else if (level < 65) {
tmp = 6;
if (bonus)
multiplier2 = 2.67;
} else {
tmp = 7;
if (bonus)
multiplier2 = 2.58;
}
hp += (int32(float(tmp) * multiplier2));
}
return hp;
}
int32 Bot::CalcHPRegen() {
int32 regen = (LevelRegen() + itembonuses.HPRegen + spellbonuses.HPRegen);
regen += (aabonuses.HPRegen + GroupLeadershipAAHealthRegeneration());
regen = ((regen * RuleI(Character, HPRegenMultiplier)) / 100);
return regen;
}
int32 Bot::CalcManaRegen() {
uint8 level = GetLevel();
uint8 botclass = GetClass();
int32 regen = 0;
if (IsSitting()) {
BuffFadeBySitModifier();
if(botclass != WARRIOR && botclass != MONK && botclass != ROGUE && botclass != BERSERKER) {
regen = ((((GetSkill(EQ::skills::SkillMeditate) / 10) + (level - (level / 4))) / 4) + 4);
regen += (spellbonuses.ManaRegen + itembonuses.ManaRegen);
} else
regen = (2 + spellbonuses.ManaRegen + itembonuses.ManaRegen);
} else
regen = (2 + spellbonuses.ManaRegen + itembonuses.ManaRegen);
if(GetCasterClass() == 'I')
regen += (itembonuses.HeroicINT / 25);
else if(GetCasterClass() == 'W')
regen += (itembonuses.HeroicWIS / 25);
else
regen = 0;
regen += aabonuses.ManaRegen;
regen = ((regen * RuleI(Character, ManaRegenMultiplier)) / 100);
float mana_regen_rate = RuleR(Bots, ManaRegen);
if(mana_regen_rate < 0.0f)
mana_regen_rate = 0.0f;
regen = (regen * mana_regen_rate);
return regen;
}
uint32 Bot::GetClassHPFactor() {
uint32 factor;
switch(GetClass()) {
case BEASTLORD:
case BERSERKER:
case MONK:
case ROGUE:
case SHAMAN:
factor = 255;
break;
case BARD:
case CLERIC:
factor = 264;
break;
case SHADOWKNIGHT:
case PALADIN:
factor = 288;
break;
case RANGER:
factor = 276;
break;
case WARRIOR:
factor = 300;
break;
default:
factor = 240;
break;
}
return factor;
}
int32 Bot::CalcMaxHP() {
int32 bot_hp = 0;
uint32 nd = 10000;
bot_hp += (GenerateBaseHitPoints() + itembonuses.HP);
nd += aabonuses.MaxHP;
bot_hp = ((float)bot_hp * (float)nd / (float)10000);
bot_hp += (spellbonuses.HP + aabonuses.HP);
bot_hp += GroupLeadershipAAHealthEnhancement();
bot_hp += (bot_hp * ((spellbonuses.MaxHPChange + itembonuses.MaxHPChange) / 10000.0f));
max_hp = bot_hp;
if (current_hp > max_hp)
current_hp = max_hp;
int hp_perc_cap = spellbonuses.HPPercCap[0];
if(hp_perc_cap) {
int curHP_cap = ((max_hp * hp_perc_cap) / 100);
if (current_hp > curHP_cap || (spellbonuses.HPPercCap[1] && current_hp > spellbonuses.HPPercCap[1]))
current_hp = curHP_cap;
}
return max_hp;
}
int32 Bot::CalcMaxEndurance() {
max_end = (CalcBaseEndurance() + spellbonuses.Endurance + itembonuses.Endurance);
if (max_end < 0)
max_end = 0;
if (cur_end > max_end)
cur_end = max_end;
int end_perc_cap = spellbonuses.EndPercCap[0];
if(end_perc_cap) {
int curEnd_cap = ((max_end * end_perc_cap) / 100);
if (cur_end > curEnd_cap || (spellbonuses.EndPercCap[1] && cur_end > spellbonuses.EndPercCap[1]))
cur_end = curEnd_cap;
}
return max_end;
}
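// Base endurance from STR/STA/DEX/AGI. The SoD+ path mirrors the mana stat conversion above;
// the classic path sums stat bonuses at the 400 and 800 breakpoints before scaling by level.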
int32 Bot::CalcBaseEndurance() {
int32 base_end = 0;
int32 base_endurance = 0;
int32 ConvertedStats = 0;
int32 sta_end = 0;
int Stats = 0;
if (GetOwner() && GetOwner()->CastToClient() && GetOwner()->CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::SoD && RuleB(Character, SoDClientUseSoDHPManaEnd)) {
int HeroicStats = 0;
Stats = ((GetSTR() + GetSTA() + GetDEX() + GetAGI()) / 4);
HeroicStats = ((GetHeroicSTR() + GetHeroicSTA() + GetHeroicDEX() + GetHeroicAGI()) / 4);
if (Stats > 100) {
ConvertedStats = (((Stats - 100) * 5 / 2) + 100);
if (Stats > 201)
ConvertedStats -= ((Stats - 201) * 5 / 4);
} else
ConvertedStats = Stats;
if (GetLevel() < 41) {
sta_end = (GetLevel() * 75 * ConvertedStats / 1000);
base_endurance = (GetLevel() * 15);
} else if (GetLevel() < 81) {
sta_end = ((3 * ConvertedStats) + ((GetLevel() - 40) * 15 * ConvertedStats / 100));
base_endurance = (600 + ((GetLevel() - 40) * 30));
} else {
sta_end = (9 * ConvertedStats);
base_endurance = (1800 + ((GetLevel() - 80) * 18));
}
base_end = (base_endurance + sta_end + (HeroicStats * 10));
} else {
Stats = (GetSTR()+GetSTA()+GetDEX()+GetAGI());
int LevelBase = (GetLevel() * 15);
int at_most_800 = Stats;
if(at_most_800 > 800)
at_most_800 = 800;
int Bonus400to800 = 0;
int HalfBonus400to800 = 0;
int Bonus800plus = 0;
int HalfBonus800plus = 0;
int BonusUpto800 = int(at_most_800 / 4);
if(Stats > 400) {
Bonus400to800 = int((at_most_800 - 400) / 4);
HalfBonus400to800 = int(std::max((at_most_800 - 400), 0) / 8);
if(Stats > 800) {
Bonus800plus = (int((Stats - 800) / 8) * 2);
HalfBonus800plus = int((Stats - 800) / 16);
}
}
int bonus_sum = (BonusUpto800 + Bonus400to800 + HalfBonus400to800 + Bonus800plus + HalfBonus800plus);
base_end = LevelBase;
base_end += ((bonus_sum * 3 * GetLevel()) / 40);
}
return base_end;
}
int32 Bot::CalcEnduranceRegen() {
int32 regen = (int32(GetLevel() * 4 / 10) + 2);
regen += (spellbonuses.EnduranceRegen + itembonuses.EnduranceRegen);
return (regen * RuleI(Character, EnduranceRegenMultiplier) / 100);
}
int32 Bot::CalcEnduranceRegenCap() {
int cap = (RuleI(Character, ItemEnduranceRegenCap) + itembonuses.HeroicSTR / 25 + itembonuses.HeroicDEX / 25 + itembonuses.HeroicAGI / 25 + itembonuses.HeroicSTA / 25);
return (cap * RuleI(Character, EnduranceRegenMultiplier) / 100);
}
void Bot::SetEndurance(int32 newEnd) {
if(newEnd < 0)
newEnd = 0;
else if(newEnd > GetMaxEndurance())
newEnd = GetMaxEndurance();
cur_end = newEnd;
}
void Bot::DoEnduranceUpkeep() {
int upkeep_sum = 0;
int cost_redux = (spellbonuses.EnduranceReduction + itembonuses.EnduranceReduction);
uint32 buffs_i;
uint32 buff_count = GetMaxTotalSlots();
for (buffs_i = 0; buffs_i < buff_count; buffs_i++) {
if (buffs[buffs_i].spellid != SPELL_UNKNOWN) {
int upkeep = spells[buffs[buffs_i].spellid].endurance_upkeep;
if(upkeep > 0) {
if(cost_redux > 0) {
if(upkeep <= cost_redux)
continue;
upkeep -= cost_redux;
}
if((upkeep+upkeep_sum) > GetEndurance())
BuffFadeBySlot(buffs_i);
else
upkeep_sum += upkeep;
}
}
}
if(upkeep_sum != 0)
SetEndurance(GetEndurance() - upkeep_sum);
}
void Bot::Camp(bool databaseSave) {
Sit();
//auto group = GetGroup();
if(GetGroup())
RemoveBotFromGroup(this, GetGroup());
// RemoveBotFromGroup() code is too complicated for this to work as-is (still needs to be addressed to prevent memory leaks)
//if (group->GroupCount() < 2)
// group->DisbandGroup();
LeaveHealRotationMemberPool();
if(databaseSave)
Save();
Depop();
}
void Bot::Zone() {
if(HasGroup())
GetGroup()->MemberZoned(this);
Save();
Depop();
}
bool Bot::IsArcheryRange(Mob *target) {
bool result = false;
if(target) {
float range = (GetBotArcheryRange() + 5.0);
range *= range;
float targetDistance = DistanceSquaredNoZ(m_Position, target->GetPosition());
float minRuleDistance = (RuleI(Combat, MinRangedAttackDist) * RuleI(Combat, MinRangedAttackDist));
if((targetDistance > range) || (targetDistance < minRuleDistance))
result = false;
else
result = true;
}
return result;
}
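// Clears the healer/slower/nuker/doter role flags on every bot in the group, then (unless the
// group is disbanding) re-elects one member per role by class priority; e.g. for healing, a
// Cleric outranks a Druid, which outranks a Shaman, with hybrids used only as a fallback.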
void Bot::UpdateGroupCastingRoles(const Group* group, bool disband)
{
if (!group)
return;
for (auto iter : group->members) {
if (!iter)
continue;
if (iter->IsBot()) {
iter->CastToBot()->SetGroupHealer(false);
iter->CastToBot()->SetGroupSlower(false);
iter->CastToBot()->SetGroupNuker(false);
iter->CastToBot()->SetGroupDoter(false);
}
}
if (disband)
return;
Mob* healer = nullptr;
Mob* slower = nullptr;
Mob* nuker = nullptr;
Mob* doter = nullptr;
for (auto iter : group->members) {
if (!iter)
continue;
// GroupHealer
switch (iter->GetClass()) {
case CLERIC:
if (!healer)
healer = iter;
else
switch (healer->GetClass()) {
case CLERIC:
break;
default:
healer = iter;
}
break;
case DRUID:
if (!healer)
healer = iter;
else
switch (healer->GetClass()) {
case CLERIC:
case DRUID:
break;
default:
healer = iter;
}
break;
case SHAMAN:
if (!healer)
healer = iter;
else
switch (healer->GetClass()) {
case CLERIC:
case DRUID:
case SHAMAN:
break;
default:
healer = iter;
}
break;
case PALADIN:
case RANGER:
case BEASTLORD:
if (!healer)
healer = iter;
break;
default:
break;
}
// GroupSlower
switch (iter->GetClass()) {
case SHAMAN:
if (!slower)
slower = iter;
else
switch (slower->GetClass()) {
case SHAMAN:
break;
default:
slower = iter;
}
break;
case ENCHANTER:
if (!slower)
slower = iter;
else
switch (slower->GetClass()) {
case SHAMAN:
case ENCHANTER:
break;
default:
slower = iter;
}
break;
case BEASTLORD:
if (!slower)
slower = iter;
break;
default:
break;
}
// GroupNuker
switch (iter->GetClass()) {
// wizard
// magician
// necromancer
// enchanter
// druid
// cleric
// shaman
// shadowknight
// paladin
// ranger
// beastlord
default:
break;
}
// GroupDoter
switch (iter->GetClass()) {
default:
break;
}
}
if (healer && healer->IsBot())
healer->CastToBot()->SetGroupHealer();
if (slower && slower->IsBot())
slower->CastToBot()->SetGroupSlower();
if (nuker && nuker->IsBot())
nuker->CastToBot()->SetGroupNuker();
if (doter && doter->IsBot())
doter->CastToBot()->SetGroupDoter();
}
//void Bot::UpdateRaidCastingRoles(const Raid* raid, bool disband = false) { }
bool Bot::CanHeal() {
bool result = false;
if(!AI_HasSpells())
return false;
BotSpell botSpell;
botSpell.SpellId = 0;
botSpell.SpellIndex = 0;
botSpell.ManaCost = 0;
botSpell = GetFirstBotSpellBySpellType(this, SpellType_Heal);
if(botSpell.SpellId != 0)
result = true;
return result;
}
Bot* Bot::GetBotByBotClientOwnerAndBotName(Client* c, std::string botName) {
Bot* Result = nullptr;
if(c) {
std::list<Bot*> BotList = entity_list.GetBotsByBotOwnerCharacterID(c->CharacterID());
if(!BotList.empty()) {
for(std::list<Bot*>::iterator botListItr = BotList.begin(); botListItr != BotList.end(); ++botListItr) {
if(std::string((*botListItr)->GetCleanName()) == botName) {
Result = (*botListItr);
break;
}
}
}
}
return Result;
}
void Bot::ProcessBotGroupInvite(Client* c, std::string botName) {
if(c) {
Bot* invitedBot = GetBotByBotClientOwnerAndBotName(c, botName);
if(invitedBot && !invitedBot->HasGroup()) {
if(!c->IsGrouped()) {
Group *g = new Group(c);
if(AddBotToGroup(invitedBot, g)) {
entity_list.AddGroup(g);
database.SetGroupLeaderName(g->GetID(), c->GetName());
g->SaveGroupLeaderAA();
database.SetGroupID(c->GetName(), g->GetID(), c->CharacterID());
database.SetGroupID(invitedBot->GetCleanName(), g->GetID(), invitedBot->GetBotID());
} else {
delete g;
}
} else {
AddBotToGroup(invitedBot, c->GetGroup());
database.SetGroupID(invitedBot->GetCleanName(), c->GetGroup()->GetID(), invitedBot->GetBotID());
}
}
}
}
// Processes a group disband request from a Client for a Bot.
void Bot::ProcessBotGroupDisband(Client* c, std::string botName) {
if(c) {
Bot* tempBot = nullptr;
if(botName.empty())
tempBot = GetFirstBotInGroup(c->GetGroup());
else
tempBot = GetBotByBotClientOwnerAndBotName(c, botName);
RemoveBotFromGroup(tempBot, c->GetGroup());
}
}
// Handles all client zone change events
void Bot::ProcessClientZoneChange(Client* botOwner) {
if(botOwner) {
std::list<Bot*> BotList = entity_list.GetBotsByBotOwnerCharacterID(botOwner->CharacterID());
for(std::list<Bot*>::iterator itr = BotList.begin(); itr != BotList.end(); ++itr) {
Bot* tempBot = *itr;
if(tempBot) {
if(tempBot->HasGroup()) {
Group* g = tempBot->GetGroup();
if(g && g->IsGroupMember(botOwner)) {
if(botOwner && botOwner->IsClient()) {
// Modified so that bots zone with their owner whether or not that owner is the group leader.
if(tempBot->GetBotOwnerCharacterID() == botOwner->CharacterID() && g->IsGroupMember(botOwner))
tempBot->Zone();
else
tempBot->Camp();
}
}
else
tempBot->Camp();
}
else
tempBot->Camp();
}
}
}
}
// Finds and returns the first Bot object found in the specified group
Bot* Bot::GetFirstBotInGroup(Group* group) {
Bot* Result = nullptr;
if(group) {
for(int Counter = 0; Counter < MAX_GROUP_MEMBERS; Counter++) {
if (group->members[Counter] == nullptr) {
continue;
}
if(group->members[Counter]->IsBot()) {
Result = group->members[Counter]->CastToBot();
break;
}
}
}
return Result;
}
// Processes a client request to inspect a bot's equipment.
void Bot::ProcessBotInspectionRequest(Bot* inspectedBot, Client* client) {
if(inspectedBot && client) {
EQApplicationPacket* outapp = new EQApplicationPacket(OP_InspectAnswer, sizeof(InspectResponse_Struct));
InspectResponse_Struct* insr = (InspectResponse_Struct*) outapp->pBuffer;
insr->TargetID = inspectedBot->GetNPCTypeID();
insr->playerid = inspectedBot->GetID();
const EQ::ItemData* item = nullptr;
const EQ::ItemInstance* inst = nullptr;
for (int16 L = EQ::invslot::EQUIPMENT_BEGIN; L <= EQ::invslot::EQUIPMENT_END; L++) {
inst = inspectedBot->GetBotItem(L);
if(inst) {
item = inst->GetItem();
if(item) {
strcpy(insr->itemnames[L], item->Name);
insr->itemicons[L] = item->Icon;
}
else {
insr->itemnames[L][0] = '\0';
insr->itemicons[L] = 0xFFFFFFFF;
}
}
else {
insr->itemnames[L][0] = '\0';
insr->itemicons[L] = 0xFFFFFFFF;
}
}
strcpy(insr->text, inspectedBot->GetInspectMessage().text);
client->QueuePacket(outapp); // Send answer to requester
}
}
void Bot::CalcItemBonuses(StatBonuses* newbon)
{
const EQ::ItemData* itemtmp = nullptr;
for (int i = EQ::invslot::BONUS_BEGIN; i <= EQ::invslot::BONUS_STAT_END; ++i) {
const EQ::ItemInstance* item = GetBotItem(i);
if(item) {
AddItemBonuses(item, newbon);
}
}
// Caps
if(newbon->HPRegen > CalcHPRegenCap())
newbon->HPRegen = CalcHPRegenCap();
if(newbon->ManaRegen > CalcManaRegenCap())
newbon->ManaRegen = CalcManaRegenCap();
if(newbon->EnduranceRegen > CalcEnduranceRegenCap())
newbon->EnduranceRegen = CalcEnduranceRegenCap();
}
void Bot::AddItemBonuses(const EQ::ItemInstance *inst, StatBonuses* newbon, bool isAug, bool isTribute, int rec_override) {
if (!inst || !inst->IsClassCommon())
{
return;
}
if(inst->GetAugmentType()==0 && isAug == true)
{
return;
}
const EQ::ItemData *item = inst->GetItem();
if(!isTribute && !inst->IsEquipable(GetBaseRace(),GetClass()))
{
if (item->ItemType != EQ::item::ItemTypeFood && item->ItemType != EQ::item::ItemTypeDrink)
return;
}
if(GetLevel() < inst->GetItemRequiredLevel(true))
{
return;
}
auto rec_level = isAug ? rec_override : inst->GetItemRecommendedLevel(true);
if(GetLevel() >= rec_level)
{
newbon->AC += item->AC;
newbon->HP += item->HP;
newbon->Mana += item->Mana;
newbon->Endurance += item->Endur;
newbon->ATK += item->Attack;
newbon->STR += (item->AStr + item->HeroicStr);
newbon->STA += (item->ASta + item->HeroicSta);
newbon->DEX += (item->ADex + item->HeroicDex);
newbon->AGI += (item->AAgi + item->HeroicAgi);
newbon->INT += (item->AInt + item->HeroicInt);
newbon->WIS += (item->AWis + item->HeroicWis);
newbon->CHA += (item->ACha + item->HeroicCha);
newbon->MR += (item->MR + item->HeroicMR);
newbon->FR += (item->FR + item->HeroicFR);
newbon->CR += (item->CR + item->HeroicCR);
newbon->PR += (item->PR + item->HeroicPR);
newbon->DR += (item->DR + item->HeroicDR);
newbon->Corrup += (item->SVCorruption + item->HeroicSVCorrup);
newbon->STRCapMod += item->HeroicStr;
newbon->STACapMod += item->HeroicSta;
newbon->DEXCapMod += item->HeroicDex;
newbon->AGICapMod += item->HeroicAgi;
newbon->INTCapMod += item->HeroicInt;
newbon->WISCapMod += item->HeroicWis;
newbon->CHACapMod += item->HeroicCha;
newbon->MRCapMod += item->HeroicMR;
newbon->CRCapMod += item->HeroicCR;
newbon->FRCapMod += item->HeroicFR;
newbon->PRCapMod += item->HeroicPR;
newbon->DRCapMod += item->HeroicDR;
newbon->CorrupCapMod += item->HeroicSVCorrup;
newbon->HeroicSTR += item->HeroicStr;
newbon->HeroicSTA += item->HeroicSta;
newbon->HeroicDEX += item->HeroicDex;
newbon->HeroicAGI += item->HeroicAgi;
newbon->HeroicINT += item->HeroicInt;
newbon->HeroicWIS += item->HeroicWis;
newbon->HeroicCHA += item->HeroicCha;
newbon->HeroicMR += item->HeroicMR;
newbon->HeroicFR += item->HeroicFR;
newbon->HeroicCR += item->HeroicCR;
newbon->HeroicPR += item->HeroicPR;
newbon->HeroicDR += item->HeroicDR;
newbon->HeroicCorrup += item->HeroicSVCorrup;
}
else
{
int lvl = GetLevel();
newbon->AC += CalcRecommendedLevelBonus( lvl, rec_level, item->AC );
newbon->HP += CalcRecommendedLevelBonus( lvl, rec_level, item->HP );
newbon->Mana += CalcRecommendedLevelBonus( lvl, rec_level, item->Mana );
newbon->Endurance += CalcRecommendedLevelBonus( lvl, rec_level, item->Endur );
newbon->ATK += CalcRecommendedLevelBonus( lvl, rec_level, item->Attack );
newbon->STR += CalcRecommendedLevelBonus( lvl, rec_level, (item->AStr + item->HeroicStr) );
newbon->STA += CalcRecommendedLevelBonus( lvl, rec_level, (item->ASta + item->HeroicSta) );
newbon->DEX += CalcRecommendedLevelBonus( lvl, rec_level, (item->ADex + item->HeroicDex) );
newbon->AGI += CalcRecommendedLevelBonus( lvl, rec_level, (item->AAgi + item->HeroicAgi) );
newbon->INT += CalcRecommendedLevelBonus( lvl, rec_level, (item->AInt + item->HeroicInt) );
newbon->WIS += CalcRecommendedLevelBonus( lvl, rec_level, (item->AWis + item->HeroicWis) );
newbon->CHA += CalcRecommendedLevelBonus( lvl, rec_level, (item->ACha + item->HeroicCha) );
newbon->MR += CalcRecommendedLevelBonus( lvl, rec_level, (item->MR + item->HeroicMR) );
newbon->FR += CalcRecommendedLevelBonus( lvl, rec_level, (item->FR + item->HeroicFR) );
newbon->CR += CalcRecommendedLevelBonus( lvl, rec_level, (item->CR + item->HeroicCR) );
newbon->PR += CalcRecommendedLevelBonus( lvl, rec_level, (item->PR + item->HeroicPR) );
newbon->DR += CalcRecommendedLevelBonus( lvl, rec_level, (item->DR + item->HeroicDR) );
newbon->Corrup += CalcRecommendedLevelBonus( lvl, rec_level, (item->SVCorruption + item->HeroicSVCorrup) );
newbon->STRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicStr );
newbon->STACapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicSta );
newbon->DEXCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicDex );
newbon->AGICapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicAgi );
newbon->INTCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicInt );
newbon->WISCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicWis );
newbon->CHACapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicCha );
newbon->MRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicMR );
newbon->CRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicCR );
newbon->FRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicFR );
newbon->PRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicPR );
newbon->DRCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicDR );
newbon->CorrupCapMod += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicSVCorrup );
newbon->HeroicSTR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicStr );
newbon->HeroicSTA += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicSta );
newbon->HeroicDEX += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicDex );
newbon->HeroicAGI += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicAgi );
newbon->HeroicINT += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicInt );
newbon->HeroicWIS += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicWis );
newbon->HeroicCHA += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicCha );
newbon->HeroicMR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicMR );
newbon->HeroicFR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicFR );
newbon->HeroicCR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicCR );
newbon->HeroicPR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicPR );
newbon->HeroicDR += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicDR );
newbon->HeroicCorrup += CalcRecommendedLevelBonus( lvl, rec_level, item->HeroicSVCorrup );
}
//FatherNitwit: New style haste, shields, and regens
if(newbon->haste < (int32)item->Haste) {
newbon->haste = item->Haste;
}
if(item->Regen > 0)
newbon->HPRegen += item->Regen;
if(item->ManaRegen > 0)
newbon->ManaRegen += item->ManaRegen;
if(item->EnduranceRegen > 0)
newbon->EnduranceRegen += item->EnduranceRegen;
if(item->DamageShield > 0) {
if((newbon->DamageShield + item->DamageShield) > RuleI(Character, ItemDamageShieldCap))
newbon->DamageShield = RuleI(Character, ItemDamageShieldCap);
else
newbon->DamageShield += item->DamageShield;
}
if(item->SpellShield > 0) {
if((newbon->SpellShield + item->SpellShield) > RuleI(Character, ItemSpellShieldingCap))
newbon->SpellShield = RuleI(Character, ItemSpellShieldingCap);
else
newbon->SpellShield += item->SpellShield;
}
if(item->Shielding > 0) {
if((newbon->MeleeMitigation + item->Shielding) > RuleI(Character, ItemShieldingCap))
newbon->MeleeMitigation = RuleI(Character, ItemShieldingCap);
else
newbon->MeleeMitigation += item->Shielding;
}
if(item->StunResist > 0) {
if((newbon->StunResist + item->StunResist) > RuleI(Character, ItemStunResistCap))
newbon->StunResist = RuleI(Character, ItemStunResistCap);
else
newbon->StunResist += item->StunResist;
}
if(item->StrikeThrough > 0) {
if((newbon->StrikeThrough + item->StrikeThrough) > RuleI(Character, ItemStrikethroughCap))
newbon->StrikeThrough = RuleI(Character, ItemStrikethroughCap);
else
newbon->StrikeThrough += item->StrikeThrough;
}
if(item->Avoidance > 0) {
if((newbon->AvoidMeleeChance + item->Avoidance) > RuleI(Character, ItemAvoidanceCap))
newbon->AvoidMeleeChance = RuleI(Character, ItemAvoidanceCap);
else
newbon->AvoidMeleeChance += item->Avoidance;
}
if(item->Accuracy > 0) {
if((newbon->HitChance + item->Accuracy) > RuleI(Character, ItemAccuracyCap))
newbon->HitChance = RuleI(Character, ItemAccuracyCap);
else
newbon->HitChance += item->Accuracy;
}
if(item->CombatEffects > 0) {
if((newbon->ProcChance + item->CombatEffects) > RuleI(Character, ItemCombatEffectsCap))
newbon->ProcChance = RuleI(Character, ItemCombatEffectsCap);
else
newbon->ProcChance += item->CombatEffects;
}
if(item->DotShielding > 0) {
if((newbon->DoTShielding + item->DotShielding) > RuleI(Character, ItemDoTShieldingCap))
newbon->DoTShielding = RuleI(Character, ItemDoTShieldingCap);
else
newbon->DoTShielding += item->DotShielding;
}
if(item->HealAmt > 0) {
if((newbon->HealAmt + item->HealAmt) > RuleI(Character, ItemHealAmtCap))
newbon->HealAmt = RuleI(Character, ItemHealAmtCap);
else
newbon->HealAmt += item->HealAmt;
}
if(item->SpellDmg > 0) {
if((newbon->SpellDmg + item->SpellDmg) > RuleI(Character, ItemSpellDmgCap))
newbon->SpellDmg = RuleI(Character, ItemSpellDmgCap);
else
newbon->SpellDmg += item->SpellDmg;
}
if(item->Clairvoyance > 0) {
if((newbon->Clairvoyance + item->Clairvoyance) > RuleI(Character, ItemClairvoyanceCap))
newbon->Clairvoyance = RuleI(Character, ItemClairvoyanceCap);
else
newbon->Clairvoyance += item->Clairvoyance;
}
if(item->DSMitigation > 0) {
if((newbon->DSMitigation + item->DSMitigation) > RuleI(Character, ItemDSMitigationCap))
newbon->DSMitigation = RuleI(Character, ItemDSMitigationCap);
else
newbon->DSMitigation += item->DSMitigation;
}
if (item->Worn.Effect > 0 && item->Worn.Type == EQ::item::ItemEffectWorn) {// latent effects
ApplySpellsBonuses(item->Worn.Effect, item->Worn.Level, newbon, 0, item->Worn.Type);
}
if (item->Focus.Effect>0 && (item->Focus.Type == EQ::item::ItemEffectFocus)) { // focus effects
ApplySpellsBonuses(item->Focus.Effect, item->Focus.Level, newbon, 0);
}
switch(item->BardType)
{
case EQ::item::ItemTypeAllInstrumentTypes: // (e.g. Singing Short Sword)
{
if(item->BardValue > newbon->singingMod)
newbon->singingMod = item->BardValue;
if(item->BardValue > newbon->brassMod)
newbon->brassMod = item->BardValue;
if(item->BardValue > newbon->stringedMod)
newbon->stringedMod = item->BardValue;
if(item->BardValue > newbon->percussionMod)
newbon->percussionMod = item->BardValue;
if(item->BardValue > newbon->windMod)
newbon->windMod = item->BardValue;
break;
}
case EQ::item::ItemTypeSinging:
{
if(item->BardValue > newbon->singingMod)
newbon->singingMod = item->BardValue;
break;
}
case EQ::item::ItemTypeWindInstrument:
{
if(item->BardValue > newbon->windMod)
newbon->windMod = item->BardValue;
break;
}
case EQ::item::ItemTypeStringedInstrument:
{
if(item->BardValue > newbon->stringedMod)
newbon->stringedMod = item->BardValue;
break;
}
case EQ::item::ItemTypeBrassInstrument:
{
if(item->BardValue > newbon->brassMod)
newbon->brassMod = item->BardValue;
break;
}
case EQ::item::ItemTypePercussionInstrument:
{
if(item->BardValue > newbon->percussionMod)
newbon->percussionMod = item->BardValue;
break;
}
}
if (item->SkillModValue != 0 && item->SkillModType <= EQ::skills::HIGHEST_SKILL){
if ((item->SkillModValue > 0 && newbon->skillmod[item->SkillModType] < item->SkillModValue) ||
(item->SkillModValue < 0 && newbon->skillmod[item->SkillModType] > item->SkillModValue))
{
newbon->skillmod[item->SkillModType] = item->SkillModValue;
}
}
if (item->ExtraDmgSkill != 0 && item->ExtraDmgSkill <= EQ::skills::HIGHEST_SKILL) {
if((newbon->SkillDamageAmount[item->ExtraDmgSkill] + item->ExtraDmgAmt) > RuleI(Character, ItemExtraDmgCap))
newbon->SkillDamageAmount[item->ExtraDmgSkill] = RuleI(Character, ItemExtraDmgCap);
else
newbon->SkillDamageAmount[item->ExtraDmgSkill] += item->ExtraDmgAmt;
}
if (!isAug)
{
for (int i = EQ::invaug::SOCKET_BEGIN; i <= EQ::invaug::SOCKET_END; i++)
AddItemBonuses(inst->GetAugment(i),newbon,true, false, rec_level);
}
}
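// Scales an item stat down linearly when the wearer is below the item's recommended level,
// rounding the scaled value half away from zero. Illustrative example (values assumed):
// level 10 wearing a recommended-level-20 item with basestat 50 gives
// statmod = (10 * 10000 / 20) * 50 = 250000, then (250000 + 5000) / 10000 = 25.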
int Bot::CalcRecommendedLevelBonus(uint8 level, uint8 reclevel, int basestat)
{
if( (reclevel > 0) && (level < reclevel) )
{
int32 statmod = (level * 10000 / reclevel) * basestat;
if( statmod < 0 )
{
statmod -= 5000;
return (statmod/10000);
}
else
{
statmod += 5000;
return (statmod/10000);
}
}
return 0;
}
// This method calls everything necessary to perform all bot stat calculations, including spell buffs, equipment, AA bonuses, etc.
void Bot::CalcBotStats(bool showtext) {
if(!GetBotOwner())
return;
if(showtext) {
GetBotOwner()->Message(Chat::Yellow, "Updating %s...", GetCleanName());
}
// this code is annoying since many classes change their name and illusions change the race id
/*if(!IsValidRaceClassCombo()) {
GetBotOwner()->Message(Chat::Yellow, "A %s - %s bot was detected. Is this Race/Class combination allowed?.", GetRaceIDName(GetRace()), GetClassIDName(GetClass(), GetLevel()));
GetBotOwner()->Message(Chat::Yellow, "Previous Bots Code releases did not check Race/Class combinations during create.");
GetBotOwner()->Message(Chat::Yellow, "Unless you are experiencing heavy lag, you should delete and remake this bot.");
}*/
if(GetBotOwner()->GetLevel() != GetLevel())
SetLevel(GetBotOwner()->GetLevel());
for (int sindex = 0; sindex <= EQ::skills::HIGHEST_SKILL; ++sindex) {
skills[sindex] = content_db.GetSkillCap(GetClass(), (EQ::skills::SkillType)sindex, GetLevel());
}
taunt_timer.Start(1000);
if (GetClass() == MONK && GetLevel() >= 10) {
monkattack_timer.Start(1000);
}
LoadAAs();
GenerateSpecialAttacks();
if(showtext) {
GetBotOwner()->Message(Chat::Yellow, "Base stats:");
GetBotOwner()->Message(Chat::Yellow, "Level: %i HP: %i AC: %i Mana: %i STR: %i STA: %i DEX: %i AGI: %i INT: %i WIS: %i CHA: %i", GetLevel(), base_hp, AC, max_mana, STR, STA, DEX, AGI, INT, WIS, CHA);
GetBotOwner()->Message(Chat::Yellow, "Resists-- Magic: %i, Poison: %i, Fire: %i, Cold: %i, Disease: %i, Corruption: %i.",MR,PR,FR,CR,DR,Corrup);
// Test Code
if(GetClass() == BARD)
GetBotOwner()->Message(Chat::Yellow, "Bard Skills-- Brass: %i, Percussion: %i, Singing: %i, Stringed: %i, Wind: %i",
GetSkill(EQ::skills::SkillBrassInstruments), GetSkill(EQ::skills::SkillPercussionInstruments), GetSkill(EQ::skills::SkillSinging), GetSkill(EQ::skills::SkillStringedInstruments), GetSkill(EQ::skills::SkillWindInstruments));
}
//if(this->Save())
// this->GetBotOwner()->CastToClient()->Message(Chat::White, "%s saved.", this->GetCleanName());
//else
// this->GetBotOwner()->CastToClient()->Message(Chat::Red, "%s save failed!", this->GetCleanName());
CalcBonuses();
AI_AddNPCSpells(this->GetBotSpellID());
if(showtext) {
GetBotOwner()->Message(Chat::Yellow, "%s has been updated.", GetCleanName());
GetBotOwner()->Message(Chat::Yellow, "Level: %i HP: %i AC: %i Mana: %i STR: %i STA: %i DEX: %i AGI: %i INT: %i WIS: %i CHA: %i", GetLevel(), max_hp, GetAC(), max_mana, GetSTR(), GetSTA(), GetDEX(), GetAGI(), GetINT(), GetWIS(), GetCHA());
GetBotOwner()->Message(Chat::Yellow, "Resists-- Magic: %i, Poison: %i, Fire: %i, Cold: %i, Disease: %i, Corruption: %i.",GetMR(),GetPR(),GetFR(),GetCR(),GetDR(),GetCorrup());
// Test Code
if(GetClass() == BARD) {
GetBotOwner()->Message(Chat::Yellow, "Bard Skills-- Brass: %i, Percussion: %i, Singing: %i, Stringed: %i, Wind: %i",
GetSkill(EQ::skills::SkillBrassInstruments) + GetBrassMod(),
GetSkill(EQ::skills::SkillPercussionInstruments) + GetPercMod(),
GetSkill(EQ::skills::SkillSinging) + GetSingMod(),
GetSkill(EQ::skills::SkillStringedInstruments) + GetStringMod(),
GetSkill(EQ::skills::SkillWindInstruments) + GetWindMod());
GetBotOwner()->Message(Chat::Yellow, "Bard Skill Mods-- Brass: %i, Percussion: %i, Singing: %i, Stringed: %i, Wind: %i", GetBrassMod(), GetPercMod(), GetSingMod(), GetStringMod(), GetWindMod());
}
}
}
bool Bot::CheckLoreConflict(const EQ::ItemData* item) {
if (!item || !(item->LoreFlag))
return false;
if (item->LoreGroup == -1) // Standard lore items; look everywhere except the shared bank, return the result
return (m_inv.HasItem(item->ID, 0, invWhereWorn) != INVALID_INDEX);
//If the item has a lore group, we check for other items with the same group and return the result
return (m_inv.HasItemByLoreGroup(item->LoreGroup, invWhereWorn) != INVALID_INDEX);
}
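// Walks the caster's group and, based on the requested beneficial spell type (heal, buff, cure,
// hate reduction, or pre-combat buff), attempts the matching AICastSpell call on eligible members
// and their pets; returns true as soon as one cast succeeds.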
bool EntityList::Bot_AICheckCloseBeneficialSpells(Bot* caster, uint8 iChance, float iRange, uint32 iSpellTypes) {
if((iSpellTypes & SPELL_TYPES_DETRIMENTAL) != 0) {
LogError("Error: detrimental spells requested from AICheckCloseBeneficialSpells!!");
return false;
}
if(!caster || !caster->AI_HasSpells())
return false;
if (iChance < 100) {
uint8 tmp = zone->random.Int(1, 100);
if (tmp > iChance)
return false;
}
uint8 botCasterClass = caster->GetClass();
if( iSpellTypes == SpellType_Heal ) {
if( botCasterClass == CLERIC || botCasterClass == DRUID || botCasterClass == SHAMAN) {
if(caster->HasGroup()) {
Group *g = caster->GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && !g->members[i]->qglobal) {
if(g->members[i]->IsClient() && g->members[i]->GetHPRatio() < 90) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if((g->members[i]->GetClass() == WARRIOR || g->members[i]->GetClass() == PALADIN || g->members[i]->GetClass() == SHADOWKNIGHT) && g->members[i]->GetHPRatio() < 95) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if(g->members[i]->GetClass() == ENCHANTER && g->members[i]->GetHPRatio() < 80) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if(g->members[i]->GetHPRatio() < 70) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
}
}
if(g->members[i] && !g->members[i]->qglobal && g->members[i]->HasPet() && g->members[i]->GetPet()->GetHPRatio() < 50) {
if(g->members[i]->GetPet()->GetOwner() != caster && caster->IsEngaged() && g->members[i]->IsCasting() && g->members[i]->GetClass() != ENCHANTER )
continue;
if(caster->AICastSpell(g->members[i]->GetPet(), 100, SpellType_Heal))
return true;
}
}
}
}
}
if( botCasterClass == PALADIN || botCasterClass == BEASTLORD || botCasterClass == RANGER) {
if(caster->HasGroup()) {
Group *g = caster->GetGroup();
float hpRatioToHeal = 25.0f;
switch(caster->GetBotStance()) {
case EQ::constants::stanceReactive:
case EQ::constants::stanceBalanced:
hpRatioToHeal = 50.0f;
break;
case EQ::constants::stanceBurn:
case EQ::constants::stanceBurnAE:
hpRatioToHeal = 20.0f;
break;
case EQ::constants::stanceAggressive:
case EQ::constants::stanceEfficient:
default:
hpRatioToHeal = 25.0f;
break;
}
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && !g->members[i]->qglobal) {
if(g->members[i]->IsClient() && g->members[i]->GetHPRatio() < hpRatioToHeal) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if((g->members[i]->GetClass() == WARRIOR || g->members[i]->GetClass() == PALADIN || g->members[i]->GetClass() == SHADOWKNIGHT) && g->members[i]->GetHPRatio() < hpRatioToHeal) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if(g->members[i]->GetClass() == ENCHANTER && g->members[i]->GetHPRatio() < hpRatioToHeal) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
} else if(g->members[i]->GetHPRatio() < hpRatioToHeal/2) {
if(caster->AICastSpell(g->members[i], 100, SpellType_Heal))
return true;
}
}
if(g->members[i] && !g->members[i]->qglobal && g->members[i]->HasPet() && g->members[i]->GetPet()->GetHPRatio() < 25) {
if(g->members[i]->GetPet()->GetOwner() != caster && caster->IsEngaged() && g->members[i]->IsCasting() && g->members[i]->GetClass() != ENCHANTER )
continue;
if(caster->AICastSpell(g->members[i]->GetPet(), 100, SpellType_Heal))
return true;
}
}
}
}
}
}
if( iSpellTypes == SpellType_Buff) {
uint8 chanceToCast = caster->IsEngaged() ? caster->GetChanceToCastBySpellType(SpellType_Buff) : 100;
if(botCasterClass == BARD) {
if(caster->AICastSpell(caster, chanceToCast, SpellType_Buff))
return true;
else
return false;
}
if(caster->HasGroup()) {
Group *g = caster->GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i]) {
if(caster->AICastSpell(g->members[i], chanceToCast, SpellType_Buff) || caster->AICastSpell(g->members[i]->GetPet(), chanceToCast, SpellType_Buff))
return true;
}
}
}
}
}
if( iSpellTypes == SpellType_Cure) {
if(caster->HasGroup()) {
Group *g = caster->GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && caster->GetNeedsCured(g->members[i])) {
if(caster->AICastSpell(g->members[i], caster->GetChanceToCastBySpellType(SpellType_Cure), SpellType_Cure))
return true;
else if(botCasterClass == BARD)
return false;
}
if(g->members[i] && g->members[i]->GetPet() && caster->GetNeedsCured(g->members[i]->GetPet())) {
if(caster->AICastSpell(g->members[i]->GetPet(), (int)caster->GetChanceToCastBySpellType(SpellType_Cure)/4, SpellType_Cure))
return true;
}
}
}
}
}
if (iSpellTypes == SpellType_HateRedux) {
if (!caster->IsEngaged())
return false;
if (caster->HasGroup()) {
Group *g = caster->GetGroup();
if (g) {
for (int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (g->members[i] && caster->GetNeedsHateRedux(g->members[i])) {
if (caster->AICastSpell(g->members[i], caster->GetChanceToCastBySpellType(SpellType_HateRedux), SpellType_HateRedux))
return true;
}
}
}
}
}
if (iSpellTypes == SpellType_PreCombatBuff) {
if (botCasterClass == BARD || caster->IsEngaged())
return false;
if (caster->HasGroup()) {
Group *g = caster->GetGroup();
if (g) {
for (int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (g->members[i]) {
if (caster->AICastSpell(g->members[i], iChance, SpellType_PreCombatBuff) || caster->AICastSpell(g->members[i]->GetPet(), iChance, SpellType_PreCombatBuff))
return true;
}
}
}
}
}
return false;
}
Mob* EntityList::GetMobByBotID(uint32 botID) {
Mob* Result = nullptr;
if(botID > 0) {
for (auto it = mob_list.begin(); it != mob_list.end(); ++it) {
if(!it->second)
continue;
if(it->second->IsBot() && it->second->CastToBot()->GetBotID() == botID) {
Result = it->second;
break;
}
}
}
return Result;
}
Bot* EntityList::GetBotByBotID(uint32 botID) {
Bot* Result = nullptr;
if(botID > 0) {
for(std::list<Bot*>::iterator botListItr = bot_list.begin(); botListItr != bot_list.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if(tempBot && tempBot->GetBotID() == botID) {
Result = tempBot;
break;
}
}
}
return Result;
}
Bot* EntityList::GetBotByBotName(std::string botName) {
Bot* Result = nullptr;
if(!botName.empty()) {
for(std::list<Bot*>::iterator botListItr = bot_list.begin(); botListItr != bot_list.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if(tempBot && std::string(tempBot->GetName()) == botName) {
Result = tempBot;
break;
}
}
}
return Result;
}
Client* EntityList::GetBotOwnerByBotEntityID(uint16 entityID) {
Client* Result = nullptr;
if (entityID > 0) {
for (std::list<Bot*>::iterator botListItr = bot_list.begin(); botListItr != bot_list.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if (tempBot && tempBot->GetID() == entityID && tempBot->GetBotOwner()) {
Result = tempBot->GetBotOwner()->CastToClient();
break;
}
}
}
return Result;
}
void EntityList::AddBot(Bot *newBot, bool SendSpawnPacket, bool dontqueue) {
if(newBot) {
newBot->SetID(GetFreeID());
newBot->SetSpawned();
if(SendSpawnPacket) {
if(dontqueue) {
EQApplicationPacket* outapp = new EQApplicationPacket();
newBot->CreateSpawnPacket(outapp);
outapp->priority = 6;
QueueClients(newBot, outapp, true);
safe_delete(outapp);
} else {
NewSpawn_Struct* ns = new NewSpawn_Struct;
memset(ns, 0, sizeof(NewSpawn_Struct));
newBot->FillSpawnStruct(ns, newBot);
AddToSpawnQueue(newBot->GetID(), &ns);
safe_delete(ns);
}
parse->EventNPC(EVENT_SPAWN, newBot, nullptr, "", 0);
}
bot_list.push_back(newBot);
mob_list.insert(std::pair<uint16, Mob*>(newBot->GetID(), newBot));
}
}
std::list<Bot*> EntityList::GetBotsByBotOwnerCharacterID(uint32 botOwnerCharacterID) {
std::list<Bot*> Result;
if(botOwnerCharacterID > 0) {
for(std::list<Bot*>::iterator botListItr = bot_list.begin(); botListItr != bot_list.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if(tempBot && tempBot->GetBotOwnerCharacterID() == botOwnerCharacterID)
Result.push_back(tempBot);
}
}
return Result;
}
bool EntityList::RemoveBot(uint16 entityID) {
bool Result = false;
if(entityID > 0) {
for(std::list<Bot*>::iterator botListItr = bot_list.begin(); botListItr != bot_list.end(); ++botListItr) {
Bot* tempBot = *botListItr;
if(tempBot && tempBot->GetID() == entityID) {
bot_list.erase(botListItr);
Result = true;
break;
}
}
}
return Result;
}
void EntityList::ShowSpawnWindow(Client* client, int Distance, bool NamedOnly) {
const char *WindowTitle = "Bot Tracking Window";
std::string WindowText;
int LastCon = -1;
int CurrentCon = 0;
Mob* curMob = nullptr;
for (auto it = mob_list.begin(); it != mob_list.end(); ++it) {
curMob = it->second;
if (curMob && DistanceNoZ(curMob->GetPosition(), client->GetPosition()) <= Distance) {
if(curMob->IsTrackable()) {
Mob* cur_entity = curMob;
int Extras = (cur_entity->IsBot() || cur_entity->IsPet() || cur_entity->IsFamiliar() || cur_entity->IsClient());
const char *const MyArray[] = {
"a_","an_","Innkeep_","Barkeep_",
"Guard_","Merchant_","Lieutenant_",
"Banker_","Centaur_","Aviak_","Baker_",
"Sir_","Armorer_","Deathfist_","Deputy_",
"Sentry_","Sentinel_","Leatherfoot_",
"Corporal_","goblin_","Bouncer_","Captain_",
"orc_","fire_","inferno_","young_","cinder_",
"flame_","gnomish_","CWG_","sonic_","greater_",
"ice_","dry_","Priest_","dark-boned_",
"Tentacle_","Basher_","Dar_","Greenblood_",
"clockwork_","guide_","rogue_","minotaur_",
"brownie_","Teir'","dark_","tormented_",
"mortuary_","lesser_","giant_","infected_",
"wharf_","Apprentice_","Scout_","Recruit_",
"Spiritist_","Pit_","Royal_","scalebone_",
"carrion_","Crusader_","Trooper_","hunter_",
"decaying_","iksar_","klok_","templar_","lord_",
"froglok_","war_","large_","charbone_","icebone_",
"Vicar_","Cavalier_","Heretic_","Reaver_","venomous_",
"Sheildbearer_","pond_","mountain_","plaguebone_","Brother_",
"great_","strathbone_","briarweb_","strathbone_","skeletal_",
"minion_","spectral_","myconid_","spurbone_","sabretooth_",
"Tin_","Iron_","Erollisi_","Petrifier_","Burynai_",
"undead_","decayed_","You_","smoldering_","gyrating_",
"lumpy_","Marshal_","Sheriff_","Chief_","Risen_",
"lascar_","tribal_","fungi_","Xi_","Legionnaire_",
"Centurion_","Zun_","Diabo_","Scribe_","Defender_","Capt_",
"blazing_","Solusek_","imp_","hexbone_","elementalbone_",
"stone_","lava_","_",""
};
// Count prefixes up to (but not including) the empty-string sentinel at the end of MyArray.
unsigned int MyArraySize;
for (MyArraySize = 0; *MyArray[MyArraySize]; MyArraySize++)
;
if (NamedOnly) {
bool ContinueFlag = false;
const char *CurEntityName = cur_entity->GetName();
for (unsigned int Index = 0; Index < MyArraySize; Index++) {
if (!strncasecmp(CurEntityName, MyArray[Index], strlen(MyArray[Index])) || (Extras)) {
ContinueFlag = true;
break;
}
}
if (ContinueFlag)
continue;
}
CurrentCon = client->GetLevelCon(cur_entity->GetLevel());
if(CurrentCon != LastCon) {
if(LastCon != -1)
WindowText += "</c>";
LastCon = CurrentCon;
switch(CurrentCon) {
case CON_GREEN: {
WindowText += "<c \"#00FF00\">";
break;
}
case CON_LIGHTBLUE: {
WindowText += "<c \"#8080FF\">";
break;
}
case CON_BLUE: {
WindowText += "<c \"#2020FF\">";
break;
}
case CON_YELLOW: {
WindowText += "<c \"#FFFF00\">";
break;
}
case CON_RED: {
WindowText += "<c \"#FF0000\">";
break;
}
default: {
WindowText += "<c \"#FFFFFF\">";
break;
}
}
}
WindowText += cur_entity->GetCleanName();
WindowText += "<br>";
if(strlen(WindowText.c_str()) > 4000) {
WindowText += "</c><br><br>List truncated... too many mobs to display";
break;
}
}
}
}
WindowText += "</c>";
client->SendPopupToClient(WindowTitle, WindowText.c_str());
return;
}
/**
* @param close_mobs map populated with nearby client mobs, keyed by entity ID
* @param scanning_mob the mob whose position anchors the scan
*/
void EntityList::ScanCloseClientMobs(std::unordered_map<uint16, Mob*>& close_mobs, Mob* scanning_mob)
{
float scan_range = RuleI(Range, MobCloseScanDistance) * RuleI(Range, MobCloseScanDistance);
close_mobs.clear();
for (auto& e : mob_list) {
auto mob = e.second;
if (!mob->IsClient()) {
continue;
}
if (mob->GetID() <= 0) {
continue;
}
float distance = DistanceSquared(scanning_mob->GetPosition(), mob->GetPosition());
if (distance <= scan_range) {
close_mobs.insert(std::pair<uint16, Mob*>(mob->GetID(), mob));
}
else if (mob->GetAggroRange() >= scan_range) {
close_mobs.insert(std::pair<uint16, Mob*>(mob->GetID(), mob));
}
}
LogAIScanClose("Close Client Mob List Size [{}] for mob [{}]", close_mobs.size(), scanning_mob->GetCleanName());
}
uint8 Bot::GetNumberNeedingHealedInGroup(uint8 hpr, bool includePets) {
uint8 needHealed = 0;
Group *g = nullptr;
if(this->HasGroup()) {
g = this->GetGroup();
if(g) {
for(int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if(g->members[i] && !g->members[i]->qglobal) {
if(g->members[i]->GetHPRatio() <= hpr)
needHealed++;
if(includePets) {
if(g->members[i]->GetPet() && g->members[i]->GetPet()->GetHPRatio() <= hpr)
needHealed++;
}
}
}
}
}
return needHealed;
}
int Bot::GetRawACNoShield(int &shield_ac) {
int ac = itembonuses.AC + spellbonuses.AC;
shield_ac = 0;
EQ::ItemInstance* inst = GetBotItem(EQ::invslot::slotSecondary);
if(inst) {
if (inst->GetItem()->ItemType == EQ::item::ItemTypeShield) {
ac -= inst->GetItem()->AC;
shield_ac = inst->GetItem()->AC;
for (uint8 i = EQ::invaug::SOCKET_BEGIN; i <= EQ::invaug::SOCKET_END; i++) {
if(inst->GetAugment(i)) {
ac -= inst->GetAugment(i)->GetItem()->AC;
shield_ac += inst->GetAugment(i)->GetItem()->AC;
}
}
}
}
return ac;
}
uint32 Bot::CalcCurrentWeight() {
const EQ::ItemData* TempItem = nullptr;
EQ::ItemInstance* inst = nullptr;
uint32 Total = 0;
for (int i = EQ::invslot::EQUIPMENT_BEGIN; i <= EQ::invslot::EQUIPMENT_END; ++i) {
inst = GetBotItem(i);
if(inst) {
TempItem = inst->GetItem();
if (TempItem)
Total += TempItem->Weight;
}
}
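// Packrat reduces total carried weight by 1% per point (e.g. Packrat 10 turns a total of 100 into 90).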
float Packrat = ((float)spellbonuses.Packrat + (float)aabonuses.Packrat);
if (Packrat > 0)
Total = (uint32)((float)Total * (1.0f - ((Packrat * 1.0f) / 100.0f)));
return Total;
}
int Bot::GroupLeadershipAAHealthEnhancement() {
Group *g = GetGroup();
if(!g || (g->GroupCount() < 3))
return 0;
switch(g->GetLeadershipAA(groupAAHealthEnhancement)) {
case 0:
return 0;
case 1:
return 30;
case 2:
return 60;
case 3:
return 100;
}
return 0;
}
int Bot::GroupLeadershipAAManaEnhancement() {
Group *g = GetGroup();
if(!g || (g->GroupCount() < 3))
return 0;
switch(g->GetLeadershipAA(groupAAManaEnhancement)) {
case 0:
return 0;
case 1:
return 30;
case 2:
return 60;
case 3:
return 100;
}
return 0;
}
int Bot::GroupLeadershipAAHealthRegeneration() {
Group *g = GetGroup();
if(!g || (g->GroupCount() < 3))
return 0;
switch(g->GetLeadershipAA(groupAAHealthRegeneration)) {
case 0:
return 0;
case 1:
return 4;
case 2:
return 6;
case 3:
return 8;
}
return 0;
}
int Bot::GroupLeadershipAAOffenseEnhancement() {
Group *g = GetGroup();
if(!g || (g->GroupCount() < 3))
return 0;
switch(g->GetLeadershipAA(groupAAOffenseEnhancement)) {
case 0:
return 0;
case 1:
return 10;
case 2:
return 19;
case 3:
return 28;
case 4:
return 34;
case 5:
return 40;
}
return 0;
}
bool Bot::GetNeedsCured(Mob *tar) {
bool needCured = false;
if(tar) {
if(tar->FindType(SE_PoisonCounter) || tar->FindType(SE_DiseaseCounter) || tar->FindType(SE_CurseCounter) || tar->FindType(SE_CorruptionCounter)) {
uint32 buff_count = tar->GetMaxTotalSlots();
int buffsWithCounters = 0;
needCured = true;
for (unsigned int j = 0; j < buff_count; j++) {
if(tar->GetBuffs()[j].spellid != SPELL_UNKNOWN) {
if(CalculateCounters(tar->GetBuffs()[j].spellid) > 0) {
buffsWithCounters++;
if(buffsWithCounters == 1 && (tar->GetBuffs()[j].ticsremaining < 2 || (int32)((tar->GetBuffs()[j].ticsremaining * 6) / tar->GetBuffs()[j].counters) < 2)) {
needCured = false;
break;
}
}
}
}
}
}
return needCured;
}
bool Bot::GetNeedsHateRedux(Mob *tar) {
// This really should be a scalar function based in class Mob that returns 'this' state... but it is in line with current Bot coding.
// TODO: Good starting point, but it can be refined.
// TODO: Still awaiting bot spell rework.
if (!tar || !tar->IsEngaged() || !tar->HasTargetReflection() || !tar->GetTarget()->IsNPC())
return false;
//if (tar->IsClient()) {
// switch (tar->GetClass()) {
// // TODO: figure out affectable classes..
// // Might need flag to allow player to determine redux req...
// default:
// return false;
// }
//}
//else if (tar->IsBot()) {
if (tar->IsBot()) {
switch (tar->GetClass()) {
case ROGUE:
if (tar->CanFacestab() || tar->CastToBot()->m_evade_timer.Check(false))
return false;
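// presumably an intentional fall-through: a rogue that can neither facestab nor evade is treated like the caster classes below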
case CLERIC:
case DRUID:
case SHAMAN:
case NECROMANCER:
case WIZARD:
case MAGICIAN:
case ENCHANTER:
return true;
default:
return false;
}
}
return false;
}
bool Bot::HasOrMayGetAggro() {
bool mayGetAggro = false;
if(GetTarget() && GetTarget()->GetHateTop()) {
Mob *topHate = GetTarget()->GetHateTop();
if(topHate == this)
mayGetAggro = true;
else {
uint32 myHateAmt = GetTarget()->GetHateAmount(this);
uint32 topHateAmt = GetTarget()->GetHateAmount(topHate);
// multiply before dividing so integer division does not truncate the percentage to zero
if(myHateAmt > 0 && topHateAmt > 0 && ((myHateAmt * 100) / topHateAmt) > 90)
mayGetAggro = true;
}
}
return mayGetAggro;
}
void Bot::SetDefaultBotStance() {
EQ::constants::StanceType defaultStance = EQ::constants::stanceBalanced;
if (GetClass() == WARRIOR)
defaultStance = EQ::constants::stanceAggressive;
_baseBotStance = EQ::constants::stancePassive;
_botStance = defaultStance;
}
void Bot::BotGroupSay(Mob *speaker, const char *msg, ...) {
char buf[1000];
va_list ap;
va_start(ap, msg);
vsnprintf(buf, 1000, msg, ap);
va_end(ap);
if(speaker->HasGroup()) {
Group *g = speaker->GetGroup();
if(g)
g->GroupMessage(speaker->CastToMob(), 0, 100, buf);
} else
speaker->Say("%s", buf);
}
bool Bot::UseDiscipline(uint32 spell_id, uint32 target) {
if(!IsValidSpell(spell_id)) {
BotGroupSay(this, "Not a valid spell.");
return false;
}
const SPDat_Spell_Struct &spell = spells[spell_id];
uint8 level_to_use = spell.classes[GetClass() - 1];
if(level_to_use == 255 || level_to_use > GetLevel()) {
return false;
}
if(GetEndurance() > spell.endurance_cost)
SetEndurance(GetEndurance() - spell.endurance_cost);
else
return false;
if(spell.recast_time > 0) {
if(CheckDisciplineRecastTimers(this, spells[spell_id].timer_id)) {
if(spells[spell_id].timer_id > 0 && spells[spell_id].timer_id < MAX_DISCIPLINE_TIMERS)
SetDisciplineRecastTimer(spells[spell_id].timer_id, spell.recast_time);
} else {
uint32 remain = (GetDisciplineRemainingTime(this, spells[spell_id].timer_id) / 1000);
GetOwner()->Message(Chat::White, "%s can use this discipline in %d minutes %d seconds.", GetCleanName(), (remain / 60), (remain % 60));
return false;
}
}
if(IsCasting())
InterruptSpell();
CastSpell(spell_id, target, EQ::spells::CastingSlot::Discipline);
return true;
}
// new healrotation code
bool Bot::CreateHealRotation(uint32 interval_ms, bool fast_heals, bool adaptive_targeting, bool casting_override)
{
if (IsHealRotationMember())
return false;
if (!IsHealRotationMemberClass(GetClass()))
return false;
m_member_of_heal_rotation = std::make_shared<HealRotation>(this, interval_ms, fast_heals, adaptive_targeting, casting_override);
return IsHealRotationMember();
}
bool Bot::DestroyHealRotation()
{
if (!IsHealRotationMember())
return true;
m_member_of_heal_rotation->ClearTargetPool();
m_member_of_heal_rotation->ClearMemberPool();
return !IsHealRotationMember();
}
bool Bot::JoinHealRotationMemberPool(std::shared_ptr<HealRotation>* heal_rotation)
{
if (IsHealRotationMember())
return false;
if (!heal_rotation->use_count())
return false;
if (!(*heal_rotation))
return false;
if (!IsHealRotationMemberClass(GetClass()))
return false;
if (!(*heal_rotation)->AddMemberToPool(this))
return false;
m_member_of_heal_rotation = *heal_rotation;
return true;
}
bool Bot::LeaveHealRotationMemberPool()
{
if (!IsHealRotationMember()) {
m_member_of_heal_rotation.reset();
return true;
}
m_member_of_heal_rotation->RemoveMemberFromPool(this);
m_member_of_heal_rotation.reset();
return !IsHealRotationMember();
}
bool Bot::UseHealRotationFastHeals()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->FastHeals();
}
bool Bot::UseHealRotationAdaptiveTargeting()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->AdaptiveTargeting();
}
bool Bot::IsHealRotationActive()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->IsActive();
}
bool Bot::IsHealRotationReady()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->CastingReady();
}
bool Bot::IsHealRotationCaster()
{
if (!IsHealRotationMember())
return false;
return (m_member_of_heal_rotation->CastingMember() == this);
}
bool Bot::HealRotationPokeTarget()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->PokeCastingTarget();
}
Mob* Bot::HealRotationTarget()
{
if (!IsHealRotationMember())
return nullptr;
return m_member_of_heal_rotation->CastingTarget();
}
bool Bot::AdvanceHealRotation(bool use_interval)
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->AdvanceRotation(use_interval);
}
bool Bot::IsMyHealRotationSet()
{
if (!IsHealRotationMember())
return false;
if (!m_member_of_heal_rotation->IsActive() && !m_member_of_heal_rotation->IsHOTActive())
return false;
if (!m_member_of_heal_rotation->CastingReady())
return false;
if (m_member_of_heal_rotation->CastingMember() != this)
return false;
if (m_member_of_heal_rotation->MemberIsCasting(this))
return false;
if (!m_member_of_heal_rotation->PokeCastingTarget())
return false;
return true;
}
bool Bot::AmICastingForHealRotation()
{
if (!IsHealRotationMember())
return false;
return m_member_of_heal_rotation->MemberIsCasting(this);
}
void Bot::SetMyCastingForHealRotation(bool flag)
{
if (!IsHealRotationMember())
return;
m_member_of_heal_rotation->SetMemberIsCasting(this, flag);
}
bool Bot::DyeArmor(int16 slot_id, uint32 rgb, bool all_flag, bool save_flag)
{
if (all_flag) {
if (slot_id != INVALID_INDEX)
return false;
for (uint8 i = EQ::textures::textureBegin; i < EQ::textures::weaponPrimary; ++i) {
uint8 inv_slot = EQ::InventoryProfile::CalcSlotFromMaterial(i);
EQ::ItemInstance* inst = m_inv.GetItem(inv_slot);
if (!inst)
continue;
inst->SetColor(rgb);
SendWearChange(i);
}
}
else {
uint8 mat_slot = EQ::InventoryProfile::CalcMaterialFromSlot(slot_id);
if (mat_slot == EQ::textures::materialInvalid || mat_slot >= EQ::textures::weaponPrimary)
return false;
EQ::ItemInstance* inst = m_inv.GetItem(slot_id);
if (!inst)
return false;
inst->SetColor(rgb);
SendWearChange(mat_slot);
}
if (save_flag) {
int16 save_slot = slot_id;
if (all_flag)
save_slot = -2;
if (!database.botdb.SaveEquipmentColor(GetBotID(), save_slot, rgb)) {
if (GetBotOwner() && GetBotOwner()->IsClient())
GetBotOwner()->CastToClient()->Message(Chat::Red, "%s", BotDatabase::fail::SaveEquipmentColor());
return false;
}
}
return true;
}
std::string Bot::CreateSayLink(Client* c, const char* message, const char* name)
{
// TODO: review
int saylink_size = strlen(message);
char* escaped_string = new char[saylink_size * 2 + 1]; // escaping can expand every byte, plus a null terminator
database.DoEscapeString(escaped_string, message, saylink_size);
uint32 saylink_id = database.LoadSaylinkID(escaped_string);
safe_delete_array(escaped_string);
EQ::SayLinkEngine linker;
linker.SetLinkType(EQ::saylink::SayLinkItemData);
linker.SetProxyItemID(SAYLINK_ITEM_ID);
linker.SetProxyAugment1ID(saylink_id);
linker.SetProxyText(name);
auto saylink = linker.GenerateLink();
return saylink;
}
void Bot::StopMoving()
{
//SetCombatJitterFlag(false);
//m_combat_jitter_timer.Start(zone->random.Int(BOT_COMBAT_JITTER_INTERVAL_MIN, BOT_COMBAT_JITTER_INTERVAL_MAX));
Mob::StopMoving();
}
void Bot::StopMoving(float new_heading)
{
//SetCombatJitterFlag(false);
//m_combat_jitter_timer.Start(zone->random.Int(BOT_COMBAT_JITTER_INTERVAL_MIN, BOT_COMBAT_JITTER_INTERVAL_MAX));
Mob::StopMoving(new_heading);
}
uint8 Bot::spell_casting_chances[SPELL_TYPE_COUNT][PLAYER_CLASS_COUNT][EQ::constants::STANCE_TYPE_COUNT][cntHSND] = { 0 };
#endif
1 | 11,033 | discipline vs. Discipline? Not sure which is correct. Also not sure if there is already an existing string constant for this. | EQEmu-Server | cpp
@@ -135,6 +135,7 @@ public abstract class ResourceDescriptorConfig {
.setEntityId(nameMap.get(p).toUpperCamel())
.setEntityName(overrideConfig.getEntityName())
.setCommonResourceName(overrideConfig.getCommonResourceName())
+ .setAssignedProtoFile(protoFile)
.build();
} else {
return SingleResourceNameConfig.newBuilder() | 1 | /* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.ResourceDescriptor;
import com.google.api.codegen.util.Name;
import com.google.api.pathtemplate.PathTemplate;
import com.google.api.pathtemplate.ValidationException;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Class that represents a google.api.ResourceDescriptor annotation, and is used to construct
* ResourceNameConfig objects.
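*
* <p>An illustrative (not source-verified) proto annotation that this class models:
*
* <pre>
* option (google.api.resource) = {
*   type: "library.googleapis.com/Book"
*   pattern: "shelves/{shelf}/books/{book}"
* };
* </pre>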
*/
@AutoValue
public abstract class ResourceDescriptorConfig {
/** The unified resource type, taken from the annotation. */
public abstract String getUnifiedResourceType();
/** List of resource patterns, taken from the annotation. */
public abstract ImmutableList<String> getPatterns();
/** The name field taken from the annotation. */
public abstract String getNameField();
/** The history field taken from the annotation. */
public abstract ResourceDescriptor.History getHistory();
/**
* Boolean for whether this resource should be represented in client libraries by a Oneof object.
*/
public abstract boolean getRequiresOneofConfig();
/**
* Pattern for a single resource that will be treated differently for the purposes of entity
* naming. This pattern will also exist in getPatterns. If there is no single resource, will be
* "".
*/
public abstract String getSinglePattern();
/**
* Returns the proto file to which the resource name config has been assigned. This is required to
* ensure that a consistent namespace can be calculated for the resource name.
*/
public abstract ProtoFile getAssignedProtoFile();
/** The entity name for the resource config. */
public abstract String getDerivedEntityName();
public static ResourceDescriptorConfig from(
ResourceDescriptor descriptor, ProtoFile assignedProtoFile) {
// The logic for requiresOneofConfig and requiresSinglePattern is finicky, so let's lay out
// the desired result for all possible combinations of History and number of patterns:
// (history, patterns) -> (requiresOneofConfig, requiresSinglePattern)
//
// (HISTORY_UNSPECIFIED, 1) -> (false, true)
// (HISTORY_UNSPECIFIED, 2+) -> (true, false)
// (ORIGINALLY_SINGLE_PATTERN, 1) -> (false, true) !!! WARNING, very odd
// (ORIGINALLY_SINGLE_PATTERN, 2+) -> (true, true)
// (FUTURE_MULTI_PATTERN, 1) -> (true, false)
// (FUTURE_MULTI_PATTERN, 2+) -> (true, false) !!! WARNING, very odd
boolean requiresOneofConfig =
descriptor.getHistory() == ResourceDescriptor.History.FUTURE_MULTI_PATTERN
|| descriptor.getPatternList().size() > 1;
boolean requiresSinglePattern =
descriptor.getHistory() == ResourceDescriptor.History.ORIGINALLY_SINGLE_PATTERN
|| (descriptor.getHistory() == ResourceDescriptor.History.HISTORY_UNSPECIFIED
&& descriptor.getPatternList().size() == 1);
String unqualifiedTypeName = getUnqualifiedTypeName(descriptor.getType());
return new AutoValue_ResourceDescriptorConfig(
descriptor.getType(),
ImmutableList.copyOf(descriptor.getPatternList()),
descriptor.getNameField(),
descriptor.getHistory(),
requiresOneofConfig,
requiresSinglePattern ? descriptor.getPattern(0) : "",
assignedProtoFile,
requiresOneofConfig ? (unqualifiedTypeName + "Oneof") : unqualifiedTypeName);
}
private static String getUnqualifiedTypeName(String typeName) {
return typeName.substring(typeName.lastIndexOf("/") + 1);
}
private String getUnqualifiedTypeName() {
return getUnqualifiedTypeName(getUnifiedResourceType());
}
private static ArrayList<ResourceNameConfig> buildSingleResourceNameConfigs(
DiagCollector diagCollector,
List<String> patterns,
Map<String, Name> nameMap,
ProtoFile protoFile,
Map<String, SingleResourceNameConfig> configOverrides) {
try {
return patterns
.stream()
.map(
(String p) -> {
String gapicConfigEntityId = nameMap.get(p).toLowerUnderscore();
if (configOverrides.containsKey(gapicConfigEntityId)) {
SingleResourceNameConfig overrideConfig =
configOverrides.get(gapicConfigEntityId);
return SingleResourceNameConfig.newBuilder()
.setNamePattern(p)
.setNameTemplate(PathTemplate.create(p))
.setEntityId(nameMap.get(p).toUpperCamel())
.setEntityName(overrideConfig.getEntityName())
.setCommonResourceName(overrideConfig.getCommonResourceName())
.build();
} else {
return SingleResourceNameConfig.newBuilder()
.setNamePattern(p)
.setNameTemplate(PathTemplate.create(p))
.setEntityId(nameMap.get(p).toUpperCamel())
.setEntityName(nameMap.get(p))
.build();
}
})
.collect(Collectors.toCollection(ArrayList::new));
} catch (ValidationException e) {
// Catch exception that may be thrown by PathTemplate.create
diagCollector.addDiag(Diag.error(SimpleLocation.TOPLEVEL, e.getMessage()));
return new ArrayList<>();
}
}
/** Package-private for use in GapicProductConfig. */
List<ResourceNameConfig> buildResourceNameConfigs(
DiagCollector diagCollector, Map<String, SingleResourceNameConfig> configOverrides) {
Name unqualifiedTypeName = Name.anyCamel(getUnqualifiedTypeName());
HashMap<String, Name> entityNameMap = buildEntityNameMap(getPatterns(), unqualifiedTypeName);
for (String key : entityNameMap.keySet()) {
if (key.equals(getSinglePattern())) {
entityNameMap.put(key, unqualifiedTypeName);
}
}
ArrayList<ResourceNameConfig> resourceNameConfigs =
buildSingleResourceNameConfigs(
diagCollector, getPatterns(), entityNameMap, getAssignedProtoFile(), configOverrides);
if (getRequiresOneofConfig()) {
String oneofId = getUnqualifiedTypeName() + "Oneof";
resourceNameConfigs.add(
new AutoValue_ResourceNameOneofConfig(
oneofId,
Name.anyCamel(oneofId),
ImmutableList.copyOf(resourceNameConfigs),
getAssignedProtoFile()));
}
return resourceNameConfigs;
}
/** Package-private for use in GapicProductConfig. */
List<ResourceNameConfig> buildParentResourceNameConfigs(
DiagCollector diagCollector, Map<String, SingleResourceNameConfig> configOverrides) {
List<String> parentPatterns = getParentPatterns();
HashMap<String, Name> entityNameMap = buildEntityNameMap(parentPatterns, Name.from(""));
ArrayList<ResourceNameConfig> resourceNameConfigs =
buildSingleResourceNameConfigs(
diagCollector, parentPatterns, entityNameMap, getAssignedProtoFile(), configOverrides);
if (parentPatterns.size() > 1) {
String oneofId = "ParentOneof";
resourceNameConfigs.add(
new AutoValue_ResourceNameOneofConfig(
oneofId,
Name.anyCamel(oneofId),
ImmutableList.copyOf(resourceNameConfigs),
getAssignedProtoFile()));
}
return resourceNameConfigs;
}
/** Package-private for use in ResourceNameMessageConfigs. */
String getDerivedParentEntityName() {
List<String> parentPatterns = getParentPatterns();
if (parentPatterns.size() == 0) {
throw new IllegalArgumentException(
String.format(
"Unexpected error - size of getParentPatterns is zero. patterns: [%s]",
String.join(", ", getPatterns())));
}
if (parentPatterns.size() > 1) {
return "ParentOneof";
} else {
List<String> segments = getSegments(parentPatterns.get(0));
if (segments.size() == 0) {
throw new IllegalArgumentException(
String.format(
"Unexpected error - size of segments is zero. pattern: %s", parentPatterns.get(0)));
}
String lastSegment = segments.get(segments.size() - 1);
if (isVariableBinding(lastSegment)) {
return Name.from(unwrapVariableSegment(lastSegment)).toUpperCamel();
} else {
return Name.anyCamel(lastSegment).toUpperCamel();
}
}
}
/** Package-private for use in ResourceNameMessageConfigs. */
List<String> getParentPatterns() {
return getPatterns()
.stream()
.map(ResourceDescriptorConfig::getParentPattern)
.distinct()
.collect(Collectors.toList());
}
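/**
* Returns the parent portion of a resource pattern, keeping everything up to and including the
* last variable binding that precedes the final segment, or "" when there is none. Illustrative
* example (not from the source):
* getParentPattern("projects/{project}/shelves/{shelf}/books/{book}") returns
* "projects/{project}/shelves/{shelf}".
*/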
@VisibleForTesting
static String getParentPattern(String pattern) {
List<String> segments = getSegments(pattern);
int index = segments.size() - 2;
while (index >= 0 && !isVariableBinding(segments.get(index))) {
index--;
}
index++;
if (index <= 0) {
return "";
}
return String.join("/", segments.subList(0, index));
}
private static List<String> getSegments(String pattern) {
return ImmutableList.copyOf(pattern.split("/"));
}
private static boolean isVariableBinding(String segment) {
return segment.startsWith("{") && segment.endsWith("}");
}
private static String unwrapVariableSegment(String segment) {
return segment.substring(1, segment.length() - 1);
}
/**
* Builds a map from patterns to unique entity names. Uses a trie structure to determine the
* shortest unique name that can be used.
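*
* <p>Illustrative example (not from the source): for patterns
* "projects/{project}/shelves/{shelf}" and "projects/{project}/books/{book}", the shortest
* distinguishing names are "shelf" and "book", each joined with the given suffix.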
*/
@VisibleForTesting
static HashMap<String, Name> buildEntityNameMap(List<String> patterns, Name suffix) {
TrieNode trie = new TrieNode();
Map<String, List<String>> patternsToSegmentsMap =
patterns
.stream()
.collect(
Collectors.toMap(
Function.identity(),
(String p) ->
Lists.reverse(
getSegments(p)
.stream()
.filter(ResourceDescriptorConfig::isVariableBinding)
.map(ResourceDescriptorConfig::unwrapVariableSegment)
.collect(Collectors.toList()))));
for (List<String> segments : patternsToSegmentsMap.values()) {
insertSegmentsIntoTrie(segments, trie);
}
HashMap<String, Name> nameMap = new HashMap<>();
for (String pattern : patternsToSegmentsMap.keySet()) {
List<String> identifyingNamePieces = new ArrayList<>();
TrieNode node = trie;
List<String> segments = patternsToSegmentsMap.get(pattern);
for (String segment : segments) {
if (node.size() > 1) {
identifyingNamePieces.add(segment);
}
node = node.get(segment);
}
Name entityName =
Name.from(Lists.reverse(identifyingNamePieces).toArray(new String[0])).join(suffix);
if (entityName.toLowerCamel().isEmpty()) {
// This can occur for a single pattern and empty suffix
if (segments.size() > 0) {
entityName = Name.from(segments.get(0));
}
}
nameMap.put(pattern, entityName);
}
return nameMap;
}
private static class TrieNode extends HashMap<String, TrieNode> {}
private static void insertSegmentsIntoTrie(List<String> segments, TrieNode trieNode) {
for (String segment : segments) {
if (!trieNode.containsKey(segment)) {
trieNode.put(segment, new TrieNode());
}
trieNode = trieNode.get(segment);
}
}
}
| 1 | 30,348 | I believe this will break common resources: they are defined in a common file but must generate classes in a service-specific namespace, so using the proto file to determine the package of the generated class would not work, because the common_resources namespace does not match the service namespace. | googleapis-gapic-generator | java
@@ -63,6 +63,8 @@ NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
+SERIES_DEFAULT_NAME = "0"
+
class InternalFrame(object):
""" | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING
from itertools import accumulate
from collections import OrderedDict
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
from pyspark import sql as spark
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Window
from pyspark.sql.functions import PandasUDFType, pandas_udf
from pyspark.sql.types import BooleanType, DataType, StructField, StructType, LongType
try:
from pyspark.sql.types import to_arrow_type
except ImportError:
from pyspark.sql.pandas.types import to_arrow_type
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from databricks.koalas.series import Series
from databricks.koalas.config import get_option
from databricks.koalas.typedef import infer_pd_series_spark_type, spark_type_to_pandas_dtype
from databricks.koalas.utils import (
column_labels_level,
default_session,
lazy_property,
name_like_string,
scol_for,
verify_temp_column_name,
)
# A function to turn given numbers to Spark columns that represent Koalas index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a Koalas index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
class InternalFrame(object):
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
.. note:: this is an internal class. It is not supposed to be exposed to users, and users
should not directly access it.
The internal immutable DataFrame represents the index information for the DataFrame it belongs to.
For instance, if we have a Koalas DataFrame as below, a pandas DataFrame does not store the index
as columns.
>>> kdf = ks.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> kdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns, including the index column, are also stored internally in the Spark
DataFrame, as below.
>>> kdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
In order to fill this gap, the current metadata is used by mapping Spark's internal columns
to Koalas' index. See the properties below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
* `index_map` is zipped pairs of `index_spark_column_names` and `index_names`
* `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index.
* `to_pandas_frame` represents pandas DataFrame derived by the metadata
>>> internal = kdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.index_map
OrderedDict([('__index_level_0__', None)])
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
In case the index is set to one of the existing columns, as below:
>>> kdf1 = kdf.set_index("A")
>>> kdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> kdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = kdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.index_map
OrderedDict([('A', ('A',))])
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
In case the index becomes a multi-index, as below:
>>> kdf2 = kdf.set_index("A", append=True)
>>> kdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> kdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = kdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.index_map
OrderedDict([('__index_level_0__', None), ('A', ('A',))])
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
For multi-level columns, it also holds column_labels.
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> kdf3 = ks.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> kdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = kdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> kseries = kdf1.B
>>> kseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = kseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.spark_column
Column<b'B'>
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.index_map
OrderedDict([('A', ('A',))])
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
def __init__(
self,
spark_frame: spark.DataFrame,
index_map: Optional[Dict[str, Optional[Tuple[str, ...]]]],
column_labels: Optional[List[Tuple[str, ...]]] = None,
data_spark_columns: Optional[List[spark.Column]] = None,
column_label_names: Optional[List[str]] = None,
spark_column: Optional[spark.Column] = None,
) -> None:
"""
Create a new internal immutable DataFrame to manage the Spark DataFrame, its column fields,
and its index fields and names.
:param spark_frame: Spark DataFrame to be managed.
:param index_map: dictionary of string pairs
Each pair holds the index field name which exists in Spark fields,
and the index name.
:param column_labels: list of tuples with the same length
The multi-level values in the tuples.
:param data_spark_columns: list of Spark Column
Spark Columns to appear as columns. If spark_column is not None,
this argument is ignored; otherwise, if this is None, it is calculated
from spark_frame.
:param column_label_names: Names for each of the index levels.
:param spark_column: Spark Column to be managed.
See the examples below for what each parameter means.
>>> column_labels = pd.MultiIndex.from_tuples(
... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
>>> row_index = pd.MultiIndex.from_tuples(
... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
... names=["row_index_a", "row_index_b"])
>>> kdf = ks.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
>>> kdf.set_index(('a', 'x'), append=True, inplace=True)
>>> kdf # doctest: +NORMALIZE_WHITESPACE
column_labels_a a b
column_labels_b y z
row_index_a row_index_b (a, x)
foo bar 1 2 3
4 5 6
zoo bar 7 8 9
>>> internal = kdf[('a', 'y')]._internal
>>> internal._sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+-----------------+------+------+------+...
|__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
+-----------------+-----------------+------+------+------+...
| foo| bar| 1| 2| 3|...
| foo| bar| 4| 5| 6|...
| zoo| bar| 7| 8| 9|...
+-----------------+-----------------+------+------+------+...
>>> internal._index_map # doctest: +NORMALIZE_WHITESPACE
OrderedDict([('__index_level_0__', ('row_index_a',)),
('__index_level_1__', ('row_index_b',)),
('(a, x)', ('a', 'x'))])
>>> internal._column_labels
[('a', 'y')]
>>> internal._data_spark_columns
[Column<b'(a, y)'>]
>>> list(internal._column_label_names)
['column_labels_a', 'column_labels_b']
>>> internal.spark_column
Column<b'(a, y)'>
"""
assert isinstance(spark_frame, spark.DataFrame)
assert not spark_frame.isStreaming, "Koalas does not support Structured Streaming."
if index_map is None:
assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
"Index columns should not appear in columns of the Spark DataFrame. Avoid "
"index column names [%s]." % SPARK_INDEX_NAME_PATTERN
)
# Create default index.
spark_frame = InternalFrame.attach_default_index(spark_frame)
index_map = OrderedDict({SPARK_DEFAULT_INDEX_NAME: None})
if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
spark_frame = spark_frame.withColumn(
NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
)
assert isinstance(index_map, OrderedDict), index_map
assert all(
isinstance(index_field, str)
and (
index_name is None
or (
isinstance(index_name, tuple)
and all(isinstance(name, str) for name in index_name)
)
)
for index_field, index_name in index_map.items()
), index_map
assert spark_column is None or isinstance(spark_column, spark.Column)
assert data_spark_columns is None or all(
isinstance(scol, spark.Column) for scol in data_spark_columns
)
self._sdf = spark_frame # type: spark.DataFrame
self._index_map = index_map # type: Dict[str, Optional[Tuple[str, ...]]]
self._spark_column = spark_column # type: Optional[spark.Column]
if spark_column is not None:
self._data_spark_columns = [spark_column]
elif data_spark_columns is None:
index_columns = set(index_column for index_column in self._index_map)
self._data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if col not in index_columns and col not in HIDDEN_COLUMNS
]
else:
self._data_spark_columns = data_spark_columns
if spark_column is not None:
assert column_labels is not None and len(column_labels) == 1, column_labels
assert all(
label is None or (isinstance(label, tuple) and len(label) > 0)
for label in column_labels
), column_labels
self._column_labels = column_labels
elif column_labels is None:
self._column_labels = [
(spark_frame.select(scol).columns[0],) for scol in self._data_spark_columns
]
else:
assert len(column_labels) == len(self._data_spark_columns), (
len(column_labels),
len(self._data_spark_columns),
)
assert all(isinstance(i, tuple) for i in column_labels), column_labels
assert len(set(len(i) for i in column_labels)) <= 1, column_labels
self._column_labels = column_labels
if column_label_names is not None and not is_list_like(column_label_names):
raise ValueError("Column_index_names should be list-like or None for a MultiIndex")
if isinstance(column_label_names, list):
if all(name is None for name in column_label_names):
self._column_label_names = None
else:
self._column_label_names = column_label_names
else:
self._column_label_names = column_label_names
@staticmethod
def attach_default_index(sdf, default_index_type=None):
"""
This method attaches a default index to a Spark DataFrame. Spark does not have the notion
of an index, so a corresponding column should be generated.
There are several types of default index that can be configured by `compute.default_index_type`.
>>> spark_frame = ks.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)
>>> spark_frame
DataFrame[__index_level_0__: int, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = get_option("compute.default_index_type")
scols = [scol_for(sdf, column) for column in sdf.columns]
if default_index_type == "sequence":
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())) - 1
)
return sdf.select(sequential_index.alias(index_column), *scols)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
@staticmethod
def attach_distributed_column(sdf, column_name):
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
@staticmethod
def attach_distributed_sequence_column(sdf, column_name):
"""
This method attaches a Spark column that has a sequence in a distributed manner.
This is equivalent to the column assigned when the default index type is 'distributed-sequence'.
>>> sdf = ks.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.sort("sequence").show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
scols = [scol_for(sdf, column) for column in sdf.columns]
spark_partition_column = verify_temp_column_name(sdf, "__spark_partition_id__")
offset_column = verify_temp_column_name(sdf, "__offset__")
row_number_column = verify_temp_column_name(sdf, "__row_number__")
# 1. Calculates counts per partition ID. `counts` here is, for instance,
# {
# 1: 83,
# 6: 83,
# 3: 83,
# ...
# }
sdf = sdf.withColumn(spark_partition_column, F.spark_partition_id())
counts = map(
lambda x: (x["key"], x["count"]),
sdf.groupby(sdf[spark_partition_column].alias("key")).count().collect(),
)
# 2. Calculates cumulative sum in an order of partition id.
# Note that it does not matter if partition id guarantees its order or not.
# We just need a one-by-one sequential id.
# sort by partition key.
sorted_counts = sorted(counts, key=lambda x: x[0])
# get cumulative sum in an order of partition key.
cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts)))
# zip it with partition key.
sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts))
# 3. Attach offset for each partition.
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def offset(id):
current_partition_offset = sums[id.iloc[0]]
return pd.Series(current_partition_offset).repeat(len(id))
sdf = sdf.withColumn(offset_column, offset(spark_partition_column))
# 4. Calculate row_number in each partition.
w = Window.partitionBy(spark_partition_column).orderBy(F.monotonically_increasing_id())
row_number = F.row_number().over(w)
sdf = sdf.withColumn(row_number_column, row_number)
# 5. Calculate the index.
return sdf.select(
(sdf[offset_column] + sdf[row_number_column] - 1).alias(column_name), *scols
)
def spark_column_name_for(self, label: Tuple[str, ...]) -> str:
""" Return the actual Spark column name for the given column label. """
return self.spark_frame.select(self.spark_column_for(label)).columns[0]
def spark_column_for(self, label: Tuple[str, ...]):
""" Return Spark Column for the given column label. """
column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
if label in column_labels_to_scol:
return column_labels_to_scol[label] # type: ignore
else:
raise KeyError(name_like_string(label))
def spark_type_for(self, label: Tuple[str, ...]) -> DataType:
""" Return DataType for the given column label. """
return self.spark_frame.select(self.spark_column_for(label)).schema[0].dataType
def spark_column_nullable_for(self, label: Tuple[str, ...]) -> bool:
""" Return nullability for the given column label. """
return self.spark_frame.select(self.spark_column_for(label)).schema[0].nullable
@property
def spark_frame(self) -> spark.DataFrame:
""" Return the managed Spark DataFrame. """
return self._sdf
@property
def spark_column(self) -> Optional[spark.Column]:
""" Return the managed Spark Column. """
return self._spark_column
@lazy_property
def data_spark_column_names(self) -> List[str]:
""" Return the managed column field names. """
return self.spark_frame.select(self.data_spark_columns).columns
@property
def data_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed data columns. """
return self._data_spark_columns
@lazy_property
def index_spark_column_names(self) -> List[str]:
""" Return the managed index field names. """
return list(self.index_map.keys())
@lazy_property
def index_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed index columns. """
return [scol_for(self.spark_frame, column) for column in self.index_spark_column_names]
@lazy_property
def spark_column_names(self) -> List[str]:
""" Return all the field names including index field names. """
return self.spark_frame.select(self.spark_columns).columns
@lazy_property
def spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed columns including index columns. """
index_spark_columns = self.index_spark_columns
return index_spark_columns + [
spark_column
for spark_column in self.data_spark_columns
if all(not spark_column._jc.equals(scol._jc) for scol in index_spark_columns)
]
@property
def index_map(self) -> Dict[str, Optional[Tuple[str, ...]]]:
""" Return the managed index information. """
assert len(self._index_map) > 0
return self._index_map
@lazy_property
def index_names(self) -> List[Optional[Tuple[str, ...]]]:
""" Return the managed index names. """
return list(self.index_map.values())
@property
def column_labels(self) -> List[Tuple[str, ...]]:
""" Return the managed column index. """
return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
""" Return the level of the column index. """
return column_labels_level(self._column_labels)
@property
def column_label_names(self) -> Optional[List[str]]:
""" Return names of the index levels. """
return self._column_label_names
@lazy_property
def to_internal_spark_frame(self) -> spark.DataFrame:
"""
Return as Spark DataFrame. This contains index columns as well
and should only be used for internal purposes.
"""
index_spark_columns = self.index_spark_columns
data_columns = []
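# Skip data columns that duplicate an index column, and alias the rest to their
# label-derived names where they differ.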
for i, (label, spark_column, column_name) in enumerate(
zip(self.column_labels, self.data_spark_columns, self.data_spark_column_names)
):
if all(not spark_column._jc.equals(scol._jc) for scol in index_spark_columns):
name = str(i) if label is None else name_like_string(label)
if column_name != name:
spark_column = spark_column.alias(name)
data_columns.append(spark_column)
return self.spark_frame.select(index_spark_columns + data_columns)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
""" Return as pandas DataFrame. """
sdf = self.to_internal_spark_frame
pdf = sdf.toPandas()
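# toPandas() cannot infer dtypes from an empty result, so restore them from the Spark schema.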
if len(pdf) == 0 and len(sdf.schema) > 0:
pdf = pdf.astype(
{field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
)
column_names = []
for i, (label, spark_column, column_name) in enumerate(
zip(self.column_labels, self.data_spark_columns, self.data_spark_column_names)
):
for index_spark_column_name, index_spark_column in zip(
self.index_spark_column_names, self.index_spark_columns
):
if spark_column._jc.equals(index_spark_column._jc):
column_names.append(index_spark_column_name)
break
else:
name = str(i) if label is None else name_like_string(label)
if column_name != name:
column_name = name
column_names.append(column_name)
append = False
for index_field in self.index_spark_column_names:
drop = index_field not in column_names
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[column_names]
if self.column_labels_level > 1:
pdf.columns = pd.MultiIndex.from_tuples(self._column_labels)
else:
pdf.columns = [None if label is None else label[0] for label in self._column_labels]
if self._column_label_names is not None:
pdf.columns.names = self._column_label_names
index_names = self.index_names
if len(index_names) > 0:
pdf.index.names = [
name if name is None or len(name) > 1 else name[0] for name in index_names
]
return pdf
@lazy_property
def resolved_copy(self):
""" Copy the immutable InternalFrame with the updates resolved. """
sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
if self.spark_column is None:
return self.copy(
spark_frame=sdf,
data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
)
else:
return self.copy(
spark_frame=sdf, spark_column=scol_for(sdf, self.data_spark_column_names[0])
)
def with_new_sdf(
self, spark_frame: spark.DataFrame, data_columns: Optional[List[str]] = None
) -> "InternalFrame":
""" Copy the immutable _InternalFrame with the updates by the specified Spark DataFrame.
:param spark_frame: the new Spark DataFrame
:param data_columns: the new column names.
If None, the original one is used.
:return: the copied _InternalFrame.
"""
assert self.spark_column is None
if data_columns is None:
data_columns = self.data_spark_column_names
else:
assert len(data_columns) == len(self.column_labels), (
len(data_columns),
len(self.column_labels),
)
sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
return self.copy(
spark_frame=sdf, data_spark_columns=[scol_for(sdf, col) for col in data_columns]
)
def with_new_columns(
self,
scols_or_ksers: List[Union[spark.Column, "Series"]],
column_labels: Optional[List[Tuple[str, ...]]] = None,
column_label_names: Optional[Union[List[str], _NoValueType]] = _NoValue,
keep_order: bool = True,
) -> "InternalFrame":
"""
Copy the immutable _InternalFrame with the updates by the specified Spark Columns or Series.
:param scols_or_ksers: the new Spark Columns or Series.
:param column_labels: the new column index.
If None, its column_labels is used when the corresponding `scols_or_ksers` is a
Series; otherwise, the original one is used.
:param column_label_names: the new names of the column index levels.
If not specified, the original ones are used.
:param keep_order: whether to keep the hidden natural-ordering column so the original
row order is preserved.
:return: the copied _InternalFrame.
"""
from databricks.koalas.series import Series
if column_labels is None:
if all(isinstance(scol_or_kser, Series) for scol_or_kser in scols_or_ksers):
column_labels = [kser._internal.column_labels[0] for kser in scols_or_ksers]
else:
assert len(scols_or_ksers) == len(self.column_labels), (
len(scols_or_ksers),
len(self.column_labels),
)
column_labels = []
for scol_or_kser, label in zip(scols_or_ksers, self.column_labels):
if isinstance(scol_or_kser, Series):
column_labels.append(scol_or_kser._internal.column_labels[0])
else:
column_labels.append(label)
else:
assert len(scols_or_ksers) == len(column_labels), (
len(scols_or_ksers),
len(column_labels),
)
data_spark_columns = []
for scol_or_kser in scols_or_ksers:
if isinstance(scol_or_kser, Series):
scol = scol_or_kser._internal.spark_column
else:
scol = scol_or_kser
data_spark_columns.append(scol)
sdf = self.spark_frame
if not keep_order:
sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
data_spark_columns = [
scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
]
if column_label_names is _NoValue:
column_label_names = self._column_label_names
return self.copy(
spark_frame=sdf,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
column_label_names=column_label_names,
spark_column=None,
)
def with_filter(self, pred: Union[spark.Column, "Series"]):
""" Copy the immutable _InternalFrame with the updates by the predicate.
:param pred: the predicate to filter.
:return: the copied _InternalFrame.
"""
from databricks.koalas.series import Series
if isinstance(pred, Series):
assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
pred = pred.spark.column
else:
spark_type = self.spark_frame.select(pred).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
sdf = self.spark_frame.select(self.spark_columns).filter(pred)
if self.spark_column is None:
return self.with_new_sdf(sdf)
else:
return self.copy(
spark_frame=sdf, spark_column=scol_for(sdf, self.data_spark_column_names[0])
)
def copy(
self,
spark_frame: Union[spark.DataFrame, _NoValueType] = _NoValue,
index_map: Optional[Union[Dict[str, Optional[Tuple[str, ...]]], _NoValueType]] = _NoValue,
column_labels: Optional[Union[List[Tuple[str, ...]], _NoValueType]] = _NoValue,
data_spark_columns: Optional[Union[List[spark.Column], _NoValueType]] = _NoValue,
column_label_names: Optional[Union[List[str], _NoValueType]] = _NoValue,
spark_column: Optional[Union[spark.Column, _NoValueType]] = _NoValue,
) -> "InternalFrame":
""" Copy the immutable DataFrame.
:param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
:param index_map: the new index information. If not specified, the original one is used.
:param column_labels: the new column index.
:param data_spark_columns: the new Spark Columns. If not specified, the original ones are used.
:param column_label_names: the new names of the index levels.
:param spark_column: the new Spark Column. If not specified, the original one is used.
:return: the copied immutable DataFrame.
"""
if spark_frame is _NoValue:
spark_frame = self.spark_frame
if index_map is _NoValue:
index_map = self._index_map
if column_labels is _NoValue:
column_labels = self._column_labels
if data_spark_columns is _NoValue:
data_spark_columns = self._data_spark_columns
if column_label_names is _NoValue:
column_label_names = self._column_label_names
if spark_column is _NoValue:
spark_column = self.spark_column
return InternalFrame(
spark_frame,
index_map=index_map,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
column_label_names=column_label_names,
spark_column=spark_column,
)
@staticmethod
def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
""" Create an immutable DataFrame from pandas DataFrame.
:param pdf: :class:`pd.DataFrame`
:return: the created immutable DataFrame
"""
columns = pdf.columns
data_columns = [name_like_string(col) for col in columns]
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = None
column_label_names = columns.names
index_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
]
index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(index_names))]
pdf = pdf.copy()
pdf.index.names = index_columns
reset_index = pdf.reset_index()
reset_index.columns = index_columns + data_columns
schema = StructType(
[
StructField(
name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any()),
)
for name, col in reset_index.iteritems()
]
)
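# Convert NaN to None so Spark stores missing values as nulls; datetime columns are
# left untouched.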
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
sdf = default_session().createDataFrame(reset_index, schema=schema)
return InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(zip(index_columns, index_names)),
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names,
)
| 1 | 15,460 | How about `SPARK_DEFAULT_SERIES_NAME`? | databricks-koalas | py |
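A minimal sketch of the rename the reviewer suggests (hypothetical; the final name is up to the author):

    # Hypothetical application of the review suggestion to the patch above.
    SPARK_DEFAULT_SERIES_NAME = "0"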
@@ -36,9 +36,18 @@ public class TableProperties {
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
- public static final String COMMIT_NUM_STATUS_CHECKS = "commit.num-status-checks";
+ public static final String COMMIT_NUM_STATUS_CHECKS = "commit.status-checks.num-retries";
public static final int COMMIT_NUM_STATUS_CHECKS_DEFAULT = 3;
+ public static final String COMMIT_STATUS_CHECKS_MIN_WAIT_MS = "commit.status-check.min-wait-ms";
+ public static final long COMMIT_STATUS_CHECKS_MIN_WAIT_MS_DEFAULT = 1000L; // 1s
+
+ public static final String COMMIT_STATUS_CHECKS_MAX_WAIT_MS = "commit.status-check.max-wait-ms";
+ public static final long COMMIT_STATUS_CHECKS_MAX_WAIT_MS_DEFAULT = 60000L; // 1 minute
+
+ public static final String COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS = "commit.status-check.total-timeout-ms";
+ public static final long COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS_DEFAULT = 1800000; // 30 minutes
+
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String COMMIT_NUM_STATUS_CHECKS = "commit.num-status-checks";
public static final int COMMIT_NUM_STATUS_CHECKS_DEFAULT = 3;
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String ORC_VECTORIZATION_ENABLED = "read.orc.vectorization.enabled";
public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables deleting the oldest metadata file after each commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = 536870912; // 512 MB
public static final String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED = "write.spark.fanout.enabled";
public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
public static final String WRITE_DISTRIBUTION_MODE = "write.distribution-mode";
public static final String WRITE_DISTRIBUTION_MODE_NONE = "none";
public static final String WRITE_DISTRIBUTION_MODE_HASH = "hash";
public static final String WRITE_DISTRIBUTION_MODE_RANGE = "range";
public static final String WRITE_DISTRIBUTION_MODE_DEFAULT = WRITE_DISTRIBUTION_MODE_NONE;
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms";
public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days
public static final String MIN_SNAPSHOTS_TO_KEEP = "history.expire.min-snapshots-to-keep";
public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT = 1;
public static final String DELETE_ISOLATION_LEVEL = "write.delete.isolation-level";
public static final String DELETE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String DELETE_MODE = "write.delete.mode";
public static final String DELETE_MODE_DEFAULT = "copy-on-write";
public static final String UPDATE_ISOLATION_LEVEL = "write.update.isolation-level";
public static final String UPDATE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String UPDATE_MODE = "write.update.mode";
public static final String UPDATE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_ISOLATION_LEVEL = "write.merge.isolation-level";
public static final String MERGE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String MERGE_MODE = "write.merge.mode";
public static final String MERGE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_CARDINALITY_CHECK_ENABLED = "write.merge.cardinality-check.enabled";
public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT = true;
}
| 1 | 37,630 | The other properties are in `commit.status-check`, not `commit.status-checks`. Could you remove the extra `s`? | apache-iceberg | java |
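A minimal sketch of the consistently named key the reviewer asks for (hypothetical; shown for illustration only):

    // Hypothetical: drops the extra 's' to match the other "commit.status-check" keys above.
    public static final String COMMIT_NUM_STATUS_CHECKS = "commit.status-check.num-retries";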
@@ -6,6 +6,12 @@ var setupDatabase = require('./shared').setupDatabase;
describe('Authentication', function() {
before(function() {
+ const configuration = this.configuration;
+ if (configuration.usingUnifiedTopology()) {
+ // The unified topology does not currently support authentication
+ return this.skip();
+ }
+
return setupDatabase(this.configuration);
});
| 1 | 'use strict';
var f = require('util').format;
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase;
describe('Authentication', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* Fail due to illegal authentication mechanism
*
* @ignore
*/
it('should fail due to illegal authentication mechanism', {
metadata: { requires: { topology: ['auth'], mongodb: '<=2.6.x' } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(
new Server(configuration.host, configuration.port, { auto_reconnect: true }),
{ w: 1 }
);
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.admin().addUser('admin', 'admin', function(err) {
test.equal(null, err);
client.close();
var client1 = new MongoClient(
new Server(configuration.host, configuration.port, { auto_reconnect: true }),
{ w: 1, user: 'admin', password: 'admin', authMechanism: 'SCRAM-SHA-1' }
);
client1.connect(function(err) {
test.ok(err);
test.equal(59, err.code);
// restart server
configuration.manager.restart(true).then(function() {
done();
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate with kay.kay', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(
new Server(configuration.host, configuration.port, { auto_reconnect: true }),
{ w: 1 }
);
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.admin().addUser('kay:kay', 'abc123', function(err) {
test.equal(null, err);
client.close();
MongoClient.connect('mongodb://kay%3Akay:abc123@localhost:27017/admin', function(err) {
test.equal(null, err);
// restart server
configuration.manager.restart(true).then(function() {
done();
});
});
});
});
});
}
});
/**
* Retrieve the server information for the current
* instance of the db client
*
* @ignore
*/
it('should correctly call validateCollection using authenticatedMode', {
metadata: { requires: { topology: ['single', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'shouldCorrectlyCallValidateCollectionUsingAuthenticatedMode'
);
collection.insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
var adminDb = db.admin();
adminDb.addUser('admin', 'admin', configuration.writeConcernMax(), function(err) {
test.equal(null, err);
MongoClient.connect('mongodb://admin:admin@localhost:27017/admin', function(err) {
test.equal(null, err);
adminDb.validateCollection(
'shouldCorrectlyCallValidateCollectionUsingAuthenticatedMode',
function(err, doc) {
test.equal(null, err);
test.ok(doc != null);
adminDb.removeUser('admin', function(err) {
test.equal(null, err);
client.close();
done();
});
}
);
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly issue authenticated event on successful authentication', {
metadata: { requires: { topology: 'single' } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
var client = configuration.newClient({ w: 1 }, { poolSize: 1 });
// DOC_LINE var client = new MongoClient(new Server('localhost', 27017));
// DOC_START
// Establish connection to db
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Grab a collection object
var collection = db.collection('test');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin15', 'admin15', function(err, result) {
test.equal(null, err);
test.ok(result != null);
client.close();
client = new MongoClient('mongodb://admin15:admin15@localhost:27017/admin');
client.once('authenticated', function() {
done();
});
// Authenticate using the newly added user
client.connect(function(err, client) {
test.equal(null, err);
client.close();
});
});
});
});
// DOC_END
}
});
it('should correctly authenticate against normal db', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1
});
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// An admin user must be defined for db level authentication to work correctly
db.admin().addUser('admin', 'admin', function(err) {
test.equal(null, err);
client.close();
new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1,
user: 'admin',
password: 'admin',
authSource: 'admin'
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.addUser('user', 'user', function(err) {
test.equal(null, err);
// Logout admin
client.logout(function(err) {
test.equal(null, err);
// Attempt to save a document
db.collection('test').insert({ a: 1 }, function(err) {
test.ok(err != null);
// Login the user
new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1,
user: 'user',
password: 'user',
authSource: configuration.db
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
// Logout the user
client.logout(function(err) {
test.equal(null, err);
// Attempt to save a document
db.collection('test').insert({ a: 1 }, function(err) {
test.ok(err != null);
client.close();
// restart server
configuration.manager.restart(true).then(function() {
done();
});
});
});
});
});
});
});
});
});
});
});
});
}
});
it('should correctly reapply the authentications', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(new Server('localhost', 27017, { auto_reconnect: true }), {
w: 1
});
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.admin().addUser('admin', 'admin', function(err) {
test.equal(null, err);
// Attempt to save a document
db.collection('test').insert({ a: 1 }, function(err) {
test.ok(err != null);
client.close();
// Login the user
new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1,
user: 'admin',
password: 'admin',
authSource: 'admin'
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
// Bounce server
configuration.manager.restart(false).then(function() {
// Reconnect should reapply the credentials
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
});
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
});
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
});
// Reconnect should reapply the credentials
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
client.close();
// restart server
configuration.manager.restart(true).then(function() {
done();
});
});
});
});
});
});
});
});
});
}
});
it('ordered bulk operation should fail correctly when not authenticated', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1
});
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.admin().addUser('admin', 'admin', function(err) {
test.equal(null, err);
// Attempt to save a document
var col = db.collection('test');
// Initialize the Ordered Batch
var batch = col.initializeOrderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.find({ a: 1 }).updateOne({ $set: { b: 1 } });
batch
.find({ a: 2 })
.upsert()
.updateOne({ $set: { b: 2 } });
batch.insert({ a: 3 });
batch.find({ a: 3 }).remove({ a: 3 });
// Execute the operations
batch.execute(function(err) {
test.ok(err != null);
test.ok(err.code != null);
test.ok(err.errmsg != null);
configuration.manager.restart(true).then(function() {
client.close();
done();
});
});
});
});
});
}
});
it('unordered bulk operation should fail correctly when not authenticated', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server;
// restart server
configuration.manager.restart(true).then(function() {
var client = new MongoClient(new Server('127.0.0.1', 27017, { auto_reconnect: true }), {
w: 1
});
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.admin().addUser('admin', 'admin', function(err) {
test.equal(null, err);
// Attempt to save a document
var col = db.collection('test');
// Initialize the Ordered Batch
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.find({ a: 1 }).updateOne({ $set: { b: 1 } });
batch
.find({ a: 2 })
.upsert()
.updateOne({ $set: { b: 2 } });
batch.insert({ a: 3 });
batch.find({ a: 3 }).remove({ a: 3 });
// Execute the operations
batch.execute(function(err) {
test.ok(err != null);
test.ok(err.code != null);
test.ok(err.errmsg != null);
configuration.manager.restart(true).then(function() {
client.close();
done();
});
});
});
});
});
}
});
// /**********************************************************************************************
// ReplsetRep ReplsetRepl tReplsetRe etRepl Repl t plsetReplse eplsetReplse
// setReplsetR setReplsetRe setReplset plsetR plsetRepls tReplsetRepl etReplsetRep
// pls pls epls plse epls pls epl etRep etRe lset setR pls Rep et
// tReplsetRe tRe etReplsetRe et plset epl set
// lsetRepls lsetRe plsetRepls pl Repls etRepl epl
// ReplsetR Replset tReplsetR tR Repls plsetRe et
// setReplse setRepl lse lse etRe tReplse pls
// epl Rep e epl Rep tRep Re lset lse tRe
// etR setRep etRe tRe set set se epls Repl Repl epl lse
// eplse eplset eplsetR plse Replse tReplsetRep etReplsetR lsetRep setR etRepls
// etRep tRep etReplsetRep setRep lsetReplset plsetRepl ReplsetRepls plsetRe
// **********************************************************************************************/
var setUp = function(configuration, options, callback) {
var ReplSetManager = require('mongodb-topology-manager').ReplSet;
// Check if we have any options
if (typeof options === 'function') (callback = options), (options = null);
// Override options
var rsOptions;
if (options) {
rsOptions = options;
} else {
rsOptions = {
server: {
keyFile: __dirname + '/data/keyfile.txt',
auth: null,
replSet: 'rs'
},
client: {
replSet: 'rs'
}
};
}
// Set up the nodes
var nodes = [
{
options: {
bind_ip: 'localhost',
port: 31000,
dbpath: f('%s/../db/31000', __dirname)
}
},
{
options: {
bind_ip: 'localhost',
port: 31001,
dbpath: f('%s/../db/31001', __dirname)
}
},
{
// arbiter: true,
options: {
bind_ip: 'localhost',
port: 31002,
dbpath: f('%s/../db/31002', __dirname)
}
}
];
// Merge in any node start up options
for (var i = 0; i < nodes.length; i++) {
for (var name in rsOptions.server) {
nodes[i].options[name] = rsOptions.server[name];
}
}
// Create a manager
var replicasetManager = new ReplSetManager('mongod', nodes, rsOptions.client);
// Purge the set
replicasetManager.purge().then(function() {
// Start the server
replicasetManager
.start()
.then(function() {
setTimeout(function() {
callback(null, replicasetManager);
}, 10000);
})
.catch(function() {
process.exit(0);
});
});
};
/**
* @ignore
*/
it('should correctly handle replicaset master stepdown and stepup without losing auth', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
// Connect
new MongoClient(replSet, { w: 1 }).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('root', 'root', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
// Login the user
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ user: 'root', password: 'root', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
replicasetManager
.stepDownPrimary(
false,
{ stepDownSecs: 1, force: true },
{
provider: 'default',
db: 'admin',
user: 'root',
password: 'root'
}
)
.then(function() {
db.collection('replicaset_test_auth').insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
})
.catch(function(e) {
done(e);
});
});
});
});
});
}
});
/**
* @ignore
*/
it(
'Should correctly perform nearest read from secondaries without auth fail when primary is first seed',
{
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
Server = configuration.require.Server,
ReadPreference = configuration.require.ReadPreference,
MongoClient = configuration.require.MongoClient,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
// Connect
new MongoClient(replSet, {
w: 1,
readPreference: ReadPreference.NEAREST
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('root', 'root', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
MongoClient.connect(
'mongodb://root:root@localhost:31000,localhost:31001,localhost:31002/admin?replicaSet=rs&readPreference=nearest',
function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.collection('replicaset_test_auth').insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
db.collection('replicaset_test_auth').findOne({}, function(err) {
test.equal(null, err);
db.collection('replicaset_test_auth').findOne({}, function(err) {
test.equal(null, err);
db.collection('replicaset_test_auth').findOne({}, function(err) {
test.equal(null, err);
db.collection('replicaset_test_auth').findOne({}, function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
});
});
});
}
);
});
});
});
}
}
);
/**
* @ignore
*/
it('should correctly create indexes without hanging when different seedlists', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
Server = configuration.require.Server,
ReadPreference = configuration.require.ReadPreference,
MongoClient = configuration.require.MongoClient,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
// Connect
new MongoClient(replSet, {
w: 1,
readPreference: ReadPreference.NEAREST
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('root', 'root', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
MongoClient.connect(
'mongodb://root:root@localhost:31000,localhost:31001,localhost:31002/admin?replicaSet=rs&readPreference=secondary',
function(err, client) {
test.equal(null, err);
// Attempt create index
client
.db('replicaset_test_auth')
.collection('createIndexes1')
.ensureIndex({ expiresAt: 1 }, { expireAfterSeconds: 0 }, function(err) {
test.equal(null, err);
client.close();
MongoClient.connect(
'mongodb://root:root@localhost:31002/admin?replicaSet=rs&readPreference=secondary',
function(err, client) {
test.equal(null, err);
client
.db('replicaset_test_auth')
.collection('createIndexes2')
.ensureIndex({ expiresAt: 1 }, { expireAfterSeconds: 0 }, function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
}
);
});
}
);
});
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate using primary', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.connect(function(err) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('admin', 'admin', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
// Login the user
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'admin', password: 'admin', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user to the db
db.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
// Close the connection
client.close();
// connection string
var config = f(
'mongodb://me:secret@localhost:%s/%s?replicaSet=%s',
31000,
configuration.db,
'rs'
);
// Connect
MongoClient.connect(config, function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.collections(function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate with two seeds', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('admin', 'admin', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'admin', password: 'admin', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
// Close the connection
client.close();
// connection string
var config = f(
'mongodb://me:secret@localhost:%s,localhost:%s/%s?replicaSet=%s',
31000,
31001,
configuration.db,
'rs'
);
// Connect
MongoClient.connect(config, function(error, client) {
test.equal(null, error);
var db = client.db(configuration.db);
db.collections(function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate with only secondary seed', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.on('all', function(client) {
test.equal(null, err);
var p_db = client.db(configuration.db);
// Add a user
p_db.admin().addUser('admin', 'admin', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'admin', password: 'admin', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var p_db = client.db(configuration.db);
p_db.admin().addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
// Close the connection
client.close();
// connection string
var config = f(
'mongodb://me:secret@localhost:%s/%s?authSource=admin&readPreference=secondary&replicaSet=%s&maxPoolSize=1',
31000,
configuration.db,
'rs'
);
// Connect
MongoClient.connect(config, function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.collection('test').insert({ a: 1 }, function(err) {
test.equal(null, err);
// Logout
client.logout(function() {
// Should fail
db.collection('test').findOne(function(err) {
test.ok(err != null);
// Connect
MongoClient.connect(config, function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
replicasetManager
.secondaries()
.then(function(managers) {
// Shutdown the first secondary
managers[0]
.stop()
.then(function() {
// Shutdown the second secondary
managers[1]
.stop()
.then(function() {
// Let's restart a secondary
managers[0]
.start()
.then(function() {
// Let's restart a secondary
managers[1]
.start()
.then(function() {
client.topology.once('joined', function() {
// Should fail
db.collection('test').findOne(function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
})
.catch(function(e) {
done(e);
});
})
.catch(function(e) {
done(e);
});
})
.catch(function(e) {
done(e);
});
})
.catch(function(e) {
done(e);
});
})
.catch(function(e) {
done(e);
});
});
});
});
});
});
});
});
});
});
client.connect(function(err) {
test.equal(null, err);
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate and ensure index', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.admin().addUser('me', 'secret', { w: 3 }, function runWhatever(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'me', password: 'secret', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.addUser('test', 'test', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'test', password: 'test', authSource: configuration.db }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.collection('userconfirm', function(err, result) {
test.equal(null, err);
var userconfirm = result;
var ensureIndexOptions = { unique: true, w: 0, background: true };
userconfirm.ensureIndex([['confirmcode', 1]], ensureIndexOptions, function(
err
) {
test.equal(null, err);
db_p.collection('session', function(err, result) {
test.equal(null, err);
var session = result;
session.ensureIndex([['sid', 1]], ensureIndexOptions, function(err) {
test.equal(null, err);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly authenticate and use read preference', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p
.admin()
.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function runWhatever(err) {
test.equal(null, err);
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'me', password: 'secret', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.addUser('test', 'test', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'test', password: 'test', authSource: configuration.db }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.collection('userconfirm2').insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
db_p.collection('userconfirm2').findOne(function(err, item) {
test.equal(null, err);
test.equal(1, item.a);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly bring replicaset step down primary and still read from secondary', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReadPreference = configuration.require.ReadPreference,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.on('all', function(client) {
test.ok(client != null);
var db_p = client.db(configuration.db);
db_p
.admin()
.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function runWhatever(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'me', password: 'secret', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.collection('test').insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
db_p.addUser('test', 'test', { w: 3, wtimeout: 25000 }, function(err, result) {
test.equal(null, err);
test.ok(result != null);
client.topology.on('joined', function(t) {
if (t === 'primary') {
var counter = 10;
var errors = 0;
for (var i = 0; i < counter; i++) {
db_p
.collection('test')
.find({ a: 1 })
.setReadPreference(ReadPreference.SECONDARY)
.toArray(function(err) {
counter = counter - 1;
if (err != null) {
errors = errors + 1;
}
if (counter === 0) {
test.equal(0, errors);
client.close();
replicasetManager.stop().then(function() {
done();
});
}
});
}
}
});
// Step down the primary
replicasetManager
.stepDownPrimary(
false,
{
stepDownSecs: 1,
force: true,
returnImmediately: true
},
{
provider: 'default',
db: 'admin',
user: 'me',
password: 'secret'
}
)
.catch(function(e) {
done(e);
});
});
});
});
});
});
client.connect(function(err) {
test.equal(null, err);
});
});
}
});
/**
* @ignore
*/
it('should correctly auth with secondary after killing primary', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReadPreference = configuration.require.ReadPreference,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
// Add a user
db_p.admin().addUser('admin', 'admin', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'admin', password: 'admin', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.collection('test').insert({ a: 1 }, { w: 1 }, function(err) {
test.equal(null, err);
db_p.addUser('test', 'test', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'test', password: 'test', authSource: configuration.db }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
// shutdown the primary
replicasetManager.primary().then(function(primary) {
primary.stop().then(function() {
db_p.serverConfig.on('joined', function(t) {
if (t === 'primary') {
var counter = 1000;
var errors = 0;
for (var i = 0; i < counter; i++) {
db_p
.collection('test')
.find({ a: 1 })
.setReadPreference(ReadPreference.SECONDARY)
.toArray(function(err) {
test.equal(null, err);
counter = counter - 1;
if (counter === 0) {
test.equal(0, errors);
client.close();
replicasetManager.stop().then(function() {
done();
});
}
});
}
}
});
});
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly auth against replicaset admin db using MongoClient', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
var client = new MongoClient(replSet, { w: 3 });
client.connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p
.admin()
.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function runWhatever(err) {
test.equal(null, err);
client.close();
MongoClient.connect(
f(
'mongodb://me:secret@%s:%s/%s?rs_name=%s&readPreference=secondary&w=3',
'localhost',
31000,
'admin',
'rs'
),
function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Insert document
db
.collection('authcollectiontest')
.insert({ a: 1 }, { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
// Find the document
db
.collection('authcollectiontest')
.find()
.toArray(function(err, docs) {
test.equal(1, docs.length);
test.equal(1, docs[0].a);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
// });
}
);
});
});
});
}
});
/**
* @ignore
*/
it('should correctly auth against normal db using MongoClient', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
ReplSet = configuration.require.ReplSet;
setUp(configuration, function(err, replicasetManager) {
var replSet = new ReplSet(
[new Server('localhost', 31000), new Server('localhost', 31001)],
{
rs_name: 'rs',
poolSize: 1
}
);
new MongoClient(replSet, { w: 3 }).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.admin().addUser('admin', 'admin', { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(
new ReplSet([new Server('localhost', 31000), new Server('localhost', 31001)], {
rs_name: 'rs',
poolSize: 1
}),
{ w: 1, user: 'admin', password: 'admin', authSource: 'admin' }
).connect(function(err, client) {
test.equal(null, err);
var db_p = client.db(configuration.db);
db_p.addUser('me', 'secret', { w: 3, wtimeout: 25000 }, function runWhatever(err) {
test.equal(null, err);
client.close();
MongoClient.connect(
f(
'mongodb://me:secret@%s:%s/%s?rs_name=%s&readPreference=secondary&w=3',
'localhost',
31000,
configuration.db,
'rs'
),
function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Insert document
db
.collection('authcollectiontest1')
.insert({ a: 1 }, { w: 3, wtimeout: 25000 }, function(err) {
test.equal(null, err);
// Find the document
db
.collection('authcollectiontest1')
.find()
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(1, docs.length);
test.equal(1, docs[0].a);
client.close();
replicasetManager.stop().then(function() {
done();
});
});
});
}
);
});
});
});
});
});
}
});
// /*************************************************************************************
//
// sMong sMong ngosMo sMong ongosM sMong ongosM ngos n
// ngosM ngosM osMongos ngosM osMong ongosMongo gosMongo gosMongosM
// ongo sMo Mong Mong Mongo ngos gosM gosM sMon sMon sMong Mong
// osMon ongo gos osM gosMon sMo sMon ong ngo gos ngosM
// ongos o Mo sMon ongo MongosM ngo ngo osMo Mong Mongo
// osMong Mo go ngos osMo gosMong sMo sMo ongo gosM Mongo
// on osMo go Mo Mon ong Mon osMongo ngo ngosMo sMo Mon osMo
// os ongosM gos gos osM gos ongosMo Mo Mongo ngo gos ngos
// ong sMong sMon Mong Mong Mon sMongo gos gosM sMon sMon sMon sMon
// ngosMo gos ongosM osMongos ngosM gosMo ongosMon gosMongo ngosMongos
// Mongo Mo osMong ngosMo sMong ongo sMongosM ongosM sMongosMo
//
// **************************************************************************************/
var setUpSharded = function(configuration, options, callback) {
var ShardingManager = require('../topology_test_definitions').Sharded;
// Check if we have any options
if (typeof options === 'function') (callback = options), (options = null);
// Create Replicaset Manager
var shardedManager = new ShardingManager({
shard: {
auth: null,
keyFile: __dirname + '/data/keyfile.txt'
},
config: {
auth: null,
keyFile: __dirname + '/data/keyfile.txt'
},
proxy: {
keyFile: __dirname + '/data/keyfile.txt'
}
});
// Start SSL replicaset manager
shardedManager.purge().then(function() {
shardedManager
.start()
.then(function() {
callback(null, shardedManager);
})
.catch(function(e) {
callback(e, null);
});
});
};
/**
* @ignore
*/
it('should correctly connect and authenticate against admin database using mongos', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
Mongos = configuration.require.Mongos;
setUpSharded(configuration, function(err, manager) {
var mongos = new Mongos([new Server('localhost', 51000)], { poolSize: 1 });
var client = new MongoClient(mongos, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('admin', 'admin', { w: 'majority' }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(new Mongos([new Server('localhost', 51000)], { poolSize: 1 }), {
w: 1,
user: 'admin',
password: 'admin',
authSource: 'admin'
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.addUser('me', 'secret', { w: 'majority' }, function(err) {
test.equal(null, err);
// Close the connection
client.close();
setTimeout(function() {
// connection string
var config = f('mongodb://me:secret@localhost:%s/%s', 51000, configuration.db);
// Connect
MongoClient.connect(config, function(error, client) {
test.equal(null, error);
var db = client.db(configuration.db);
db.collections(function(error) {
test.equal(null, error);
client.close();
manager.stop().then(function() {
done();
});
});
});
}, 5000);
});
});
});
});
});
}
});
/**
* @ignore
*/
it('should correctly handle proxy stepdown and stepup without losing auth for sharding', {
metadata: { requires: { topology: ['auth'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration,
MongoClient = configuration.require.MongoClient,
Server = configuration.require.Server,
Mongos = configuration.require.Mongos;
setUpSharded(configuration, function(err, manager) {
var mongos = new Mongos([new Server('localhost', 51000)], { poolSize: 1 });
var client = new MongoClient(mongos, { w: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
// Add a user
db.admin().addUser('admin', 'admin', { w: 'majority' }, function(err) {
test.equal(null, err);
client.close();
new MongoClient(new Mongos([new Server('localhost', 51000)], { poolSize: 1 }), {
w: 1,
user: 'admin',
password: 'admin',
authSource: 'admin'
}).connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
db.addUser('me', 'secret', { w: 'majority' }, function(err) {
test.equal(null, err);
// Close the connection
client.close();
// connection string
var config = f('mongodb://me:secret@localhost:%s/%s', 51000, configuration.db);
// Connect
MongoClient.connect(config, function(error, client) {
test.equal(null, error);
var db = client.db(configuration.db);
db.collections(function(error) {
test.equal(null, error);
// Get the proxies
var proxies = manager.proxies();
proxies[0].stop().then(function() {
proxies[1].stop().then(function() {
db.collections(function(error) {
test.equal(null, error);
});
proxies[0].start().then(function() {
proxies[1].start().then(function() {
db.collections(function(error) {
test.equal(null, error);
client.close();
manager.stop().then(function() {
done();
});
});
});
});
});
});
});
});
});
});
});
});
});
}
});
});
| 1 | 14,817 | Side note: I'd love to see more of these fields exposed on configuration. | mongodb-node-mongodb-native | js |
@@ -5,7 +5,7 @@
contact_us = (Rails.configuration.branding[:organisation][:contact_us_url] || contact_us_url)
email_subject = _('Query or feedback related to %{tool_name}') %{ :tool_name => tool_name }
user_name = User.find_by(email: @resource.email).nil? ? @resource.email : User.find_by(email: @resource.email).name(false)
- inviter_name = @resource.is_a?(User) ? @resource.invited_by.name(false) : @resource.invited_by.name
+ inviter_name = @resource.invited_by.name
%>
<% FastGettext.with_locale FastGettext.default_locale do %>
<p> | 1 | <%
tool_name = Rails.configuration.branding[:application][:name]
link = accept_invitation_url(@resource, :invitation_token => @token)
helpdesk_email = Rails.configuration.branding[:organisation][:helpdesk_email]
contact_us = (Rails.configuration.branding[:organisation][:contact_us_url] || contact_us_url)
email_subject = _('Query or feedback related to %{tool_name}') %{ :tool_name => tool_name }
user_name = User.find_by(email: @resource.email).nil? ? @resource.email : User.find_by(email: @resource.email).name(false)
inviter_name = @resource.is_a?(User) ? @resource.invited_by.name(false) : @resource.invited_by.name
%>
<% FastGettext.with_locale FastGettext.default_locale do %>
<p>
<%= _('Hello %{user_name}') %{ :user_name => user_name } %>
</p>
<p>
<%= _("Your colleague %{inviter_name} has invited you to contribute to "\
" their Data Management Plan in %{tool_name}") % {
tool_name: tool_name,
inviter_name: inviter_name
} %>
</p>
<p>
<%= sanitize(_('%{click_here} to accept the invitation, (or copy %{link} into your browser). If you don\'t want to accept the invitation, please ignore this email.') % {
click_here: link_to(_('Click here'), link), link: link
}) %>
</p>
<p>
<%= _('All the best') %>
<br />
<%= _('The %{tool_name} team') %{:tool_name => tool_name} %>
</p>
<p>
<%= _('Please do not reply to this email.') %>
<%= sanitize(_('If you have any questions or need help, please contact us at %{helpdesk_email} or visit %{contact_us_url}') % {
helpdesk_email: mail_to(helpdesk_email, helpdesk_email,
subject: email_subject),
contact_us_url: link_to(contact_us, contact_us)
}) %>
</p>
<% end %>
| 1 | 19,027 | In the case of accounts generated by API clients, what name gets put for the `inviter_name` or `invited_by` record? | DMPRoadmap-roadmap | rb |
@@ -33,6 +33,8 @@ public class MutableQbftConfigOptions extends MutableBftConfigOptions implements
qbftConfigOptions.getValidatorContractAddress().map(String::toLowerCase);
}
+ // TODO-lucas do I need to implement getStartBlock here?
+
@Override
public Optional<String> getValidatorContractAddress() {
return validatorContractAddress; | 1 | /*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.qbft;
import org.hyperledger.besu.config.QbftConfigOptions;
import org.hyperledger.besu.consensus.common.bft.BftForksSchedule;
import org.hyperledger.besu.consensus.common.bft.MutableBftConfigOptions;
import java.util.Optional;
/**
* A mutable {@link QbftConfigOptions} that is used for building config for transitions in the
* {@link BftForksSchedule}.
*/
public class MutableQbftConfigOptions extends MutableBftConfigOptions implements QbftConfigOptions {
private Optional<String> validatorContractAddress;
public MutableQbftConfigOptions(final QbftConfigOptions qbftConfigOptions) {
super(qbftConfigOptions);
this.validatorContractAddress =
qbftConfigOptions.getValidatorContractAddress().map(String::toLowerCase);
}
@Override
public Optional<String> getValidatorContractAddress() {
return validatorContractAddress;
}
public void setValidatorContractAddress(final Optional<String> validatorContractAddress) {
this.validatorContractAddress = validatorContractAddress;
}
}
| 1 | 26,645 | I think this class is only relevant for updating the QbftConfigOptions based on the transitions config, so probably not. | hyperledger-besu | java |
@@ -89,5 +89,10 @@ namespace Nethermind.JsonRpc
///
/// </summary>
public const int ExecutionError = -32015;
+
+ /// <summary>
+ /// Request exceeds defined tracer timeout limit
+ /// </summary>
+ public const int TracerTimeout = -32016;
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
namespace Nethermind.JsonRpc
{
public static class ErrorCodes
{
public const int None = 0;
/// <summary>
/// Invalid JSON
/// </summary>
public const int ParseError = -32700;
/// <summary>
/// JSON is not a valid request object
/// </summary>
public const int InvalidRequest = -32600;
/// <summary>
/// Method does not exist
/// </summary>
public const int MethodNotFound = -32601;
/// <summary>
/// Invalid method parameters
/// </summary>
public const int InvalidParams = -32602;
/// <summary>
/// Internal JSON-RPC error
/// </summary>
public const int InternalError = -32603;
/// <summary>
/// Missing or invalid parameters
/// </summary>
public const int InvalidInput = -32000;
/// <summary>
/// Requested resource not found
/// </summary>
public const int ResourceNotFound = -32001;
/// <summary>
/// Requested resource not available
/// </summary>
public const int ResourceUnavailable = -32002;
/// <summary>
/// Transaction creation failed
/// </summary>
public const int TransactionRejected = -32010;
/// <summary>
/// Account locked
/// </summary>
public const int AccountLocked = -32020;
/// <summary>
/// Method is not implemented
/// </summary>
public const int MethodNotSupported = -32004;
/// <summary>
/// Request exceeds defined limit
/// </summary>
public const int LimitExceeded = -32005;
/// <summary>
/// Version of JSON-RPC protocol is not supported
/// </summary>
public const int RpcVersionNotSupported = -32015;
/// <summary>
/// Error during execution of the request
/// </summary>
public const int ExecutionError = -32015;
}
} | 1 | 24,194 | what is the number thrown by Geth? | NethermindEth-nethermind | .cs |
@@ -2036,8 +2036,8 @@ common.checkDatabaseConfigMatch = (apiConfig, frontendConfig) => {
// mongodb0.example.com:27017
let apiMongoHost = apiConfig.split("/")[2];
let frontendMongoHost = frontendConfig.split("/")[2];
- let apiMongoDb = apiConfig.split("/")[3];
- let frontendMongoDb = frontendConfig.split("/")[3];
+ let apiMongoDb = apiConfig.split("/")[3].split('?')[0];
+ let frontendMongoDb = frontendConfig.split("/")[3].split('?')[0];
if (apiMongoHost === frontendMongoHost && apiMongoDb === frontendMongoDb) {
return true;
} | 1 | /**
* Module for some common utility functions and references
* @module api/utils/common
*/
/** @lends module:api/utils/common */
var common = {},
moment = require('moment-timezone'),
time = require('time')(Date),
crypto = require('crypto'),
logger = require('./log.js'),
mcc_mnc_list = require('mcc-mnc-list'),
plugins = require('../../plugins/pluginManager.js'),
countlyConfig = require('./../config', 'dont-enclose'),
argon2 = require('argon2');
var matchHtmlRegExp = /"|'|&(?!amp;|quot;|#39;|lt;|gt;|#46;|#36;)|<|>/;
var matchLessHtmlRegExp = /[<>]/;
/**
* Escape special characters in the given string of html.
* @param {string} string - The string to escape for inserting into HTML
* @param {bool} more - if false, escapes only tags, if true escapes also quotes and ampersands
* @returns {string} escaped string
**/
common.escape_html = function(string, more) {
var str = '' + string;
var match;
if (more) {
match = matchHtmlRegExp.exec(str);
}
else {
match = matchLessHtmlRegExp.exec(str);
}
if (!match) {
return str;
}
var escape;
var html = '';
var index = 0;
var lastIndex = 0;
for (index = match.index; index < str.length; index++) {
switch (str.charCodeAt(index)) {
case 34: // "
escape = '&quot;';
break;
case 38: // &
escape = '&amp;';
break;
case 39: // '
escape = '&#39;';
break;
case 60: // <
escape = '&lt;';
break;
case 62: // >
escape = '&gt;';
break;
default:
continue;
}
if (lastIndex !== index) {
html += str.substring(lastIndex, index);
}
lastIndex = index + 1;
html += escape;
}
return lastIndex !== index ? html + str.substring(lastIndex, index) : html;
};
/**
* Escape special characters in the given value, may be nested object
* @param {string} key - key of the value
* @param {vary} value - value to escape
* @param {bool} more - if false, escapes only tags, if true escapes also quotes and ampersands
* @returns {vary} escaped value
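* @example
* //illustrative call on this internal helper, outputs {note: "&lt;b&gt;hi&lt;/b&gt;"}
* escape_html_entities("k", {note: "<b>hi</b>"}, false);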
**/
function escape_html_entities(key, value, more) {
if (typeof value === 'object' && value && (value.constructor === Object || value.constructor === Array)) {
if (Array.isArray(value)) {
let replacement = [];
for (let k = 0; k < value.length; k++) {
if (typeof value[k] === "string") {
let ob = getJSON(value[k]);
if (ob.valid) {
replacement[common.escape_html(k, more)] = JSON.stringify(escape_html_entities(k, ob.data, more));
}
else {
replacement[k] = common.escape_html(value[k], more);
}
}
else {
replacement[k] = escape_html_entities(k, value[k], more);
}
}
return replacement;
}
else {
let replacement = {};
for (let k in value) {
if (Object.hasOwnProperty.call(value, k)) {
if (typeof value[k] === "string") {
let ob = getJSON(value[k]);
if (ob.valid) {
replacement[common.escape_html(k, more)] = JSON.stringify(escape_html_entities(k, ob.data, more));
}
else {
replacement[common.escape_html(k, more)] = common.escape_html(value[k], more);
}
}
else {
replacement[common.escape_html(k, more)] = escape_html_entities(k, value[k], more);
}
}
}
return replacement;
}
}
return value;
}
/**
* Check if string is a valid json
* @param {string} val - string that might be json encoded
* @returns {object} with property data for parsed data and property valid to check if it was valid json encoded string or not
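* @example
* //illustrative call on this internal helper, outputs {valid: true, data: {a: 1}}
* getJSON('{"a":1}');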
**/
function getJSON(val) {
var ret = {valid: false};
try {
ret.data = JSON.parse(val);
if (ret.data && typeof ret.data === "object") {
ret.valid = true;
}
}
catch (ex) {
//silent error
}
return ret;
}
/**
* Logger object for creating module specific logging
* @type {module:api/utils/log~Logger}
* @example
* var log = common.log('myplugin:api');
* log.i('myPlugin got a request: %j', params.qstring);
*/
common.log = logger;
/**
* Mapping some common property names from longer understandable to shorter representation stored in database
* @type {object}
*/
common.dbMap = {
'events': 'e',
'total': 't',
'new': 'n',
'unique': 'u',
'duration': 'd',
'durations': 'ds',
'frequency': 'f',
'loyalty': 'l',
'sum': 's',
'dur': 'dur',
'count': 'c'
};
/**
* Mapping some common user property names from longer understandable to shorter representation stored in database
* @type {object}
*/
common.dbUserMap = {
'device_id': 'did',
'user_id': 'uid',
'first_seen': 'fs',
'last_seen': 'ls',
'last_payment': 'lp',
'session_duration': 'sd',
'total_session_duration': 'tsd',
'session_count': 'sc',
'device': 'd',
'carrier': 'c',
'city': 'cty',
'region': 'rgn',
'country_code': 'cc',
'platform': 'p',
'platform_version': 'pv',
'app_version': 'av',
'last_begin_session_timestamp': 'lbst',
'last_end_session_timestamp': 'lest',
'has_ongoing_session': 'hos',
'previous_events': 'pe',
'resolution': 'r'
};
/**
* Mapping some common event property names from longer understandable to shorter representation stored in database
* @type {object}
*/
common.dbEventMap = {
'user_properties': 'up',
'timestamp': 'ts',
'segmentations': 'sg',
'count': 'c',
'sum': 's',
'duration': 'dur',
'previous_events': 'pe'
};
/**
* Default {@link countlyConfig} object for API server
* @type {object}
*/
common.config = countlyConfig;
/**
* Reference to time module
* @type {object}
*/
common.time = time;
/**
* Reference to momentjs
* @type {object}
*/
common.moment = moment;
/**
* Reference to crypto module
* @type {object}
*/
common.crypto = crypto;
/**
* Operating system/platform mappings from what can be passed in metrics to shorter representations
* stored in db as prefix to OS segmented values
* @type {object}
*/
common.os_mapping = {
"unknown": "unk",
"undefined": "unk",
"tvos": "atv",
"watchos": "wos",
"unity editor": "uty",
"qnx": "qnx",
"os/2": "os2",
"windows": "mw",
"open bsd": "ob",
"searchbot": "sb",
"sun os": "so",
"solaris": "so",
"beos": "bo",
"mac osx": "o",
"macos": "o",
"mac": "o",
"webos": "web",
"brew": "brew"
};
/**
* Whole base64 alphabet for fetching split documents
* @type {object}
*/
common.base64 = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "+", "/"];
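/**
* Promisified wrapper around db collection methods: the first argument is the collection name,
* the second is the collection method to call ("find" resolves with an array via toArray),
* and all remaining arguments are passed through to that method
* @returns {Promise} promise resolving with the method result or rejecting with the db error
* @example
* //illustrative usage, assuming an "events" collection exists
* common.dbPromise("events", "findOne", {_id: "no-segment"}).then(console.log);
*/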
common.dbPromise = function() {
var args = Array.prototype.slice.call(arguments);
return new Promise(function(resolve, reject) {
var collection = common.db.collection(args[0]),
method = args[1];
if (method === 'find') {
collection[method].apply(collection, args.slice(2)).toArray(function(err, result) {
if (err) {
reject(err);
}
else {
resolve(result);
}
});
}
else {
collection[method].apply(collection, args.slice(2).concat([function(err, result) {
if (err) {
reject(err);
}
else {
resolve(result);
}
}]));
}
});
};
/**
* Fetches nested property values from an obj.
* @param {object} obj - standard countly metric object
* @param {string} desc - dot separate path to fetch from object
* @returns {object} fetched object from provided path
* @example
* //outputs {"u":20,"t":20,"n":5}
* common.getDescendantProp({"2017":{"1":{"2":{"u":20,"t":20,"n":5}}}}, "2017.1.2");
*/
common.getDescendantProp = function(obj, desc) {
desc = String(desc);
if (desc.indexOf(".") === -1) {
return obj[desc];
}
var arr = desc.split(".");
while (arr.length && (obj = obj[arr.shift()])) {
//doing operator in the loop condition
}
return obj;
};
/**
* Checks if provided value could be converted to a number,
* even if its current type is something else, for example the string value "42"
* @param {any} n - value to check if it can be converted to number
* @returns {boolean} true if can be a number, false if can't be a number
* @example
* common.isNumber(1) //outputs true
* common.isNumber("2") //outputs true
* common.isNumber("test") //outputs false
*/
common.isNumber = function(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
};
/**
* This is the default Countly behavior of type conversion for storing properties accepted through API requests,
* dealing with numbers passed as strings and with numbers that are too long
* @param {any} value - value to convert to usable type
* @returns {varies} converted value
* @example
* common.convertToType(1) //outputs 1
* common.convertToType("2") //outputs 2
* common.convertToType("test") //outputs "test"
* common.convertToType("12345678901234567890") //outputs "12345678901234567890"
*/
common.convertToType = function(value) {
//handle array values
if (Array.isArray(value)) {
for (var i = 0; i < value.length; i++) {
value[i] = common.convertToType(value[i]);
}
return value;
}
//if value can be a number
else if (common.isNumber(value)) {
//check if it is string but is less than 16 length
if (value.length && value.length <= 16) {
//convert to number
return parseFloat(value);
}
//check if it is number, but longer than 16 digits (max limit)
else if ((value + "").length > 16) {
//convert to string
return value + "";
}
else {
//return number as is
return value;
}
}
else {
//return as string
return value + "";
}
};
/**
* Safe division between numbers providing 0 as result in cases when dividing by 0
* @param {number} dividend - number which to divide
* @param {number} divisor - number by which to divide
* @returns {number} result of division
* @example
* //outputs 0
* common.safeDivision(100, 0);
*/
common.safeDivision = function(dividend, divisor) {
var tmpAvgVal;
tmpAvgVal = dividend / divisor;
if (!tmpAvgVal || tmpAvgVal === Number.POSITIVE_INFINITY) {
tmpAvgVal = 0;
}
return tmpAvgVal;
};
/**
* Pad number with zeroes from the left to the specified length
* @param {number} number - number to pad
* @param {number} width - pad to what length in symbols
* @returns {string} padded number
* @example
* //outputs 0012
* common.zeroFill(12, 4);
*/
common.zeroFill = function(number, width) {
width -= number.toString().length;
if (width > 0) {
return new Array(width + (/\./.test(number) ? 2 : 1)).join('0') + number;
}
return number + ""; // always return a string
};
/**
* Add item or array to existing array only if values are not already in original array
* @param {array} arr - original array where to add unique elements
* @param {string|number|array} item - item to add or array to merge
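* @example
* //illustrative call, arr becomes [1, 2, 3]
* var arr = [1, 2];
* common.arrayAddUniq(arr, [2, 3]);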
*/
common.arrayAddUniq = function(arr, item) {
if (!arr) {
arr = [];
}
if (toString.call(item) === "[object Array]") {
for (var i = 0; i < item.length; i++) {
if (arr.indexOf(item[i]) === -1) {
arr[arr.length] = item[i];
}
}
}
else {
if (arr.indexOf(item) === -1) {
arr[arr.length] = item;
}
}
};
/**
* Create HMAC sha1 hash from provided value and optional salt
* @param {string} str - value to hash
* @param {string=} addSalt - optional salt flag; when truthy the current ms timestamp is used as the HMAC key, otherwise the key is empty
* @returns {string} HMAC sha1 hash
*/
common.sha1Hash = function(str, addSalt) {
var salt = (addSalt) ? new Date().getTime() : '';
return crypto.createHmac('sha1', salt + '').update(str + '').digest('hex');
};
/**
* Create HMAC sha512 hash from provided value and optional salt
* @param {string} str - value to hash
* @param {string=} addSalt - optional salt flag; when truthy the current ms timestamp is used as the HMAC key, otherwise the key is empty
* @returns {string} HMAC sha512 hash
*/
common.sha512Hash = function(str, addSalt) {
var salt = (addSalt) ? new Date().getTime() : '';
return crypto.createHmac('sha512', salt + '').update(str + '').digest('hex');
};
/**
* Create argon2 hash string
* @param {string} str - string to hash
* @returns {promise} hash promise
**/
common.argon2Hash = function(str) {
return argon2.hash(str);
};
/**
* Create MD5 hash from provided value
* @param {string} str - value to hash
* @returns {string} MD5 hash
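* @example
* //illustrative call, outputs 098f6bcd4621d373cade4e832627b4f6
* common.md5Hash("test");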
*/
common.md5Hash = function(str) {
return crypto.createHash('md5').update(str + '').digest('hex');
};
/**
* Modifies provided object in the format object["2012.7.20.property"] = increment.
* Usually used when filling up Countly metric model data
* @param {params} params - {@link params} object
* @param {object} object - object to fill
* @param {string} property - metric value or segment or property to fill/increment
* @param {number=} increment - by how much to increment, default is 1
* @returns {boolean|undefined} false if the provided time object is incomplete, otherwise undefined as the object is modified in place
* @example
* var obj = {};
* common.fillTimeObject(params, obj, "u", 1);
* console.log(obj);
* //outputs
* { '2017.u': 1,
* '2017.2.u': 1,
* '2017.2.23.u': 1,
* '2017.2.23.8.u': 1,
* '2017.w8.u': 1 }
*/
common.fillTimeObject = function(params, object, property, increment) {
increment = (increment) ? increment : 1;
var timeObj = params.time;
if (!timeObj || !timeObj.yearly || !timeObj.monthly || !timeObj.weekly || !timeObj.daily || !timeObj.hourly) {
return false;
}
object[timeObj.yearly + '.' + property] = increment;
object[timeObj.monthly + '.' + property] = increment;
object[timeObj.daily + '.' + property] = increment;
// If the property parameter contains a dot, hourly data is not saved in
// order to prevent two level data (such as 2012.7.20.TR.u) to get out of control.
if (property.indexOf('.') === -1) {
object[timeObj.hourly + '.' + property] = increment;
}
// For properties that hold the unique visitor count we store weekly data as well.
if (property.substr(-2) === ("." + common.dbMap.unique) ||
property === common.dbMap.unique ||
property.substr(0, 2) === (common.dbMap.frequency + ".") ||
property.substr(0, 2) === (common.dbMap.loyalty + ".") ||
property.substr(0, 3) === (common.dbMap.durations + ".") ||
property === common.dbMap.paying) {
object[timeObj.yearly + ".w" + timeObj.weekly + '.' + property] = increment;
}
};
/**
* Creates a time object from request's millisecond or second timestamp in provided app's timezone
* @param {string} appTimezone - app's timezone
* @param {number} reqTimestamp - timestamp in the request
* @returns {timeObject} Time object for current request
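* @example
* //illustrative usage with a 10 digit seconds timestamp
* var timeObj = common.initTimeObj("Europe/London", 1500000000);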
*/
common.initTimeObj = function(appTimezone, reqTimestamp) {
var currTimestamp,
curMsTimestamp,
currDate,
currDateWithoutTimestamp = new Date();
// Check if the timestamp parameter exists in the request and is a 10 or 13 digit integer, also handling float timestamps with milliseconds after the dot
if (reqTimestamp && (Math.round(parseFloat(reqTimestamp, 10)) + "").length === 10 && common.isNumber(reqTimestamp)) {
// If the received timestamp is greater than current time use the current time as timestamp
currTimestamp = (parseInt(reqTimestamp, 10) > time.time()) ? time.time() : parseInt(reqTimestamp, 10);
curMsTimestamp = (parseInt(reqTimestamp, 10) > time.time()) ? time.time() * 1000 : parseFloat(reqTimestamp, 10) * 1000;
currDate = new Date(currTimestamp * 1000);
}
else if (reqTimestamp && (Math.round(parseFloat(reqTimestamp, 10)) + "").length === 13 && common.isNumber(reqTimestamp)) {
var tmpTimestamp = Math.floor(parseInt(reqTimestamp, 10) / 1000);
curMsTimestamp = (tmpTimestamp > time.time()) ? Date.now() : parseInt(reqTimestamp, 10);
currTimestamp = (tmpTimestamp > time.time()) ? time.time() : tmpTimestamp;
currDate = new Date(currTimestamp * 1000);
}
else {
currTimestamp = time.time(); // UTC
currDate = new Date();
curMsTimestamp = currDate.getTime();
}
currDate.setTimezone(appTimezone);
currDateWithoutTimestamp.setTimezone(appTimezone);
var tmpMoment = moment(currDate);
tmpMoment.tz(appTimezone);
/**
* @typedef timeObject
* @type {object}
* @global
* @property {momentjs} now - momentjs instance for request's time in app's timezone
* @property {momentjs} nowUTC - momentjs instance for request's time in UTC
* @property {momentjs} nowWithoutTimestamp - momentjs instance for current time in app's timezone
* @property {number} timestamp - request's seconds timestamp
* @property {number} mstimestamp - request's milliseconds timestamp
* @property {string} yearly - year of request time in app's timezone in YYYY format
* @property {string} monthly - month of request time in app's timezone in YYYY.M format
* @property {string} daily - date of request time in app's timezone in YYYY.M.D format
* @property {string} hourly - hour of request time in app's timezone in YYYY.M.D.H format
* @property {number} weekly - week of request time in app's timezone, as the day of the year divided by 7 and rounded up
* @property {string} month - month of request time in app's timezone in format M
* @property {string} day - day of request time in app's timezone in format D
* @property {string} hour - hour of request time in app's timezone in format H
*/
return {
now: tmpMoment,
nowUTC: moment.utc(currDate),
nowWithoutTimestamp: moment(currDateWithoutTimestamp).tz(appTimezone),
timestamp: currTimestamp,
mstimestamp: curMsTimestamp,
yearly: tmpMoment.format("YYYY"),
monthly: tmpMoment.format("YYYY.M"),
daily: tmpMoment.format("YYYY.M.D"),
hourly: tmpMoment.format("YYYY.M.D.H"),
weekly: Math.ceil(tmpMoment.format("DDD") / 7),
month: tmpMoment.format("M"),
day: tmpMoment.format("D"),
hour: tmpMoment.format("H")
};
};
/**
* Creates a Date object from provided seconds timestamp in provided timezone
* @param {number} timestamp - unix timestamp in seconds
* @param {string} timezone - name of the timezone
* @returns {Date} Date object for provided time
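* @example
* //illustrative call, returns a Date for the given seconds timestamp in the provided timezone
* common.getDate(1500000000, "Europe/London");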
*/
common.getDate = function(timestamp, timezone) {
var tmpDate = (timestamp) ? new Date(timestamp * 1000) : new Date();
if (timezone) {
tmpDate.setTimezone(timezone);
}
return tmpDate;
};
/**
* Returns day of the year from provided seconds timestamp in provided timezone
* @param {number} timestamp - unix timestamp in seconds
* @param {string} timezone - name of the timezone
* @returns {number} current day of the year
*/
common.getDOY = function(timestamp, timezone) {
var endDate = (timestamp) ? new Date(timestamp * 1000) : new Date();
if (timezone) {
endDate.setTimezone(timezone);
}
var startDate = (timestamp) ? new Date(timestamp * 1000) : new Date();
if (timezone) {
startDate.setTimezone(timezone);
}
startDate.setMonth(0);
startDate.setDate(1);
startDate.setHours(0);
startDate.setMinutes(0);
startDate.setSeconds(0);
startDate.setMilliseconds(0);
var diff = endDate - startDate;
var oneDay = 1000 * 60 * 60 * 24;
var currDay = Math.ceil(diff / oneDay);
return currDay;
};
/**
* Returns the number of days in the provided year
* @param {number} year - year to check for days
* @returns {number} number of days in provided year
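* @example
* //outputs 366
* common.getDaysInYear(2016);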
*/
common.getDaysInYear = function(year) {
if (new Date(year, 1, 29).getMonth() === 1) {
return 366;
}
else {
return 365;
}
};
/**
* Returns the number of ISO weeks in the provided year
* @param {number} year - year to check for weeks
* @returns {number} number of ISO weeks in the provided year
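* @example
* //outputs 53, as 2015 started on a Thursday
* common.getISOWeeksInYear(2015);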
*/
common.getISOWeeksInYear = function(year) {
var d = new Date(year, 0, 1),
isLeap = new Date(year, 1, 29).getMonth() === 1;
//Check for a Jan 1 that's a Thursday or a leap year that has a
//Wednesday Jan 1. Otherwise it's 52
return d.getDay() === 4 || isLeap && d.getDay() === 3 ? 53 : 52;
};
/**
* Validates provided arguments
* @param {object} args - arguments to validate
* @param {object} argProperties - rules for validating each argument
* @param {boolean} argProperties.required - should property be present in args
* @param {string} argProperties.type - what type should property be, possible values: String, Array, Number, URL, Boolean, Object
* @param {string} argProperties.max-length - property should not be longer than provided value
* @param {string} argProperties.min-length - property should not be shorter than provided value
* @param {string} argProperties.exclude-from-ret-obj - should property be present in returned validated args object
* @param {string} argProperties.has-number - should string property have any number in it
* @param {string} argProperties.has-char - should string property have any latin character in it
* @param {string} argProperties.has-upchar - should string property have any upper cased latin character in it
* @param {string} argProperties.has-special - should string property have any non-latin character in it
* @param {boolean} returnErrors - return error details as array or only boolean result
* @returns {object|boolean} object with validated args, or false if args do not pass validation; when returnErrors is true, an object with result, errors and obj properties
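* @example
* //illustrative rule set, outputs {app_id: "12345"} when valid, false otherwise
* common.validateArgs({app_id: "12345"}, {app_id: {required: true, type: 'String', 'min-length': 1}});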
*/
common.validateArgs = function(args, argProperties, returnErrors) {
if (arguments.length === 2) {
returnErrors = false;
}
var returnObj;
if (returnErrors) {
returnObj = {
result: true,
errors: [],
obj: {}
};
}
else {
returnObj = {};
}
if (!args) {
if (returnErrors) {
returnObj.result = false;
returnObj.errors.push("Missing 'args' parameter");
delete returnObj.obj;
return returnObj;
}
else {
return false;
}
}
for (var arg in argProperties) {
var argState = true;
if (argProperties[arg].required) {
if (args[arg] === void 0) {
if (returnErrors) {
returnObj.errors.push("Missing " + arg + " argument");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (args[arg] !== void 0) {
if (argProperties[arg].type) {
if (argProperties[arg].type === 'Number' || argProperties[arg].type === 'String') {
if (toString.call(args[arg]) !== '[object ' + argProperties[arg].type + ']') {
if (returnErrors) {
returnObj.errors.push("Invalid type for " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
else if (argProperties[arg].type === 'URL') {
if (toString.call(args[arg]) !== '[object String]') {
if (returnErrors) {
returnObj.errors.push("Invalid type for " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
else if (args[arg] && !/^([a-z]([a-z]|\d|\+|-|\.)*):(\/\/(((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:)*@)?((\[(|(v[\da-f]{1,}\.(([a-z]|\d|-|\.|_|~)|[!$&'()*+,;=]|:)+))\])|((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5]))|(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=])*)(:\d*)?)(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)*)*|(\/((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)*)*)?)|((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)*)*)|((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)){0})(\?((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)|[\uE000-\uF8FF]|\/|\?)*)?(#((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!$&'()*+,;=]|:|@)|\/|\?)*)?$/i.test(args[arg])) {
if (returnErrors) {
returnObj.errors.push("Invalid url string " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
                else if (argProperties[arg].type === 'Boolean') {
                    //must be a conjunction; with || the check could never fail
                    if (args[arg] !== true && args[arg] !== false && toString.call(args[arg]) !== '[object Boolean]') {
                        if (returnErrors) {
                            returnObj.errors.push("Invalid type for " + arg);
                            returnObj.result = false;
                            argState = false;
                        }
                        else {
                            return false;
                        }
                    }
                }
else if (argProperties[arg].type === 'Array') {
if (!Array.isArray(args[arg])) {
if (returnErrors) {
returnObj.errors.push("Invalid type for " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
else if (argProperties[arg].type === 'Object') {
if (toString.call(args[arg]) !== '[object ' + argProperties[arg].type + ']' && !(!argProperties[arg].required && args[arg] === null)) {
if (returnErrors) {
returnObj.errors.push("Invalid type for " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
else {
if (returnErrors) {
returnObj.errors.push("Invalid type declaration for " + arg);
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
else {
if (toString.call(args[arg]) !== '[object String]') {
if (returnErrors) {
returnObj.errors.push(arg + " should be string");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['max-length']) {
if (args[arg].length > argProperties[arg]['max-length']) {
if (returnErrors) {
returnObj.errors.push("Length of " + arg + " is greater than max length value");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['min-length']) {
if (args[arg].length < argProperties[arg]['min-length']) {
if (returnErrors) {
returnObj.errors.push("Length of " + arg + " is lower than min length value");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['has-number']) {
if (!/\d/.test(args[arg])) {
if (returnErrors) {
returnObj.errors.push(arg + " should has number");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['has-char']) {
if (!/[A-Za-z]/.test(args[arg])) {
if (returnErrors) {
returnObj.errors.push(arg + " should has char");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['has-upchar']) {
if (!/[A-Z]/.test(args[arg])) {
if (returnErrors) {
returnObj.errors.push(arg + " should has upchar");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argProperties[arg]['has-special']) {
if (!/[^A-Za-z\d]/.test(args[arg])) {
if (returnErrors) {
returnObj.errors.push(arg + " should has special character");
returnObj.result = false;
argState = false;
}
else {
return false;
}
}
}
if (argState && returnErrors && !argProperties[arg]['exclude-from-ret-obj']) {
returnObj.obj[arg] = args[arg];
}
else if (!returnErrors && !argProperties[arg]['exclude-from-ret-obj']) {
returnObj[arg] = args[arg];
}
}
}
if (returnErrors && !returnObj.result) {
delete returnObj.obj;
return returnObj;
}
else {
return returnObj;
}
};
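// Usage sketch (hypothetical rules, not from the original file): validate a request,
// return detailed errors, and keep the password out of the returned object.
// var validated = common.validateArgs(params.qstring, {
//     name: {required: true, type: 'String', 'min-length': 3, 'max-length': 128},
//     website: {type: 'URL'},
//     password: {required: true, type: 'String', 'has-number': true, 'has-upchar': true, 'exclude-from-ret-obj': true}
// }, true);
// if (!validated.result) {
//     common.returnMessage(params, 400, validated.errors);
// }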
/**
 * Fix event keys before storing in database by removing the "system." prefix, double dots and $ characters from the string, and rejecting keys that are too long
* @param {string} eventKey - key value to fix
* @returns {string|false} escaped key or false if not possible to use key at all
*/
common.fixEventKey = function(eventKey) {
var shortEventName = eventKey.replace(/system\.|\.\.|\$/g, "");
if (shortEventName.length >= 128) {
return false;
}
else {
return shortEventName;
}
};
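// For example (sketch): common.fixEventKey("system.purchase$") === "purchase",
// while a key of 128 or more characters yields false.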
/**
 * Block {@link module:api/utils/common.returnMessage} and {@link module:api/utils/common.returnOutput} from outputting anything
* @param {params} params - params object
*/
common.blockResponses = function(params) {
params.blockResponses = true;
};
/**
 * Unblock/allow {@link module:api/utils/common.returnMessage} and {@link module:api/utils/common.returnOutput} to output anything again
* @param {params} params - params object
*/
common.unblockResponses = function(params) {
params.blockResponses = false;
};
/**
* Custom API response handler callback
* @typedef APICallback
* @callback APICallback
* @type {function}
* @global
* @param {bool} error - true if there was problem processing request, and false if request was processed successfully
* @param {string} responseMessage - what API returns
* @param {object} headers - what API would have returned to HTTP request
* @param {number} returnCode - HTTP code, what API would have returned to HTTP request
* @param {params} params - request context that was passed to requestProcessor, modified during request processing
*/
/**
* Return raw headers and body
* @param {params} params - params object
* @param {number} returnCode - http code to use
* @param {string} body - raw data to output
* @param {object} heads - headers to add to the output
*/
common.returnRaw = function(params, returnCode, body, heads) {
params.response = {
code: returnCode,
body: body
};
if (params && params.APICallback && typeof params.APICallback === 'function') {
if (!params.blockResponses && (!params.res || !params.res.finished)) {
if (!params.res) {
params.res = {};
}
params.res.finished = true;
params.APICallback(returnCode !== 200, body, heads, returnCode, params);
}
return;
}
    //set headers provided in configuration
var headers = {};
if (heads) {
for (var i in heads) {
headers[i] = heads[i];
}
}
if (params && params.res && params.res.writeHead && !params.blockResponses) {
if (!params.res.finished) {
params.res.writeHead(returnCode, headers);
if (body) {
params.res.write(body);
}
params.res.end();
}
else {
console.error("Output already closed, can't write more");
console.trace();
console.log(params);
}
}
};
/**
* Output message as request response with provided http code
* @param {params} params - params object
* @param {number} returnCode - http code to use
* @param {string} message - Message to output, will be encapsulated in JSON object under result property
* @param {object} heads - headers to add to the output
*/
common.returnMessage = function(params, returnCode, message, heads) {
params.response = {
code: returnCode,
body: JSON.stringify({result: message}, escape_html_entities)
};
if (params && params.APICallback && typeof params.APICallback === 'function') {
if (!params.blockResponses && (!params.res || !params.res.finished)) {
if (!params.res) {
params.res = {};
}
params.res.finished = true;
params.APICallback(returnCode !== 200, JSON.stringify({result: message}), heads, returnCode, params);
}
return;
}
    //set headers provided in configuration
var headers = {
'Content-Type': 'application/json; charset=utf-8',
'Access-Control-Allow-Origin': '*'
};
var add_headers = (plugins.getConfig("security").api_additional_headers || "").replace(/\r\n|\r|\n/g, "\n").split("\n");
var parts;
for (let i = 0; i < add_headers.length; i++) {
if (add_headers[i] && add_headers[i].length) {
parts = add_headers[i].split(/:(.+)?/);
if (parts.length === 3) {
headers[parts[0]] = parts[1];
}
}
}
if (heads) {
for (let i in heads) {
headers[i] = heads[i];
}
}
if (params && params.res && params.res.writeHead && !params.blockResponses) {
if (!params.res.finished) {
params.res.writeHead(returnCode, headers);
if (params.qstring.callback) {
params.res.write(params.qstring.callback + '(' + JSON.stringify({result: message}, escape_html_entities) + ')');
}
else {
params.res.write(JSON.stringify({result: message}, escape_html_entities));
}
params.res.end();
}
else {
console.error("Output already closed, can't write more");
console.trace();
console.log(params);
}
}
};
/**
 * Output provided object as JSON request response with 200 http code
 * @param {params} params - params object
 * @param {object} output - object to stringify and output
 * @param {boolean} noescape - if true, prevent escaping HTML entities
* @param {object} heads - headers to add to the output
*/
common.returnOutput = function(params, output, noescape, heads) {
var escape = noescape ? undefined : function(k, v) {
return escape_html_entities(k, v, true);
};
params.response = {
code: 200,
body: JSON.stringify(output, escape)
};
if (params && params.APICallback && typeof params.APICallback === 'function') {
if (!params.blockResponses && (!params.res || !params.res.finished)) {
if (!params.res) {
params.res = {};
}
params.res.finished = true;
params.APICallback(false, output, heads, 200, params);
}
return;
}
    //set headers provided in configuration
var headers = {
'Content-Type': 'application/json; charset=utf-8',
'Access-Control-Allow-Origin': '*'
};
var add_headers = (plugins.getConfig("security").api_additional_headers || "").replace(/\r\n|\r|\n/g, "\n").split("\n");
var parts;
for (let i = 0; i < add_headers.length; i++) {
if (add_headers[i] && add_headers[i].length) {
parts = add_headers[i].split(/:(.+)?/);
if (parts.length === 3) {
headers[parts[0]] = parts[1];
}
}
}
if (heads) {
for (let i in heads) {
headers[i] = heads[i];
}
}
if (params && params.res && params.res.writeHead && !params.blockResponses) {
if (!params.res.finished) {
params.res.writeHead(200, headers);
if (params.qstring.callback) {
params.res.write(params.qstring.callback + '(' + JSON.stringify(output, escape) + ')');
}
else {
params.res.write(JSON.stringify(output, escape));
}
params.res.end();
}
else {
console.error("Output already closed, can't write more");
console.trace();
console.log(params);
}
}
};
var ipLogger = common.log('ip:api');
/**
* Get IP address from request object
* @param {req} req - nodejs request object
* @returns {string} ip address
*/
common.getIpAddress = function(req) {
var ipAddress = (req) ? req.headers['x-forwarded-for'] || req.headers['x-real-ip'] || req.connection.remoteAddress || req.socket.remoteAddress || (req.connection.socket ? req.connection.socket.remoteAddress : '') : "";
/* Since x-forwarded-for: client, proxy1, proxy2, proxy3 */
var ips = ipAddress.split(',');
    //if ignoreProxies is not set up, use the leftmost ip address
if (!countlyConfig.ignoreProxies || !countlyConfig.ignoreProxies.length) {
ipLogger.d("From %s found ip %s", ipAddress, ips[0]);
return stripPort(ips[0]);
}
    //search for the rightmost ip address, ignoring provided proxies
var ip = "";
for (var i = ips.length - 1; i >= 0; i--) {
ips[i] = stripPort(ips[i]);
var masks = false;
if (countlyConfig.ignoreProxies && countlyConfig.ignoreProxies.length) {
masks = countlyConfig.ignoreProxies.some(function(elem) {
return ips[i].startsWith(elem);
});
}
if (ips[i] !== "127.0.0.1" && (!countlyConfig.ignoreProxies || !masks)) {
ip = ips[i];
break;
}
}
ipLogger.d("From %s found ip %s", ipAddress, ip);
return ip;
};
/**
* This function takes ipv4 or ipv6 with possible port, removes port information and returns plain ip address
* @param {string} ip - ip address to check for port and return plain ip
* @returns {string} plain ip address
*/
function stripPort(ip) {
var parts = (ip + "").split(".");
//check if ipv4
if (parts.length === 4) {
return ip.split(":")[0].trim();
}
else {
parts = (ip + "").split(":");
if (parts.length === 9) {
parts.pop();
}
if (parts.length === 8) {
ip = parts.join(":");
//remove enclosing [] for ipv6 if they are there
if (ip[0] === "[") {
ip = ip.substring(1);
}
if (ip[ip.length - 1] === "]") {
ip = ip.slice(0, -1);
}
}
}
return (ip + "").trim();
}
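// Examples (sketch, not from the original file):
// stripPort("203.0.113.5:8080");           // => "203.0.113.5"
// stripPort("[2001:db8:0:0:0:0:0:1]:443"); // => "2001:db8:0:0:0:0:0:1"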
/**
 * Modifies provided object filling properties used in zero documents in the format object["d.2.property"] = increment.
 * Usually used when filling up Countly metric model zero document
 * @param {params} params - {@link params} object
 * @param {object} object - object to fill
 * @param {string|array} property - metric value or segment or property to fill/increment
 * @param {number=} increment - by how much to increment, default is 1
 * @returns {boolean} false if the params time object is not valid, true otherwise
* @example
* var obj = {};
* common.fillTimeObjectZero(params, obj, "u", 1);
* console.log(obj);
* //outputs
* { 'd.u': 1, 'd.2.u': 1, 'd.w8.u': 1 }
*/
common.fillTimeObjectZero = function(params, object, property, increment) {
var tmpIncrement = (increment) ? increment : 1,
timeObj = params.time;
if (!timeObj || !timeObj.yearly || !timeObj.month) {
return false;
}
if (property instanceof Array) {
for (var i = 0; i < property.length; i++) {
object['d.' + property[i]] = tmpIncrement;
object['d.' + timeObj.month + '.' + property[i]] = tmpIncrement;
// For properties that hold the unique visitor count we store weekly data as well.
if (property[i].substr(-2) === ("." + common.dbMap.unique) ||
property[i] === common.dbMap.unique ||
property[i].substr(0, 2) === (common.dbMap.frequency + ".") ||
property[i].substr(0, 2) === (common.dbMap.loyalty + ".") ||
property[i].substr(0, 3) === (common.dbMap.durations + ".") ||
property[i] === common.dbMap.paying) {
object['d.' + "w" + timeObj.weekly + '.' + property[i]] = tmpIncrement;
}
}
}
else {
object['d.' + property] = tmpIncrement;
object['d.' + timeObj.month + '.' + property] = tmpIncrement;
if (property.substr(-2) === ("." + common.dbMap.unique) ||
property === common.dbMap.unique ||
property.substr(0, 2) === (common.dbMap.frequency + ".") ||
property.substr(0, 2) === (common.dbMap.loyalty + ".") ||
property.substr(0, 3) === (common.dbMap.durations + ".") ||
property === common.dbMap.paying) {
object['d.' + "w" + timeObj.weekly + '.' + property] = tmpIncrement;
}
}
return true;
};
/**
 * Modifies provided object filling properties used in monthly documents in the format object["d.23.12.property"] = increment.
 * Usually used when filling up Countly metric model monthly document
 * @param {params} params - {@link params} object
 * @param {object} object - object to fill
 * @param {string|array} property - metric value or segment or property to fill/increment
 * @param {number=} increment - by how much to increment, default is 1
 * @param {boolean=} forceHour - force recording hour information too, default is false
 * @returns {boolean} false if the params time object is not valid, true otherwise
* @example
* var obj = {};
* common.fillTimeObjectMonth(params, obj, "u", 1);
* console.log(obj);
* //outputs
* { 'd.23.u': 1, 'd.23.12.u': 1 }
*/
common.fillTimeObjectMonth = function(params, object, property, increment, forceHour) {
var tmpIncrement = (increment) ? increment : 1,
timeObj = params.time;
if (!timeObj || !timeObj.yearly || !timeObj.month || !timeObj.weekly || !timeObj.day || !timeObj.hour) {
return false;
}
if (property instanceof Array) {
for (var i = 0; i < property.length; i++) {
object['d.' + timeObj.day + '.' + property[i]] = tmpIncrement;
// If the property parameter contains a dot, hourly data is not saved in
// order to prevent two level data (such as 2012.7.20.TR.u) to get out of control.
if (forceHour || property[i].indexOf('.') === -1) {
object['d.' + timeObj.day + '.' + timeObj.hour + '.' + property[i]] = tmpIncrement;
}
}
}
else {
object['d.' + timeObj.day + '.' + property] = tmpIncrement;
if (forceHour || property.indexOf('.') === -1) {
object['d.' + timeObj.day + '.' + timeObj.hour + '.' + property] = tmpIncrement;
}
}
return true;
};
/**
* Record data in Countly standard metric model
* Can be used by plugins to record data, similar to sessions and users, with optional segments
* @param {params} params - {@link params} object
* @param {string} collection - name of the collections where to store data
* @param {string} id - id to prefix document ids, like app_id or segment id, etc
* @param {array} metrics - array of metrics to record, as ["u","t", "n"]
* @param {number=} value - value to increment all metrics for, default 1
* @param {object} segments - object with segments to record data, key segment name and value segment value
* @param {array} uniques - names of the metrics, which should be treated as unique, and stored in 0 docs and be estimated on output
 * @param {number} lastTimestamp - timestamp in seconds used to determine if the unique metric is unique for the specific period
* @example
* //recording attribution
* common.recordCustomMetric(params, "campaigndata", campaignId, ["clk", "aclk"], 1, {pl:"Android", brw:"Chrome"}, ["clk"], user["last_click"]);
*/
common.recordCustomMetric = function(params, collection, id, metrics, value, segments, uniques, lastTimestamp) {
value = value || 1;
var updateUsersZero = {},
updateUsersMonth = {},
tmpSet = {};
if (metrics) {
for (let i = 0; i < metrics.length; i++) {
recordMetric(params, metrics[i], {
segments: segments,
value: value,
unique: (uniques && uniques.indexOf(metrics[i]) !== -1) ? true : false,
lastTimestamp: lastTimestamp
},
tmpSet, updateUsersZero, updateUsersMonth);
}
}
var dbDateIds = common.getDateIds(params);
if (Object.keys(updateUsersZero).length || Object.keys(tmpSet).length) {
var update = {
$set: {
m: dbDateIds.zero,
a: params.app_id + ""
}
};
if (Object.keys(updateUsersZero).length) {
update.$inc = updateUsersZero;
}
if (Object.keys(tmpSet).length) {
update.$addToSet = {};
for (let i in tmpSet) {
update.$addToSet[i] = {$each: tmpSet[i]};
}
}
common.db.collection(collection).update({'_id': id + "_" + dbDateIds.zero}, update, {'upsert': true}, function() {});
}
if (Object.keys(updateUsersMonth).length) {
common.db.collection(collection).update({'_id': id + "_" + dbDateIds.month}, {
$set: {
m: dbDateIds.month,
a: params.app_id + ""
},
'$inc': updateUsersMonth
}, {'upsert': true}, function() {});
}
};
/**
* Record data in Countly standard metric model
* Can be used by plugins to record data, similar to sessions and users, with optional segments
* @param {params} params - {@link params} object
* @param {object} props - object defining what to record
* @param {string} props.collection - name of the collections where to store data
* @param {string} props.id - id to prefix document ids, like app_id or segment id, etc
* @param {object} props.metrics - object defining metrics to record, using key as metric name and value object for segmentation, unique, etc
* @param {number=} props.metrics[].value - value to increment current metric for, default 1
* @param {object} props.metrics[].segments - object with segments to record data, key segment name and value segment value or array of segment values
* @param {boolean} props.metrics[].unique - if metric should be treated as unique, and stored in 0 docs and be estimated on output
 * @param {number} props.metrics[].lastTimestamp - timestamp in seconds used to determine if the unique metric is unique for the specific period
* @param {array} props.metrics[].hourlySegments - array of segments that should have hourly data too (by default hourly data not recorded for segments)
 * @example
 * //recording attribution with a unique metric, using the props object this function expects
 * common.recordMetric(params, {
 *     collection: "campaigndata",
 *     id: campaignId,
 *     metrics: {
 *         clk: {value: 1, segments: {pl: "Android", brw: "Chrome"}, unique: true, lastTimestamp: user.last_click},
 *         aclk: {value: 1, segments: {pl: "Android", brw: "Chrome"}}
 *     }
 * });
*/
common.recordMetric = function(params, props) {
var updateUsersZero = {},
updateUsersMonth = {},
tmpSet = {};
for (let i in props.metrics) {
props.metrics[i].value = props.metrics[i].value || 1;
recordMetric(params, i, props.metrics[i], tmpSet, updateUsersZero, updateUsersMonth);
}
var dbDateIds = common.getDateIds(params);
if (Object.keys(updateUsersZero).length || Object.keys(tmpSet).length) {
var update = {
$set: {
m: dbDateIds.zero,
a: params.app_id + ""
}
};
if (Object.keys(updateUsersZero).length) {
update.$inc = updateUsersZero;
}
if (Object.keys(tmpSet).length) {
update.$addToSet = {};
for (let i in tmpSet) {
update.$addToSet[i] = {$each: tmpSet[i]};
}
}
common.db.collection(props.collection).update({'_id': props.id + "_" + dbDateIds.zero}, update, {'upsert': true}, function() {});
}
if (Object.keys(updateUsersMonth).length) {
common.db.collection(props.collection).update({'_id': props.id + "_" + dbDateIds.month}, {
$set: {
m: dbDateIds.month,
a: params.app_id + ""
},
'$inc': updateUsersMonth
}, {'upsert': true}, function() {});
}
};
/**
* Record specific metric
* @param {params} params - params object
* @param {string} metric - metric to record
* @param {object} props - properties of a metric defining how to record it
* @param {object} tmpSet - object with already set meta properties
* @param {object} updateUsersZero - object with already set update for zero docs
* @param {object} updateUsersMonth - object with already set update for months docs
**/
function recordMetric(params, metric, props, tmpSet, updateUsersZero, updateUsersMonth) {
var zeroObjUpdate = [],
monthObjUpdate = [];
if (props.unique) {
if (props.lastTimestamp) {
var currDate = common.getDate(params.time.timestamp, params.appTimezone),
lastDate = common.getDate(props.lastTimestamp, params.appTimezone),
secInMin = (60 * (currDate.getMinutes())) + currDate.getSeconds(),
secInHour = (60 * 60 * (currDate.getHours())) + secInMin,
secInMonth = (60 * 60 * 24 * (currDate.getDate() - 1)) + secInHour,
secInYear = (60 * 60 * 24 * (common.getDOY(params.time.timestamp, params.appTimezone) - 1)) + secInHour;
if (props.lastTimestamp < (params.time.timestamp - secInMin)) {
updateUsersMonth['d.' + params.time.day + '.' + params.time.hour + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInHour)) {
updateUsersMonth['d.' + params.time.day + '.' + metric] = props.value;
}
if (lastDate.getFullYear() + "" === params.time.yearly + "" &&
Math.ceil(common.moment(lastDate).tz(params.appTimezone).format("DDD") / 7) < params.time.weekly) {
updateUsersZero["d.w" + params.time.weekly + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInMonth)) {
updateUsersZero['d.' + params.time.month + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInYear)) {
updateUsersZero['d.' + metric] = props.value;
}
}
else {
common.fillTimeObjectZero(params, updateUsersZero, metric, props.value);
common.fillTimeObjectMonth(params, updateUsersMonth, metric, props.value);
}
}
else {
zeroObjUpdate.push(metric);
monthObjUpdate.push(metric);
}
if (props.segments) {
for (var j in props.segments) {
if (Array.isArray(props.segments[j])) {
for (var k = 0; k < props.segments[j].length; k++) {
recordSegmentMetric(params, metric, j, props.segments[j][k], props, tmpSet, updateUsersZero, updateUsersMonth, zeroObjUpdate, monthObjUpdate);
}
}
else if (props.segments[j]) {
recordSegmentMetric(params, metric, j, props.segments[j], props, tmpSet, updateUsersZero, updateUsersMonth, zeroObjUpdate, monthObjUpdate);
}
}
}
common.fillTimeObjectZero(params, updateUsersZero, zeroObjUpdate, props.value);
common.fillTimeObjectMonth(params, updateUsersMonth, monthObjUpdate, props.value);
}
/**
* Record specific metric segment
* @param {params} params - params object
* @param {string} metric - metric to record
* @param {string} name - name of the segment to record
* @param {string} val - value of the segment to record
* @param {object} props - properties of a metric defining how to record it
* @param {object} tmpSet - object with already set meta properties
* @param {object} updateUsersZero - object with already set update for zero docs
* @param {object} updateUsersMonth - object with already set update for months docs
 * @param {array} zeroObjUpdate - segments to fill for zero docs
* @param {array} monthObjUpdate - segments to fill for months docs
**/
function recordSegmentMetric(params, metric, name, val, props, tmpSet, updateUsersZero, updateUsersMonth, zeroObjUpdate, monthObjUpdate) {
var escapedMetricKey = name.replace(/^\$/, "").replace(/\./g, ":");
var escapedMetricVal = (val + "").replace(/^\$/, "").replace(/\./g, ":");
if (!tmpSet["meta." + escapedMetricKey]) {
tmpSet["meta." + escapedMetricKey] = [];
}
tmpSet["meta." + escapedMetricKey].push(escapedMetricVal);
var recordHourly = (props.hourlySegments && props.hourlySegments.indexOf(name) !== -1) ? true : false;
if (props.unique) {
if (props.lastTimestamp) {
var currDate = common.getDate(params.time.timestamp, params.appTimezone),
lastDate = common.getDate(props.lastTimestamp, params.appTimezone),
secInMin = (60 * (currDate.getMinutes())) + currDate.getSeconds(),
secInHour = (60 * 60 * (currDate.getHours())) + secInMin,
secInMonth = (60 * 60 * 24 * (currDate.getDate() - 1)) + secInHour,
secInYear = (60 * 60 * 24 * (common.getDOY(params.time.timestamp, params.appTimezone) - 1)) + secInHour;
if (props.lastTimestamp < (params.time.timestamp - secInMin)) {
updateUsersMonth['d.' + params.time.day + '.' + params.time.hour + '.' + escapedMetricVal + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInHour)) {
updateUsersMonth['d.' + params.time.day + '.' + escapedMetricVal + '.' + metric] = props.value;
}
if (lastDate.getFullYear() + "" === params.time.yearly + "" &&
Math.ceil(common.moment(lastDate).tz(params.appTimezone).format("DDD") / 7) < params.time.weekly) {
updateUsersZero["d.w" + params.time.weekly + '.' + escapedMetricVal + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInMonth)) {
updateUsersZero['d.' + params.time.month + '.' + escapedMetricVal + '.' + metric] = props.value;
}
if (props.lastTimestamp < (params.time.timestamp - secInYear)) {
updateUsersZero['d.' + escapedMetricVal + '.' + metric] = props.value;
}
}
else {
common.fillTimeObjectZero(params, updateUsersZero, escapedMetricVal + '.' + metric, props.value);
common.fillTimeObjectMonth(params, updateUsersMonth, escapedMetricVal + '.' + metric, props.value, recordHourly);
}
}
else {
if (recordHourly) {
common.fillTimeObjectZero(params, updateUsersZero, escapedMetricVal + '.' + metric, props.value);
common.fillTimeObjectMonth(params, updateUsersMonth, escapedMetricVal + '.' + metric, props.value, recordHourly);
}
else {
zeroObjUpdate.push(escapedMetricVal + "." + metric);
monthObjUpdate.push(escapedMetricVal + "." + metric);
}
}
}
/**
* Get object of date ids that should be used in fetching standard metric model documents
* @param {params} params - {@link params} object
* @returns {object} with date ids, as {zero:"2017:0", month:"2017:2"}
*/
common.getDateIds = function(params) {
if (!params || !params.time) {
return {
zero: "0000:0",
month: "0000:1"
};
}
return {
zero: params.time.yearly + ":0",
month: params.time.yearly + ":" + params.time.month
};
};
/**
 * Get difference between 2 momentjs instances in specific measurement
* @param {moment} moment1 - momentjs with start date
* @param {moment} moment2 - momentjs with end date
* @param {string} measure - units of difference, can be minutes, hours, days, weeks
* @returns {number} difference in provided units
*/
common.getDiff = function(moment1, moment2, measure) {
var divider = 1;
switch (measure) {
case "minutes":
divider = 60;
break;
case "hours":
divider = 60 * 60;
break;
case "days":
divider = 60 * 60 * 24;
break;
case "weeks":
divider = 60 * 60 * 24 * 7;
break;
}
return Math.floor((moment1.unix() - moment2.unix()) / divider);
};
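// Sketch: for two moments three hours apart,
// common.getDiff(now, threeHoursAgo, "minutes") === 180 and
// common.getDiff(now, threeHoursAgo, "days") === 0 (the result is floored).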
/**
* Compares two version strings with : as delimiter (which we used to escape dots in app versions)
* @param {string} v1 - first version
* @param {string} v2 - second version
 * @param {object} options - providing additional options
 * @param {string} options.delimiter - delimiter between version, subversion, etc, defaults to ":"
 * @param {boolean} options.zeroExtend - changes the result if one version string has less parts than the other. In this case the shorter string will be padded with "zero" parts instead of being considered smaller.
 * @param {boolean} options.lexicographical - compares each part of the version strings lexicographically instead of naturally; this allows suffixes such as "b" or "dev" but will cause "1.10" to be considered smaller than "1.2".
 * @returns {number} 0 if they are both the same, 1 if the first one is higher and -1 if the second one is higher; NaN if either version is malformed
*/
common.versionCompare = function(v1, v2, options) {
var lexicographical = options && options.lexicographical,
zeroExtend = options && options.zeroExtend,
delimiter = options && options.delimiter || ":",
v1parts = v1.split(delimiter),
v2parts = v2.split(delimiter);
/**
* Check if provided version is correct
* @param {string} x - version to test
* @returns {boolean} if version is correct
**/
function isValidPart(x) {
return (lexicographical ? /^\d+[A-Za-z]*$/ : /^\d+$/).test(x);
}
if (!v1parts.every(isValidPart) || !v2parts.every(isValidPart)) {
return NaN;
}
if (zeroExtend) {
while (v1parts.length < v2parts.length) {
v1parts.push("0");
}
while (v2parts.length < v1parts.length) {
v2parts.push("0");
}
}
if (!lexicographical) {
v1parts = v1parts.map(Number);
v2parts = v2parts.map(Number);
}
for (var i = 0; i < v1parts.length; ++i) {
if (v2parts.length === i) {
return 1;
}
if (v1parts[i] === v2parts[i]) {
continue;
}
else if (v1parts[i] > v2parts[i]) {
return 1;
}
else {
return -1;
}
}
if (v1parts.length !== v2parts.length) {
return -1;
}
return 0;
};
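// Examples (sketch, not from the original file):
// common.versionCompare("2:10:0", "2:9:9");                  // => 1 (natural compare, 10 > 9)
// common.versionCompare("1:2", "1:2:1");                     // => -1 (second one has more parts)
// common.versionCompare("1:2", "1:2:0", {zeroExtend: true}); // => 0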
/**
* Adjust timestamp with app's timezone for timestamp queries that should equal bucket results
 * @param {number} ts - milliseconds timestamp
* @param {string} tz - timezone
* @returns {number} adjusted timestamp for timezone
*/
common.adjustTimestampByTimezone = function(ts, tz) {
var d = new Date();
d.setTimezone(tz);
return ts - (d.getTimezoneOffset() * 60);
};
/**
 * Getter/setter for dot notation paths:
* @param {object} obj - object to use
* @param {string} is - path of properties to get
* @param {varies} value - value to set
* @returns {varies} value at provided path
* @example
 * var o = {a: {b: {c: 'string'}}};
 * common.dot(o, 'a.b.c') === 'string'
 * common.dot(o, ['a', 'b', 'c']) === 'string'
 * common.dot(o, 'a.b.c', 5) === 5
 * common.dot(o, 'a.b.c') === 5 // the previous call changed the stored value
*/
common.dot = function(obj, is, value) {
if (typeof is === 'string') {
return common.dot(obj, is.split('.'), value);
}
else if (is.length === 1 && value !== undefined) {
obj[is[0]] = value;
return value;
}
else if (is.length === 0) {
return obj;
}
else if (!obj) {
return obj;
}
else {
return common.dot(obj[is[0]], is.slice(1), value);
}
};
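// Note (sketch): missing intermediate objects are not created by the setter;
// common.dot({}, 'a.b.c', 5) returns undefined and sets nothing, while
// common.dot({a: {b: {}}}, 'a.b.c', 5) returns 5.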
/**
 * Shallow (not deep) object and primitive type comparison function
*
* @param {Any} a object to compare
* @param {Any} b object to compare
 * @param {Boolean} checkFromA true if check should be performed against keys of a only, resulting in true even if b has more keys
* @return {Boolean} true if objects are equal, false if different types or not equal
*/
common.equal = function(a, b, checkFromA) {
if (a === b) {
return true;
}
else if (typeof a !== typeof b) {
return false;
}
else if ((a === null && b !== null) || (a !== null && b === null)) {
return false;
}
else if ((a === undefined && b !== undefined) || (a !== undefined && b === undefined)) {
return false;
}
else if (typeof a === 'object') {
if (!checkFromA && Object.keys(a).length !== Object.keys(b).length) {
return false;
}
for (let k in a) {
if (a[k] !== b[k]) {
return false;
}
}
return true;
}
else {
return false;
}
};
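// Examples (sketch): the comparison is shallow,
// common.equal({a: 1}, {a: 1});             // => true
// common.equal({a: 1}, {a: 1, b: 2});       // => false (different key counts)
// common.equal({a: 1}, {a: 1, b: 2}, true); // => true (only keys of a are checked)
// common.equal({a: {}}, {a: {}});           // => false (nested objects compared by reference)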
/**
* Returns plain object with key set to value
 * @param {varies} arguments - every odd argument will be used as a key and every even argument as the value for the preceding key
* @returns {object} new object with set key/value properties
*/
common.o = function() {
var o = {};
for (var i = 0; i < arguments.length; i += 2) {
o[arguments[i]] = arguments[i + 1];
}
return o;
};
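// e.g. common.o("period", "30days", "action", "update") returns {period: "30days", action: "update"}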
/**
* Return index of array with objects where property = value
* @param {array} array - array where to search value
* @param {string} property - property where to look for value
* @param {varies} value - value you are searching for
* @returns {number} index of the array
*/
common.indexOf = function(array, property, value) {
for (var i = 0; i < array.length; i += 1) {
if (array[i][property] === value) {
return i;
}
}
return -1;
};
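// e.g. common.indexOf([{name: "a"}, {name: "b"}], "name", "b") returns 1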
/**
* Optionally load module if it exists
* @param {string} module - module name
 * @param {object} options - additional options
 * @param {boolean} options.rethrow - throw exception if there is some other error than the module not being found
 * @returns {object|null} module exports if the module could be loaded, null otherwise
*/
common.optional = function(module, options) {
try {
if (module[0] in {'.': 1}) {
module = process.cwd() + module.substr(1);
}
return require(module);
}
catch (err) {
if (err.code !== 'MODULE_NOT_FOUND' && options && options.rethrow) {
throw err;
}
}
return null;
};
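// Sketch: load an optional dependency without crashing when it is absent
// var geoip = common.optional("geoip-lite");
// if (geoip) {
//     //safe to use the module here
// }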
/**
* Create promise for function which result should be checked periodically
* @param {function} func - function to run when verifying result, should return true if success
* @param {number} count - how many times to run the func before giving up, if result is always negative
 * @param {number} interval - how often to retest function on negative result in milliseconds
* @returns {Promise} promise for checking task
*/
common.checkPromise = function(func, count, interval) {
return new Promise((resolve, reject) => {
/**
* Check promise
**/
function check() {
if (func()) {
resolve();
}
else if (count <= 0) {
reject('Timed out');
}
else {
count--;
setTimeout(check, interval);
}
}
check();
});
};
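// Sketch (hypothetical flag): poll every 500 ms, give up after 10 tries
// common.checkPromise(function() { return cache.ready === true; }, 10, 500)
//     .then(function() { /*continue*/ })
//     .catch(function(err) { /*err === "Timed out"*/ });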
/**
* Single method to update app_users document for specific user for SDK requests
* @param {params} params - params object
* @param {object} update - update query for mongodb, should contain operators on highest level, as $set or $unset
* @param {boolean} no_meta - if true, won't update some auto meta data, like first api call, last api call, etc.
 * @param {function} callback - function to run when update is done or fails, passing error and result as arguments
*/
common.updateAppUser = function(params, update, no_meta, callback) {
    //backwards compatibility
if (typeof no_meta === "function") {
callback = no_meta;
no_meta = false;
}
if (Object.keys(update).length) {
for (var i in update) {
if (i.indexOf("$") !== 0) {
let err = "Unkown modifier " + i + " in " + update + " for " + params.href;
console.log(err);
if (callback) {
callback(err);
}
return;
}
}
var user = params.app_user || {};
if (!no_meta && !params.qstring.no_meta) {
if (typeof user.fac === "undefined") {
if (!update.$setOnInsert) {
update.$setOnInsert = {};
}
if (!update.$setOnInsert.fac) {
update.$setOnInsert.fac = params.time.mstimestamp;
}
}
if (typeof user.lac === "undefined" || user.lac < params.time.mstimestamp) {
if (!update.$set) {
update.$set = {};
}
if (!update.$set.lac) {
update.$set.lac = params.time.mstimestamp;
}
}
}
if (params.qstring.device_id && typeof user.did === "undefined") {
if (!update.$set) {
update.$set = {};
}
if (!update.$set.did) {
update.$set.did = params.qstring.device_id;
}
}
if (plugins.getConfig("api", params.app && params.app.plugins, true).prevent_duplicate_requests && user.last_req !== params.request_hash) {
if (!update.$set) {
update.$set = {};
}
update.$set.last_req = params.request_hash;
}
common.db.collection('app_users' + params.app_id).findAndModify({'_id': params.app_user_id}, {}, update, {
new: true,
upsert: true
}, function(err, res) {
if (!err && res && res.value) {
params.app_user = res.value;
}
if (callback) {
callback(err, res);
}
});
}
else if (callback) {
callback();
}
};
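// Sketch (hypothetical field): top level keys must be mongodb update operators
// common.updateAppUser(params, {$set: {hadAnyFatalCrash: true}}, function(err, res) {
//     //params.app_user now holds the updated document when the update succeeded
// });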
/**
* Update carrier from metrics to convert mnc/mcc code to carrier name
* @param {object} metrics - metrics object from SDK request
*/
common.processCarrier = function(metrics) {
if (metrics && metrics._carrier) {
var carrier = metrics._carrier + "";
//random hash without spaces
if (carrier.length === 16 && carrier.indexOf(" ") === -1) {
delete metrics._carrier;
return;
}
//random code
if ((carrier.length === 5 || carrier.length === 6) && /^[0-9]+$/.test(carrier)) {
//check if mcc and mnc match some operator
var arr = mcc_mnc_list.filter({ mccmnc: carrier });
if (arr && arr.length && (arr[0].brand || arr[0].operator)) {
carrier = arr[0].brand || arr[0].operator;
}
else {
delete metrics._carrier;
return;
}
}
carrier = carrier.replace(/\w\S*/g, function(txt) {
return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();
});
metrics._carrier = carrier;
}
};
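// Sketch: a numeric code such as {_carrier: "24201"} is replaced with the brand
// resolved through mcc-mnc-list (here presumably "Telenor"); unresolvable codes are removed.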
/**
* Parse Sequence
* @param {number} num - sequence number for id
* @returns {string} converted to base 62 number
*/
common.parseSequence = (num) => {
const valSeq = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"];
const digits = [];
const base = valSeq.length;
let result = "";
while (num > base - 1) {
digits.push(num % base);
num = Math.floor(num / base);
}
digits.push(num);
for (let i = digits.length - 1; i >= 0; --i) {
result = result + valSeq[digits[i]];
}
return result;
};
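// common.parseSequence(0);  // => "0"
// common.parseSequence(61); // => "Z"
// common.parseSequence(62); // => "10" (base 62 rollover)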
/**
* Promise that tries to catch errors
* @param {function} f function which is usually passed to Promise constructor
* @return {Promise} Promise with constructor catching errors by rejecting the promise
*/
common.p = f => {
return new Promise((res, rej) => {
try {
f(res, rej);
}
catch (e) {
rej(e);
}
});
};
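// Sketch: a throw inside the executor rejects the promise instead of crashing
// common.p(function(resolve) { throw new Error("boom"); }).catch(function(e) { /*handled*/ });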
/**
* Revive json encoded data, as for example, regular expressions
* @param {string} key - key of json object
* @param {vary} value - value of json object
* @returns {vary} modified value, if it had revivable data
*/
common.reviver = (key, value) => {
if (value.toString().indexOf("__REGEXP ") === 0) {
const m = value.split("__REGEXP ")[1].match(/\/(.*)\/(.*)?/);
return new RegExp(m[1], m[2] || "");
}
else {
return value;
}
};
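// Sketch: restores a regular expression serialized as "__REGEXP /source/flags"
// JSON.parse('{"rule": "__REGEXP /^test$/i"}', common.reviver).rule; // => /^test$/i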
/**
* Check db host match for both of API and Frontend config
* @param {object} apiConfig - mongodb object from API config
* @param {object} frontendConfig - mongodb object from Frontend config
* @returns {boolean} isMatched - is config correct?
*/
common.checkDatabaseConfigMatch = (apiConfig, frontendConfig) => {
if (typeof apiConfig === typeof frontendConfig) {
if (typeof apiConfig === "string") {
// mongodb://mongodb0.example.com:27017/admin
if (!apiConfig.includes("@") && !frontendConfig.includes("@")) {
// mongodb0.example.com:27017
let apiMongoHost = apiConfig.split("/")[2];
let frontendMongoHost = frontendConfig.split("/")[2];
let apiMongoDb = apiConfig.split("/")[3];
let frontendMongoDb = frontendConfig.split("/")[3];
if (apiMongoHost === frontendMongoHost && apiMongoDb === frontendMongoDb) {
return true;
}
else {
return false;
}
}
        //mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/admin
else if (apiConfig.includes("@") && frontendConfig.includes("@")) {
let apiMongoHost = apiConfig.split("@")[1].split("/")[0];
let apiMongoDb = apiConfig.split("@")[1].split("/")[1];
let frontendMongoHost = frontendConfig.split("@")[1].split("/")[0];
let frontendMongoDb = frontendConfig.split("@")[1].split("/")[1];
if (apiMongoHost === frontendMongoHost && apiMongoDb === frontendMongoDb) {
return true;
}
else {
return false;
}
}
else {
return false;
}
}
else if (typeof apiConfig === "object") {
/**
* {
* mongodb: {
* host: 'localhost',
*
* }
* }
*/
if (apiConfig.hasOwnProperty('host') && frontendConfig.hasOwnProperty('host')) {
if (apiConfig.host === frontendConfig.host && apiConfig.db === frontendConfig.db) {
return true;
}
else {
return false;
}
}
/**
* {
* mongodb: {
* replSetServers: [
* '192.168.3.1:27017',
         *      '192.168.3.2:27017'
* ]
* }
* }
*/
else if (apiConfig.hasOwnProperty('replSetServers') && frontendConfig.hasOwnProperty('replSetServers')) {
if (apiConfig.replSetServers.length === frontendConfig.replSetServers.length && apiConfig.db === frontendConfig.db) {
let isCorrect = true;
for (let i = 0; i < apiConfig.replSetServers.length; i++) {
if (apiConfig.replSetServers[i] !== frontendConfig.replSetServers[i]) {
isCorrect = false;
}
}
return isCorrect;
}
else {
return false;
}
}
else {
return false;
}
}
else {
return false;
}
}
else {
return false;
}
};
module.exports = common; | 1 | 13,268 | This would still crash on malformed database connection string like "test" | Countly-countly-server | js |
@@ -0,0 +1,14 @@
+package interfaces
+
+import "context"
+
+type pluginNameKey struct{}
+
+func PluginNameFromHostServiceContext(ctx context.Context) (string, bool) {
+ name, ok := ctx.Value(pluginNameKey{}).(string)
+ return name, ok
+}
+
+func WithPluginName(ctx context.Context, name string) context.Context {
+ return context.WithValue(ctx, pluginNameKey{}, name)
+} | 1 | 1 | 10,846 | it is not an interface, maybe we can move to another package? | spiffe-spire | go |
|
@@ -4,14 +4,15 @@ import { stopPropagation, stopImmediatePropagation, isImmediatePropagationStoppe
import { getEditorInstance } from './editors';
import EventManager from './eventManager';
import { EditorState } from './editors/_baseEditor';
+import { getParentWindow } from './helpers/dom/element';
class EditorManager {
/**
* @param {Handsontable} instance
- * @param {GridSettings} priv
+ * @param {GridSettings} tableMeta
* @param {Selection} selection
*/
- constructor(instance, priv, selection) {
+ constructor(instance, tableMeta, selection) {
/**
* Instance of {@link Handsontable}
* | 1 | import { CellCoords } from './3rdparty/walkontable/src';
import { KEY_CODES, isMetaKey, isCtrlMetaKey } from './helpers/unicode';
import { stopPropagation, stopImmediatePropagation, isImmediatePropagationStopped } from './helpers/dom/event';
import { getEditorInstance } from './editors';
import EventManager from './eventManager';
import { EditorState } from './editors/_baseEditor';
class EditorManager {
/**
* @param {Handsontable} instance
* @param {GridSettings} priv
* @param {Selection} selection
*/
constructor(instance, priv, selection) {
/**
* Instance of {@link Handsontable}
*
* @private
* @type {Handsontable}
*/
this.instance = instance;
/**
* Reference to an instance's private GridSettings object.
*
* @private
* @type {GridSettings}
*/
this.priv = priv;
/**
* Instance of {@link Selection}
*
* @private
* @type {Selection}
*/
this.selection = selection;
/**
* Instance of {@link EventManager}.
*
* @private
* @type {EventManager}
*/
this.eventManager = new EventManager(instance);
/**
* Determines if EditorManager is destroyed.
*
* @private
* @type {Boolean}
*/
this.destroyed = false;
/**
* Determines if EditorManager is locked.
*
* @private
* @type {Boolean}
*/
this.lock = false;
/**
* A reference to an instance of the activeEditor.
*
* @private
* @type {*}
*/
this.activeEditor = void 0;
this.instance.addHook('afterDocumentKeyDown', event => this.onAfterDocumentKeyDown(event));
this.eventManager.addEventListener(this.instance.rootDocument.documentElement, 'keydown', (event) => {
if (!this.destroyed) {
this.instance.runHooks('afterDocumentKeyDown', event);
}
});
// Open editor when text composition is started (IME editor)
this.eventManager.addEventListener(this.instance.rootDocument.documentElement, 'compositionstart', (event) => {
if (!this.destroyed && this.activeEditor && !this.activeEditor.isOpened() && this.instance.isListening()) {
this.openEditor('', event);
}
});
this.instance.view.wt.update('onCellDblClick', (event, coords, elem) => this.onCellDblClick(event, coords, elem));
}
/**
   * Lock the editor from being prepared and closed. Locking the editor prevents it from being
   * closed and reinitialized after selecting a new cell. This feature is necessary for a mobile editor.
*/
lockEditor() {
this.lock = true;
}
/**
* Unlock the editor from being prepared and closed. This method restores the original behavior of
* the editors where for every new selection its instances are closed.
*/
unlockEditor() {
this.lock = false;
}
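  // Usage sketch (not from the original file): a mobile editor keeps itself open
  // across selection changes by calling lockEditor() before the selection moves
  // and unlockEditor() once its own interaction is finished.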
/**
* Destroy current editor, if exists.
*
* @param {Boolean} revertOriginal
*/
destroyEditor(revertOriginal) {
if (!this.lock) {
this.closeEditor(revertOriginal);
}
}
/**
* Get active editor.
*
* @returns {*}
*/
getActiveEditor() {
return this.activeEditor;
}
/**
* Prepare text input to be displayed at given grid cell.
*/
prepareEditor() {
if (this.lock) {
return;
}
if (this.activeEditor && this.activeEditor.isWaiting()) {
this.closeEditor(false, false, (dataSaved) => {
if (dataSaved) {
this.prepareEditor();
}
});
return;
}
const { row, col } = this.instance.selection.selectedRange.current().highlight;
const prop = this.instance.colToProp(col);
const originalValue = this.instance.getSourceDataAtCell(this.instance.runHooks('modifyRow', row), col);
const cellProperties = this.instance.getCellMeta(row, col);
const editorClass = this.instance.getCellEditor(cellProperties);
if (editorClass) {
this.activeEditor = getEditorInstance(editorClass, this.instance);
const td = this.activeEditor.getEditedCell();
this.activeEditor.prepare(row, col, prop, td, originalValue, cellProperties);
} else {
this.activeEditor = void 0;
}
}
/**
   * Check whether the editor is opened/shown.
*
* @returns {Boolean}
*/
isEditorOpened() {
return this.activeEditor && this.activeEditor.isOpened();
}
/**
* Open editor with initial value.
*
   * @param {null|String} newInitialValue new value with which the editor will start; when `null`, the current cell value is used.
* @param {Event} event
*/
openEditor(newInitialValue, event) {
if (!this.activeEditor) {
return;
}
const readOnly = this.activeEditor.cellProperties.readOnly;
if (readOnly) {
// move the selection after opening the editor with ENTER key
if (event && event.keyCode === KEY_CODES.ENTER) {
this.moveSelectionAfterEnter();
}
} else {
this.activeEditor.beginEditing(newInitialValue, event);
}
}
/**
* Close editor, finish editing cell.
*
* @param {Boolean} restoreOriginalValue
* @param {Boolean} [isCtrlPressed]
* @param {Function} [callback]
*/
closeEditor(restoreOriginalValue, isCtrlPressed, callback) {
if (this.activeEditor) {
this.activeEditor.finishEditing(restoreOriginalValue, isCtrlPressed, callback);
} else if (callback) {
callback(false);
}
}
/**
* Close editor and save changes.
*
* @param {Boolean} isCtrlPressed
*/
closeEditorAndSaveChanges(isCtrlPressed) {
this.closeEditor(false, isCtrlPressed);
}
/**
* Close editor and restore original value.
*
* @param {Boolean} isCtrlPressed
*/
closeEditorAndRestoreOriginalValue(isCtrlPressed) {
return this.closeEditor(true, isCtrlPressed);
}
/**
   * Controls selection's behaviour after pressing `Enter`.
*
* @private
* @param {Boolean} isShiftPressed
*/
moveSelectionAfterEnter(isShiftPressed) {
    // note: `enterMoves(event)` relies on the deprecated implicit global `window.event`, as no event object is in scope here
    const enterMoves = typeof this.priv.settings.enterMoves === 'function' ? this.priv.settings.enterMoves(event) : this.priv.settings.enterMoves;
if (isShiftPressed) {
// move selection up
this.selection.transformStart(-enterMoves.row, -enterMoves.col);
} else {
// move selection down (add a new row if needed)
this.selection.transformStart(enterMoves.row, enterMoves.col, true);
}
}
/**
   * Controls selection's behaviour after pressing `arrow up`.
*
* @private
* @param {Boolean} isShiftPressed
*/
moveSelectionUp(isShiftPressed) {
if (isShiftPressed) {
this.selection.transformEnd(-1, 0);
} else {
this.selection.transformStart(-1, 0);
}
}
/**
   * Controls selection's behaviour after pressing `arrow down`.
*
* @private
* @param {Boolean} isShiftPressed
*/
moveSelectionDown(isShiftPressed) {
if (isShiftPressed) {
// expanding selection down with shift
this.selection.transformEnd(1, 0);
} else {
this.selection.transformStart(1, 0);
}
}
/**
   * Controls selection's behaviour after pressing `arrow right`.
*
* @private
* @param {Boolean} isShiftPressed
*/
moveSelectionRight(isShiftPressed) {
if (isShiftPressed) {
this.selection.transformEnd(0, 1);
} else {
this.selection.transformStart(0, 1);
}
}
/**
   * Controls selection's behaviour after pressing `arrow left`.
*
* @private
* @param {Boolean} isShiftPressed
*/
moveSelectionLeft(isShiftPressed) {
if (isShiftPressed) {
this.selection.transformEnd(0, -1);
} else {
this.selection.transformStart(0, -1);
}
}
/**
* onAfterDocumentKeyDown callback.
*
* @private
* @param {KeyboardEvent} event
*/
onAfterDocumentKeyDown(event) {
if (!this.instance.isListening()) {
return;
}
this.instance.runHooks('beforeKeyDown', event);
    // keyCode 229 aka 'uninitialized' is not taken into account by editors. This key code is produced when an unfinished
    // character is being entered (using an IME editor). It is fired mainly on linux (ubuntu) with installed ibus-pinyin package.
if (this.destroyed || event.keyCode === 229) {
return;
}
if (isImmediatePropagationStopped(event)) {
return;
}
this.priv.lastKeyCode = event.keyCode;
if (!this.selection.isSelected()) {
return;
}
// catch CTRL but not right ALT (which in some systems triggers ALT+CTRL)
const isCtrlPressed = (event.ctrlKey || event.metaKey) && !event.altKey;
if (this.activeEditor && !this.activeEditor.isWaiting()) {
if (!isMetaKey(event.keyCode) && !isCtrlMetaKey(event.keyCode) && !isCtrlPressed && !this.isEditorOpened()) {
this.openEditor('', event);
return;
}
}
const isShiftPressed = event.shiftKey;
const rangeModifier = isShiftPressed ? this.selection.setRangeEnd : this.selection.setRangeStart;
let tabMoves;
switch (event.keyCode) {
case KEY_CODES.A:
if (!this.isEditorOpened() && isCtrlPressed) {
this.instance.selectAll();
event.preventDefault();
stopPropagation(event);
}
break;
case KEY_CODES.ARROW_UP:
if (this.isEditorOpened() && !this.activeEditor.isWaiting()) {
this.closeEditorAndSaveChanges(isCtrlPressed);
}
this.moveSelectionUp(isShiftPressed);
event.preventDefault();
stopPropagation(event);
break;
case KEY_CODES.ARROW_DOWN:
if (this.isEditorOpened() && !this.activeEditor.isWaiting()) {
this.closeEditorAndSaveChanges(isCtrlPressed);
}
this.moveSelectionDown(isShiftPressed);
event.preventDefault();
stopPropagation(event);
break;
case KEY_CODES.ARROW_RIGHT:
if (this.isEditorOpened() && !this.activeEditor.isWaiting()) {
this.closeEditorAndSaveChanges(isCtrlPressed);
}
this.moveSelectionRight(isShiftPressed);
event.preventDefault();
stopPropagation(event);
break;
case KEY_CODES.ARROW_LEFT:
if (this.isEditorOpened() && !this.activeEditor.isWaiting()) {
this.closeEditorAndSaveChanges(isCtrlPressed);
}
this.moveSelectionLeft(isShiftPressed);
event.preventDefault();
stopPropagation(event);
break;
case KEY_CODES.TAB:
tabMoves = typeof this.priv.settings.tabMoves === 'function' ? this.priv.settings.tabMoves(event) : this.priv.settings.tabMoves;
if (isShiftPressed) {
// move selection left
this.selection.transformStart(-tabMoves.row, -tabMoves.col);
} else {
// move selection right (add a new column if needed)
this.selection.transformStart(tabMoves.row, tabMoves.col, true);
}
event.preventDefault();
stopPropagation(event);
break;
case KEY_CODES.BACKSPACE:
case KEY_CODES.DELETE:
this.instance.emptySelectedCells();
this.prepareEditor();
event.preventDefault();
break;
case KEY_CODES.F2:
/* F2 */
if (this.activeEditor) {
this.activeEditor.enableFullEditMode();
}
this.openEditor(null, event);
event.preventDefault(); // prevent Opera from opening 'Go to Page dialog'
break;
case KEY_CODES.ENTER:
/* return/enter */
if (this.isEditorOpened()) {
if (this.activeEditor && this.activeEditor.state !== EditorState.WAITING) {
this.closeEditorAndSaveChanges(isCtrlPressed);
}
this.moveSelectionAfterEnter(isShiftPressed);
} else if (this.instance.getSettings().enterBeginsEditing) {
if (this.activeEditor) {
this.activeEditor.enableFullEditMode();
}
this.openEditor(null, event);
} else {
this.moveSelectionAfterEnter(isShiftPressed);
}
event.preventDefault(); // don't add newline to field
stopImmediatePropagation(event); // required by HandsontableEditor
break;
case KEY_CODES.ESCAPE:
if (this.isEditorOpened()) {
this.closeEditorAndRestoreOriginalValue(isCtrlPressed);
this.activeEditor.focus();
}
event.preventDefault();
break;
case KEY_CODES.HOME:
if (event.ctrlKey || event.metaKey) {
rangeModifier.call(this.selection, new CellCoords(0, this.selection.selectedRange.current().from.col));
} else {
rangeModifier.call(this.selection, new CellCoords(this.selection.selectedRange.current().from.row, 0));
}
event.preventDefault(); // don't scroll the window
stopPropagation(event);
break;
case KEY_CODES.END:
if (event.ctrlKey || event.metaKey) {
rangeModifier.call(this.selection, new CellCoords(this.instance.countRows() - 1, this.selection.selectedRange.current().from.col));
} else {
rangeModifier.call(this.selection, new CellCoords(this.selection.selectedRange.current().from.row, this.instance.countCols() - 1));
}
event.preventDefault(); // don't scroll the window
stopPropagation(event);
break;
case KEY_CODES.PAGE_UP:
this.selection.transformStart(-this.instance.countVisibleRows(), 0);
event.preventDefault(); // don't page up the window
stopPropagation(event);
break;
case KEY_CODES.PAGE_DOWN:
this.selection.transformStart(this.instance.countVisibleRows(), 0);
event.preventDefault(); // don't page down the window
stopPropagation(event);
break;
default:
break;
}
}
/**
* onCellDblClick callback.
*
* @private
* @param {MouseEvent} event
* @param {Object} coords
* @param {HTMLTableCellElement|HTMLTableHeaderCellElement} elem
*/
onCellDblClick(event, coords, elem) {
// may be TD or TH
if (elem.nodeName === 'TD') {
if (this.activeEditor) {
this.activeEditor.enableFullEditMode();
}
this.openEditor(null, event);
}
}
/**
* Destroy the instance.
*/
destroy() {
this.destroyed = true;
this.eventManager.destroy();
}
}
const instances = new WeakMap();
/**
* @param {Handsontable} hotInstance
* @param {GridSettings} hotSettings
* @param {Selection} selection
* @param {DataMap} datamap
*/
EditorManager.getInstance = function(hotInstance, hotSettings, selection, datamap) {
let editorManager = instances.get(hotInstance);
if (!editorManager) {
editorManager = new EditorManager(hotInstance, hotSettings, selection, datamap);
instances.set(hotInstance, editorManager);
}
return editorManager;
};
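// Usage sketch (not from the original file): one manager per Handsontable instance,
// memoized in the WeakMap above. Parameter names here are illustrative:
// const editorManager = EditorManager.getInstance(hotInstance, hotInstance.getSettings(), hotInstance.selection);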
export default EditorManager;
| 1 | 15,839 | Same here. Can we use `this.instance.getSettings()` to get `tableMeta`? | handsontable-handsontable | js |
@@ -13,7 +13,15 @@ import (
"github.com/opencontainers/runc/libcontainer/configs"
)
+func isCpuSet(cgroup *configs.Cgroup) bool {
+ return cgroup.Resources.CpuWeight != 0 || cgroup.Resources.CpuMax != ""
+}
+
func setCpu(dirPath string, cgroup *configs.Cgroup) error {
+ if !isCpuSet(cgroup) {
+ return nil
+ }
+
// NOTE: .CpuShares is not used here. Conversion is the caller's responsibility.
if cgroup.Resources.CpuWeight != 0 {
if err := fscommon.WriteFile(dirPath, "cpu.weight", strconv.FormatUint(cgroup.Resources.CpuWeight, 10)); err != nil { | 1 | // +build linux
package fs2
import (
"bufio"
"os"
"path/filepath"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
)
func setCpu(dirPath string, cgroup *configs.Cgroup) error {
// NOTE: .CpuShares is not used here. Conversion is the caller's responsibility.
if cgroup.Resources.CpuWeight != 0 {
if err := fscommon.WriteFile(dirPath, "cpu.weight", strconv.FormatUint(cgroup.Resources.CpuWeight, 10)); err != nil {
return err
}
}
// NOTE: .CpuQuota and .CpuPeriod are not used here. Conversion is the caller's responsibility.
if cgroup.Resources.CpuMax != "" {
if err := fscommon.WriteFile(dirPath, "cpu.max", cgroup.Resources.CpuMax); err != nil {
return err
}
}
return nil
}
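// Illustrative call (sketch, not from the original file): "200000 1000000" in
// cpu.max grants 200ms of CPU time per 1s period, i.e. 0.2 CPU:
//
//	err := setCpu(dirPath, &configs.Cgroup{
//		Resources: &configs.Resources{CpuMax: "200000 1000000"},
//	})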
func statCpu(dirPath string, stats *cgroups.Stats) error {
f, err := os.Open(filepath.Join(dirPath, "cpu.stat"))
if err != nil {
return err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
t, v, err := fscommon.GetCgroupParamKeyValue(sc.Text())
if err != nil {
return err
}
switch t {
case "usage_usec":
stats.CpuStats.CpuUsage.TotalUsage = v * 1000
case "user_usec":
stats.CpuStats.CpuUsage.UsageInUsermode = v * 1000
case "system_usec":
stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000
}
}
return nil
}
| 1 | 19,108 | Not sure whether this validation is useful. | opencontainers-runc | go |
@@ -422,6 +422,17 @@ class PromptContainer(QWidget):
except UnsupportedOperationError:
pass
+ @cmdutils.register(instance='prompt-container', scope='window',
+ modes=[usertypes.KeyMode.prompt])
+ def prompt_yank(self):
+ """Yank URLs or other data in prompts."""
+ question = self._prompt.question
+ s = None
+ if question and hasattr(question, 'yank_text'):
+ s = question.yank_text
+ utils.set_clipboard(s)
+ message.info("Yanked download URL to clipboard: {}".format(s))
+
class LineEdit(QLineEdit):
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Showing prompts above the statusbar."""
import os.path
import html
import collections
import attr
import sip
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex,
QItemSelectionModel, QObject, QEventLoop)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit,
QLabel, QFileSystemModel, QTreeView, QSizePolicy,
QSpacerItem)
from qutebrowser.browser import downloads
from qutebrowser.config import config
from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message
from qutebrowser.keyinput import modeman
from qutebrowser.commands import cmdutils, cmdexc
prompt_queue = None
@attr.s
class AuthInfo:
"""Authentication info returned by a prompt."""
user = attr.ib()
password = attr.ib()
class Error(Exception):
"""Base class for errors in this module."""
class UnsupportedOperationError(Exception):
"""Raised when the prompt class doesn't support the requested operation."""
class PromptQueue(QObject):
"""Global manager and queue for upcoming prompts.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current question on the stack, let the user answer the *most recent*
question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_question: The current Question object if we're handling a question.
Signals:
show_prompts: Emitted with a Question object when prompts should be
shown.
"""
show_prompts = pyqtSignal(usertypes.Question)
def __init__(self, parent=None):
super().__init__(parent)
self._question = None
self._shutting_down = False
self._loops = []
self._queue = collections.deque()
message.global_bridge.mode_left.connect(self._on_mode_left)
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
queue=len(self._queue), question=self._question)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.prompt.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not sip.isdeleted(question):
# the question could already be deleted, e.g. by a cancelled
# download. See
# https://github.com/qutebrowser/qutebrowser/issues/415
self.ask_question(question, blocking=False)
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
log.prompt.debug("Shutting down with loops {}".format(self._loops))
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Display a prompt for a given question.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.prompt.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/qutebrowser/qutebrowser/issues/95
log.prompt.debug("Ignoring question because we're shutting down.")
question.abort()
return None
if self._question is not None and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.prompt.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return None
if blocking:
# If we're blocking we save the old question on the stack, so we
# can restore it after exec, if exec gets called multiple times.
log.prompt.debug("New question is blocking, saving {}".format(
self._question))
old_question = self._question
if old_question is not None:
old_question.interrupted = True
self._question = question
self.show_prompts.emit(question)
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
log.prompt.debug("Starting loop.exec_() for {}".format(question))
loop.exec_(QEventLoop.ExcludeSocketNotifiers)
log.prompt.debug("Ending loop.exec_() for {}".format(question))
log.prompt.debug("Restoring old question {}".format(old_question))
self._question = old_question
self.show_prompts.emit(old_question)
if old_question is None:
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return question.answer
else:
question.completed.connect(self._pop_later)
return None
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
"""Abort question when a prompt mode was left."""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
if self._question is None:
return
log.prompt.debug("Left mode {}, hiding {}".format(
mode, self._question))
self.show_prompts.emit(None)
if self._question.answer is None and not self._question.is_aborted:
log.prompt.debug("Cancelling {} because {} was left".format(
self._question, mode))
self._question.cancel()
self._question = None
class PromptContainer(QWidget):
"""Container for prompts to be shown above the statusbar.
    This is a per-window object; however, each window shows the same prompt.
Attributes:
_layout: The layout used to show prompts in.
_win_id: The window ID this object is associated with.
Signals:
update_geometry: Emitted when the geometry should be updated.
"""
STYLESHEET = """
QWidget#PromptContainer {
{% if conf.statusbar.position == 'top' %}
border-bottom-left-radius: {{ conf.prompt.radius }}px;
border-bottom-right-radius: {{ conf.prompt.radius }}px;
{% else %}
border-top-left-radius: {{ conf.prompt.radius }}px;
border-top-right-radius: {{ conf.prompt.radius }}px;
{% endif %}
}
QWidget {
font: {{ conf.fonts.prompts }};
color: {{ conf.colors.prompts.fg }};
background-color: {{ conf.colors.prompts.bg }};
}
QLineEdit {
border: {{ conf.colors.prompts.border }};
}
QTreeView {
selection-background-color: {{ conf.colors.prompts.selected.bg }};
border: {{ conf.colors.prompts.border }};
}
QTreeView::branch {
background-color: {{ conf.colors.prompts.bg }};
}
QTreeView::item:selected, QTreeView::item:selected:hover,
QTreeView::branch:selected {
background-color: {{ conf.colors.prompts.selected.bg }};
}
"""
update_geometry = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._layout = QVBoxLayout(self)
self._layout.setContentsMargins(10, 10, 10, 10)
self._win_id = win_id
self._prompt = None
self.setObjectName('PromptContainer')
self.setAttribute(Qt.WA_StyledBackground, True)
config.set_register_stylesheet(self)
message.global_bridge.prompt_done.connect(self._on_prompt_done)
prompt_queue.show_prompts.connect(self._on_show_prompts)
message.global_bridge.mode_left.connect(self._on_global_mode_left)
def __repr__(self):
return utils.get_repr(self, win_id=self._win_id)
@pyqtSlot(usertypes.Question)
def _on_show_prompts(self, question):
"""Show a prompt for the given question.
Args:
question: A Question object or None.
"""
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting old prompt {}".format(widget))
widget.hide()
widget.deleteLater()
if question is None:
log.prompt.debug("No prompts left, hiding prompt container.")
self._prompt = None
self.hide()
return
classes = {
usertypes.PromptMode.yesno: YesNoPrompt,
usertypes.PromptMode.text: LineEditPrompt,
usertypes.PromptMode.user_pwd: AuthenticationPrompt,
usertypes.PromptMode.download: DownloadFilenamePrompt,
usertypes.PromptMode.alert: AlertPrompt,
}
klass = classes[question.mode]
prompt = klass(question)
log.prompt.debug("Displaying prompt {}".format(prompt))
self._prompt = prompt
if not question.interrupted:
# If this question was interrupted, we already connected the signal
question.aborted.connect(
lambda: modeman.leave(self._win_id, prompt.KEY_MODE, 'aborted',
maybe=True))
modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked')
self.setSizePolicy(prompt.sizePolicy())
self._layout.addWidget(prompt)
prompt.show()
self.show()
prompt.setFocus()
self.update_geometry.emit()
@pyqtSlot(usertypes.KeyMode)
def _on_prompt_done(self, key_mode):
"""Leave the prompt mode in this window if a question was answered."""
modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True)
@pyqtSlot(usertypes.KeyMode)
def _on_global_mode_left(self, mode):
"""Leave prompt/yesno mode in this window if it was left elsewhere.
This ensures no matter where a prompt was answered, we leave the prompt
mode and dispose of the prompt object in every window.
"""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
modeman.leave(self._win_id, mode, 'left in other window', maybe=True)
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting prompt {}".format(widget))
widget.hide()
widget.deleteLater()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self, value=None):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
Args:
value: If given, uses this value instead of the entered one.
For boolean prompts, "yes"/"no" are accepted as value.
"""
question = self._prompt.question
try:
done = self._prompt.accept(value)
except Error as e:
raise cmdexc.CommandError(str(e))
if done:
message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE)
question.done()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt], maxsplit=0)
def prompt_open_download(self, cmdline: str = None):
"""Immediately open a download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
"""
try:
self._prompt.download_open(cmdline)
except UnsupportedOperationError:
pass
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt])
@cmdutils.argument('which', choices=['next', 'prev'])
def prompt_item_focus(self, which):
"""Shift the focus of the prompt file completion menu to another item.
Args:
which: 'next', 'prev'
"""
try:
self._prompt.item_focus(which)
except UnsupportedOperationError:
pass
class LineEdit(QLineEdit):
"""A line edit used in prompts."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
QLineEdit {
background-color: transparent;
}
""")
self.setAttribute(Qt.WA_MacShowFocusRect, False)
def keyPressEvent(self, e):
"""Override keyPressEvent to paste primary selection on Shift + Ins."""
if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier:
try:
text = utils.get_clipboard(selection=True, fallback=True)
except utils.ClipboardError: # pragma: no cover
e.ignore()
else:
e.accept()
self.insert(text)
return
super().keyPressEvent(e)
def __repr__(self):
return utils.get_repr(self)
class _BasePrompt(QWidget):
"""Base class for all prompts."""
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
assert question.title is not None, question
title = '<font size="4"><b>{}</b></font>'.format(
html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if question.text is not None:
# Not doing any HTML escaping here as the text can be formatted
text_label = QLabel(question.text)
text_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
assert self._key_grid is None, self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
# The bindings are all in the 'prompt' mode, even for yesno prompts
all_bindings = config.key_instance.get_reverse_bindings_for('prompt')
labels = []
for cmd, text in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if pref in bindings:
binding = pref
if binding is None:
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
text_label = QLabel(text)
labels.append((key_label, text_label))
for i, (key_label, text_label) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
def accept(self, value=None):
raise NotImplementedError
def download_open(self, _cmdline):
"""Open the download directly if this is a download prompt."""
raise UnsupportedOperationError
def item_focus(self, _which):
"""Switch to next file item if this is a filename prompt.."""
raise UnsupportedOperationError
def _allowed_commands(self):
"""Get the commands we could run as response to this message."""
raise NotImplementedError
class LineEditPrompt(_BasePrompt):
"""A prompt for a single text value."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._lineedit = LineEdit(self)
self._init_texts(question)
self._vbox.addWidget(self._lineedit)
if question.default:
self._lineedit.setText(question.default)
self.setFocusProxy(self._lineedit)
self._init_key_label()
def accept(self, value=None):
text = value if value is not None else self._lineedit.text()
self.question.answer = text
return True
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('leave-mode', 'Abort')]
class FilenamePrompt(_BasePrompt):
"""A prompt for a filename."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.textEdited.connect(self._set_fileview_root)
self._vbox.addWidget(self._lineedit)
self.setFocusProxy(self._lineedit)
self._init_fileview()
self._set_fileview_root(question.default)
if config.val.prompt.filebrowser:
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
@pyqtSlot(str)
def _set_fileview_root(self, path, *, tabbed=False):
"""Set the root path for the file display."""
separators = os.sep
if os.altsep is not None:
separators += os.altsep
dirname = os.path.dirname(path)
try:
if not path:
pass
elif path in separators and os.path.isdir(path):
# Input "/" -> don't strip anything
pass
elif path[-1] in separators and os.path.isdir(path):
# Input like /foo/bar/ -> show /foo/bar/ contents
path = path.rstrip(separators)
elif os.path.isdir(dirname) and not tabbed:
# Input like /foo/ba -> show /foo contents
path = dirname
else:
return
except OSError:
log.prompt.exception("Failed to get directory information")
return
root = self._file_model.setRootPath(path)
self._file_view.setRootIndex(root)
@pyqtSlot(QModelIndex)
def _insert_path(self, index, *, clicked=True):
"""Handle an element selection.
Args:
index: The QModelIndex of the selected element.
clicked: Whether the element was clicked.
"""
path = os.path.normpath(self._file_model.filePath(index))
if clicked:
path += os.sep
else:
# On Windows, when we have C:\foo and tab over .., we get C:\
path = path.rstrip(os.sep)
log.prompt.debug('Inserting path {}'.format(path))
self._lineedit.setText(path)
self._lineedit.setFocus()
self._set_fileview_root(path, tabbed=True)
if clicked:
# Avoid having a ..-subtree highlighted
self._file_view.setCurrentIndex(QModelIndex())
def _init_fileview(self):
self._file_view = QTreeView(self)
self._file_model = QFileSystemModel(self)
self._file_view.setModel(self._file_model)
self._file_view.clicked.connect(self._insert_path)
if config.val.prompt.filebrowser:
self._vbox.addWidget(self._file_view)
else:
self._file_view.hide()
# Only show name
self._file_view.setHeaderHidden(True)
for col in range(1, 4):
self._file_view.setColumnHidden(col, True)
# Nothing selected initially
self._file_view.setCurrentIndex(QModelIndex())
# The model needs to be sorted so we get the correct first/last index
self._file_model.directoryLoaded.connect(
lambda: self._file_model.sort(0))
def accept(self, value=None):
text = value if value is not None else self._lineedit.text()
text = downloads.transform_path(text)
if text is None:
message.error("Invalid filename")
return False
self.question.answer = text
return True
def item_focus(self, which):
# This duplicates some completion code, but I don't see a nicer way...
assert which in ['prev', 'next'], which
selmodel = self._file_view.selectionModel()
parent = self._file_view.rootIndex()
first_index = self._file_model.index(0, 0, parent)
row = self._file_model.rowCount(parent) - 1
last_index = self._file_model.index(row, 0, parent)
if not first_index.isValid():
# No entries
return
assert last_index.isValid()
idx = selmodel.currentIndex()
if not idx.isValid():
# No item selected yet
idx = last_index if which == 'prev' else first_index
elif which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid():
idx = last_index if which == 'prev' else first_index
selmodel.setCurrentIndex(
idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
self._insert_path(idx, clicked=False)
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('leave-mode', 'Abort')]
class DownloadFilenamePrompt(FilenamePrompt):
"""A prompt for a filename for downloads."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._file_model.setFilter(QDir.AllDirs | QDir.Drives | QDir.NoDot)
def accept(self, value=None):
done = super().accept(value)
answer = self.question.answer
if answer is not None:
self.question.answer = downloads.FileDownloadTarget(answer)
return done
def download_open(self, cmdline):
self.question.answer = downloads.OpenFileDownloadTarget(cmdline)
self.question.done()
message.global_bridge.prompt_done.emit(self.KEY_MODE)
def _allowed_commands(self):
cmds = [
('prompt-accept', 'Accept'),
('leave-mode', 'Abort'),
('prompt-open-download', "Open download"),
]
return cmds
class AuthenticationPrompt(_BasePrompt):
"""A prompt for username/password."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
user_label = QLabel("Username:", self)
self._user_lineedit = LineEdit(self)
password_label = QLabel("Password:", self)
self._password_lineedit = LineEdit(self)
self._password_lineedit.setEchoMode(QLineEdit.Password)
grid = QGridLayout()
grid.addWidget(user_label, 1, 0)
grid.addWidget(self._user_lineedit, 1, 1)
grid.addWidget(password_label, 2, 0)
grid.addWidget(self._password_lineedit, 2, 1)
self._vbox.addLayout(grid)
self._init_key_label()
assert not question.default, question.default
self.setFocusProxy(self._user_lineedit)
def accept(self, value=None):
if value is not None:
if ':' not in value:
raise Error("Value needs to be in the format "
"username:password, but {} was given".format(
value))
username, password = value.split(':', maxsplit=1)
self.question.answer = AuthInfo(username, password)
return True
elif self._user_lineedit.hasFocus():
# Earlier, tab was bound to :prompt-accept, so to still support
# that we simply switch the focus when tab was pressed.
self._password_lineedit.setFocus()
return False
else:
self.question.answer = AuthInfo(self._user_lineedit.text(),
self._password_lineedit.text())
return True
def item_focus(self, which):
"""Support switching between fields with tab."""
assert which in ['prev', 'next'], which
if which == 'next' and self._user_lineedit.hasFocus():
self._password_lineedit.setFocus()
elif which == 'prev' and self._password_lineedit.hasFocus():
self._user_lineedit.setFocus()
def _allowed_commands(self):
return [('prompt-accept', "Accept"),
('leave-mode', "Abort")]
class YesNoPrompt(_BasePrompt):
"""A prompt with yes/no answers."""
KEY_MODE = usertypes.KeyMode.yesno
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def accept(self, value=None):
if value is None:
if self.question.default is None:
raise Error("No default value was set for this question!")
self.question.answer = self.question.default
elif value == 'yes':
self.question.answer = True
elif value == 'no':
self.question.answer = False
else:
raise Error("Invalid value {} - expected yes/no!".format(value))
return True
def _allowed_commands(self):
cmds = [
('prompt-accept yes', "Yes"),
('prompt-accept no', "No"),
]
if self.question.default is not None:
assert self.question.default in [True, False]
default = 'yes' if self.question.default else 'no'
cmds.append(('prompt-accept', "Use default ({})".format(default)))
cmds.append(('leave-mode', "Abort"))
return cmds
class AlertPrompt(_BasePrompt):
"""A prompt without any answer possibility."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def accept(self, value=None):
if value is not None:
raise Error("No value is permitted with alert prompts!")
# Simply mark prompt as done without setting self.question.answer
return True
def _allowed_commands(self):
return [('prompt-accept', "Hide")]
def init():
"""Initialize global prompt objects."""
global prompt_queue
prompt_queue = PromptQueue()
objreg.register('prompt-queue', prompt_queue) # for commands
message.global_bridge.ask_question.connect(
prompt_queue.ask_question, Qt.DirectConnection)
| 1 | 20,581 | Various things to note here: - Why would `question` ever be falsey (or `None`) here, i.e. why the `if question`? - Why would `question` ever not have that attribute? You set it in `Question.__init__`, so that check does nothing at all (and generally, `hasattr()` should be avoided if possible). - Why do you want to yank `None` (what does that do?) rather than e.g. show an error if there's nothing to yank? | qutebrowser-qutebrowser | py |
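A version that addresses all three review points — no `hasattr()`, no yanking of `None`, and an error when there is nothing to yank — might look like the sketch below. It assumes, per the review, that `yank_text` is always assigned in `Question.__init__` (possibly as `None`); this replaces the method body in the patch:

```python
@cmdutils.register(instance='prompt-container', scope='window',
                   modes=[usertypes.KeyMode.prompt])
def prompt_yank(self):
    """Yank URLs or other data in prompts."""
    question = self._prompt.question
    # yank_text is set in Question.__init__, so no hasattr() check is
    # needed; it can still be None for prompts with nothing to yank.
    if question.yank_text is None:
        raise cmdexc.CommandError("Nothing to yank for this prompt")
    utils.set_clipboard(question.yank_text)
    message.info("Yanked to clipboard: {}".format(question.yank_text))
```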
@@ -121,8 +121,10 @@ def buffer_s3response(s3response):
found_records = True
elif 'Progress' in event:
logger_.info("select progress: %s", event['Progress'].get('Details'))
+ print(event['Progress'].get('Details'))
elif 'Stats' in event:
logger_.info("select stats: %s", event['Stats'])
+ print(event['Stats'])
elif 'End' in event:
# End event indicates that the request finished successfully
end_event_received = True | 1 | """
Helper functions.
"""
import gzip
import io
import json
import logging
import os
from base64 import b64decode
from typing import Iterable
LOGGER_NAME = "quilt-lambda"
MANIFEST_PREFIX_V1 = ".quilt/packages/"
POINTER_PREFIX_V1 = ".quilt/named_packages/"
def separated_env_to_iter(
env_var: str,
*,
deduplicate=True,
lower=True,
predicate=None,
separator=","
) -> Iterable[str]:
"""turn a comma-separated string in the environment into a python list"""
candidate = os.getenv(env_var, "")
result = []
if candidate:
for c in candidate.split(separator):
token = c.strip().lower() if lower else c.strip()
if predicate:
if predicate(token):
result.append(token)
else:
result.append(token)
return set(result) if deduplicate else result
def get_default_origins():
"""
Returns a list of origins that should normally be passed into the @api decorator.
"""
return [
'http://localhost:3000',
os.environ.get('WEB_ORIGIN')
]
def get_quilt_logger():
"""inject a logger via kwargs, with level set by the environment"""
logger_ = logging.getLogger(LOGGER_NAME)
# See https://docs.python.org/3/library/logging.html#logging-levels
level = os.environ.get("QUILT_LOG_LEVEL", "WARNING")
logger_.setLevel(level)
return logger_
def get_available_memory():
"""how much virtual memory is available to us (bytes)?"""
from psutil import virtual_memory
return virtual_memory().available
def make_json_response(status_code, json_object, extra_headers=None):
"""
Helper function to serialize a JSON object and add the JSON content type header.
"""
headers = {
"Content-Type": 'application/json'
}
if extra_headers is not None:
headers.update(extra_headers)
return status_code, json.dumps(json_object), headers
def read_body(resp):
"""
Helper function to decode response body depending on how the body was encoded
prior to transfer to and from lambda.
"""
body = resp['body']
if resp['isBase64Encoded']:
body = b64decode(body)
if resp['headers'].get('Content-Encoding') == 'gzip':
body = gzip.decompress(body)
return body
class IncompleteResultException(Exception):
"""
Exception indicating an incomplete response
(e.g., from S3 Select)
"""
def sql_escape(s):
"""
Escape strings that might contain single quotes for use in Athena
or S3 Select
"""
escaped = s or ""
return escaped.replace("'", "''")
def buffer_s3response(s3response):
"""
Read a streaming response (botocore.eventstream.EventStream) from s3 select
into a StringIO buffer
"""
logger_ = logging.getLogger(LOGGER_NAME)
response = io.StringIO()
end_event_received = False
stats = None
found_records = False
for event in s3response['Payload']:
if 'Records' in event:
records = event['Records']['Payload'].decode()
response.write(records)
found_records = True
elif 'Progress' in event:
logger_.info("select progress: %s", event['Progress'].get('Details'))
elif 'Stats' in event:
logger_.info("select stats: %s", event['Stats'])
elif 'End' in event:
# End event indicates that the request finished successfully
end_event_received = True
if not end_event_received:
raise IncompleteResultException("Error: Received an incomplete response from S3 Select.")
response.seek(0)
return response if found_records else None
def query_manifest_content(
        s3_client,  # boto3 S3 client
*,
bucket: str,
key: str,
sql_stmt: str
) -> io.StringIO:
"""
Call S3 Select to read only the logical keys from a
package manifest that match the desired folder path
prefix
"""
logger_ = get_quilt_logger()
logger_.debug("utils.py: manifest_select: %s", sql_stmt)
response = s3_client.select_object_content(
Bucket=bucket,
Key=key,
ExpressionType='SQL',
Expression=sql_stmt,
InputSerialization={
'JSON': {'Type': 'LINES'},
'CompressionType': 'NONE'
},
OutputSerialization={'JSON': {'RecordDelimiter': '\n'}}
)
return buffer_s3response(response)
| 1 | 19,957 | If this is needed for testing, you should use `pytest --log-cli-level=INFO` instead. | quiltdata-quilt | py |
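The reviewer's point: rather than adding `print()` calls, keep the existing `logger_.info()` lines and surface them while testing. Two ways to do that, sketched below — the test name, fixture, and import path are illustrative, not the project's real test suite:

```python
# Option 1: show live log output when running the suite:
#   pytest --log-cli-level=INFO
#
# Option 2: assert on the records with pytest's built-in caplog fixture.
import logging

from utils import buffer_s3response  # import path illustrative

def test_select_stats_are_logged(caplog, fake_s3response):
    # fake_s3response: hypothetical fixture yielding a canned Payload
    caplog.set_level(logging.INFO, logger="quilt-lambda")
    buffer_s3response(fake_s3response)
    assert any("select stats" in rec.getMessage() for rec in caplog.records)
```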
@@ -115,7 +115,7 @@ func DefaultNodeOptions() *MobileNodeOptions {
EtherClientRPC: metadata.Testnet2Definition.EtherClientRPC,
FeedbackURL: "https://feedback.mysterium.network",
QualityOracleURL: "https://testnet2-quality.mysterium.network/api/v1",
- IPDetectorURL: "https://api.ipify.org/?format=json",
+ IPDetectorURL: "https://testnet2-location.mysterium.network/api/v1/location",
LocationDetectorURL: "https://testnet2-location.mysterium.network/api/v1/location",
TransactorEndpointAddress: metadata.Testnet2Definition.TransactorAddress,
TransactorRegistryAddress: metadata.Testnet2Definition.RegistryAddress, | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mysterium
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"path/filepath"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/config"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/connection/connectionstate"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/core/quality"
"github.com/mysteriumnetwork/node/core/state"
"github.com/mysteriumnetwork/node/eventbus"
"github.com/mysteriumnetwork/node/feedback"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/identity/selector"
"github.com/mysteriumnetwork/node/logconfig"
"github.com/mysteriumnetwork/node/market"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/pilvytis"
"github.com/mysteriumnetwork/node/services/wireguard"
wireguard_connection "github.com/mysteriumnetwork/node/services/wireguard/connection"
"github.com/mysteriumnetwork/node/session/pingpong"
"github.com/mysteriumnetwork/node/session/pingpong/event"
"github.com/mysteriumnetwork/payments/crypto"
)
// MobileNode represents node object tuned for mobile device.
type MobileNode struct {
shutdown func() error
node *cmd.Node
stateKeeper *state.Keeper
connectionManager connection.Manager
locationResolver *location.Cache
identitySelector selector.Handler
signerFactory identity.SignerFactory
ipResolver ip.Resolver
eventBus eventbus.EventBus
connectionRegistry *connection.Registry
proposalsManager *proposalsManager
hermes common.Address
feedbackReporter *feedback.Reporter
transactor *registry.Transactor
identityRegistry registry.IdentityRegistry
identityChannelCalculator *pingpong.ChannelAddressCalculator
consumerBalanceTracker *pingpong.ConsumerBalanceTracker
pilvytis *pilvytis.Service
registryAddress string
channelImplementationAddress string
chainID int64
startTime time.Time
}
// MobileNodeOptions contains common mobile node options.
type MobileNodeOptions struct {
Testnet2 bool
Localnet bool
ExperimentNATPunching bool
MysteriumAPIAddress string
BrokerAddresses []string
EtherClientRPC string
FeedbackURL string
QualityOracleURL string
IPDetectorURL string
LocationDetectorURL string
TransactorEndpointAddress string
TransactorRegistryAddress string
TransactorChannelImplementation string
HermesEndpointAddress string
HermesID string
MystSCAddress string
ChainID int64
PilvytisAddress string
}
// DefaultNodeOptions returns default options.
func DefaultNodeOptions() *MobileNodeOptions {
return &MobileNodeOptions{
Testnet2: true,
ExperimentNATPunching: true,
MysteriumAPIAddress: metadata.Testnet2Definition.MysteriumAPIAddress,
BrokerAddresses: metadata.Testnet2Definition.BrokerAddresses,
EtherClientRPC: metadata.Testnet2Definition.EtherClientRPC,
FeedbackURL: "https://feedback.mysterium.network",
QualityOracleURL: "https://testnet2-quality.mysterium.network/api/v1",
IPDetectorURL: "https://api.ipify.org/?format=json",
LocationDetectorURL: "https://testnet2-location.mysterium.network/api/v1/location",
TransactorEndpointAddress: metadata.Testnet2Definition.TransactorAddress,
TransactorRegistryAddress: metadata.Testnet2Definition.RegistryAddress,
TransactorChannelImplementation: metadata.Testnet2Definition.ChannelImplAddress,
HermesID: metadata.Testnet2Definition.HermesID,
MystSCAddress: "0xf74a5ca65E4552CfF0f13b116113cCb493c580C5",
ChainID: metadata.Testnet2Definition.DefaultChainID,
PilvytisAddress: metadata.Testnet2Definition.PilvytisAddress,
}
}
// NewNode function creates new Node.
func NewNode(appPath string, options *MobileNodeOptions) (*MobileNode, error) {
var di cmd.Dependencies
if appPath == "" {
return nil, errors.New("node app path is required")
}
dataDir := filepath.Join(appPath, ".mysterium")
currentDir := appPath
config.Current.SetDefault(config.FlagChainID.Name, options.ChainID)
config.Current.SetDefault(config.FlagDefaultCurrency.Name, metadata.DefaultNetwork.DefaultCurrency)
network := node.OptionsNetwork{
Testnet2: options.Testnet2,
Localnet: options.Localnet,
ExperimentNATPunching: options.ExperimentNATPunching,
MysteriumAPIAddress: options.MysteriumAPIAddress,
BrokerAddresses: options.BrokerAddresses,
EtherClientRPC: options.EtherClientRPC,
ChainID: options.ChainID,
DNSMap: map[string][]string{
"testnet-location.mysterium.network": {"82.196.15.9"},
"testnet2-location.mysterium.network": {"95.216.204.232"},
"testnet2-quality.mysterium.network": {"116.202.100.246"},
"feedback.mysterium.network": {"116.203.17.150"},
"api.ipify.org": {
"54.204.14.42", "54.225.153.147", "54.235.83.248", "54.243.161.145",
"23.21.109.69", "23.21.126.66",
"50.19.252.36",
"174.129.214.20",
},
},
}
logOptions := logconfig.LogOptions{
LogLevel: zerolog.DebugLevel,
LogHTTP: false,
Filepath: filepath.Join(dataDir, "mysterium-node"),
}
nodeOptions := node.Options{
LogOptions: logOptions,
Directories: node.OptionsDirectory{
Data: dataDir,
Storage: filepath.Join(dataDir, "db"),
Keystore: filepath.Join(dataDir, "keystore"),
Runtime: currentDir,
},
TequilapiEnabled: false,
Keystore: node.OptionsKeystore{
UseLightweight: true,
},
UI: node.OptionsUI{
UIEnabled: false,
},
FeedbackURL: options.FeedbackURL,
OptionsNetwork: network,
Quality: node.OptionsQuality{
Type: node.QualityTypeMORQA,
Address: options.QualityOracleURL,
},
Discovery: node.OptionsDiscovery{
Types: []node.DiscoveryType{node.DiscoveryTypeAPI, node.DiscoveryTypeBroker, node.DiscoveryTypeDHT},
Address: network.MysteriumAPIAddress,
FetchEnabled: false,
DHT: node.OptionsDHT{
Address: "0.0.0.0",
Port: 0,
Protocol: "tcp",
BootstrapPeers: []string{},
},
},
Location: node.OptionsLocation{
IPDetectorURL: options.IPDetectorURL,
Type: node.LocationTypeOracle,
Address: options.LocationDetectorURL,
},
Transactor: node.OptionsTransactor{
TransactorEndpointAddress: options.TransactorEndpointAddress,
RegistryAddress: options.TransactorRegistryAddress,
ChannelImplementation: options.TransactorChannelImplementation,
ProviderMaxRegistrationAttempts: 10,
ProviderRegistrationRetryDelay: time.Minute * 3,
ProviderRegistrationStake: big.NewInt(6200000000),
},
Hermes: node.OptionsHermes{
HermesID: options.HermesID,
},
Payments: node.OptionsPayments{
MaxAllowedPaymentPercentile: 1500,
BCTimeout: time.Second * 30,
HermesPromiseSettlingThreshold: 0.1,
SettlementTimeout: time.Hour * 2,
MystSCAddress: options.MystSCAddress,
},
Consumer: true,
P2PPorts: port.UnspecifiedRange(),
PilvytisAddress: options.PilvytisAddress,
}
err := di.Bootstrap(nodeOptions)
if err != nil {
return nil, fmt.Errorf("could not bootstrap dependencies: %w", err)
}
mobileNode := &MobileNode{
shutdown: di.Shutdown,
node: di.Node,
stateKeeper: di.StateKeeper,
connectionManager: di.ConnectionManager,
locationResolver: di.LocationResolver,
identitySelector: di.IdentitySelector,
signerFactory: di.SignerFactory,
ipResolver: di.IPResolver,
eventBus: di.EventBus,
connectionRegistry: di.ConnectionRegistry,
hermes: common.HexToAddress(nodeOptions.Hermes.HermesID),
feedbackReporter: di.Reporter,
transactor: di.Transactor,
identityRegistry: di.IdentityRegistry,
consumerBalanceTracker: di.ConsumerBalanceTracker,
identityChannelCalculator: di.ChannelAddressCalculator,
channelImplementationAddress: nodeOptions.Transactor.ChannelImplementation,
registryAddress: nodeOptions.Transactor.RegistryAddress,
proposalsManager: newProposalsManager(
di.ProposalRepository,
di.MysteriumAPI,
di.QualityClient,
),
pilvytis: di.Pilvytis,
startTime: time.Now(),
chainID: nodeOptions.OptionsNetwork.ChainID,
}
return mobileNode, nil
}
// GetDefaultCurrency returns the current default currency set.
func (mb *MobileNode) GetDefaultCurrency() string {
return config.Current.GetString(config.FlagDefaultCurrency.Name)
}
// GetProposals returns service proposals from API or cache. Proposals returned as JSON byte array since
// go mobile does not support complex slices.
func (mb *MobileNode) GetProposals(req *GetProposalsRequest) ([]byte, error) {
return mb.proposalsManager.getProposals(req)
}
// ProposalChangeCallback represents proposal callback.
type ProposalChangeCallback interface {
OnChange(proposal []byte)
}
// GetLocationResponse represents location response.
type GetLocationResponse struct {
IP string
Country string
}
// GetLocation return current location including country and IP.
func (mb *MobileNode) GetLocation() (*GetLocationResponse, error) {
loc, err := mb.locationResolver.DetectLocation()
if err != nil {
return nil, fmt.Errorf("could not get location: %w", err)
}
return &GetLocationResponse{
IP: loc.IP,
Country: loc.Country,
}, nil
}
// GetStatusResponse represents status response.
type GetStatusResponse struct {
State string
ProviderID string
ServiceType string
}
// GetStatus returns current connection state and provider info if connected to VPN.
func (mb *MobileNode) GetStatus() *GetStatusResponse {
status := mb.connectionManager.Status()
return &GetStatusResponse{
State: string(status.State),
ProviderID: status.Proposal.ProviderID,
ServiceType: status.Proposal.ServiceType,
}
}
// StatisticsChangeCallback represents statistics callback.
type StatisticsChangeCallback interface {
OnChange(duration int64, bytesReceived int64, bytesSent int64, tokensSpent float64)
}
// RegisterStatisticsChangeCallback registers callback which is called on active connection
// statistics change.
func (mb *MobileNode) RegisterStatisticsChangeCallback(cb StatisticsChangeCallback) {
_ = mb.eventBus.SubscribeAsync(connectionstate.AppTopicConnectionStatistics, func(e connectionstate.AppEventConnectionStatistics) {
tokensSpent := crypto.BigMystToFloat(mb.stateKeeper.GetState().Connection.Invoice.AgreementTotal)
cb.OnChange(int64(e.SessionInfo.Duration().Seconds()), int64(e.Stats.BytesReceived), int64(e.Stats.BytesSent), tokensSpent)
})
}
// ConnectionStatusChangeCallback represents status callback.
type ConnectionStatusChangeCallback interface {
OnChange(status string)
}
// RegisterConnectionStatusChangeCallback registers callback which is called on active connection
// status change.
func (mb *MobileNode) RegisterConnectionStatusChangeCallback(cb ConnectionStatusChangeCallback) {
_ = mb.eventBus.SubscribeAsync(connectionstate.AppTopicConnectionState, func(e connectionstate.AppEventConnectionState) {
cb.OnChange(string(e.State))
})
}
// BalanceChangeCallback represents balance change callback.
type BalanceChangeCallback interface {
OnChange(identityAddress string, balance float64)
}
// RegisterBalanceChangeCallback registers callback which is called on identity balance change.
func (mb *MobileNode) RegisterBalanceChangeCallback(cb BalanceChangeCallback) {
_ = mb.eventBus.SubscribeAsync(event.AppTopicBalanceChanged, func(e event.AppEventBalanceChanged) {
balance := crypto.BigMystToFloat(e.Current)
cb.OnChange(e.Identity.Address, balance)
})
}
// IdentityRegistrationChangeCallback represents identity registration status callback.
type IdentityRegistrationChangeCallback interface {
OnChange(identityAddress string, status string)
}
// RegisterIdentityRegistrationChangeCallback registers callback which is called on identity registration status change.
func (mb *MobileNode) RegisterIdentityRegistrationChangeCallback(cb IdentityRegistrationChangeCallback) {
_ = mb.eventBus.SubscribeAsync(registry.AppTopicIdentityRegistration, func(e registry.AppEventIdentityRegistration) {
cb.OnChange(e.ID.Address, e.Status.String())
})
}
// ConnectRequest represents connect request.
type ConnectRequest struct {
IdentityAddress string
ProviderID string
ServiceType string
DisableKillSwitch bool
ForceReconnect bool
}
// ConnectResponse represents connect response with optional error code and message.
type ConnectResponse struct {
ErrorCode string
ErrorMessage string
}
const (
connectErrInvalidProposal = "InvalidProposal"
connectErrInsufficientBalance = "InsufficientBalance"
connectErrUnknown = "Unknown"
)
// Connect connects to given provider.
func (mb *MobileNode) Connect(req *ConnectRequest) *ConnectResponse {
proposal, err := mb.proposalsManager.repository.Proposal(market.ProposalID{
ProviderID: req.ProviderID,
ServiceType: req.ServiceType,
})
qualityEvent := quality.ConnectionEvent{
ServiceType: req.ServiceType,
ConsumerID: req.IdentityAddress,
ProviderID: req.ProviderID,
}
if err != nil {
qualityEvent.Stage = quality.StageGetProposal
qualityEvent.Error = err.Error()
mb.eventBus.Publish(quality.AppTopicConnectionEvents, qualityEvent)
return &ConnectResponse{
ErrorCode: connectErrInvalidProposal,
ErrorMessage: err.Error(),
}
}
connectOptions := connection.ConnectParams{
DisableKillSwitch: req.DisableKillSwitch,
DNS: connection.DNSOptionAuto,
}
if err := mb.connectionManager.Connect(identity.FromAddress(req.IdentityAddress), mb.hermes, *proposal, connectOptions); err != nil {
qualityEvent.Stage = quality.StageConnectionUnknownError
qualityEvent.Error = err.Error()
mb.eventBus.Publish(quality.AppTopicConnectionEvents, qualityEvent)
if errors.Is(err, connection.ErrInsufficientBalance) {
return &ConnectResponse{
ErrorCode: connectErrInsufficientBalance,
}
}
return &ConnectResponse{
ErrorCode: connectErrUnknown,
ErrorMessage: err.Error(),
}
}
qualityEvent.Stage = quality.StageConnectionOK
mb.eventBus.Publish(quality.AppTopicConnectionEvents, qualityEvent)
return &ConnectResponse{}
}
// Reconnect checks whether the session is alive and reconnects if it is dead. Forces a reconnect when ForceReconnect is set.
func (mb *MobileNode) Reconnect(req *ConnectRequest) *ConnectResponse {
reconnect := func() *ConnectResponse {
if err := mb.Disconnect(); err != nil {
log.Err(err).Msg("Failed to disconnect previous session")
}
return mb.Connect(req)
}
if req.ForceReconnect {
log.Info().Msg("Forcing immediate reconnect")
return reconnect()
}
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
if err := mb.connectionManager.CheckChannel(ctx); err != nil {
log.Info().Msgf("Forcing reconnect after failed channel: %s", err)
return reconnect()
}
log.Info().Msg("Reconnect is not needed - p2p channel is alive")
return &ConnectResponse{}
}
// Disconnect disconnects or cancels current connection.
func (mb *MobileNode) Disconnect() error {
if err := mb.connectionManager.Disconnect(); err != nil {
return fmt.Errorf("could not disconnect: %w", err)
}
return nil
}
// GetIdentityRequest represents identity request.
type GetIdentityRequest struct {
Address string
Passphrase string
}
// GetIdentityResponse represents identity response.
type GetIdentityResponse struct {
IdentityAddress string
ChannelAddress string
RegistrationStatus string
}
// GetIdentity finds first identity and unlocks it.
// If there is no identity default one will be created.
func (mb *MobileNode) GetIdentity(req *GetIdentityRequest) (*GetIdentityResponse, error) {
if req == nil {
req = &GetIdentityRequest{}
}
id, err := mb.identitySelector.UseOrCreate(req.Address, req.Passphrase, mb.chainID)
if err != nil {
return nil, fmt.Errorf("could not unlock identity: %w", err)
}
channelAddress, err := mb.identityChannelCalculator.GetChannelAddress(id)
if err != nil {
return nil, fmt.Errorf("could not generate channel address: %w", err)
}
status, err := mb.identityRegistry.GetRegistrationStatus(mb.chainID, id)
if err != nil {
return nil, fmt.Errorf("could not get identity registration status: %w", err)
}
return &GetIdentityResponse{
IdentityAddress: id.Address,
ChannelAddress: channelAddress.Hex(),
RegistrationStatus: status.String(),
}, nil
}
// GetIdentityRegistrationFeesResponse represents identity registration fees result.
type GetIdentityRegistrationFeesResponse struct {
Fee float64
}
// GetIdentityRegistrationFees returns identity registration fees.
func (mb *MobileNode) GetIdentityRegistrationFees() (*GetIdentityRegistrationFeesResponse, error) {
fees, err := mb.transactor.FetchRegistrationFees(mb.chainID)
if err != nil {
return nil, fmt.Errorf("could not get registration fees: %w", err)
}
fee := crypto.BigMystToFloat(fees.Fee)
return &GetIdentityRegistrationFeesResponse{Fee: fee}, nil
}
// RegisterIdentityRequest represents identity registration request.
type RegisterIdentityRequest struct {
IdentityAddress string
Token string
}
// RegisterIdentity starts identity registration in background.
func (mb *MobileNode) RegisterIdentity(req *RegisterIdentityRequest) error {
fees, err := mb.transactor.FetchRegistrationFees(mb.chainID)
if err != nil {
return fmt.Errorf("could not get registration fees: %w", err)
}
var token *string
if req.Token != "" {
token = &req.Token
}
err = mb.transactor.RegisterIdentity(req.IdentityAddress, big.NewInt(0), fees.Fee, "", mb.chainID, token)
if err != nil {
return fmt.Errorf("could not register identity: %w", err)
}
return nil
}
// GetBalanceRequest represents balance request.
type GetBalanceRequest struct {
IdentityAddress string
}
// GetBalanceResponse represents balance response.
type GetBalanceResponse struct {
Balance float64
}
// GetBalance returns current balance.
func (mb *MobileNode) GetBalance(req *GetBalanceRequest) (*GetBalanceResponse, error) {
balance := mb.consumerBalanceTracker.GetBalance(mb.chainID, identity.FromAddress(req.IdentityAddress))
b := crypto.BigMystToFloat(balance)
return &GetBalanceResponse{Balance: b}, nil
}
// SendFeedbackRequest represents user feedback request.
type SendFeedbackRequest struct {
Email string
Description string
}
// SendFeedback sends user feedback via the feedback reporter.
func (mb *MobileNode) SendFeedback(req *SendFeedbackRequest) error {
report := feedback.UserReport{
Email: req.Email,
Description: req.Description,
}
result, err := mb.feedbackReporter.NewIssue(report)
if err != nil {
return fmt.Errorf("could not create user report: %w", err)
}
if !result.Success {
return errors.New("user report sent but got error response")
}
return nil
}
// Shutdown function stops running mobile node.
func (mb *MobileNode) Shutdown() error {
return mb.shutdown()
}
// WaitUntilDies function returns when node stops.
func (mb *MobileNode) WaitUntilDies() error {
return mb.node.Wait()
}
// OverrideWireguardConnection overrides default wireguard connection implementation to more mobile adapted one.
func (mb *MobileNode) OverrideWireguardConnection(wgTunnelSetup WireguardTunnelSetup) {
wireguard.Bootstrap()
factory := func() (connection.Connection, error) {
opts := wireGuardOptions{
statsUpdateInterval: 1 * time.Second,
handshakeTimeout: 1 * time.Minute,
}
return NewWireGuardConnection(
opts,
newWireguardDevice(wgTunnelSetup),
mb.ipResolver,
wireguard_connection.NewHandshakeWaiter(),
)
}
mb.connectionRegistry.Register(wireguard.ServiceType, factory)
}
// HealthCheckData represents node health check info.
type HealthCheckData struct {
Uptime string `json:"uptime"`
Version string `json:"version"`
BuildInfo *BuildInfo `json:"build_info"`
}
// BuildInfo represents node build info.
type BuildInfo struct {
Commit string `json:"commit"`
Branch string `json:"branch"`
BuildNumber string `json:"build_number"`
}
// HealthCheck returns node health check data.
func (mb *MobileNode) HealthCheck() *HealthCheckData {
return &HealthCheckData{
Uptime: time.Since(mb.startTime).String(),
Version: metadata.VersionAsString(),
BuildInfo: &BuildInfo{
Commit: metadata.BuildCommit,
Branch: metadata.BuildBranch,
BuildNumber: metadata.BuildNumber,
},
}
}
// OrderUpdatedCallbackPayload is the payload of OrderUpdatedCallback.
type OrderUpdatedCallbackPayload struct {
OrderID int64
Status string
PayAmount float64
PayCurrency string
}
// OrderUpdatedCallback is a callback when order status changes.
type OrderUpdatedCallback interface {
OnUpdate(payload *OrderUpdatedCallbackPayload)
}
// RegisterOrderUpdatedCallback registers OrderStatusChanged callback.
func (mb *MobileNode) RegisterOrderUpdatedCallback(cb OrderUpdatedCallback) {
_ = mb.eventBus.SubscribeAsync(pilvytis.AppTopicOrderUpdated, func(e pilvytis.AppEventOrderUpdated) {
payload := OrderUpdatedCallbackPayload{}
id, err := shrinkUint64(e.ID)
if err != nil {
log.Err(err).Send()
return
}
payload.OrderID = id
payload.Status = string(e.Status)
if e.PayAmount != nil {
payload.PayAmount = *e.PayAmount
}
if e.PayCurrency != nil {
payload.PayCurrency = *e.PayCurrency
}
cb.OnUpdate(&payload)
})
}
// CreateOrderRequest a request to create an order.
type CreateOrderRequest struct {
IdentityAddress string
MystAmount float64
PayCurrency string
Lightning bool
}
// OrderResponse represents a payment order for mobile usage.
type OrderResponse struct {
ID int64 `json:"id"`
IdentityAddress string `json:"identity_address"`
Status string `json:"status"`
MystAmount float64 `json:"myst_amount"`
PayCurrency *string `json:"pay_currency,omitempty"`
PayAmount *float64 `json:"pay_amount,omitempty"`
PaymentAddress string `json:"payment_address"`
PaymentURL string `json:"payment_url"`
}
func newOrderResponse(order pilvytis.OrderResponse) (*OrderResponse, error) {
id, err := shrinkUint64(order.ID)
if err != nil {
return nil, err
}
response := &OrderResponse{
ID: id,
IdentityAddress: order.Identity,
Status: string(order.Status),
MystAmount: order.MystAmount,
PayCurrency: order.PayCurrency,
PayAmount: order.PayAmount,
PaymentAddress: order.PaymentAddress,
PaymentURL: order.PaymentURL,
}
return response, nil
}
// CreateOrder creates a payment order.
func (mb *MobileNode) CreateOrder(req *CreateOrderRequest) ([]byte, error) {
order, err := mb.pilvytis.CreateOrder(identity.FromAddress(req.IdentityAddress), req.MystAmount, req.PayCurrency, req.Lightning)
if err != nil {
return nil, err
}
res, err := newOrderResponse(*order)
if err != nil {
return nil, err
}
return json.Marshal(res)
}
// GetOrderRequest a request to get an order.
type GetOrderRequest struct {
IdentityAddress string
ID int64
}
// GetOrder gets an order by ID.
func (mb *MobileNode) GetOrder(req *GetOrderRequest) ([]byte, error) {
order, err := mb.pilvytis.GetOrder(identity.FromAddress(req.IdentityAddress), uint64(req.ID))
if err != nil {
return nil, err
}
res, err := newOrderResponse(*order)
if err != nil {
return nil, err
}
return json.Marshal(res)
}
// ListOrdersRequest a request to list orders.
type ListOrdersRequest struct {
IdentityAddress string
}
// ListOrders lists all payment orders.
func (mb *MobileNode) ListOrders(req *ListOrdersRequest) ([]byte, error) {
orders, err := mb.pilvytis.ListOrders(identity.FromAddress(req.IdentityAddress))
if err != nil {
return nil, err
}
res := make([]OrderResponse, len(orders))
for i := range orders {
orderRes, err := newOrderResponse(orders[i])
if err != nil {
return nil, err
}
res[i] = *orderRes
}
	return json.Marshal(res)
}
// Currencies lists supported payment currencies.
func (mb *MobileNode) Currencies() ([]byte, error) {
currencies, err := mb.pilvytis.Currencies()
if err != nil {
return nil, err
}
return json.Marshal(currencies)
}
// shrinkUint64 converts a uint64 to an int64, returning an error when the
// value does not fit (i.e. exceeds math.MaxInt64).
func shrinkUint64(u uint64) (int64, error) {
	return strconv.ParseInt(strconv.FormatUint(u, 10), 10, 64)
}
| 1 | 16,878 | We should drop ipify in all places i guess | mysteriumnetwork-node | go |
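Taken to its conclusion, the change would also remove the pinned `api.ipify.org` addresses from the `DNSMap` in `NewNode`, since IP detection would then go through the location oracle. A sketch of the resulting map (the end state implied by the review, not an actual commit):

```go
DNSMap: map[string][]string{
	"testnet-location.mysterium.network":  {"82.196.15.9"},
	"testnet2-location.mysterium.network": {"95.216.204.232"},
	"testnet2-quality.mysterium.network":  {"116.202.100.246"},
	"feedback.mysterium.network":          {"116.203.17.150"},
	// api.ipify.org entries dropped: the location oracle resolves the
	// public IP, so no extra pinned addresses are required.
},
```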
@@ -137,8 +137,8 @@ type ConsensusParams struct {
DownCommitteeSize uint64
DownCommitteeThreshold uint64
- FilterTimeoutSmallLambdas uint64
- FilterTimeoutPeriod0SmallLambdas uint64
+ AgreementFilterTimeout time.Duration
+ AgreementFilterTimeoutPeriod0 time.Duration
FastRecoveryLambda time.Duration // time between fast recovery attempts
FastPartitionRecovery bool // set when fast partition recovery is enabled | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package config
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
"github.com/algorand/go-algorand/protocol"
)
// ConsensusParams specifies settings that might vary based on the
// particular version of the consensus protocol.
type ConsensusParams struct {
// Consensus protocol upgrades. Votes for upgrades are collected for
// UpgradeVoteRounds. If the number of positive votes is over
// UpgradeThreshold, the proposal is accepted.
//
// UpgradeVoteRounds needs to be long enough to collect an
// accurate sample of participants, and UpgradeThreshold needs
// to be high enough to ensure that there are sufficient participants
// after the upgrade.
//
// A consensus protocol upgrade may specify the delay between its
// acceptance and its execution. This gives clients time to notify
// users. This delay is specified by the upgrade proposer and must
// be between MinUpgradeWaitRounds and MaxUpgradeWaitRounds (inclusive)
// in the old protocol's parameters. Note that these parameters refer
// to the representation of the delay in a block rather than the actual
// delay: if the specified delay is zero, it is equivalent to
// DefaultUpgradeWaitRounds.
//
// The maximum length of a consensus version string is
// MaxVersionStringLen.
UpgradeVoteRounds uint64
UpgradeThreshold uint64
DefaultUpgradeWaitRounds uint64
MinUpgradeWaitRounds uint64
MaxUpgradeWaitRounds uint64
MaxVersionStringLen int
// MaxTxnBytesPerBlock determines the maximum number of bytes
// that transactions can take up in a block. Specifically,
// the sum of the lengths of encodings of each transaction
// in a block must not exceed MaxTxnBytesPerBlock.
MaxTxnBytesPerBlock int
	// MaxTxnNoteBytes is the maximum size of a transaction's Note field.
MaxTxnNoteBytes int
// MaxTxnLife is how long a transaction can be live for:
// the maximum difference between LastValid and FirstValid.
//
// Note that in a protocol upgrade, the ledger must first be upgraded
// to hold more past blocks for this value to be raised.
MaxTxnLife uint64
// ApprovedUpgrades describes the upgrade proposals that this protocol
// implementation will vote for, along with their delay value
// (in rounds). A delay value of zero is the same as a delay of
// DefaultUpgradeWaitRounds.
ApprovedUpgrades map[protocol.ConsensusVersion]uint64
// SupportGenesisHash indicates support for the GenesisHash
// fields in transactions (and requires them in blocks).
SupportGenesisHash bool
// RequireGenesisHash indicates that GenesisHash must be present
// in every transaction.
RequireGenesisHash bool
// DefaultKeyDilution specifies the granularity of top-level ephemeral
// keys. KeyDilution is the number of second-level keys in each batch,
// signed by a top-level "batch" key. The default value can be
// overridden in the account state.
DefaultKeyDilution uint64
// MinBalance specifies the minimum balance that can appear in
// an account. To spend money below MinBalance requires issuing
// an account-closing transaction, which transfers all of the
// money from the account, and deletes the account state.
MinBalance uint64
// MinTxnFee specifies the minimum fee allowed on a transaction.
// A minimum fee is necessary to prevent DoS. In some sense this is
// a way of making the spender subsidize the cost of storing this transaction.
MinTxnFee uint64
// RewardUnit specifies the number of MicroAlgos corresponding to one reward
// unit.
//
// Rewards are received by whole reward units. Fractions of
// RewardUnits do not receive rewards.
RewardUnit uint64
// RewardsRateRefreshInterval is the number of rounds after which the
// rewards level is recomputed for the next RewardsRateRefreshInterval rounds.
RewardsRateRefreshInterval uint64
// seed-related parameters
SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec
SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec
// ledger retention policy
MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for
// sortition threshold factors
NumProposers uint64
SoftCommitteeSize uint64
SoftCommitteeThreshold uint64
CertCommitteeSize uint64
CertCommitteeThreshold uint64
NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant
NextCommitteeThreshold uint64
LateCommitteeSize uint64
LateCommitteeThreshold uint64
RedoCommitteeSize uint64
RedoCommitteeThreshold uint64
DownCommitteeSize uint64
DownCommitteeThreshold uint64
FilterTimeoutSmallLambdas uint64
FilterTimeoutPeriod0SmallLambdas uint64
FastRecoveryLambda time.Duration // time between fast recovery attempts
FastPartitionRecovery bool // set when fast partition recovery is enabled
// commit to payset using a hash of entire payset,
// instead of txid merkle tree
PaysetCommitFlat bool
MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks
// support for the efficient encoding in SignedTxnInBlock
SupportSignedTxnInBlock bool
// force the FeeSink address to be non-participating in the genesis balances.
ForceNonParticipatingFeeSink bool
// support for ApplyData in SignedTxnInBlock
ApplyData bool
// track reward distributions in ApplyData
RewardsInApplyData bool
// domain-separated credentials
CredentialDomainSeparationEnabled bool
// support for transactions that mark an account non-participating
SupportBecomeNonParticipatingTransactions bool
// fix the rewards calculation by avoiding subtracting too much from the rewards pool
PendingResidueRewards bool
// asset support
Asset bool
// max number of assets per account
MaxAssetsPerAccount int
// max length of asset name
MaxAssetNameBytes int
// max length of asset unit name
MaxAssetUnitNameBytes int
// max length of asset url
MaxAssetURLBytes int
// support sequential transaction counter TxnCounter
TxnCounter bool
// transaction groups
SupportTxGroups bool
// max group size
MaxTxGroupSize int
// support for transaction leases
// note: if FixTransactionLeases is not set, the transaction
// leases supported are faulty; specifically, they do not
// enforce exclusion correctly when the FirstValid of
// transactions do not match.
SupportTransactionLeases bool
FixTransactionLeases bool
// 0 for no support, otherwise highest version supported
LogicSigVersion uint64
// len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this
LogicSigMaxSize uint64
// sum of estimated op cost must be less than this
LogicSigMaxCost uint64
// max decimal precision for assets
MaxAssetDecimals uint32
// SupportRekeying indicates support for account rekeying (the RekeyTo and AuthAddr fields)
SupportRekeying bool
// application support
Application bool
// max number of ApplicationArgs for an ApplicationCall transaction
MaxAppArgs int
// max sum([len(arg) for arg in txn.ApplicationArgs])
MaxAppTotalArgLen int
// maximum length of application approval program or clear state
// program in bytes
MaxAppProgramLen int
// maximum number of accounts in the ApplicationCall Accounts field.
// this determines, in part, the maximum number of balance records
// accessed by a single transaction
MaxAppTxnAccounts int
// maximum number of app ids in the ApplicationCall ForeignApps field.
// these are the only applications besides the called application for
// which global state may be read in the transaction
MaxAppTxnForeignApps int
// maximum number of asset ids in the ApplicationCall ForeignAssets
// field. these are the only assets for which the asset parameters may
// be read in the transaction
MaxAppTxnForeignAssets int
// maximum cost of application approval program or clear state program
MaxAppProgramCost int
// maximum length of a key used in an application's global or local
// key/value store
MaxAppKeyLen int
// maximum length of a bytes value used in an application's global or
// local key/value store
MaxAppBytesValueLen int
// maximum number of applications a single account can create and store
// AppParams for at once
MaxAppsCreated int
// maximum number of applications a single account can opt in to and
// store AppLocalState for at once
MaxAppsOptedIn int
// flat MinBalance requirement for creating a single application and
// storing its AppParams
AppFlatParamsMinBalance uint64
// flat MinBalance requirement for opting in to a single application
// and storing its AppLocalState
AppFlatOptInMinBalance uint64
// MinBalance requirement per key/value entry in LocalState or
// GlobalState key/value stores, regardless of value type
SchemaMinBalancePerEntry uint64
// MinBalance requirement (in addition to SchemaMinBalancePerEntry) for
// integer values stored in LocalState or GlobalState key/value stores
SchemaUintMinBalance uint64
// MinBalance requirement (in addition to SchemaMinBalancePerEntry) for
// []byte values stored in LocalState or GlobalState key/value stores
SchemaBytesMinBalance uint64
// maximum number of total key/value pairs allowed by a given
// LocalStateSchema (and therefore allowed in LocalState)
MaxLocalSchemaEntries uint64
// maximum number of total key/value pairs allowed by a given
// GlobalStateSchema (and therefore allowed in GlobalState)
MaxGlobalSchemaEntries uint64
// maximum total minimum balance requirement for an account, used
// to limit the maximum size of a single balance record
MaximumMinimumBalance uint64
// CompactCertRounds defines the frequency with which compact
// certificates are generated. Every round that is a multiple
// of CompactCertRounds, the block header will include a Merkle
// commitment to the set of online accounts (that can vote after
// another CompactCertRounds rounds), and that block will be signed
// (forming a compact certificate) by the voters from the previous
// such Merkle tree commitment. A value of zero means no compact
// certificates.
CompactCertRounds uint64
// CompactCertTopVoters is a bound on how many online accounts get to
// participate in forming the compact certificate, by including the
// top CompactCertTopVoters accounts (by normalized balance) into the
// Merkle commitment.
CompactCertTopVoters uint64
// CompactCertVotersLookback is the number of blocks we skip before
// publishing a Merkle commitment to the online accounts. Namely,
// if block number N contains a Merkle commitment to the online
// accounts (which, incidentally, means N%CompactCertRounds=0),
// then the balances reflected in that commitment must come from
// block N-CompactCertVotersLookback. This gives each node some
// time (CompactCertVotersLookback blocks worth of time) to
// construct this Merkle tree, so as to avoid placing the
// construction of this Merkle tree (and obtaining the requisite
// accounts and balances) in the critical path.
CompactCertVotersLookback uint64
}
// ConsensusProtocols defines a set of supported protocol versions and their
// corresponding parameters.
type ConsensusProtocols map[protocol.ConsensusVersion]ConsensusParams
// Consensus tracks the protocol-level settings for different versions of the
// consensus protocol.
var Consensus ConsensusProtocols
// MaxVoteThreshold is the largest threshold for a bundle over all supported
// consensus protocols, used for decoding purposes.
var MaxVoteThreshold int
// MaxEvalDeltaAccounts is the largest number of accounts that may appear in
// an eval delta, used for decoding purposes.
var MaxEvalDeltaAccounts int
// MaxStateDeltaKeys is the largest number of key/value pairs that may appear
// in a StateDelta, used for decoding purposes.
var MaxStateDeltaKeys int
// MaxLogicSigMaxSize is the largest logic signature size that appears in any of the supported
// protocols, used for decoding purposes.
var MaxLogicSigMaxSize int
// MaxTxnNoteBytes is the largest note field size supported by any
// of the consensus protocols. used for decoding purposes.
var MaxTxnNoteBytes int
// MaxTxGroupSize is the largest supported number of transactions per transaction group supported by any
// of the consensus protocols. used for decoding purposes.
var MaxTxGroupSize int
// MaxAppProgramLen is the largest supported app program size supported by any
// of the consensus protocols. used for decoding purposes.
var MaxAppProgramLen int
func checkSetMax(value int, curMax *int) {
if value > *curMax {
*curMax = value
}
}
// checkSetAllocBounds sets some global variables used during msgpack decoding
// to enforce memory allocation limits. The values should be generous to
// prevent correctness bugs, but not so large that DoS attacks are trivial
func checkSetAllocBounds(p ConsensusParams) {
checkSetMax(int(p.SoftCommitteeThreshold), &MaxVoteThreshold)
checkSetMax(int(p.CertCommitteeThreshold), &MaxVoteThreshold)
checkSetMax(int(p.NextCommitteeThreshold), &MaxVoteThreshold)
checkSetMax(int(p.LateCommitteeThreshold), &MaxVoteThreshold)
checkSetMax(int(p.RedoCommitteeThreshold), &MaxVoteThreshold)
checkSetMax(int(p.DownCommitteeThreshold), &MaxVoteThreshold)
// These bounds could be tighter, but since these values are just to
// prevent DoS, setting them to be the maximum number of allowed
// executed TEAL instructions should be fine (order of ~1000)
checkSetMax(p.MaxAppProgramLen, &MaxStateDeltaKeys)
checkSetMax(p.MaxAppProgramLen, &MaxEvalDeltaAccounts)
checkSetMax(p.MaxAppProgramLen, &MaxAppProgramLen)
checkSetMax(int(p.LogicSigMaxSize), &MaxLogicSigMaxSize)
checkSetMax(p.MaxTxnNoteBytes, &MaxTxnNoteBytes)
checkSetMax(p.MaxTxGroupSize, &MaxTxGroupSize)
}
// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
func SaveConfigurableConsensus(dataDirectory string, params ConsensusProtocols) error {
consensusProtocolPath := filepath.Join(dataDirectory, ConfigurableConsensusProtocolsFilename)
encodedConsensusParams, err := json.Marshal(params)
if err != nil {
return err
}
err = ioutil.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
return err
}
// DeepCopy creates a deep copy of a consensus protocols map.
func (cp ConsensusProtocols) DeepCopy() ConsensusProtocols {
staticConsensus := make(ConsensusProtocols)
for consensusVersion, consensusParams := range cp {
// recreate the ApprovedUpgrades map since we don't want to modify the original one.
if consensusParams.ApprovedUpgrades != nil {
newApprovedUpgrades := make(map[protocol.ConsensusVersion]uint64)
for ver, when := range consensusParams.ApprovedUpgrades {
newApprovedUpgrades[ver] = when
}
consensusParams.ApprovedUpgrades = newApprovedUpgrades
}
staticConsensus[consensusVersion] = consensusParams
}
return staticConsensus
}
// Merge merges a configurable consensus on top of the existing consensus protocols and returns
// a new consensus protocol set without modifying any of the incoming structures.
func (cp ConsensusProtocols) Merge(configurableConsensus ConsensusProtocols) ConsensusProtocols {
staticConsensus := cp.DeepCopy()
for consensusVersion, consensusParams := range configurableConsensus {
if consensusParams.ApprovedUpgrades == nil {
// if we were provided with an empty ConsensusParams, delete the existing reference to this consensus version
for cVer, cParam := range staticConsensus {
if cVer == consensusVersion {
delete(staticConsensus, cVer)
} else if _, has := cParam.ApprovedUpgrades[consensusVersion]; has {
// delete upgrade to deleted version
delete(cParam.ApprovedUpgrades, consensusVersion)
}
}
} else {
// need to add/update entry
staticConsensus[consensusVersion] = consensusParams
}
}
return staticConsensus
}
// LoadConfigurableConsensusProtocols loads the configurable protocols from the data directory
func LoadConfigurableConsensusProtocols(dataDirectory string) error {
newConsensus, err := PreloadConfigurableConsensusProtocols(dataDirectory)
if err != nil {
return err
}
if newConsensus != nil {
Consensus = newConsensus
// Set allocation limits
for _, p := range Consensus {
checkSetAllocBounds(p)
}
}
return nil
}
// PreloadConfigurableConsensusProtocols loads the configurable protocols from the data directory
// and merges them with a copy of the Consensus map, returning the result to the caller.
func PreloadConfigurableConsensusProtocols(dataDirectory string) (ConsensusProtocols, error) {
consensusProtocolPath := filepath.Join(dataDirectory, ConfigurableConsensusProtocolsFilename)
file, err := os.Open(consensusProtocolPath)
if err != nil {
if os.IsNotExist(err) {
// this file is not required, only optional. if it's missing, no harm is done.
return Consensus, nil
}
return nil, err
}
defer file.Close()
configurableConsensus := make(ConsensusProtocols)
decoder := json.NewDecoder(file)
err = decoder.Decode(&configurableConsensus)
if err != nil {
return nil, err
}
return Consensus.Merge(configurableConsensus), nil
}
func initConsensusProtocols() {
// WARNING: copying a ConsensusParams by value into a new variable
// does not copy the ApprovedUpgrades map. Make sure that each new
// ConsensusParams structure gets a fresh ApprovedUpgrades map.
// Base consensus protocol version, v7.
v7 := ConsensusParams{
UpgradeVoteRounds: 10000,
UpgradeThreshold: 9000,
DefaultUpgradeWaitRounds: 10000,
MaxVersionStringLen: 64,
MinBalance: 10000,
MinTxnFee: 1000,
MaxTxnLife: 1000,
MaxTxnNoteBytes: 1024,
MaxTxnBytesPerBlock: 1000000,
DefaultKeyDilution: 10000,
MaxTimestampIncrement: 25,
RewardUnit: 1e6,
RewardsRateRefreshInterval: 5e5,
ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{},
NumProposers: 30,
SoftCommitteeSize: 2500,
SoftCommitteeThreshold: 1870,
CertCommitteeSize: 1000,
CertCommitteeThreshold: 720,
NextCommitteeSize: 10000,
NextCommitteeThreshold: 7750,
LateCommitteeSize: 10000,
LateCommitteeThreshold: 7750,
RedoCommitteeSize: 10000,
RedoCommitteeThreshold: 7750,
DownCommitteeSize: 10000,
DownCommitteeThreshold: 7750,
FilterTimeoutSmallLambdas: 2,
FilterTimeoutPeriod0SmallLambdas: 2,
FastRecoveryLambda: 5 * time.Minute,
SeedLookback: 2,
SeedRefreshInterval: 100,
MaxBalLookback: 320,
MaxTxGroupSize: 1,
}
v7.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV7] = v7
// v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis
v8 := v7
v8.SeedRefreshInterval = 80
v8.NumProposers = 9
v8.SoftCommitteeSize = 2990
v8.SoftCommitteeThreshold = 2267
v8.CertCommitteeSize = 1500
v8.CertCommitteeThreshold = 1112
v8.NextCommitteeSize = 5000
v8.NextCommitteeThreshold = 3838
v8.LateCommitteeSize = 5000
v8.LateCommitteeThreshold = 3838
v8.RedoCommitteeSize = 5000
v8.RedoCommitteeThreshold = 3838
v8.DownCommitteeSize = 5000
v8.DownCommitteeThreshold = 3838
v8.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV8] = v8
// v7 can be upgraded to v8.
v7.ApprovedUpgrades[protocol.ConsensusV8] = 0
// v9 increases the minimum balance to 100,000 microAlgos.
v9 := v8
v9.MinBalance = 100000
v9.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV9] = v9
// v8 can be upgraded to v9.
v8.ApprovedUpgrades[protocol.ConsensusV9] = 0
// v10 introduces fast partition recovery (and also raises NumProposers).
v10 := v9
v10.FastPartitionRecovery = true
v10.NumProposers = 20
v10.LateCommitteeSize = 500
v10.LateCommitteeThreshold = 320
v10.RedoCommitteeSize = 2400
v10.RedoCommitteeThreshold = 1768
v10.DownCommitteeSize = 6000
v10.DownCommitteeThreshold = 4560
v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV10] = v10
// v9 can be upgraded to v10.
v9.ApprovedUpgrades[protocol.ConsensusV10] = 0
// v11 introduces SignedTxnInBlock.
v11 := v10
v11.SupportSignedTxnInBlock = true
v11.PaysetCommitFlat = true
v11.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV11] = v11
// v10 can be upgraded to v11.
v10.ApprovedUpgrades[protocol.ConsensusV11] = 0
// v12 increases the maximum length of a version string.
v12 := v11
v12.MaxVersionStringLen = 128
v12.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV12] = v12
// v11 can be upgraded to v12.
v11.ApprovedUpgrades[protocol.ConsensusV12] = 0
// v13 makes the consensus version a meaningful string.
v13 := v12
v13.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV13] = v13
// v12 can be upgraded to v13.
v12.ApprovedUpgrades[protocol.ConsensusV13] = 0
// v14 introduces tracking of closing amounts in ApplyData, and enables
// GenesisHash in transactions.
v14 := v13
v14.ApplyData = true
v14.SupportGenesisHash = true
v14.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV14] = v14
// v13 can be upgraded to v14.
v13.ApprovedUpgrades[protocol.ConsensusV14] = 0
// v15 introduces tracking of reward distributions in ApplyData.
v15 := v14
v15.RewardsInApplyData = true
v15.ForceNonParticipatingFeeSink = true
v15.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV15] = v15
// v14 can be upgraded to v15.
v14.ApprovedUpgrades[protocol.ConsensusV15] = 0
// v16 fixes domain separation in credentials.
v16 := v15
v16.CredentialDomainSeparationEnabled = true
v16.RequireGenesisHash = true
v16.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV16] = v16
// v15 can be upgraded to v16.
v15.ApprovedUpgrades[protocol.ConsensusV16] = 0
// ConsensusV17 points to 'final' spec commit
v17 := v16
v17.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV17] = v17
// v16 can be upgraded to v17.
v16.ApprovedUpgrades[protocol.ConsensusV17] = 0
// ConsensusV18 points to reward calculation spec commit
v18 := v17
v18.PendingResidueRewards = true
v18.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
v18.TxnCounter = true
v18.Asset = true
v18.LogicSigVersion = 1
v18.LogicSigMaxSize = 1000
v18.LogicSigMaxCost = 20000
v18.MaxAssetsPerAccount = 1000
v18.SupportTxGroups = true
v18.MaxTxGroupSize = 16
v18.SupportTransactionLeases = true
v18.SupportBecomeNonParticipatingTransactions = true
v18.MaxAssetNameBytes = 32
v18.MaxAssetUnitNameBytes = 8
v18.MaxAssetURLBytes = 32
Consensus[protocol.ConsensusV18] = v18
// ConsensusV19 is the official spec commit (teal, assets, group tx)
v19 := v18
v19.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV19] = v19
// v18 can be upgraded to v19.
v18.ApprovedUpgrades[protocol.ConsensusV19] = 0
// v17 can be upgraded to v19.
v17.ApprovedUpgrades[protocol.ConsensusV19] = 0
// v20 points to adding the precision to the assets.
v20 := v19
v20.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
v20.MaxAssetDecimals = 19
// we want to adjust the upgrade time to be roughly one week.
// one week, in terms of rounds, would be:
// 140651 = (7 * 24 * 60 * 60 / 4.3)
// for the sake of future manual calculations, we'll round that down
// a bit:
v20.DefaultUpgradeWaitRounds = 140000
Consensus[protocol.ConsensusV20] = v20
// v19 can be upgraded to v20.
v19.ApprovedUpgrades[protocol.ConsensusV20] = 0
// v21 fixes a bug in Credential.lowestOutput that would cause larger accounts to be selected to propose disproportionately more often than small accounts
v21 := v20
v21.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
Consensus[protocol.ConsensusV21] = v21
// v20 can be upgraded to v21.
v20.ApprovedUpgrades[protocol.ConsensusV21] = 0
// v22 is an upgrade which allows tuning the number of rounds to wait to execute upgrades.
v22 := v21
v22.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
v22.MinUpgradeWaitRounds = 10000
v22.MaxUpgradeWaitRounds = 150000
Consensus[protocol.ConsensusV22] = v22
// v23 is an upgrade which fixes the behavior of leases so that
// it conforms with the intended spec.
v23 := v22
v23.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
v23.FixTransactionLeases = true
Consensus[protocol.ConsensusV23] = v23
// v22 can be upgraded to v23.
v22.ApprovedUpgrades[protocol.ConsensusV23] = 10000
// v21 can be upgraded to v23.
v21.ApprovedUpgrades[protocol.ConsensusV23] = 0
// v24 is the stateful teal and rekeying upgrade
v24 := v23
v24.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
v24.LogicSigVersion = 2
// Enable application support
v24.Application = true
// Enable rekeying
v24.SupportRekeying = true
// 100.1 Algos (MinBalance for creating 1,000 assets)
v24.MaximumMinimumBalance = 100100000
v24.MaxAppArgs = 16
v24.MaxAppTotalArgLen = 2048
v24.MaxAppProgramLen = 1024
v24.MaxAppKeyLen = 64
v24.MaxAppBytesValueLen = 64
// 0.1 Algos (Same min balance cost as an Asset)
v24.AppFlatParamsMinBalance = 100000
v24.AppFlatOptInMinBalance = 100000
// Can look up Sender + 4 other balance records per Application txn
v24.MaxAppTxnAccounts = 4
// Can look up 2 other app creator balance records to see global state
v24.MaxAppTxnForeignApps = 2
// Can look up 2 assets to see asset parameters
v24.MaxAppTxnForeignAssets = 2
// 64 byte keys @ ~333 microAlgos/byte + delta
v24.SchemaMinBalancePerEntry = 25000
// 9 bytes @ ~333 microAlgos/byte + delta
v24.SchemaUintMinBalance = 3500
// 64 byte values @ ~333 microAlgos/byte + delta
v24.SchemaBytesMinBalance = 25000
// Maximum number of key/value pairs per local key/value store
v24.MaxLocalSchemaEntries = 16
// Maximum number of key/value pairs per global key/value store
v24.MaxGlobalSchemaEntries = 64
// Maximum cost of ApprovalProgram/ClearStateProgram
v24.MaxAppProgramCost = 700
// Maximum number of apps a single account can create
v24.MaxAppsCreated = 10
// Maximum number of apps a single account can opt into
v24.MaxAppsOptedIn = 10
Consensus[protocol.ConsensusV24] = v24
// v23 can be upgraded to v24, with an update delay of 7 days (see calculation above)
v23.ApprovedUpgrades[protocol.ConsensusV24] = 140000
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
vFuture := v24
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
// FilterTimeout 2s instead of 4s for period 0
vFuture.FilterTimeoutPeriod0SmallLambdas = 1
Consensus[protocol.ConsensusFuture] = vFuture
}
// Global defines global Algorand protocol parameters which should not be overriden.
type Global struct {
SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential)
BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block)
}
// Protocol holds the global configuration settings for the agreement protocol,
// initialized with our current defaults. This is used across all nodes we create.
var Protocol = Global{
SmallLambda: 2000 * time.Millisecond,
BigLambda: 15000 * time.Millisecond,
}
func init() {
Consensus = make(ConsensusProtocols)
initConsensusProtocols()
// Allow tuning SmallLambda for faster consensus in single-machine e2e
// tests. Useful for development. This might make sense to fold into
// a protocol-version-specific setting, once we move SmallLambda into
// ConsensusParams.
algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64)
if err == nil {
Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond
}
// Set allocation limits
for _, p := range Consensus {
checkSetAllocBounds(p)
}
}
| 1 | 40,221 | Add explanations around these variables - what do they mean, how they should be configured, etc. | algorand-go-algorand | go |
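For the rename in the patch above (FilterTimeoutSmallLambdas → AgreementFilterTimeout), the kind of field documentation the reviewer asks for might look like the Go sketch below. The wording is inferred from the patch and from the vFuture comment in this file ("FilterTimeout 2s instead of 4s for period 0"), not quoted from the final go-algorand source.

package config

import "time"

// ConsensusTimeouts sketches the documentation the reviewer asks for;
// the field semantics below are inferred from the patch, not quoted
// from go-algorand itself.
type ConsensusTimeouts struct {
	// AgreementFilterTimeout is how long the agreement protocol waits
	// in the filter step before timing out, for periods > 0. It
	// replaces FilterTimeoutSmallLambdas, which expressed the same
	// wait as a multiple of the SmallLambda propagation parameter.
	AgreementFilterTimeout time.Duration

	// AgreementFilterTimeoutPeriod0 is the filter step timeout used in
	// period 0, the common case. Tuning it separately lets period 0 be
	// made faster (the vFuture change above halves it from 4s to 2s)
	// without touching the timeouts used by recovery periods.
	AgreementFilterTimeoutPeriod0 time.Duration
}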
@@ -106,6 +106,11 @@ class SiteController < ApplicationController
@locale = params[:about_locale] || I18n.locale
end
+ def communities
+ OsmCommunityIndex::LocalChapter.add_to_i18n # this should be called on app init
+ @local_chapters = OsmCommunityIndex::LocalChapter.local_chapters
+ end
+
def export; end
def offline; end | 1 | class SiteController < ApplicationController
layout "site"
layout :map_layout, :only => [:index, :export]
before_action :authorize_web
before_action :set_locale
before_action :redirect_browse_params, :only => :index
before_action :redirect_map_params, :only => [:index, :edit, :export]
before_action :require_oauth, :only => [:index]
before_action :update_totp, :only => [:index]
authorize_resource :class => false
def index
session[:location] ||= OSM.ip_location(request.env["REMOTE_ADDR"]) unless Settings.status == "database_readonly" || Settings.status == "database_offline"
end
def permalink
lon, lat, zoom = ShortLink.decode(params[:code])
new_params = params.except(:host, :controller, :action, :code, :lon, :lat, :zoom, :layers, :node, :way, :relation, :changeset)
if new_params.key? :m
new_params.delete :m
new_params[:mlat] = lat
new_params[:mlon] = lon
end
new_params[:anchor] = "map=#{zoom}/#{lat}/#{lon}"
new_params[:anchor] += "&layers=#{params[:layers]}" if params.key? :layers
options = new_params.to_unsafe_h.to_options
path = if params.key? :node
node_path(params[:node], options)
elsif params.key? :way
way_path(params[:way], options)
elsif params.key? :relation
relation_path(params[:relation], options)
elsif params.key? :changeset
changeset_path(params[:changeset], options)
else
root_url(options)
end
redirect_to path
end
def key
expires_in 7.days, :public => true
render :layout => false
end
def edit
editor = preferred_editor
if editor == "remote"
require_oauth
render :action => :index, :layout => map_layout
return
else
require_user
end
if %w[id].include?(editor)
append_content_security_policy_directives(
:frame_src => %w[blob:]
)
end
begin
if params[:node]
bbox = Node.visible.find(params[:node]).bbox.to_unscaled
@lat = bbox.centre_lat
@lon = bbox.centre_lon
@zoom = 18
elsif params[:way]
bbox = Way.visible.find(params[:way]).bbox.to_unscaled
@lat = bbox.centre_lat
@lon = bbox.centre_lon
@zoom = 17
elsif params[:note]
note = Note.visible.find(params[:note])
@lat = note.lat
@lon = note.lon
@zoom = 17
elsif params[:gpx] && current_user
trace = Trace.visible_to(current_user).find(params[:gpx])
@lat = trace.latitude
@lon = trace.longitude
@zoom = 16
end
rescue ActiveRecord::RecordNotFound
# don't try and derive a location from a missing/deleted object
end
end
def copyright
@locale = params[:copyright_locale] || I18n.locale
end
def welcome; end
def help; end
def about
@locale = params[:about_locale] || I18n.locale
end
def export; end
def offline; end
def preview
render :html => RichText.new(params[:type], params[:text]).to_html
end
def id
append_content_security_policy_directives(
:connect_src => %w[*],
:img_src => %w[* blob:],
:script_src => %w[dev.virtualearth.net 'unsafe-eval'],
:style_src => %w['unsafe-inline']
)
render :layout => false
end
private
def redirect_browse_params
if params[:node]
redirect_to node_path(params[:node])
elsif params[:way]
redirect_to way_path(params[:way])
elsif params[:relation]
redirect_to relation_path(params[:relation])
elsif params[:note]
redirect_to browse_note_path(params[:note])
elsif params[:query]
redirect_to search_path(:query => params[:query])
end
end
def redirect_map_params
anchor = []
anchor << "map=#{params.delete(:zoom) || 5}/#{params.delete(:lat)}/#{params.delete(:lon)}" if params[:lat] && params[:lon]
if params[:layers]
anchor << "layers=#{params.delete(:layers)}"
elsif params.delete(:notes) == "yes"
anchor << "layers=N"
end
redirect_to params.to_unsafe_h.merge(:only_path => true, :anchor => anchor.join("&")) if anchor.present?
end
end
| 1 | 13,558 | One thing though - I would really appreciate any advice on where to move this so that it's called on initialisation of the website. | openstreetmap-openstreetmap-website | rb
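A common answer to the question above is a Rails initializer: code under config/initializers runs once at boot, and wrapping the call in to_prepare makes it re-run after each code reload in development. A hypothetical sketch (the file name is assumed, not taken from the actual follow-up):

# config/initializers/osm_community_index.rb
Rails.application.config.to_prepare do
  # Runs once at boot and again after each code reload in development,
  # so the I18n additions survive reloading.
  OsmCommunityIndex::LocalChapter.add_to_i18n
end

The communities action could then drop the add_to_i18n call and keep only the @local_chapters assignment.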
@@ -321,4 +321,16 @@ public interface GauntletConfig extends Config
{
return false;
}
+
+ @ConfigItem(
+ position = 21,
+ keyName = "displayResources",
+ name = "Show raw resources gathered",
+ description = "Displays how much of each resource you have gathered.",
+ titleSection = "resources"
+ )
+ default boolean displayGatheredResources()
+ {
+ return false;
+ }
} | 1 | /*
* Copyright (c) 2019, kThisIsCvpv <https://github.com/kThisIsCvpv>
* Copyright (c) 2019, ganom <https://github.com/Ganom>
* Copyright (c) 2019, kyle <https://github.com/Kyleeld>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.gauntlet;
import java.awt.Color;
import lombok.AllArgsConstructor;
import lombok.Getter;
import net.runelite.client.config.Config;
import net.runelite.client.config.ConfigGroup;
import net.runelite.client.config.ConfigItem;
import net.runelite.client.config.ConfigTitleSection;
import net.runelite.client.config.Range;
import net.runelite.client.config.Title;
@ConfigGroup("Gauntlet")
public interface GauntletConfig extends Config
{
@Getter
@AllArgsConstructor
enum CounterDisplay
{
ONBOSS("On Boss"),
INFOBOX("Info Box"),
BOTH("Both"),
NONE("None");
private String name;
@Override
public String toString()
{
return getName();
}
}
@ConfigTitleSection(
keyName = "resources",
position = 0,
name = "Resources",
description = ""
)
default Title resources()
{
return new Title();
}
@ConfigItem(
position = 1,
keyName = "highlightResources",
name = "Highlight Resources (Outline)",
description = "Highlights all the resources in each room with an outline.",
titleSection = "resources"
)
default boolean highlightResources()
{
return false;
}
@ConfigItem(
position = 2,
keyName = "highlightResourcesColor",
name = "Highlight Color",
description = "Highlights all the resources in each room with this color.",
titleSection = "resources",
hidden = true,
unhide = "highlightResources"
)
default Color highlightResourcesColor()
{
return Color.YELLOW;
}
@ConfigItem(
position = 3,
keyName = "highlightResourcesIcons",
name = "Highlight Resources (Icon)",
description = "Highlights all the icons in each room with an icon.",
titleSection = "resources",
hidden = true,
unhide = "highlightResources"
)
default boolean highlightResourcesIcons()
{
return false;
}
@Range(
min = 1,
max = 50
)
@ConfigItem(
position = 4,
keyName = "resourceIconSize",
name = "Resource Icon Size",
description = " change the size of resource icons.",
hidden = true,
unhide = "highlightResources",
titleSection = "resources"
)
default int resourceIconSize()
{
return 20;
}
@ConfigTitleSection(
keyName = "boss",
position = 5,
name = "Boss",
description = ""
)
default Title boss()
{
return new Title();
}
@ConfigItem(
position = 6,
keyName = "countAttacks",
name = "Count Attacks Display",
description = "Count the attacks until the Hunllef switches their attack style and prayer.",
titleSection = "boss"
)
default CounterDisplay countAttacks()
{
return CounterDisplay.NONE;
}
@ConfigItem(
position = 7,
keyName = "highlightWidget",
name = "Highlight Prayer (Prayer Tab)",
description = "Highlights the correct prayer to use in your prayer book.",
titleSection = "boss"
)
default boolean highlightWidget()
{
return false;
}
@ConfigItem(
position = 8,
keyName = "highlightPrayerInfobox",
name = "Highlight Prayer (InfoBox)",
description = "Highlights the correct prayer to use in an Infobox.",
titleSection = "boss"
)
default boolean highlightPrayerInfobox()
{
return false;
}
@ConfigItem(
position = 9,
keyName = "flashOnWrongAttack",
name = "Flash screen on Wrong Attack",
description = "This will flash your screen if you attack with the wrong stlye.",
titleSection = "boss"
)
default boolean flashOnWrongAttack()
{
return false;
}
@ConfigItem(
position = 10,
keyName = "uniquePrayerAudio",
name = "Prayer Audio Warning",
description = "Plays a unique sound whenever the boss is about to shut down your prayer.",
titleSection = "boss"
)
default boolean uniquePrayerAudio()
{
return false;
}
@ConfigItem(
position = 11,
keyName = "uniquePrayerVisual",
name = "Prayer Attack (Icon)",
description = "Prayer attacks will have a unique overlay visual.",
titleSection = "boss"
)
default boolean uniquePrayerVisual()
{
return false;
}
@ConfigItem(
position = 12,
keyName = "uniqueAttackVisual",
name = "Magic & Range Attack (Icon)",
description = "Magic and Range attacks will have a unique overlay visual.",
titleSection = "boss"
)
default boolean uniqueAttackVisual()
{
return false;
}
@ConfigItem(
position = 13,
keyName = "attackVisualOutline",
name = "Hunllefs' attacks (Outline)",
description = "Outline the Hunllefs' attacks.",
titleSection = "boss"
)
default boolean attackVisualOutline()
{
return false;
}
@ConfigItem(
position = 14,
keyName = "overlayBoss",
name = "Outline Hunllef (Color)",
description = "Overlay Hunllef while you are on the wrong prayer with an color denoting it's current attack style.",
titleSection = "boss"
)
default boolean overlayBoss()
{
return false;
}
@ConfigItem(
position = 15,
keyName = "overlayBossPrayer",
name = "Hunllef Overlay (Icons)",
description = "Overlay the Hunllef with an icon denoting it's current attack style.",
titleSection = "boss"
)
default boolean overlayBossPrayer()
{
return false;
}
@ConfigItem(
position = 16,
keyName = "overlayTornadoes",
name = "Show Tornado Decay",
description = "Display the amount of ticks left until the tornadoes decay.",
titleSection = "boss"
)
default boolean overlayTornadoes()
{
return false;
}
@Range(
min = 1,
max = 50
)
@ConfigItem(
position = 17,
keyName = "projectileIconSize",
name = "Hunllef Projectile Icon Size",
description = " change the size of Projectile icons.",
titleSection = "boss"
)
default int projectileIconSize()
{
return 20;
}
@ConfigTitleSection(
keyName = "timer",
position = 18,
name = "Timer",
description = ""
)
default Title timer()
{
return new Title();
}
@ConfigItem(
position = 19,
keyName = "displayTimerWidget",
name = "Show Gauntlet timer overlay",
description = "Display a timer widget that tracks your gauntlet progress.",
titleSection = "timer"
)
default boolean displayTimerWidget()
{
return false;
}
@ConfigItem(
position = 20,
keyName = "displayTimerChat",
name = "Show Gauntlet timer chat message",
description = "Display a chat message that tracks your gauntlet progress.",
titleSection = "timer"
)
default boolean displayTimerChat()
{
return false;
}
}
| 1 | 15,901 | Too many tabs here (1 tab) | open-osrs-runelite | java
@@ -135,12 +135,12 @@ public class ExecutionLogsDao {
}
}
- int removeExecutionLogsByTime(final long millis)
+ int removeExecutionLogsByTime(final long millis, final int recordCleanupLimit)
throws ExecutorManagerException {
final String DELETE_BY_TIME =
- "DELETE FROM execution_logs WHERE upload_time < ?";
+ "DELETE FROM execution_logs WHERE upload_time < ? LIMIT ?";
try {
- return this.dbOperator.update(DELETE_BY_TIME, millis);
+ return this.dbOperator.update(DELETE_BY_TIME, millis, recordCleanupLimit);
} catch (final SQLException e) {
logger.error("delete execution logs failed", e);
throw new ExecutorManagerException( | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import azkaban.db.EncodingType;
import azkaban.db.DatabaseOperator;
import azkaban.db.DatabaseTransOperator;
import azkaban.db.SQLTransaction;
import azkaban.utils.FileIOUtils;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.GZIPUtils;
import azkaban.utils.Pair;
import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Arrays;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
@Singleton
public class ExecutionLogsDao {
private static final Logger logger = Logger.getLogger(ExecutionLogsDao.class);
private final DatabaseOperator dbOperator;
private final EncodingType defaultEncodingType = EncodingType.GZIP;
@Inject
ExecutionLogsDao(final DatabaseOperator dbOperator) {
this.dbOperator = dbOperator;
}
// TODO kunkun-tang: the interface's parameter is called endByte, but it is actually a length.
LogData fetchLogs(final int execId, final String name, final int attempt,
final int startByte,
final int length) throws ExecutorManagerException {
final FetchLogsHandler handler = new FetchLogsHandler(startByte, length + startByte);
try {
return this.dbOperator.query(FetchLogsHandler.FETCH_LOGS, handler,
execId, name, attempt, startByte, startByte + length);
} catch (final SQLException e) {
throw new ExecutorManagerException("Error fetching logs " + execId
+ " : " + name, e);
}
}
public void uploadLogFile(final int execId, final String name, final int attempt,
final File... files) throws ExecutorManagerException {
final SQLTransaction<Integer> transaction = transOperator -> {
uploadLogFile(transOperator, execId, name, attempt, files, this.defaultEncodingType);
transOperator.getConnection().commit();
return 1;
};
try {
this.dbOperator.transaction(transaction);
} catch (final SQLException e) {
logger.error("uploadLogFile failed.", e);
throw new ExecutorManagerException("uploadLogFile failed.", e);
}
}
private void uploadLogFile(final DatabaseTransOperator transOperator, final int execId,
final String name,
final int attempt, final File[] files, final EncodingType encType)
throws SQLException {
// 50K buffer... if logs are greater than this, we chunk.
// However, we better prevent large log files from being uploaded somehow
final byte[] buffer = new byte[50 * 1024];
int pos = 0;
int length = buffer.length;
int startByte = 0;
try {
for (int i = 0; i < files.length; ++i) {
final File file = files[i];
final BufferedInputStream bufferedStream =
new BufferedInputStream(new FileInputStream(file));
try {
int size = bufferedStream.read(buffer, pos, length);
while (size >= 0) {
if (pos + size == buffer.length) {
// Flush here.
uploadLogPart(transOperator, execId, name, attempt, startByte,
startByte + buffer.length, encType, buffer, buffer.length);
pos = 0;
length = buffer.length;
startByte += buffer.length;
} else {
// Usually end of file.
pos += size;
length = buffer.length - pos;
}
size = bufferedStream.read(buffer, pos, length);
}
} finally {
IOUtils.closeQuietly(bufferedStream);
}
}
// Final commit of buffer.
if (pos > 0) {
uploadLogPart(transOperator, execId, name, attempt, startByte, startByte
+ pos, encType, buffer, pos);
}
} catch (final SQLException e) {
logger.error("Error writing log part.", e);
throw new SQLException("Error writing log part", e);
} catch (final IOException e) {
logger.error("Error chunking.", e);
throw new SQLException("Error chunking", e);
}
}
int removeExecutionLogsByTime(final long millis)
throws ExecutorManagerException {
final String DELETE_BY_TIME =
"DELETE FROM execution_logs WHERE upload_time < ?";
try {
return this.dbOperator.update(DELETE_BY_TIME, millis);
} catch (final SQLException e) {
logger.error("delete execution logs failed", e);
throw new ExecutorManagerException(
"Error deleting old execution_logs before " + millis, e);
}
}
private void uploadLogPart(final DatabaseTransOperator transOperator, final int execId,
final String name,
final int attempt, final int startByte, final int endByte,
final EncodingType encType,
final byte[] buffer, final int length)
throws SQLException, IOException {
final String INSERT_EXECUTION_LOGS = "INSERT INTO execution_logs "
+ "(exec_id, name, attempt, enc_type, start_byte, end_byte, "
+ "log, upload_time) VALUES (?,?,?,?,?,?,?,?)";
byte[] buf = buffer;
if (encType == EncodingType.GZIP) {
buf = GZIPUtils.gzipBytes(buf, 0, length);
} else if (length < buf.length) {
buf = Arrays.copyOf(buffer, length);
}
transOperator.update(INSERT_EXECUTION_LOGS, execId, name, attempt,
encType.getNumVal(), startByte, startByte + length, buf, DateTime.now()
.getMillis());
}
private static class FetchLogsHandler implements ResultSetHandler<LogData> {
private static final String FETCH_LOGS =
"SELECT exec_id, name, attempt, enc_type, start_byte, end_byte, log "
+ "FROM execution_logs "
+ "WHERE exec_id=? AND name=? AND attempt=? AND end_byte > ? "
+ "AND start_byte <= ? ORDER BY start_byte";
private final int startByte;
private final int endByte;
FetchLogsHandler(final int startByte, final int endByte) {
this.startByte = startByte;
this.endByte = endByte;
}
@Override
public LogData handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return null;
}
final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
do {
// int execId = rs.getInt(1);
// String name = rs.getString(2);
final int attempt = rs.getInt(3);
final EncodingType encType = EncodingType.fromInteger(rs.getInt(4));
final int startByte = rs.getInt(5);
final int endByte = rs.getInt(6);
final byte[] data = rs.getBytes(7);
final int offset =
this.startByte > startByte ? this.startByte - startByte : 0;
final int length =
this.endByte < endByte ? this.endByte - startByte - offset
: endByte - startByte - offset;
try {
byte[] buffer = data;
if (encType == EncodingType.GZIP) {
buffer = GZIPUtils.unGzipBytes(data);
}
byteStream.write(buffer, offset, length);
} catch (final IOException e) {
throw new SQLException(e);
}
} while (rs.next());
final byte[] buffer = byteStream.toByteArray();
final Pair<Integer, Integer> result =
FileIOUtils.getUtf8Range(buffer, 0, buffer.length);
return new LogData(this.startByte + result.getFirst(), result.getSecond(),
new String(buffer, result.getFirst(), result.getSecond(), StandardCharsets.UTF_8));
}
}
}
| 1 | 19,370 | This does not maintain the retention time for logs: since we delete only 1000 entries per run, we could end up with a huge number of rows spanning multiple months if the cluster generates rows faster than the roughly 24k rows/day we allow ourselves to delete. As I pointed out earlier, a better approach would probably be to run this cleanup every hour with some batch_size, looping while there are records to delete (i.e. older than 12 weeks) and committing each batch. | azkaban-azkaban | java
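The batched cleanup the reviewer describes - delete expired rows in fixed-size batches, committing each batch, until none remain - could look roughly like the Java sketch below. The method name cleanupExecutionLogs and its surroundings are hypothetical; only removeExecutionLogsByTime(millis, limit) comes from the patch above.

// Sketch: honor the retention cutoff regardless of backlog size by
// looping over bounded DELETE ... LIMIT batches; each call commits
// independently, keeping transactions and lock times small.
int cleanupExecutionLogs(final ExecutionLogsDao dao, final long cutoffMillis,
    final int batchSize) throws ExecutorManagerException {
  int totalDeleted = 0;
  int deleted;
  do {
    deleted = dao.removeExecutionLogsByTime(cutoffMillis, batchSize);
    totalDeleted += deleted;
  } while (deleted == batchSize); // a short batch means the backlog is cleared
  return totalDeleted;
}

Scheduled hourly, this converges on the 12-week retention target even when more than batchSize rows expire between runs.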
@@ -1319,10 +1319,10 @@ func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_ContinueAsNew_Re
// need to override the workflow retry policy
executionInfo := mutableState.executionInfo
executionInfo.HasRetryPolicy = true
- executionInfo.WorkflowExpirationTime = s.now.Add(1000 * time.Second)
+ executionInfo.WorkflowExpirationTime = timestamp.TimeNowPtrUtcAddSeconds(1000)
executionInfo.MaximumAttempts = 10
- executionInfo.InitialInterval = 1
- executionInfo.MaximumInterval = 1
+ executionInfo.InitialInterval = timestamp.DurationFromSeconds(1)
+ executionInfo.MaximumInterval = timestamp.DurationFromSeconds(1)
executionInfo.BackoffCoefficient = 1
di := addWorkflowTaskScheduledEvent(mutableState) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/pborman/uuid"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/uber-go/tally"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
taskqueuepb "go.temporal.io/api/taskqueue/v1"
"go.temporal.io/api/workflowservice/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/api/matchingservice/v1"
"go.temporal.io/server/api/matchingservicemock/v1"
"go.temporal.io/server/api/persistenceblobs/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/mocks"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
)
type (
timerQueueActiveTaskExecutorSuite struct {
suite.Suite
*require.Assertions
controller *gomock.Controller
mockShard *shardContextTest
mockTxProcessor *MocktransferQueueProcessor
mockReplicationProcessor *MockReplicatorQueueProcessor
mockTimerProcessor *MocktimerQueueProcessor
mockNamespaceCache *cache.MockNamespaceCache
mockMatchingClient *matchingservicemock.MockMatchingServiceClient
mockClusterMetadata *cluster.MockMetadata
mockHistoryEngine *historyEngineImpl
mockExecutionMgr *mocks.ExecutionManager
mockHistoryV2Mgr *mocks.HistoryV2Manager
logger log.Logger
namespaceID string
namespaceEntry *cache.NamespaceCacheEntry
version int64
now time.Time
timeSource *clock.EventTimeSource
timerQueueActiveTaskExecutor *timerQueueActiveTaskExecutor
}
)
func TestTimerQueueActiveTaskExecutorSuite(t *testing.T) {
s := new(timerQueueActiveTaskExecutorSuite)
suite.Run(t, s)
}
func (s *timerQueueActiveTaskExecutorSuite) SetupSuite() {
}
func (s *timerQueueActiveTaskExecutorSuite) SetupTest() {
s.Assertions = require.New(s.T())
s.namespaceID = testNamespaceID
s.namespaceEntry = testGlobalNamespaceEntry
s.version = s.namespaceEntry.GetFailoverVersion()
s.now = time.Now().UTC()
s.timeSource = clock.NewEventTimeSource().Update(s.now)
s.controller = gomock.NewController(s.T())
s.mockTxProcessor = NewMocktransferQueueProcessor(s.controller)
s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.controller)
s.mockTimerProcessor = NewMocktimerQueueProcessor(s.controller)
s.mockTxProcessor.EXPECT().NotifyNewTask(gomock.Any(), gomock.Any()).AnyTimes()
s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes()
s.mockTimerProcessor.EXPECT().NotifyNewTimers(gomock.Any(), gomock.Any()).AnyTimes()
config := NewDynamicConfigForTest()
s.mockShard = newTestShardContext(
s.controller,
&persistence.ShardInfoWithFailover{
ShardInfo: &persistenceblobs.ShardInfo{
ShardId: 1,
RangeId: 1,
TransferAckLevel: 0,
}},
config,
)
s.mockShard.eventsCache = newEventsCache(s.mockShard)
s.mockShard.resource.TimeSource = s.timeSource
s.mockNamespaceCache = s.mockShard.resource.NamespaceCache
s.mockMatchingClient = s.mockShard.resource.MatchingClient
s.mockExecutionMgr = s.mockShard.resource.ExecutionMgr
s.mockHistoryV2Mgr = s.mockShard.resource.HistoryMgr
s.mockClusterMetadata = s.mockShard.resource.ClusterMetadata
// ack manager will use the namespace information
s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testGlobalNamespaceEntry, nil).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes()
s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes()
s.logger = s.mockShard.GetLogger()
historyCache := newHistoryCache(s.mockShard)
h := &historyEngineImpl{
currentClusterName: s.mockShard.GetService().GetClusterMetadata().GetCurrentClusterName(),
shard: s.mockShard,
clusterMetadata: s.mockClusterMetadata,
executionManager: s.mockExecutionMgr,
historyCache: historyCache,
logger: s.logger,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
metricsClient: s.mockShard.GetMetricsClient(),
historyEventNotifier: newHistoryEventNotifier(clock.NewRealTimeSource(), metrics.NewClient(tally.NoopScope, metrics.History), func(string, string) int { return 1 }),
txProcessor: s.mockTxProcessor,
replicatorProcessor: s.mockReplicationProcessor,
timerProcessor: s.mockTimerProcessor,
}
s.mockShard.SetEngine(h)
s.mockHistoryEngine = h
s.timerQueueActiveTaskExecutor = newTimerQueueActiveTaskExecutor(
s.mockShard,
h,
newTimerQueueActiveProcessor(
s.mockShard,
h,
s.mockMatchingClient,
newTaskAllocator(s.mockShard),
nil,
s.logger,
),
s.logger,
s.mockShard.GetMetricsClient(),
config,
).(*timerQueueActiveTaskExecutor)
}
func (s *timerQueueActiveTaskExecutorSuite) TearDownTest() {
s.controller.Finish()
s.mockShard.Finish(s.T())
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(
s.mockShard,
s.mockShard.GetEventsCache(),
s.logger,
s.version,
execution.GetRunId(),
)
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowRunTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
timerID := "timer"
timerTimeout := 2 * time.Second
event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextUserTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.UserTimerTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_USER_TIMER,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: event.EventId,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
_, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetUserTimerInfo(timerID)
s.False(ok)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessUserTimerTimeout_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(
s.mockShard,
s.mockShard.GetEventsCache(),
s.logger,
s.version,
execution.GetRunId(),
)
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
timerID := "timer"
timerTimeout := 2 * time.Second
event, _ = addTimerStartedEvent(mutableState, event.GetEventId(), timerID, timerTimeout)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextUserTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.UserTimerTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_USER_TIMER,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: event.EventId,
}
event = addTimerFiredEvent(mutableState, timerID)
// Flush buffered events so real IDs get assigned
mutableState.FlushBufferedEvents()
persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, _ := addActivityTaskScheduledEvent(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: di.ScheduleID,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
_, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId())
s.False(ok)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_NoRetryPolicy_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
identity := "identity"
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, _ := addActivityTaskScheduledEvent(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
)
startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: di.ScheduleID,
}
completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), startedEvent.GetEventId(), nil, identity)
// Flush buffered events so real IDs get assigned
mutableState.FlushBufferedEvents()
persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Retry() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowRunTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowExecutionTimeout: timestamp.DurationPtr(999 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
identity := "identity"
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, _ := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
999*time.Second,
timerTimeout,
timerTimeout,
timerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity)
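// With a retry policy the activity's started event is transient (written only on
// completion), so the helper returns no event here.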
s.Nil(startedEvent)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: di.ScheduleID,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
activityInfo, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId())
s.True(ok)
s.Equal(scheduledEvent.GetEventId(), activityInfo.ScheduleId)
s.Equal(common.EmptyEventID, activityInfo.StartedId)
// apart from the retry timer, only a schedule-to-start timer will be created
s.Equal(int32(timerTaskStatusCreatedScheduleToStart), activityInfo.TimerTaskStatus)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, _ := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: di.ScheduleID,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
_, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetActivityInfo(scheduledEvent.GetEventId())
s.False(ok)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_RetryPolicy_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
identity := "identity"
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, _ := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity)
s.Nil(startedEvent)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp()
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: di.ScheduleID,
}
completeEvent := addActivityTaskCompletedEvent(mutableState, scheduledEvent.GetEventId(), common.TransientEventID, nil, identity)
// Flush buffered events so real IDs get assigned
mutableState.FlushBufferedEvents()
persistenceMutableState := s.createPersistenceMutableState(mutableState, completeEvent.GetEventId(), completeEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.timeSource.Update(s.now.Add(2 * timerTimeout))
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
func (s *timerQueueActiveTaskExecutorSuite) TestProcessActivityTimeout_Heartbeat_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
identity := "identity"
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
heartbeatTimerTimeout := time.Second
scheduledEvent, _ := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
heartbeatTimerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity)
s.Nil(startedEvent)
timerSequence := newTimerSequence(s.timeSource, mutableState)
mutableState.insertTimerTasks = nil
modified, err := timerSequence.createNextActivityTimer()
s.NoError(err)
s.True(modified)
task := mutableState.insertTimerTasks[0]
s.Equal(enumspb.TIMEOUT_TYPE_HEARTBEAT, task.(*persistence.ActivityTimeoutTask).TimeoutType)
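// Back-date the task by one second so it predates the real heartbeat timer and
// the executor treats it as stale.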
protoTaskTime := task.(*persistence.ActivityTimeoutTask).GetVisibilityTimestamp().Add(-time.Second)
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT,
VisibilityTime: &protoTaskTime,
EventId: scheduledEvent.GetEventId(),
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
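// Workflow task timeout tests: firing a start-to-close timeout fails the in-flight
// workflow task and reschedules it with a bumped attempt count; a task that
// references an older event ID is stale and dropped.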
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
startedEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
protoTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_TASK_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTime,
EventId: di.ScheduleID,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once()
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
workflowTask, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask()
s.True(ok)
s.True(workflowTask.ScheduleID != common.EmptyEventID)
s.Equal(common.EmptyEventID, workflowTask.StartedID)
s.Equal(int32(2), workflowTask.Attempt)
}
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTaskTimeout_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
startedEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
protoTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_TASK_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTime,
EventId: di.ScheduleID - 1,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, startedEvent.GetEventId(), startedEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
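// Workflow backoff timer tests: firing the backoff timer schedules the first
// workflow task of the retried or cron run; once a workflow task has already been
// scheduled and completed, the backoff task is stale and dropped.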
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
event, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER,
WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY,
VisibilityTime: &protoTaskTime,
EventId: 0,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once()
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
workflowTask, ok := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetPendingWorkflowTask()
s.True(ok)
s.True(workflowTask.ScheduleID != common.EmptyEventID)
s.Equal(common.EmptyEventID, workflowTask.StartedID)
s.Equal(int32(1), workflowTask.Attempt)
}
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowBackoffTimer_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER,
WorkflowBackoffType: enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY,
VisibilityTime: &protoTaskTime,
EventId: 0,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
func (s *timerQueueActiveTaskExecutorSuite) TestActivityRetryTimer_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{
Name: taskQueueName,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
activityInfo.Attempt = 1
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: activityInfo.ScheduleId,
ScheduleAttempt: activityInfo.Attempt,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockMatchingClient.EXPECT().AddActivityTask(
gomock.Any(),
&matchingservice.AddActivityTaskRequest{
NamespaceId: activityInfo.NamespaceId,
SourceNamespaceId: activityInfo.NamespaceId,
Execution: &execution,
TaskQueue: &taskqueuepb.TaskQueue{
Name: activityInfo.TaskQueue,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
},
ScheduleId: activityInfo.ScheduleId,
ScheduleToStartTimeout: activityInfo.ScheduleToStartTimeout,
},
).Return(&matchingservice.AddActivityTaskResponse{}, nil).Times(1)
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
func (s *timerQueueActiveTaskExecutorSuite) TestActivityRetryTimer_Noop() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
event := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = event.GetEventId()
event = addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
identity := "identity"
taskqueue := "taskqueue"
activityID := "activity"
activityType := "activity type"
timerTimeout := 2 * time.Second
scheduledEvent, activityInfo := addActivityTaskScheduledEventWithRetry(
mutableState,
event.GetEventId(),
activityID,
activityType,
taskqueue,
nil,
timerTimeout,
timerTimeout,
timerTimeout,
timerTimeout,
&commonpb.RetryPolicy{
InitialInterval: timestamp.DurationPtr(1 * time.Second),
BackoffCoefficient: 1.2,
MaximumInterval: timestamp.DurationPtr(5 * time.Second),
MaximumAttempts: 5,
NonRetryableErrorTypes: []string{"(╯' - ')╯ ┻━┻ "},
},
)
startedEvent := addActivityTaskStartedEvent(mutableState, scheduledEvent.GetEventId(), identity)
s.Nil(startedEvent)
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
EventId: activityInfo.ScheduleId,
ScheduleAttempt: activityInfo.Attempt,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, scheduledEvent.GetEventId(), scheduledEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
}
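// Workflow run timeout tests: depending on the retry policy, cron schedule, and
// execution expiration time, the run either times out for good or is closed as
// continued-as-new so the next attempt or cron invocation can start.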
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_Fire() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)),
},
)
s.Nil(err)
di := addWorkflowTaskScheduledEvent(mutableState)
startEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = startEvent.GetEventId()
completionEvent := addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once()
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
running := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).IsWorkflowExecutionRunning()
s.False(running)
}
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_ContinueAsNew_Retry() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowExecutionTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)),
},
)
s.Nil(err)
// override the workflow retry policy so the run timeout is retried via continue-as-new
executionInfo := mutableState.executionInfo
executionInfo.HasRetryPolicy = true
executionInfo.WorkflowExpirationTime = s.now.Add(1000 * time.Second)
executionInfo.MaximumAttempts = 10
executionInfo.InitialInterval = 1
executionInfo.MaximumInterval = 1
executionInfo.BackoffCoefficient = 1
di := addWorkflowTaskScheduledEvent(mutableState)
startEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = startEvent.GetEventId()
completionEvent := addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
// two history appends: one for the current workflow, one for the new run
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Times(2)
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus()
s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state)
s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, status)
}
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_ContinueAsNew_Cron() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
Attempt: 1,
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowRunTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(10 * time.Second)),
},
)
s.Nil(err)
executionInfo := mutableState.executionInfo
executionInfo.StartTimestamp = s.now
executionInfo.CronSchedule = "* * * * *"
di := addWorkflowTaskScheduledEvent(mutableState)
startEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = startEvent.GetEventId()
completionEvent := addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
ScheduleAttempt: 1,
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
// two history appends: one for the current workflow, one for the new run
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Times(2)
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus()
s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state)
s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, status)
}
func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowTimeout_WorkflowExpired() {
execution := commonpb.WorkflowExecution{
WorkflowId: "some random workflow ID",
RunId: uuid.New(),
}
workflowType := "some random workflow type"
taskQueueName := "some random task queue"
mutableState := newMutableStateBuilderWithVersionHistoriesForTest(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId())
_, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
&historyservice.StartWorkflowExecutionRequest{
NamespaceId: s.namespaceID,
StartRequest: &workflowservice.StartWorkflowExecutionRequest{
WorkflowType: &commonpb.WorkflowType{Name: workflowType},
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueueName},
WorkflowRunTimeout: timestamp.DurationPtr(2 * time.Second),
WorkflowTaskTimeout: timestamp.DurationPtr(1 * time.Second),
},
WorkflowExecutionExpirationTime: timestamp.TimePtr(s.now.Add(-1 * time.Second)),
},
)
s.Nil(err)
executionInfo := mutableState.executionInfo
executionInfo.StartTimestamp = s.now
executionInfo.CronSchedule = "* * * * *"
di := addWorkflowTaskScheduledEvent(mutableState)
startEvent := addWorkflowTaskStartedEvent(mutableState, di.ScheduleID, taskQueueName, uuid.New())
di.StartedID = startEvent.GetEventId()
completionEvent := addWorkflowTaskCompletedEvent(mutableState, di.ScheduleID, di.StartedID, "some random identity")
protoTaskTime := s.now
s.NoError(err)
timerTask := &persistenceblobs.TimerTaskInfo{
Version: s.version,
NamespaceId: s.namespaceID,
WorkflowId: execution.GetWorkflowId(),
RunId: execution.GetRunId(),
TaskId: int64(100),
TaskType: enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT,
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
VisibilityTime: &protoTaskTime,
}
persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion())
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil)
s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Times(1)
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once()
err = s.timerQueueActiveTaskExecutor.execute(timerTask, true)
s.NoError(err)
state, status := s.getMutableStateFromCache(s.namespaceID, execution.GetWorkflowId(), execution.GetRunId()).GetWorkflowStateStatus()
s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, state)
s.EqualValues(enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, status)
}
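// createPersistenceMutableState converts the builder's state into its persistence
// representation, first recording the last event ID and version in the current
// version history when version histories are in use.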
func (s *timerQueueActiveTaskExecutorSuite) createPersistenceMutableState(
ms mutableState,
lastEventID int64,
lastEventVersion int64,
) *persistence.WorkflowMutableState {
if ms.GetVersionHistories() != nil {
currentVersionHistory, err := ms.GetVersionHistories().GetCurrentVersionHistory()
s.NoError(err)
err = currentVersionHistory.AddOrUpdateItem(persistence.NewVersionHistoryItem(
lastEventID, lastEventVersion,
))
s.NoError(err)
}
return createMutableState(ms)
}
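// getMutableStateFromCache pulls the workflow's execution context out of the
// history cache so tests can assert on the mutable state the executor left behind.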
func (s *timerQueueActiveTaskExecutorSuite) getMutableStateFromCache(
namespaceID string,
workflowID string,
runID string,
) mutableState {
return s.mockHistoryEngine.historyCache.Get(
definition.NewWorkflowIdentifier(namespaceID, workflowID, runID),
).(*workflowExecutionContextImpl).mutableState
}
| 1 | 10,197 | I don't like this helper func :-). Yeah, I definitely don't like it. | temporalio-temporal | go |
@@ -18,17 +18,7 @@ describe( 'PageSpeed Insights Activation', () => {
beforeAll( async () => {
await page.setRequestInterception( true );
useRequestInterception( ( request ) => {
- if ( request.url().match( 'google-site-kit/v1/data/' ) ) {
- request.respond( { status: 200 } );
- } else if (
- request
- .url()
- .match(
- 'google-site-kit/v1/modules/pagespeed-insights/data/pagespeed'
- )
- ) {
- request.respond( { status: 200, body: JSON.stringify( {} ) } );
- } else if (
+ if (
request
.url()
.match( | 1 | /**
* WordPress dependencies
*/
import { visitAdminPage, activatePlugin } from '@wordpress/e2e-test-utils';
/**
* Internal dependencies
*/
import {
deactivateUtilityPlugins,
resetSiteKit,
setSearchConsoleProperty,
setSiteVerification,
useRequestInterception,
} from '../../../utils';
describe( 'PageSpeed Insights Activation', () => {
beforeAll( async () => {
await page.setRequestInterception( true );
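// Stub Site Kit REST responses so the test never talks to the real endpoints;
// anything unmatched falls through to request.continue() below.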
useRequestInterception( ( request ) => {
if ( request.url().match( 'google-site-kit/v1/data/' ) ) {
request.respond( { status: 200 } );
} else if (
request
.url()
.match(
'google-site-kit/v1/modules/pagespeed-insights/data/pagespeed'
)
) {
request.respond( { status: 200, body: JSON.stringify( {} ) } );
} else if (
request
.url()
.match(
'google-site-kit/v1/modules/search-console/data/searchanalytics'
)
) {
request.respond( { status: 200, body: JSON.stringify( {} ) } );
} else {
request.continue();
}
} );
} );
beforeEach( async () => {
await activatePlugin( 'e2e-tests-proxy-auth-plugin' );
await setSiteVerification();
await setSearchConsoleProperty();
} );
afterEach( async () => {
await deactivateUtilityPlugins();
await resetSiteKit();
} );
it( 'leads you to the Site Kit dashboard after activation via CTA', async () => {
await visitAdminPage( 'admin.php', 'page=googlesitekit-dashboard' );
await expect( page ).toClick( '.googlesitekit-cta-link', {
text: /Set up PageSpeed Insights/i,
} );
await page.waitForSelector(
'.googlesitekit-publisher-win--win-success'
);
await expect( page ).toMatchElement(
'.googlesitekit-publisher-win__title',
{
text: /Congrats on completing the setup for PageSpeed Insights!/i,
}
);
} );
it( 'leads you to the Site Kit dashboard after activation via the settings page', async () => {
await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' );
await page.waitForSelector( '.mdc-tab-bar' );
await expect( page ).toClick( '.mdc-tab', {
text: /connect more services/i,
} );
await page.waitForSelector(
'.googlesitekit-settings-connect-module--pagespeed-insights'
);
await expect( page ).toClick( '.googlesitekit-cta-link', {
text: /Set up PageSpeed Insights/i,
} );
await page.waitForSelector(
'.googlesitekit-publisher-win--win-success'
);
await expect( page ).toMatchElement(
'.googlesitekit-publisher-win__title',
{
text: /Congrats on completing the setup for PageSpeed Insights!/i,
}
);
} );
} );
| 1 | 40,663 | Same here. This should remain unchanged. Please add it back. | google-site-kit-wp | js |
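For reference, a minimal sketch of the interception handler with the removed branches restored, which is what the review above asks for; every URL pattern and call is taken from the `-` lines of the patch and the surrounding file, nothing new is assumed:

useRequestInterception( ( request ) => {
	if ( request.url().match( 'google-site-kit/v1/data/' ) ) {
		// Generic Site Kit datapoint requests: acknowledge with a bare 200.
		request.respond( { status: 200 } );
	} else if (
		request
			.url()
			.match(
				'google-site-kit/v1/modules/pagespeed-insights/data/pagespeed'
			)
	) {
		// PageSpeed report requests: return an empty JSON payload.
		request.respond( { status: 200, body: JSON.stringify( {} ) } );
	} else if (
		request
			.url()
			.match(
				'google-site-kit/v1/modules/search-console/data/searchanalytics'
			)
	) {
		request.respond( { status: 200, body: JSON.stringify( {} ) } );
	} else {
		request.continue();
	}
} );

Without the first two branches, those requests fall through to request.continue() and reach the real REST routes, which is presumably the regression the reviewer is flagging.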
@@ -2052,6 +2052,9 @@ Collection.prototype.mapReduce = function(map, reduce, options, callback) {
*/
Collection.prototype.initializeUnorderedBulkOp = function(options) {
options = options || {};
+ if (this.s.options.ignoreUndefined) {
+ options.ignoreUndefined = this.s.options.ignoreUndefined;
+ }
options.promiseLibrary = this.s.promiseLibrary;
return unordered(this.s.topology, this, options);
}; | 1 | 'use strict';
const deprecate = require('util').deprecate;
const deprecateOptions = require('./utils').deprecateOptions;
const checkCollectionName = require('./utils').checkCollectionName;
const ObjectID = require('mongodb-core').BSON.ObjectID;
const AggregationCursor = require('./aggregation_cursor');
const MongoError = require('mongodb-core').MongoError;
const toError = require('./utils').toError;
const normalizeHintField = require('./utils').normalizeHintField;
const handleCallback = require('./utils').handleCallback;
const decorateCommand = require('./utils').decorateCommand;
const decorateWithCollation = require('./utils').decorateWithCollation;
const decorateWithReadConcern = require('./utils').decorateWithReadConcern;
const formattedOrderClause = require('./utils').formattedOrderClause;
const ReadPreference = require('mongodb-core').ReadPreference;
const CommandCursor = require('./command_cursor');
const unordered = require('./bulk/unordered');
const ordered = require('./bulk/ordered');
const ChangeStream = require('./change_stream');
const executeOperation = require('./utils').executeOperation;
const applyWriteConcern = require('./utils').applyWriteConcern;
const resolveReadPreference = require('./utils').resolveReadPreference;
// Operations
const bulkWrite = require('./operations/collection_ops').bulkWrite;
const checkForAtomicOperators = require('./operations/collection_ops').checkForAtomicOperators;
const count = require('./operations/collection_ops').count;
const countDocuments = require('./operations/collection_ops').countDocuments;
const createIndex = require('./operations/collection_ops').createIndex;
const createIndexes = require('./operations/collection_ops').createIndexes;
const deleteMany = require('./operations/collection_ops').deleteMany;
const deleteOne = require('./operations/collection_ops').deleteOne;
const distinct = require('./operations/collection_ops').distinct;
const dropIndex = require('./operations/collection_ops').dropIndex;
const dropIndexes = require('./operations/collection_ops').dropIndexes;
const ensureIndex = require('./operations/collection_ops').ensureIndex;
const findAndModify = require('./operations/collection_ops').findAndModify;
const findAndRemove = require('./operations/collection_ops').findAndRemove;
const findOne = require('./operations/collection_ops').findOne;
const findOneAndDelete = require('./operations/collection_ops').findOneAndDelete;
const findOneAndReplace = require('./operations/collection_ops').findOneAndReplace;
const findOneAndUpdate = require('./operations/collection_ops').findOneAndUpdate;
const geoHaystackSearch = require('./operations/collection_ops').geoHaystackSearch;
const group = require('./operations/collection_ops').group;
const indexes = require('./operations/collection_ops').indexes;
const indexExists = require('./operations/collection_ops').indexExists;
const indexInformation = require('./operations/collection_ops').indexInformation;
const insertOne = require('./operations/collection_ops').insertOne;
const isCapped = require('./operations/collection_ops').isCapped;
const mapReduce = require('./operations/collection_ops').mapReduce;
const optionsOp = require('./operations/collection_ops').optionsOp;
const parallelCollectionScan = require('./operations/collection_ops').parallelCollectionScan;
const prepareDocs = require('./operations/collection_ops').prepareDocs;
const reIndex = require('./operations/collection_ops').reIndex;
const removeDocuments = require('./operations/collection_ops').removeDocuments;
const rename = require('./operations/collection_ops').rename;
const replaceOne = require('./operations/collection_ops').replaceOne;
const save = require('./operations/collection_ops').save;
const stats = require('./operations/collection_ops').stats;
const updateDocuments = require('./operations/collection_ops').updateDocuments;
const updateMany = require('./operations/collection_ops').updateMany;
const updateOne = require('./operations/collection_ops').updateOne;
/**
* @fileOverview The **Collection** class is an internal class that embodies a MongoDB collection
 * allowing for insert/update/remove/find and other command operations on that MongoDB collection.
 *
 * **COLLECTION cannot be instantiated directly**
* @example
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
 * // Get a reference to the collection
 * const col = client.db(dbName).collection('createIndexExample1');
 * // Query the collection and verify the expected number of documents
* col.find({}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* client.close();
* });
* });
*/
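// Collection-level option keys that are intended to be merged into operation-level
// options (here, just ignoreUndefined).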
const mergeKeys = ['ignoreUndefined'];
/**
* Create a new Collection instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {string} collectionName Get the collection name.
* @property {string} namespace Get the full collection namespace.
* @property {object} writeConcern The current write concern values.
* @property {object} readConcern The current read concern values.
* @property {object} hint Get current index hint for collection.
* @return {Collection} a Collection instance.
*/
function Collection(db, topology, dbName, name, pkFactory, options) {
checkCollectionName(name);
// Unpack variables
const internalHint = null;
const slaveOk = options == null || options.slaveOk == null ? db.slaveOk : options.slaveOk;
const serializeFunctions =
options == null || options.serializeFunctions == null
? db.s.options.serializeFunctions
: options.serializeFunctions;
const raw = options == null || options.raw == null ? db.s.options.raw : options.raw;
const promoteLongs =
options == null || options.promoteLongs == null
? db.s.options.promoteLongs
: options.promoteLongs;
const promoteValues =
options == null || options.promoteValues == null
? db.s.options.promoteValues
: options.promoteValues;
const promoteBuffers =
options == null || options.promoteBuffers == null
? db.s.options.promoteBuffers
: options.promoteBuffers;
let readPreference = null;
const collectionHint = null;
const namespace = `${dbName}.${name}`;
// Get the promiseLibrary
const promiseLibrary = options.promiseLibrary || Promise;
// Assign the right collection level readPreference
if (options && options.readPreference) {
readPreference = options.readPreference;
} else if (db.options.readPreference) {
readPreference = db.options.readPreference;
}
// Set custom primary key factory if provided
pkFactory = pkFactory == null ? ObjectID : pkFactory;
// Internal state
this.s = {
// Set custom primary key factory if provided
pkFactory: pkFactory,
// Db
db: db,
// Topology
topology: topology,
// dbName
dbName: dbName,
// Options
options: options,
// Namespace
namespace: namespace,
// Read preference
readPreference: readPreference,
// SlaveOK
slaveOk: slaveOk,
// Serialize functions
serializeFunctions: serializeFunctions,
// Raw
raw: raw,
// promoteLongs
promoteLongs: promoteLongs,
// promoteValues
promoteValues: promoteValues,
// promoteBuffers
promoteBuffers: promoteBuffers,
// internalHint
internalHint: internalHint,
// collectionHint
collectionHint: collectionHint,
// Name
name: name,
// Promise library
promiseLibrary: promiseLibrary,
// Read Concern
readConcern: options.readConcern,
// Write Concern
writeConcern: options.writeConcern
};
}
Object.defineProperty(Collection.prototype, 'dbName', {
enumerable: true,
get: function() {
return this.s.dbName;
}
});
Object.defineProperty(Collection.prototype, 'collectionName', {
enumerable: true,
get: function() {
return this.s.name;
}
});
Object.defineProperty(Collection.prototype, 'namespace', {
enumerable: true,
get: function() {
return this.s.namespace;
}
});
Object.defineProperty(Collection.prototype, 'readConcern', {
enumerable: true,
get: function() {
return this.s.readConcern || { level: 'local' };
}
});
Object.defineProperty(Collection.prototype, 'writeConcern', {
enumerable: true,
get: function() {
let ops = {};
if (this.s.writeConcern) {
return this.s.writeConcern;
}
if (this.s.options.w != null) ops.w = this.s.options.w;
if (this.s.options.j != null) ops.j = this.s.options.j;
if (this.s.options.fsync != null) ops.fsync = this.s.options.fsync;
if (this.s.options.wtimeout != null) ops.wtimeout = this.s.options.wtimeout;
return ops;
}
});
/**
* @ignore
*/
Object.defineProperty(Collection.prototype, 'hint', {
enumerable: true,
get: function() {
return this.s.collectionHint;
},
set: function(v) {
this.s.collectionHint = normalizeHintField(v);
}
});
const DEPRECATED_FIND_OPTIONS = ['maxScan', 'fields', 'snapshot'];
/**
* Creates a cursor for a query that can be used to iterate over results from MongoDB
* @method
* @param {object} [query={}] The cursor query object.
* @param {object} [options] Optional settings.
* @param {number} [options.limit=0] Sets the limit of documents returned in the query.
* @param {(array|object)} [options.sort] Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.
* @param {object} [options.projection] The fields to return in the query. Object of fields to include or exclude (not both), {'a':1}
* @param {object} [options.fields] **Deprecated** Use `options.projection` instead
* @param {number} [options.skip=0] Set to skip N documents ahead in your query (useful for pagination).
* @param {Object} [options.hint] Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}
* @param {boolean} [options.explain=false] Explain the query instead of returning the data.
* @param {boolean} [options.snapshot=false] DEPRECATED: Snapshot query.
* @param {boolean} [options.timeout=false] Specify if the cursor can timeout.
* @param {boolean} [options.tailable=false] Specify if the cursor is tailable.
* @param {number} [options.batchSize=0] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {boolean} [options.returnKey=false] Only return the index key.
* @param {number} [options.maxScan] DEPRECATED: Limit the number of items to scan.
* @param {number} [options.min] Set index bounds.
* @param {number} [options.max] Set index bounds.
* @param {boolean} [options.showDiskLoc=false] Show disk location of results.
* @param {string} [options.comment] You can put a $comment field on a query to make looking in the profiler logs simpler.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {boolean} [options.partial=false] Specify if the cursor should return partial results when querying against a sharded system
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @throws {MongoError}
* @return {Cursor}
*/
Collection.prototype.find = deprecateOptions(
{
name: 'collection.find',
deprecatedOptions: DEPRECATED_FIND_OPTIONS,
optionsIndex: 1
},
function(query, options, callback) {
if (typeof callback === 'object') {
// TODO(MAJOR): throw in the future
console.warn('Third parameter to `find()` must be a callback or undefined');
}
let selector = query;
// figuring out arguments
if (typeof callback !== 'function') {
if (typeof options === 'function') {
callback = options;
options = undefined;
} else if (options == null) {
callback = typeof selector === 'function' ? selector : undefined;
selector = typeof selector === 'object' ? selector : undefined;
}
}
// Ensure selector is not null
selector = selector == null ? {} : selector;
// Validate correctness of the selector
const object = selector;
if (Buffer.isBuffer(object)) {
const object_size = object[0] | (object[1] << 8) | (object[2] << 16) | (object[3] << 24);
if (object_size !== object.length) {
const error = new Error(
'query selector raw message size does not match message header size [' +
object.length +
'] != [' +
object_size +
']'
);
error.name = 'MongoError';
throw error;
}
}
// Check special case where we are using an objectId
if (selector != null && selector._bsontype === 'ObjectID') {
selector = { _id: selector };
}
if (!options) options = {};
let projection = options.projection || options.fields;
if (projection && !Buffer.isBuffer(projection) && Array.isArray(projection)) {
projection = projection.length
? projection.reduce((result, field) => {
result[field] = 1;
return result;
}, {})
: { _id: 1 };
}
// Make a shallow copy of options
let newOptions = Object.assign({}, options);
// Make a shallow copy of the collection options
for (let key in this.s.options) {
if (mergeKeys.indexOf(key) !== -1) {
newOptions[key] = this.s.options[key];
}
}
// Unpack options
newOptions.skip = options.skip ? options.skip : 0;
newOptions.limit = options.limit ? options.limit : 0;
newOptions.raw = typeof options.raw === 'boolean' ? options.raw : this.s.raw;
newOptions.hint =
options.hint != null ? normalizeHintField(options.hint) : this.s.collectionHint;
newOptions.timeout = typeof options.timeout === 'undefined' ? undefined : options.timeout;
// Use the overridden slaveOk if provided, otherwise fall back to the db default
newOptions.slaveOk = options.slaveOk != null ? options.slaveOk : this.s.db.slaveOk;
// Add read preference if needed
newOptions.readPreference = resolveReadPreference(newOptions, {
db: this.s.db,
collection: this
});
// Set slaveOk to true if the read preference is anything other than primary;
// readPreference may be a mode string or a ReadPreference object, so both
// forms must be checked (using || here would match every non-null preference)
if (
  newOptions.readPreference != null &&
  newOptions.readPreference !== 'primary' &&
  newOptions.readPreference.mode !== 'primary'
) {
  newOptions.slaveOk = true;
}
// Ensure the query is an object
if (selector != null && typeof selector !== 'object') {
throw MongoError.create({ message: 'query selector must be an object', driver: true });
}
// Build the find command
const findCommand = {
find: this.s.namespace,
limit: newOptions.limit,
skip: newOptions.skip,
query: selector
};
// Ensure we use the right await data option
if (typeof newOptions.awaitdata === 'boolean') {
newOptions.awaitData = newOptions.awaitdata;
}
// Translate to new command option noCursorTimeout
if (typeof newOptions.timeout === 'boolean') newOptions.noCursorTimeout = newOptions.timeout;
decorateCommand(findCommand, newOptions, ['session', 'collation']);
if (projection) findCommand.fields = projection;
// Add db object to the new options
newOptions.db = this.s.db;
// Add the promise library
newOptions.promiseLibrary = this.s.promiseLibrary;
// Set raw if available at collection level
if (newOptions.raw == null && typeof this.s.raw === 'boolean') newOptions.raw = this.s.raw;
// Set promoteLongs if available at collection level
if (newOptions.promoteLongs == null && typeof this.s.promoteLongs === 'boolean')
newOptions.promoteLongs = this.s.promoteLongs;
if (newOptions.promoteValues == null && typeof this.s.promoteValues === 'boolean')
newOptions.promoteValues = this.s.promoteValues;
if (newOptions.promoteBuffers == null && typeof this.s.promoteBuffers === 'boolean')
newOptions.promoteBuffers = this.s.promoteBuffers;
// Sort options
if (findCommand.sort) {
findCommand.sort = formattedOrderClause(findCommand.sort);
}
// Set the readConcern
decorateWithReadConcern(findCommand, this, options);
// Decorate find command with collation options
try {
decorateWithCollation(findCommand, this, options);
} catch (err) {
if (typeof callback === 'function') return callback(err, null);
throw err;
}
const cursor = this.s.topology.cursor(this.s.namespace, findCommand, newOptions);
return typeof callback === 'function' ? handleCallback(callback, null, cursor) : cursor;
}
);
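/**
* Usage sketch for `find` (illustrative only; the connection URL, database and
* collection names are assumptions, mirroring the example at the top of this file):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   // Chain cursor modifiers, then materialize the results
*   col.find({ age: { $gte: 21 } })
*     .project({ name: 1, _id: 0 })
*     .sort([['name', 1]])
*     .limit(10)
*     .toArray(function(err, docs) {
*       console.dir(docs);
*       client.close();
*     });
* });
*/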
/**
* Inserts a single document into MongoDB. If the document does not contain an **_id** field,
* the driver will add one, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object} doc Document to insert.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertOneWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.insertOne = function(doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, insertOne, [this, doc, options, callback]);
};
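/**
* Usage sketch for `insertOne` (illustrative only; connection details are
* assumptions, as in the file's top example):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.insertOne({ name: 'ada' }, function(err, r) {
*     // On success r.insertedCount is 1 and r.insertedId holds the generated ObjectId
*     client.close();
*   });
* });
*/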
function mapInsertManyResults(docs, r) {
const finalResult = {
result: { ok: 1, n: r.insertedCount },
ops: docs,
insertedCount: r.insertedCount,
insertedIds: r.insertedIds
};
if (r.getLastOp()) {
finalResult.result.opTime = r.getLastOp();
}
return finalResult;
}
/**
* Inserts an array of documents into MongoDB. If any of the documents do not contain an
* **_id** field, the driver will add one to each document missing it, mutating the documents.
* This behavior can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object[]} docs Documents to insert.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {boolean} [options.ordered=true] If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.insertMany = function(docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options ? Object.assign({}, options) : { ordered: true };
if (!Array.isArray(docs) && typeof callback === 'function') {
return callback(
MongoError.create({ message: 'docs parameter must be an array of documents', driver: true })
);
} else if (!Array.isArray(docs)) {
return new this.s.promiseLibrary((resolve, reject) => {
reject(
MongoError.create({ message: 'docs parameter must be an array of documents', driver: true })
);
});
}
// Inherit the collection-level serializeFunctions setting if not set per call
options['serializeFunctions'] = options['serializeFunctions'] || this.s.serializeFunctions;
docs = prepareDocs(this, docs, options);
// Generate the bulk write operations
const operations = [
{
insertMany: docs
}
];
return executeOperation(this.s.topology, bulkWrite, [this, operations, options, callback], {
resultMutator: result => mapInsertManyResults(docs, result)
});
};
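/**
* Usage sketch for `insertMany` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.insertMany([{ a: 1 }, { a: 2 }], { ordered: true }, function(err, r) {
*     // r.insertedCount is 2; r.insertedIds maps array index -> generated _id
*     client.close();
*   });
* });
*/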
/**
* @typedef {Object} Collection~BulkWriteOpResult
* @property {number} insertedCount Number of documents inserted.
* @property {number} matchedCount Number of documents matched for update.
* @property {number} modifiedCount Number of documents modified.
* @property {number} deletedCount Number of documents deleted.
* @property {number} upsertedCount Number of documents upserted.
* @property {object} insertedIds Inserted document generated Id's, hash key is the index of the originating operation
* @property {object} upsertedIds Upserted document generated Id's, hash key is the index of the originating operation
* @property {object} result The command result object.
*/
/**
* The callback format for bulkWrite operations
* @callback Collection~bulkWriteOpCallback
* @param {BulkWriteError} error An error instance representing the error during the execution.
* @param {Collection~BulkWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Perform a bulkWrite operation without a fluent API
*
* Legal operation types are
*
* { insertOne: { document: { a: 1 } } }
*
* { updateOne: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
*
* { updateMany: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
*
* { deleteOne: { filter: {c:1} } }
*
* { deleteMany: { filter: {c:1} } }
*
* { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true}}
*
* If documents passed in do not contain an **_id** field, the driver will add one to each
* document missing it, mutating the documents. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object[]} operations Bulk operations to perform.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.ordered=true] Execute write operation in ordered or unordered fashion.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~bulkWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.bulkWrite = function(operations, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || { ordered: true };
if (!Array.isArray(operations)) {
throw MongoError.create({ message: 'operations must be an array of documents', driver: true });
}
return executeOperation(this.s.topology, bulkWrite, [this, operations, options, callback]);
};
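/**
* Usage sketch for `bulkWrite` mixing the operation types listed above
* (illustrative only; connection details are assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('items');
*   col.bulkWrite(
*     [
*       { insertOne: { document: { a: 1 } } },
*       { updateOne: { filter: { a: 2 }, update: { $set: { a: 2 } }, upsert: true } },
*       { deleteOne: { filter: { c: 1 } } }
*     ],
*     { ordered: true },
*     function(err, r) {
*       // r.insertedCount, r.upsertedCount and r.deletedCount summarize the batch
*       client.close();
*     }
*   );
* });
*/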
/**
* @typedef {Object} Collection~WriteOpResult
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {object} connection The connection object used for the operation.
* @property {object} result The command result object.
*/
/**
* The callback format for write operations
* @callback Collection~writeOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~WriteOpResult} result The result object if the command was executed successfully.
*/
/**
* @typedef {Object} Collection~insertWriteOpResult
* @property {Number} insertedCount The total amount of documents inserted.
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {Object.<Number, ObjectId>} insertedIds Map of the index of the inserted document to the id of the inserted document.
* @property {object} connection The connection object used for the operation.
* @property {object} result The raw command result object returned from MongoDB (content might vary by server version).
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents inserted.
*/
/**
* @typedef {Object} Collection~insertOneWriteOpResult
* @property {Number} insertedCount The total amount of documents inserted.
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {ObjectId} insertedId The driver generated ObjectId for the insert operation.
* @property {object} connection The connection object used for the operation.
* @property {object} result The raw command result object returned from MongoDB (content might vary by server version).
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents inserted.
*/
/**
* The callback format for inserts
* @callback Collection~insertWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~insertWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* The callback format for inserts
* @callback Collection~insertOneWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~insertOneWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Inserts a single document or an array of documents into MongoDB. If documents passed in
* do not contain an **_id** field, the driver will add one to each document missing it,
* mutating the documents. This behavior can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {(object|object[])} docs Documents to insert.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated Use insertOne, insertMany or bulkWrite
*/
Collection.prototype.insert = deprecate(function(docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || { ordered: false };
docs = !Array.isArray(docs) ? [docs] : docs;
if (options.keepGoing === true) {
options.ordered = false;
}
return this.insertMany(docs, options, callback);
}, 'collection.insert is deprecated. Use insertOne, insertMany or bulkWrite instead.');
/**
* @typedef {Object} Collection~updateWriteOpResult
* @property {Object} result The raw result returned from MongoDB. Will vary depending on server version.
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents scanned.
* @property {Number} result.nModified The total count of documents modified.
* @property {Object} connection The connection object used for the operation.
* @property {Number} matchedCount The number of documents that matched the filter.
* @property {Number} modifiedCount The number of documents that were modified.
* @property {Number} upsertedCount The number of documents upserted.
* @property {Object} upsertedId The upserted id.
* @property {ObjectId} upsertedId._id The upserted _id returned from the server.
* @property {Object} message
* @property {Array} ops
*/
/**
* The callback format for updates
* @callback Collection~updateWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~updateWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Update a single document in a collection
* @method
* @param {object} filter The Filter used to select the document to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.updateOne = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const err = checkForAtomicOperators(update);
if (err) {
if (typeof callback === 'function') return callback(err);
return this.s.promiseLibrary.reject(err);
}
options = Object.assign({}, options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateOne, [this, filter, update, options, callback]);
};
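/**
* Usage sketch for `updateOne` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.updateOne({ name: 'ada' }, { $set: { active: true } }, { upsert: true }, function(err, r) {
*     // r.matchedCount / r.modifiedCount report the effect; r.upsertedId is set on an upsert
*     client.close();
*   });
* });
*/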
/**
* Replace a document in a collection with another document
* @method
* @param {object} filter The Filter used to select the document to replace
* @param {object} doc The Document that replaces the matching document
* @param {object} [options] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise<Collection~updateWriteOpResult>} returns Promise if no callback passed
*/
Collection.prototype.replaceOne = function(filter, doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, replaceOne, [this, filter, doc, options, callback]);
};
/**
* Update multiple documents in a collection
* @method
* @param {object} filter The Filter used to select the documents to update
* @param {object} update The update operations to be applied to the documents
* @param {object} [options] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise<Collection~updateWriteOpResult>} returns Promise if no callback passed
*/
Collection.prototype.updateMany = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
const err = checkForAtomicOperators(update);
if (err) {
if (typeof callback === 'function') return callback(err);
return this.s.promiseLibrary.reject(err);
}
options = Object.assign({}, options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateMany, [this, filter, update, options, callback]);
};
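/**
* Usage sketch for `updateMany` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.updateMany({ active: false }, { $set: { archived: true } }, function(err, r) {
*     // r.modifiedCount is the number of documents actually changed
*     client.close();
*   });
* });
*/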
/**
* Updates documents.
* @method
* @param {object} selector The selector for the update operation.
* @param {object} document The update document.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {boolean} [options.multi=false] Update one/all documents with operation.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
* @deprecated use updateOne, updateMany or bulkWrite
*/
Collection.prototype.update = deprecate(function(selector, document, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateDocuments, [
this,
selector,
document,
options,
callback
]);
}, 'collection.update is deprecated. Use updateOne, updateMany, or bulkWrite instead.');
/**
* @typedef {Object} Collection~deleteWriteOpResult
* @property {Object} result The raw result returned from MongoDB. Will vary depending on server version.
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents deleted.
* @property {Object} connection The connection object used for the operation.
* @property {Number} deletedCount The number of documents deleted.
*/
/**
* The callback format for deletes
* @callback Collection~deleteWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~deleteWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Delete a document from a collection
* @method
* @param {object} filter The Filter used to select the document to remove
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.deleteOne = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, deleteOne, [this, filter, options, callback]);
};
Collection.prototype.removeOne = Collection.prototype.deleteOne;
/**
* Delete multiple documents from a collection
* @method
* @param {object} filter The Filter used to select the documents to remove
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.deleteMany = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, deleteMany, [this, filter, options, callback]);
};
Collection.prototype.removeMany = Collection.prototype.deleteMany;
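/**
* Usage sketch for `deleteOne`/`deleteMany` (illustrative only; connection
* details are assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.deleteOne({ name: 'ada' }, function(err, r) {
*     // r.deletedCount is 0 or 1
*     col.deleteMany({ archived: true }, function(err, r2) {
*       // r2.deletedCount is the number of removed documents
*       client.close();
*     });
*   });
* });
*/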
/**
* Remove documents.
* @method
* @param {object} selector The selector for the update operation.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.single=false] Removes the first document found.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use deleteOne, deleteMany or bulkWrite
*/
Collection.prototype.remove = deprecate(function(selector, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, removeDocuments, [this, selector, options, callback]);
}, 'collection.remove is deprecated. Use deleteOne, deleteMany, or bulkWrite instead.');
/**
* Save a document. A simple full-document replacement function. Not recommended for
* performance; use atomic operators with updateOne or updateMany for more efficient operations.
* @method
* @param {object} doc Document to save
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use insertOne, insertMany, updateOne or updateMany
*/
Collection.prototype.save = deprecate(function(doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, save, [this, doc, options, callback]);
}, 'collection.save is deprecated. Use insertOne, insertMany, updateOne, or updateMany instead.');
/**
* The callback format for results
* @callback Collection~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object} result The result object if the command was executed successfully.
*/
/**
* The callback format for an aggregation call
* @callback Collection~aggregationCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {AggregationCursor} cursor The cursor if the aggregation command was executed successfully.
*/
/**
* Fetches the first document that matches the query
* @method
* @param {object} query Query for find Operation
* @param {object} [options] Optional settings.
* @param {number} [options.limit=0] Sets the limit of documents returned in the query.
* @param {(array|object)} [options.sort] Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.
* @param {object} [options.projection] The fields to return in the query. Object of fields to include or exclude (not both), {'a':1}
* @param {object} [options.fields] **Deprecated** Use `options.projection` instead
* @param {number} [options.skip=0] Set to skip N documents ahead in your query (useful for pagination).
* @param {Object} [options.hint] Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}
* @param {boolean} [options.explain=false] Explain the query instead of returning the data.
* @param {boolean} [options.snapshot=false] DEPRECATED: Snapshot query.
* @param {boolean} [options.timeout=false] Specify if the cursor can timeout.
* @param {boolean} [options.tailable=false] Specify if the cursor is tailable.
* @param {number} [options.batchSize=0] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {boolean} [options.returnKey=false] Only return the index key.
* @param {number} [options.maxScan] DEPRECATED: Limit the number of items to scan.
* @param {number} [options.min] Set index bounds.
* @param {number} [options.max] Set index bounds.
* @param {boolean} [options.showDiskLoc=false] Show disk location of results.
* @param {string} [options.comment] You can put a $comment field on a query to make looking in the profiler logs simpler.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {boolean} [options.partial=false] Specify if the cursor should return partial results when querying against a sharded system
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.findOne = deprecateOptions(
{
name: 'collection.findOne',
deprecatedOptions: DEPRECATED_FIND_OPTIONS,
optionsIndex: 1
},
function(query, options, callback) {
if (typeof callback === 'object') {
// TODO(MAJOR): throw in the future
console.warn('Third parameter to `findOne()` must be a callback or undefined');
}
if (typeof query === 'function') (callback = query), (query = {}), (options = {});
if (typeof options === 'function') (callback = options), (options = {});
query = query || {};
options = options || {};
return executeOperation(this.s.topology, findOne, [this, query, options, callback]);
}
);
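/**
* Usage sketch for `findOne` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.findOne({ name: 'ada' }, { projection: { _id: 0 } }, function(err, doc) {
*     // doc is the first matching document, or null when nothing matches
*     client.close();
*   });
* });
*/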
/**
* The callback format for the collection method, must be used if strict is specified
* @callback Collection~collectionResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection} collection The collection instance.
*/
/**
* Rename the collection.
*
* @method
* @param {string} newName New name of the collection.
* @param {object} [options] Optional settings.
* @param {boolean} [options.dropTarget=false] Drop the target name collection if it previously exists.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~collectionResultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.rename = function(newName, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY });
return executeOperation(this.s.topology, rename, [this, newName, options, callback]);
};
/**
* Drop the collection from the database, removing it permanently. New accesses will create a new collection.
*
* @method
* @param {object} [options] Optional settings.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.drop = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, this.s.db.dropCollection.bind(this.s.db), [
this.s.name,
options,
callback
]);
};
/**
* Returns the options of the collection.
*
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.options = function(opts, callback) {
if (typeof opts === 'function') (callback = opts), (opts = {});
opts = opts || {};
return executeOperation(this.s.topology, optionsOp, [this, opts, callback]);
};
/**
* Returns if the collection is a capped collection
*
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.isCapped = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, isCapped, [this, options, callback]);
};
/**
* Creates an index on the collection.
* @method
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.unique=false] Creates a unique index.
* @param {boolean} [options.sparse=false] Creates a sparse index.
* @param {boolean} [options.background=false] Creates the index in the background, yielding whenever possible.
* @param {boolean} [options.dropDups=false] A unique index cannot be created on a key that has pre-existing duplicate values. Set this to true to create the index anyway, keeping the first document the database indexes and deleting all subsequent documents that have duplicate values.
* @param {number} [options.min] For geospatial indexes set the lower bound for the co-ordinates.
* @param {number} [options.max] For geospatial indexes set the high bound for the co-ordinates.
* @param {number} [options.v] Specify the format version of the indexes.
* @param {number} [options.expireAfterSeconds] Creates a TTL index that expires documents after the given number of seconds (MongoDB 2.2 or higher)
* @param {string} [options.name] Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
* @param {object} [options.partialFilterExpression] Creates a partial index based on the given filter object (MongoDB 3.2 or higher)
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.createIndex = function(fieldOrSpec, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, createIndex, [this, fieldOrSpec, options, callback]);
};
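/**
* Usage sketch for `createIndex` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.createIndex({ name: 1 }, { unique: true, background: true }, function(err, indexName) {
*     // indexName is the name of the created index, e.g. 'name_1'
*     client.close();
*   });
* });
*/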
/**
* Creates multiple indexes in the collection. This method is only supported for
* MongoDB 2.6 or higher; earlier versions of MongoDB will throw a command-not-supported
* error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/.
* @method
* @param {array} indexSpecs An array of index specifications to be created
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.createIndexes = function(indexSpecs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options ? Object.assign({}, options) : {};
if (typeof options.maxTimeMS !== 'number') delete options.maxTimeMS;
return executeOperation(this.s.topology, createIndexes, [this, indexSpecs, options, callback]);
};
/**
* Drops an index from this collection.
* @method
* @param {string} indexName Name of the index to drop.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.dropIndex = function(indexName, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
// Run only against primary
options.readPreference = ReadPreference.PRIMARY;
return executeOperation(this.s.topology, dropIndex, [this, indexName, options, callback]);
};
/**
* Drops all indexes from this collection.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.dropIndexes = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options ? Object.assign({}, options) : {};
if (typeof options.maxTimeMS !== 'number') delete options.maxTimeMS;
return executeOperation(this.s.topology, dropIndexes, [this, options, callback]);
};
/**
* Drops all indexes from this collection.
* @method
* @deprecated use dropIndexes
* @param {Collection~resultCallback} callback The command result callback
* @return {Promise} returns Promise if no [callback] passed
*/
Collection.prototype.dropAllIndexes = deprecate(
Collection.prototype.dropIndexes,
'collection.dropAllIndexes is deprecated. Use dropIndexes instead.'
);
/**
* Reindex all indexes on the collection
* Warning: reIndex is a blocking operation (indexes are rebuilt in the foreground) and will be slow for large collections.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.reIndex = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, reIndex, [this, options, callback]);
};
/**
* Get the list of all indexes information for the collection.
*
* @method
* @param {object} [options] Optional settings.
* @param {number} [options.batchSize] The batchSize for the returned command cursor or, on servers older than MongoDB 2.8, for the fallback cursor over the system indexes collection
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {CommandCursor}
*/
Collection.prototype.listIndexes = function(options) {
options = options || {};
// Clone the options
options = Object.assign({}, options);
// Determine the read preference in the options.
options.readPreference = resolveReadPreference(options, { db: this.s.db, collection: this });
// Set the CommandCursor constructor
options.cursorFactory = CommandCursor;
// Set the promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
if (!this.s.topology.capabilities()) {
throw new MongoError('cannot connect to server');
}
// Cursor options
let cursor = options.batchSize ? { batchSize: options.batchSize } : {};
// We have a list collections command
if (this.s.topology.capabilities().hasListIndexesCommand) {
// Build the command
const command = { listIndexes: this.s.name, cursor: cursor };
// Execute the cursor
cursor = this.s.topology.cursor(`${this.s.dbName}.$cmd`, command, options);
// Do we have a readPreference, apply it
if (options.readPreference) cursor.setReadPreference(options.readPreference);
// Return the cursor
return cursor;
}
// Get the namespace
const ns = `${this.s.dbName}.system.indexes`;
// Get the query
cursor = this.s.topology.cursor(ns, { find: ns, query: { ns: this.s.namespace } }, options);
// Do we have a readPreference, apply it
if (options.readPreference) cursor.setReadPreference(options.readPreference);
// Set the passed in batch size if one was provided
if (options.batchSize) cursor = cursor.batchSize(options.batchSize);
// Return the cursor
return cursor;
};
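/**
* Usage sketch for `listIndexes` (illustrative only; connection details are
* assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.listIndexes().toArray(function(err, indexes) {
*     // indexes is an array of index descriptions, always including the default _id_ index
*     client.close();
*   });
* });
*/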
/**
* Ensures that an index exists, if it does not it creates it
* @method
* @deprecated use createIndexes instead
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.unique=false] Creates a unique index.
* @param {boolean} [options.sparse=false] Creates a sparse index.
* @param {boolean} [options.background=false] Creates the index in the background, yielding whenever possible.
* @param {boolean} [options.dropDups=false] A unique index cannot be created on a key that has pre-existing duplicate values. Set this to true to create the index anyway, keeping the first document the database indexes and deleting all subsequent documents that have duplicate values.
* @param {number} [options.min] For geospatial indexes set the lower bound for the co-ordinates.
* @param {number} [options.max] For geospatial indexes set the high bound for the co-ordinates.
* @param {number} [options.v] Specify the format version of the indexes.
* @param {number} [options.expireAfterSeconds] Creates a TTL index that expires documents after the given number of seconds (MongoDB 2.2 or higher)
* @param {string} [options.name] Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.ensureIndex = deprecate(function(fieldOrSpec, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, ensureIndex, [this, fieldOrSpec, options, callback]);
}, 'collection.ensureIndex is deprecated. Use createIndexes instead.');
/**
* Checks if one or more indexes exist on the collection, fails on first non-existing index
* @method
* @param {(string|array)} indexes One or more index names to check.
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexExists = function(indexes, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, indexExists, [this, indexes, options, callback]);
};
/**
* Retrieves this collections index info.
* @method
* @param {object} [options] Optional settings.
* @param {boolean} [options.full=false] Returns the full raw index information.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexInformation = function(options, callback) {
const args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, indexInformation, [this, options, callback]);
};
/**
* The callback format for results
* @callback Collection~countCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {number} result The count of documents that matched the query.
*/
/**
* Count number of matching documents in the db to a query.
* @method
* @param {object} [query={}] The query for the count.
* @param {object} [options] Optional settings.
* @param {number} [options.limit] The limit of documents to count.
* @param {number} [options.skip] The number of documents to skip for the count.
* @param {string} [options.hint] An index name hint for the query.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~countCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use {@link Collection#countDocuments countDocuments} or {@link Collection#estimatedDocumentCount estimatedDocumentCount} instead
*/
Collection.prototype.count = deprecate(function(query, options, callback) {
const args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
query = args.length ? args.shift() || {} : {};
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, count, [this, query, options, callback]);
}, 'collection.count is deprecated, and will be removed in a future version.' +
' Use collection.countDocuments or collection.estimatedDocumentCount instead');
/**
* Gets an estimate of the count of documents in a collection using collection metadata.
*
* @method
* @param {object} [options] Optional settings.
* @param {number} [options.maxTimeMS] The maximum amount of time to allow the operation to run.
* @param {Collection~countCallback} [callback] The command result callback.
* @return {Promise} returns Promise if no callback passed.
*/
Collection.prototype.estimatedDocumentCount = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, count, [this, null, options, callback]);
};
/**
* Gets the number of documents matching the filter.
*
* **Note**: When migrating from {@link Collection#count count} to {@link Collection#countDocuments countDocuments}
* the following query operators must be replaced:
*
* | Operator | Replacement |
* | -------- | ----------- |
* | `$where` | [`$expr`][1] |
* | `$near` | [`$geoWithin`][2] with [`$center`][3] |
* | `$nearSphere` | [`$geoWithin`][2] with [`$centerSphere`][4] |
*
* [1]: https://docs.mongodb.com/manual/reference/operator/query/expr/
* [2]: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
* [3]: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
* [4]: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
*
* @param {object} [query] the query for the count
* @param {object} [options] Optional settings.
* @param {object} [options.collation] Specifies a collation.
* @param {string|object} [options.hint] The index to use.
* @param {number} [options.limit] The maximum number of document to count.
* @param {number} [options.maxTimeMS] The maximum amount of time to allow the operation to run.
* @param {number} [options.skip] The number of documents to skip before counting.
* @param {Collection~countCallback} [callback] The command result callback.
* @return {Promise} returns Promise if no callback passed.
* @see https://docs.mongodb.com/manual/reference/operator/query/expr/
* @see https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
* @see https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
* @see https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
*/
Collection.prototype.countDocuments = function(query, options, callback) {
const args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
query = args.length ? args.shift() || {} : {};
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, countDocuments, [this, query, options, callback]);
};
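/**
* Usage sketch for `countDocuments` (illustrative only; connection details
* are assumptions):
* @example
* const MongoClient = require('mongodb').MongoClient;
* MongoClient.connect('mongodb://localhost:27017', function(err, client) {
*   const col = client.db('test').collection('people');
*   col.countDocuments({ active: true }, { maxTimeMS: 1000 }, function(err, count) {
*     // count is the number of documents matching the filter
*     client.close();
*   });
* });
*/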
/**
* The distinct command returns a list of distinct values for the given key across a collection.
* @method
* @param {string} key Field of the document to find distinct values for.
* @param {object} query The query for filtering the set of documents to which we apply the distinct filter.
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
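 *
 * @example
 * // Illustrative sketch only; field and query values are hypothetical:
 * collection.distinct('city', { country: 'DE' })
 *   .then(cities => console.log(cities));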
*/
Collection.prototype.distinct = function(key, query, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
const queryOption = args.length ? args.shift() || {} : {};
const optionsOption = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, distinct, [
this,
key,
queryOption,
optionsOption,
callback
]);
};
/**
* Retrieve all the indexes on the collection.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexes = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, indexes, [this, options, callback]);
};
/**
* Get all the collection statistics.
*
* @method
* @param {object} [options] Optional settings.
* @param {number} [options.scale] Divide the returned sizes by scale value.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The collection result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.stats = function(options, callback) {
const args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, stats, [this, options, callback]);
};
/**
* @typedef {Object} Collection~findAndModifyWriteOpResult
* @property {object} value Document returned from findAndModify command.
* @property {object} lastErrorObject The raw lastErrorObject returned from the command.
* @property {Number} ok Is 1 if the command executed correctly.
*/
/**
 * The callback format for findAndModify operations
* @callback Collection~findAndModifyCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~findAndModifyWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Find a document and delete it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter The Filter used to select the document to remove
* @param {object} [options] Optional settings.
* @param {object} [options.projection] Limits the fields to return for all matching documents.
* @param {object} [options.sort] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS] The maximum amount of time to allow the query to run.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
 * @return {Promise<Collection~findAndModifyWriteOpResult>} returns Promise if no callback passed
*/
Collection.prototype.findOneAndDelete = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
return executeOperation(this.s.topology, findOneAndDelete, [this, filter, options, callback]);
};
/**
* Find a document and replace it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter The Filter used to select the document to replace
* @param {object} replacement The Document that replaces the matching document
* @param {object} [options] Optional settings.
* @param {object} [options.projection] Limits the fields to return for all matching documents.
* @param {object} [options.sort] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS] The maximum amount of time to allow the query to run.
* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
* @param {boolean} [options.returnOriginal=true] When false, returns the updated document rather than the original. The default is true.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
 * @return {Promise<Collection~findAndModifyWriteOpResult>} returns Promise if no callback passed
*/
Collection.prototype.findOneAndReplace = function(filter, replacement, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
if (replacement == null || typeof replacement !== 'object')
throw toError('replacement parameter must be an object');
return executeOperation(this.s.topology, findOneAndReplace, [
this,
filter,
replacement,
options,
callback
]);
};
/**
* Find a document and update it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter The Filter used to select the document to update
* @param {object} update Update operations to be performed on the document
* @param {object} [options] Optional settings.
* @param {object} [options.projection] Limits the fields to return for all matching documents.
* @param {object} [options.sort] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS] The maximum amount of time to allow the query to run.
* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
* @param {boolean} [options.returnOriginal=true] When false, returns the updated document rather than the original. The default is true.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
 * @return {Promise<Collection~findAndModifyWriteOpResult>} returns Promise if no callback passed
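 *
 * @example
 * // Illustrative sketch only; the filter and update documents are hypothetical.
 * // The update must use atomic operators such as $set, which is what
 * // checkForAtomicOperators enforces below:
 * collection.findOneAndUpdate(
 *   { username: 'alice' },
 *   { $set: { status: 'active' } },
 *   { returnOriginal: false }
 * ).then(r => console.log(r.value));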
*/
Collection.prototype.findOneAndUpdate = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
if (update == null || typeof update !== 'object')
throw toError('update parameter must be an object');
const err = checkForAtomicOperators(update);
if (err) {
if (typeof callback === 'function') return callback(err);
return this.s.promiseLibrary.reject(err);
}
return executeOperation(this.s.topology, findOneAndUpdate, [
this,
filter,
update,
options,
callback
]);
};
/**
* Find and update a document.
* @method
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} doc The fields/vals to be updated.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.remove=false] Set to true to remove the object before returning.
* @param {boolean} [options.upsert=false] Perform an upsert operation.
* @param {boolean} [options.new=false] Set to true if you want to return the modified object rather than the original. Ignored for remove.
* @param {object} [options.projection] Object containing the field projection for the result returned from the operation.
* @param {object} [options.fields] **Deprecated** Use `options.projection` instead
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Array} [options.arrayFilters] optional list of array filters referenced in filtered positional operators
* @param {Collection~findAndModifyCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead
*/
Collection.prototype.findAndModify = deprecate(function(query, sort, doc, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
sort = args.length ? args.shift() || [] : [];
doc = args.length ? args.shift() : null;
options = args.length ? args.shift() || {} : {};
// Clone options
options = Object.assign({}, options);
// Force read preference primary
options.readPreference = ReadPreference.PRIMARY;
return executeOperation(this.s.topology, findAndModify, [
this,
query,
sort,
doc,
options,
callback
]);
}, 'collection.findAndModify is deprecated. Use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead.');
/**
* Find and remove a document.
* @method
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use findOneAndDelete instead
*/
Collection.prototype.findAndRemove = deprecate(function(query, sort, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
sort = args.length ? args.shift() || [] : [];
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, findAndRemove, [this, query, sort, options, callback]);
}, 'collection.findAndRemove is deprecated. Use findOneAndDelete instead.');
/**
* Execute an aggregation framework pipeline against the collection, needs MongoDB >= 2.2
* @method
* @param {object} [pipeline=[]] Array containing all the aggregation framework commands for the execution.
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {object} [options.cursor] Return the query as a cursor; on MongoDB 2.6 and above a real cursor is returned, on pre-2.6 servers an emulated cursor is returned.
 * @param {number} [options.cursor.batchSize] The batchSize for the cursor
 * @param {boolean} [options.explain=false] Explain returns the aggregation execution plan (requires MongoDB >= 2.6).
 * @param {boolean} [options.allowDiskUse=false] allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB >= 2.6).
* @param {number} [options.maxTimeMS] maxTimeMS specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {string} [options.comment] Add a comment to an aggregation command
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~aggregationCallback} callback The command result callback
* @return {(null|AggregationCursor)}
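 *
 * @example
 * // Illustrative sketch only; the pipeline stages are hypothetical. Without
 * // a callback an AggregationCursor is returned:
 * const cursor = collection.aggregate(
 *   [
 *     { $match: { active: true } },
 *     { $group: { _id: '$team', n: { $sum: 1 } } }
 *   ],
 *   { allowDiskUse: true }
 * );
 * cursor.toArray().then(docs => console.log(docs));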
*/
Collection.prototype.aggregate = function(pipeline, options, callback) {
if (Array.isArray(pipeline)) {
// Set up callback if one is provided
if (typeof options === 'function') {
callback = options;
options = {};
}
// If we have no options or callback we are doing
// a cursor based aggregation
if (options == null && callback == null) {
options = {};
}
} else {
// Aggregation pipeline passed as arguments on the method
const args = Array.prototype.slice.call(arguments, 0);
// Get the callback
callback = args.pop();
// Get the possible options object
const opts = args[args.length - 1];
    // If it contains any of the admissible options, pop it off the args
options =
opts &&
(opts.readPreference ||
opts.explain ||
opts.cursor ||
opts.out ||
opts.maxTimeMS ||
opts.hint ||
opts.allowDiskUse)
? args.pop()
: {};
// Left over arguments is the pipeline
pipeline = args;
}
// Ignore readConcern option
let ignoreReadConcern = false;
// Build the command
const command = { aggregate: this.s.name, pipeline: pipeline };
// If out was specified
if (typeof options.out === 'string') {
pipeline.push({ $out: options.out });
// Ignore read concern
ignoreReadConcern = true;
} else if (pipeline.length > 0 && pipeline[pipeline.length - 1]['$out']) {
ignoreReadConcern = true;
}
// Decorate command with writeConcern if out has been specified
if (
pipeline.length > 0 &&
pipeline[pipeline.length - 1]['$out'] &&
this.s.topology.capabilities().commandsTakeWriteConcern
) {
applyWriteConcern(command, { db: this.s.db, collection: this }, options);
}
// Have we specified collation
try {
decorateWithCollation(command, this, options);
} catch (err) {
if (typeof callback === 'function') return callback(err, null);
throw err;
}
// If we have bypassDocumentValidation set
if (options.bypassDocumentValidation === true) {
command.bypassDocumentValidation = options.bypassDocumentValidation;
}
// Do we have a readConcern specified
if (!ignoreReadConcern) {
decorateWithReadConcern(command, this, options);
}
// If we have allowDiskUse defined
if (options.allowDiskUse) command.allowDiskUse = options.allowDiskUse;
if (typeof options.maxTimeMS === 'number') command.maxTimeMS = options.maxTimeMS;
// If we are giving a hint
if (options.hint) command.hint = options.hint;
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: this.s.db, collection: this });
// If explain has been specified add it
if (options.explain) {
if (command.readConcern || command.writeConcern) {
throw toError('"explain" cannot be used on an aggregate call with readConcern/writeConcern');
}
command.explain = options.explain;
}
if (typeof options.comment === 'string') command.comment = options.comment;
// Validate that cursor options is valid
if (options.cursor != null && typeof options.cursor !== 'object') {
throw toError('cursor options must be an object');
}
options.cursor = options.cursor || {};
if (options.batchSize) options.cursor.batchSize = options.batchSize;
command.cursor = options.cursor;
// promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
// Set the AggregationCursor constructor
options.cursorFactory = AggregationCursor;
if (typeof callback !== 'function') {
if (!this.s.topology.capabilities()) {
throw new MongoError('cannot connect to server');
}
// Allow disk usage command
if (typeof options.allowDiskUse === 'boolean') command.allowDiskUse = options.allowDiskUse;
if (typeof options.maxTimeMS === 'number') command.maxTimeMS = options.maxTimeMS;
// Execute the cursor
return this.s.topology.cursor(this.s.namespace, command, options);
}
return handleCallback(callback, null, this.s.topology.cursor(this.s.namespace, command, options));
};
/**
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this collection.
* @method
* @since 3.0.0
* @param {Array} [pipeline] An array of {@link https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param {object} [options] Optional settings
* @param {string} [options.fullDocument='default'] Allowed values: ‘default’, ‘updateLookup’. When set to ‘updateLookup’, the change stream will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
* @param {object} [options.resumeAfter] Specifies the logical starting point for the new change stream. This should be the _id field from a previously returned change stream document.
* @param {number} [options.maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a change stream query
* @param {number} [options.batchSize] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {object} [options.collation] Specify collation settings for operation. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {ReadPreference} [options.readPreference] The read preference. Defaults to the read preference of the database or collection. See {@link https://docs.mongodb.com/manual/reference/read-preference|read preference documentation}.
* @param {Timestamp} [options.startAtClusterTime] receive change events that occur after the specified timestamp
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {ChangeStream} a ChangeStream instance.
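 *
 * @example
 * // Illustrative sketch only; the $match stage is hypothetical:
 * const changeStream = collection.watch(
 *   [{ $match: { operationType: 'insert' } }],
 *   { fullDocument: 'updateLookup' }
 * );
 * changeStream.on('change', change => console.log(change.fullDocument));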
*/
Collection.prototype.watch = function(pipeline, options) {
pipeline = pipeline || [];
options = options || {};
// Allow optionally not specifying a pipeline
if (!Array.isArray(pipeline)) {
options = pipeline;
pipeline = [];
}
return new ChangeStream(this, pipeline, options);
};
/**
* The callback format for results
* @callback Collection~parallelCollectionScanCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Cursor[]} cursors A list of cursors returned allowing for parallel reading of collection.
*/
/**
 * Return N parallel cursors for a collection, allowing parallel reading of the entire collection. There are
 * no ordering guarantees for returned results.
* @method
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.batchSize] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {number} [options.numCursors=1] The maximum number of parallel command cursors to return (the number of returned cursors will be in the range 1:numCursors)
* @param {boolean} [options.raw=false] Return all BSON documents as Raw Buffer documents.
* @param {Collection~parallelCollectionScanCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.parallelCollectionScan = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = { numCursors: 1 });
  // Default the number of cursors to 1
options.numCursors = options.numCursors || 1;
options.batchSize = options.batchSize || 1000;
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: this.s.db, collection: this });
// Add a promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
if (options.session) {
options.session = undefined;
}
return executeOperation(this.s.topology, parallelCollectionScan, [this, options, callback], {
skipSessions: true
});
};
/**
* Execute a geo search using a geo haystack index on a collection.
*
* @method
* @param {number} x Point to search on the x axis, ensure the indexes are ordered in the same order.
* @param {number} y Point to search on the y axis, ensure the indexes are ordered in the same order.
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.maxDistance] Include results up to maxDistance from the point.
* @param {object} [options.search] Filter the results by a query.
 * @param {number} [options.limit] Max number of results to return.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.geoHaystackSearch = function(x, y, options, callback) {
const args = Array.prototype.slice.call(arguments, 2);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, geoHaystackSearch, [this, x, y, options, callback]);
};
/**
* Run a group command across a collection
*
* @method
* @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by.
* @param {object} condition An optional condition that must be true for a row to be considered.
* @param {object} initial Initial value of the aggregation counter object.
* @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated
* @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned.
* @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true.
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated MongoDB 3.6 or higher no longer supports the group command. We recommend rewriting using the aggregation framework.
*/
Collection.prototype.group = deprecate(function(
keys,
condition,
initial,
reduce,
finalize,
command,
options,
callback
) {
const args = Array.prototype.slice.call(arguments, 3);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
reduce = args.length ? args.shift() : null;
finalize = args.length ? args.shift() : null;
command = args.length ? args.shift() : null;
options = args.length ? args.shift() || {} : {};
// Make sure we are backward compatible
if (!(typeof finalize === 'function')) {
command = finalize;
finalize = null;
}
if (
!Array.isArray(keys) &&
keys instanceof Object &&
typeof keys !== 'function' &&
!(keys._bsontype === 'Code')
) {
keys = Object.keys(keys);
}
if (typeof reduce === 'function') {
reduce = reduce.toString();
}
if (typeof finalize === 'function') {
finalize = finalize.toString();
}
// Set up the command as default
command = command == null ? true : command;
return executeOperation(this.s.topology, group, [
this,
keys,
condition,
initial,
reduce,
finalize,
command,
options,
callback
]);
},
'MongoDB 3.6 or higher no longer supports the group command. We recommend rewriting using the aggregation framework.');
/**
* Run Map Reduce across a collection. Be aware that the inline option for out will return an array of results not a collection.
*
* @method
* @param {(function|string)} map The mapping function.
* @param {(function|string)} reduce The reduce function.
* @param {object} [options] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {object} [options.out] Sets the output target for the map reduce job. *{inline:1} | {replace:'collectionName'} | {merge:'collectionName'} | {reduce:'collectionName'}*
* @param {object} [options.query] Query filter object.
* @param {object} [options.sort] Sorts the input objects using this key. Useful for optimization, like sorting by the emit key for fewer reduces.
* @param {number} [options.limit] Number of objects to return from collection.
* @param {boolean} [options.keeptemp=false] Keep temporary data.
* @param {(function|string)} [options.finalize] Finalize function.
 * @param {object} [options.scope] Can pass in variables that can be accessed from map/reduce/finalize.
* @param {boolean} [options.jsMode=false] It is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X.
* @param {boolean} [options.verbose=false] Provide statistics on job execution time.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
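 *
 * @example
 * // Illustrative sketch only; the map/reduce functions and field names are
 * // hypothetical. Note that `out` is required (see the check below):
 * collection.mapReduce(
 *   function() { emit(this.team, 1); },
 *   function(key, values) { return Array.sum(values); },
 *   { out: { inline: 1 } }
 * ).then(results => console.log(results));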
*/
Collection.prototype.mapReduce = function(map, reduce, options, callback) {
if ('function' === typeof options) (callback = options), (options = {});
  // Out must always be defined (make sure we don't break weirdly on pre 1.8+ servers)
if (null == options.out) {
throw new Error(
'the out option parameter must be defined, see mongodb docs for possible values'
);
}
if ('function' === typeof map) {
map = map.toString();
}
if ('function' === typeof reduce) {
reduce = reduce.toString();
}
if ('function' === typeof options.finalize) {
options.finalize = options.finalize.toString();
}
return executeOperation(this.s.topology, mapReduce, [this, map, reduce, options, callback]);
};
/**
 * Initiate an out-of-order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.
*
* @method
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {UnorderedBulkOperation}
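 *
 * @example
 * // Illustrative sketch only; the buffered operations are hypothetical:
 * const bulk = collection.initializeUnorderedBulkOp();
 * bulk.insert({ a: 1 });
 * bulk.find({ a: 1 }).updateOne({ $set: { b: 2 } });
 * bulk.execute().then(result => console.log(result.nInserted));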
*/
Collection.prototype.initializeUnorderedBulkOp = function(options) {
options = options || {};
options.promiseLibrary = this.s.promiseLibrary;
return unordered(this.s.topology, this, options);
};
/**
 * Initiate an in-order bulk write operation. Operations will be serially executed in the order they are added, creating a new operation for each switch in types.
*
* @method
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
 * @return {OrderedBulkOperation}
*/
Collection.prototype.initializeOrderedBulkOp = function(options) {
options = options || {};
options.promiseLibrary = this.s.promiseLibrary;
return ordered(this.s.topology, this, options);
};
/**
* Return the db logger
* @method
* @return {Logger} return the db logger
* @ignore
*/
Collection.prototype.getLogger = function() {
return this.s.db.s.logger;
};
module.exports = Collection;
| 1 | 15,088 | @kvwalker should this method have `ignoreUndefined` supported as an `option`? | mongodb-node-mongodb-native | js |
@@ -52,10 +52,11 @@ func New(_ *any.Any, logger *zap.Logger, _ tally.Scope) (service.Service, error)
return nil, errors.New("experiment store wrong type")
}
- transformer := NewTransformer()
+ sugaredLogger := logger.Sugar()
+ transformer := NewTransformer(sugaredLogger)
return &storer{
client.DB(),
- logger.Sugar(),
+ sugaredLogger,
&transformer,
}, nil
} | 1 | package experimentstore
import (
"bytes"
"context"
"database/sql"
"errors"
"strings"
"time"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/uber-go/tally"
"go.uber.org/zap"
experimentation "github.com/lyft/clutch/backend/api/chaos/experimentation/v1"
"github.com/lyft/clutch/backend/id"
"github.com/lyft/clutch/backend/service"
pgservice "github.com/lyft/clutch/backend/service/db/postgres"
)
const Name = "clutch.service.chaos.experimentation.store"
// Storer stores experiment data
type Storer interface {
CreateExperiment(context.Context, *any.Any, *time.Time, *time.Time) (*experimentation.Experiment, error)
CancelExperimentRun(context.Context, uint64) error
GetExperiments(ctx context.Context, configType string, status experimentation.GetExperimentsRequest_Status) ([]*experimentation.Experiment, error)
GetExperimentRunDetails(ctx context.Context, id uint64) (*experimentation.ExperimentRunDetails, error)
GetListView(ctx context.Context) ([]*experimentation.ListViewItem, error)
RegisterTransformation(transformation Transformation) error
Close()
}
type storer struct {
db *sql.DB
logger *zap.SugaredLogger
transformer *Transformer
}
// New returns a new experiment store service instance.
func New(_ *any.Any, logger *zap.Logger, _ tally.Scope) (service.Service, error) {
p, ok := service.Registry[pgservice.Name]
if !ok {
return nil, errors.New("could not find database service")
}
client, ok := p.(pgservice.Client)
if !ok {
return nil, errors.New("experiment store wrong type")
}
transformer := NewTransformer()
return &storer{
client.DB(),
logger.Sugar(),
&transformer,
}, nil
}
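// CreateExperiment persists the provided experiment config and immediately
// starts a run for it, returning the created experiment.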
func (s *storer) CreateExperiment(ctx context.Context, config *any.Any, startTime *time.Time, endTime *time.Time) (*experimentation.Experiment, error) {
// This API call will eventually be broken into 2 separate calls:
// 1) creating the config
// 2) starting a new experiment with the config
// All experiments are created in a single transaction
	tx, err := s.db.Begin()
	if err != nil {
		return nil, err
	}
	// Roll back automatically on any early return; Rollback is a no-op after
	// a successful Commit.
	defer tx.Rollback()
if config == nil {
return nil, errors.New("empty config")
}
// Step 1) create the config
configID := id.NewID()
configJson, err := marshalConfig(config)
if err != nil {
return nil, err
}
configSql := `INSERT INTO experiment_config (id, details) VALUES ($1, $2)`
	_, err = tx.ExecContext(ctx, configSql, configID, configJson)
if err != nil {
return nil, err
}
// Step 2) start a new experiment with the config
runSql := `
INSERT INTO experiment_run (
id,
experiment_config_id,
execution_time,
creation_time)
VALUES ($1, $2, tstzrange($3, $4, '[]'), NOW())`
runId := id.NewID()
	_, err = tx.ExecContext(ctx, runSql, runId, configID, startTime, endTime)
if err != nil {
return nil, err
}
err = tx.Commit()
if err != nil {
return nil, err
}
st, err := toProto(startTime)
if err != nil {
return nil, err
}
et, err := toProto(endTime)
if err != nil {
return nil, err
}
return &experimentation.Experiment{
// TODO(bgallagher) temporarily returning the experiment run ID. Eventually, the CreateExperiments function
// will be split into CreateExperimentConfig and CreateExperimentRun in which case they will each return
// their respective IDs
Id: uint64(runId),
Config: config,
StartTime: st,
EndTime: et,
}, nil
}
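// CancelExperimentRun marks a not-yet-completed experiment run as canceled.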
func (s *storer) CancelExperimentRun(ctx context.Context, id uint64) error {
sql :=
`UPDATE experiment_run
SET cancellation_time = NOW()
WHERE id = $1 AND cancellation_time IS NULL AND (upper(execution_time) IS NULL OR NOW() < upper(execution_time))`
_, err := s.db.ExecContext(ctx, sql, id)
return err
}
// GetExperiments returns experiments whose configuration matches the given type.
// It returns all experiments if the provided configuration type parameter is an empty string.
func (s *storer) GetExperiments(ctx context.Context, configType string, status experimentation.GetExperimentsRequest_Status) ([]*experimentation.Experiment, error) {
query := `
SELECT
experiment_run.id,
details
FROM experiment_config, experiment_run
WHERE
experiment_config.id = experiment_run.experiment_config_id` +
// Return only experiments of a given `configType` or all of them if configType is equal to an empty string.
` AND ($1 = '' OR $1 = experiment_config.details ->> '@type')` +
// Return only running experiments if `status` is equal to `Running`, return all experiments otherwise.
` AND ($2 = 'UNSPECIFIED' OR (experiment_run.cancellation_time is NULL AND NOW() > lower(experiment_run.execution_time) AND (upper(experiment_run.execution_time) IS NULL OR NOW() < upper(experiment_run.execution_time))))`
rows, err := s.db.QueryContext(ctx, query, configType, status.String())
if err != nil {
return nil, err
}
defer rows.Close()
var experiments []*experimentation.Experiment
for rows.Next() {
var experiment experimentation.Experiment
var details string
err = rows.Scan(&experiment.Id, &details)
if err != nil {
return nil, err
}
anyConfig := &any.Any{}
err = jsonpb.Unmarshal(strings.NewReader(details), anyConfig)
if err != nil {
return nil, err
}
experiment.Config = anyConfig
experiments = append(experiments, &experiment)
}
err = rows.Err()
if err != nil {
return nil, err
}
return experiments, nil
}
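// GetListView returns a list view item for every stored experiment run.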
func (s *storer) GetListView(ctx context.Context) ([]*experimentation.ListViewItem, error) {
query := `
SELECT
experiment_run.id,
lower(execution_time),
upper(execution_time),
cancellation_time,
creation_time,
experiment_config.id,
details
FROM experiment_config, experiment_run
WHERE
experiment_config.id = experiment_run.experiment_config_id`
rows, err := s.db.QueryContext(ctx, query)
if err != nil {
return nil, err
}
defer rows.Close()
var listViewItems []*experimentation.ListViewItem
for rows.Next() {
var details string
run := ExperimentRun{}
config := ExperimentConfig{Config: &any.Any{}}
err = rows.Scan(&run.id, &run.startTime, &run.endTime, &run.cancellationTime, &run.creationTime, &config.id, &details)
if err != nil {
return nil, err
}
if err = jsonpb.Unmarshal(strings.NewReader(details), config.Config); err != nil {
return nil, err
}
item, err := NewRunListView(&run, &config, s.transformer, time.Now())
if err != nil {
return nil, err
}
listViewItems = append(listViewItems, item)
}
err = rows.Err()
if err != nil {
return nil, err
}
return listViewItems, nil
}
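// GetExperimentRunDetails returns the full details of the experiment run with
// the given ID.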
func (s *storer) GetExperimentRunDetails(ctx context.Context, id uint64) (*experimentation.ExperimentRunDetails, error) {
sqlQuery := `
SELECT
experiment_run.id,
lower(execution_time),
upper(execution_time),
cancellation_time,
creation_time,
experiment_config.id,
details FROM experiment_config, experiment_run
WHERE experiment_run.id = $1 AND experiment_run.experiment_config_id = experiment_config.id`
row := s.db.QueryRowContext(ctx, sqlQuery, id)
var details string
run := ExperimentRun{}
config := ExperimentConfig{Config: &any.Any{}}
err := row.Scan(&run.id, &run.startTime, &run.endTime, &run.cancellationTime, &run.creationTime, &config.id, &details)
if err != nil {
return nil, err
}
err = jsonpb.Unmarshal(strings.NewReader(details), config.Config)
if err != nil {
return nil, err
}
return NewRunDetails(&run, &config, s.transformer, time.Now())
}
// Close closes all resources held.
func (s *storer) Close() {
s.db.Close()
}
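// RegisterTransformation registers a run-details transformation with the
// store's transformer.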
func (s *storer) RegisterTransformation(transformation Transformation) error {
err := s.transformer.Register(transformation)
if err != nil {
s.logger.Fatal("Could not register transformation %v", transformation)
}
return err
}
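// toProto converts an optional time.Time into a protobuf Timestamp; a nil
// input is passed through as nil.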
func toProto(t *time.Time) (*timestamp.Timestamp, error) {
if t == nil {
return nil, nil
}
timestampProto, err := ptypes.TimestampProto(*t)
if err != nil {
return nil, err
}
return timestampProto, nil
}
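// marshalConfig serializes an experiment config into its protobuf JSON
// representation.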
func marshalConfig(config *any.Any) (string, error) {
marshaler := jsonpb.Marshaler{}
buf := &bytes.Buffer{}
err := marshaler.Marshal(buf, config)
if err != nil {
return "", err
}
return buf.String(), nil
}
| 1 | 8,979 | we don't use a `sugaredlogger` anywhere else in clutch, just curious why we're using it here? | lyft-clutch | go |
@@ -388,7 +388,6 @@ class ServerCallback(MessageCallback):
if not self._queue:
self._active = False
return
- self._queue = []
# Get unique event types in the queue
events = list(OrderedDict([(event.event_name, event)
for event in self._queue]).values()) | 1 | from collections import defaultdict
from bokeh.models import CustomJS, FactorRange, DatetimeAxis
from ...core import OrderedDict
from ...streams import (Stream, PointerXY, RangeXY, Selection1D, RangeX,
RangeY, PointerX, PointerY, BoundsX, BoundsY,
Tap, SingleTap, DoubleTap, MouseEnter, MouseLeave,
PlotSize, Draw, BoundsXY, PlotReset)
from ...streams import PositionX, PositionY, PositionXY, Bounds # Deprecated: remove in 2.0
from ..comms import JupyterCommJS, Comm
from .util import convert_timestamp
class MessageCallback(object):
"""
A MessageCallback is an abstract baseclass used to supply Streams
with events originating from bokeh plot interactions. The baseclass
defines how messages are handled and the basic specification required
to define a Callback.
"""
attributes = {}
# The plotting handle(s) to attach the JS callback on
models = []
# Additional models available to the callback
extra_models = []
# Conditions when callback should be skipped
skip = []
# Callback will listen to events of the supplied type on the models
on_events = []
# List of change events on the models to listen to
on_changes = []
_callbacks = {}
def _process_msg(self, msg):
"""
Subclassable method to preprocess JSON message in callback
before passing to stream.
"""
return msg
def __init__(self, plot, streams, source, **params):
self.plot = plot
self.streams = streams
if plot.renderer.mode != 'server':
try:
self.comm = self._comm_type(plot, on_msg=self.on_msg)
except AttributeError:
self.comm = Comm(plot)
self.source = source
self.handle_ids = defaultdict(dict)
self.callbacks = []
self.plot_handles = {}
self._queue = []
def _filter_msg(self, msg, ids):
"""
Filter event values that do not originate from the plotting
handles associated with a particular stream using their
ids to match them.
"""
filtered_msg = {}
for k, v in msg.items():
if isinstance(v, dict) and 'id' in v:
if v['id'] in ids:
filtered_msg[k] = v['value']
else:
filtered_msg[k] = v
return filtered_msg
def on_msg(self, msg):
streams = []
for stream in self.streams:
handle_ids = self.handle_ids[stream]
ids = list(handle_ids.values())
filtered_msg = self._filter_msg(msg, ids)
processed_msg = self._process_msg(filtered_msg)
if not processed_msg and not stream.transient:
continue
stream.update(**processed_msg)
stream._metadata = {h: {'id': hid, 'events': self.on_events}
for h, hid in handle_ids.items()}
streams.append(stream)
Stream.trigger(streams)
for stream in streams:
stream._metadata = {}
def _init_plot_handles(self):
"""
Find all requested plotting handles and cache them along
with the IDs of the models the callbacks will be attached to.
"""
plots = [self.plot]
if self.plot.subplots:
plots += list(self.plot.subplots.values())
handles = {}
for plot in plots:
for k, v in plot.handles.items():
handles[k] = v
self.plot_handles = handles
requested = {}
for h in self.models+self.extra_models:
if h in self.plot_handles:
requested[h] = handles[h]
elif h in self.extra_models:
print("Warning %s could not find the %s model. "
"The corresponding stream may not work.")
self.handle_ids.update(self._get_stream_handle_ids(requested))
return requested
def _get_stream_handle_ids(self, handles):
"""
Gather the ids of the plotting handles attached to this callback
This allows checking that a stream is not given the state
of a plotting handle it wasn't attached to
"""
stream_handle_ids = defaultdict(dict)
for stream in self.streams:
for h in self.models:
if h in handles:
handle_id = handles[h].ref['id']
stream_handle_ids[stream][h] = handle_id
return stream_handle_ids
class CustomJSCallback(MessageCallback):
"""
The CustomJSCallback attaches CustomJS callbacks to a bokeh plot,
which looks up the requested attributes and sends back a message
to Python using a Comms instance.
"""
js_callback = """
function unique_events(events) {{
// Processes the event queue ignoring duplicate events
// of the same type
var unique = [];
var unique_events = [];
for (var i=0; i<events.length; i++) {{
var _tmpevent = events[i];
event = _tmpevent[0];
data = _tmpevent[1];
if (unique_events.indexOf(event)===-1) {{
unique.unshift(data);
unique_events.push(event);
}}
}}
return unique;
}}
function process_events(comm_state) {{
// Iterates over event queue and sends events via Comm
var events = unique_events(comm_state.event_buffer);
for (var i=0; i<events.length; i++) {{
var data = events[i];
var comm = HoloViewsWidget.comms[data["comm_id"]];
comm.send(data);
}}
comm_state.event_buffer = [];
}}
function on_msg(msg){{
// Receives acknowledgement from Python, processing event
// and unblocking Comm if event queue empty
msg = JSON.parse(msg.content.data);
var comm_id = msg["comm_id"]
var comm_state = HoloViewsWidget.comm_state[comm_id];
if (comm_state.event_buffer.length) {{
process_events(comm_state);
comm_state.blocked = true;
comm_state.time = Date.now()+{debounce};
}} else {{
comm_state.blocked = false;
}}
comm_state.event_buffer = [];
if ((msg.msg_type == "Ready") && msg.content) {{
console.log("Python callback returned following output:", msg.content);
}} else if (msg.msg_type == "Error") {{
console.log("Python failed with the following traceback:", msg['traceback'])
}}
}}
// Initialize Comm
if ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null)) {{
var comm_manager = Jupyter.notebook.kernel.comm_manager;
var comm = HoloViewsWidget.comms["{comm_id}"];
if (comm == null) {{
comm = comm_manager.new_comm("{comm_id}", {{}}, {{}}, {{}});
comm.on_msg(on_msg);
comm_manager["{comm_id}"] = comm;
HoloViewsWidget.comms["{comm_id}"] = comm;
}}
}} else {{
return
}}
// Initialize event queue and timeouts for Comm
var comm_state = HoloViewsWidget.comm_state["{comm_id}"];
if (comm_state === undefined) {{
comm_state = {{event_buffer: [], blocked: false, time: Date.now()}}
HoloViewsWidget.comm_state["{comm_id}"] = comm_state
}}
// Add current event to queue and process queue if not blocked
event_name = cb_obj.event_name
data['comm_id'] = "{comm_id}";
timeout = comm_state.time + {timeout};
if ((window.Jupyter == null) | (Jupyter.notebook.kernel == null)) {{
}} else if ((comm_state.blocked && (Date.now() < timeout))) {{
comm_state.event_buffer.unshift([event_name, data]);
}} else {{
comm_state.event_buffer.unshift([event_name, data]);
setTimeout(function() {{ process_events(comm_state); }}, {debounce});
comm_state.blocked = true;
comm_state.time = Date.now()+{debounce};
}}
"""
code = ""
# Timeout if a comm message is swallowed
timeout = 20000
# Timeout before the first event is processed
debounce = 20
_comm_type = JupyterCommJS
@classmethod
def attributes_js(cls, attributes):
"""
Generates JS code to look up attributes on JS objects from
an attributes specification dictionary. If the specification
        references a particular plotting handle it will also
generate JS code to get the ID of the object.
Simple example (when referencing cb_data or cb_obj):
Input : {'x': 'cb_data.geometry.x'}
Output : data['x'] = cb_data['geometry']['x']
Example referencing plot handle:
Input : {'x0': 'x_range.attributes.start'}
Output : if ((x_range !== undefined)) {
data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
}
"""
assign_template = '{assign}{{id: {obj_name}["id"], value: {obj_name}{attr_getters}}};\n'
conditional_template = 'if (({obj_name} != undefined)) {{ {assign} }}'
code = ''
for key, attr_path in sorted(attributes.items()):
data_assign = 'data["{key}"] = '.format(key=key)
attrs = attr_path.split('.')
obj_name = attrs[0]
attr_getters = ''.join(['["{attr}"]'.format(attr=attr)
for attr in attrs[1:]])
if obj_name not in ['cb_obj', 'cb_data']:
assign_str = assign_template.format(
assign=data_assign, obj_name=obj_name, attr_getters=attr_getters
)
code += conditional_template.format(
obj_name=obj_name, assign=assign_str
)
else:
assign_str = ''.join([data_assign, obj_name, attr_getters, ';\n'])
code += assign_str
return code
def get_customjs(self, references):
"""
Creates a CustomJS callback that will send the requested
attributes back to python.
"""
# Generate callback JS code to get all the requested data
self_callback = self.js_callback.format(comm_id=self.comm.id,
timeout=self.timeout,
debounce=self.debounce)
attributes = self.attributes_js(self.attributes)
conditions = ["%s" % cond for cond in self.skip]
conditional = ''
if conditions:
conditional = 'if (%s) { return };\n' % (' || '.join(conditions))
data = "var data = {};\n"
code = conditional + data + attributes + self.code + self_callback
return CustomJS(args=references, code=code)
def set_customjs_callback(self, js_callback, handle):
"""
Generates a CustomJS callback by generating the required JS
code and gathering all plotting handles and installs it on
the requested callback handle.
"""
if self.on_events:
for event in self.on_events:
handle.js_on_event(event, js_callback)
elif self.on_changes:
for change in self.on_changes:
handle.js_on_change(change, js_callback)
elif hasattr(handle, 'callback'):
handle.callback = js_callback
class ServerCallback(MessageCallback):
"""
Implements methods to set up bokeh server callbacks. A ServerCallback
resolves the requested attributes on the Python end and then hands
the msg off to the general on_msg handler, which will update the
Stream(s) attached to the callback.
"""
def __init__(self, plot, streams, source, **params):
super(ServerCallback, self).__init__(plot, streams, source, **params)
self._active = False
@classmethod
def resolve_attr_spec(cls, spec, cb_obj, model=None):
"""
Resolves a Callback attribute specification looking the
corresponding attribute up on the cb_obj, which should be a
        bokeh model. If no model is supplied cb_obj is assumed to
be the same as the model.
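
        Example (hypothetical model id and value, for illustration only)::

            >>> ServerCallback.resolve_attr_spec('x_range.attributes.start', x_range)
            {'id': '1006', 'value': 0.0}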
"""
if not cb_obj:
raise Exception('Bokeh plot attribute %s could not be found' % spec)
if model is None:
model = cb_obj
spec = spec.split('.')
resolved = cb_obj
for p in spec[1:]:
if p == 'attributes':
continue
if isinstance(resolved, dict):
resolved = resolved.get(p)
else:
resolved = getattr(resolved, p, None)
return {'id': model.ref['id'], 'value': resolved}
def on_change(self, attr, old, new):
"""
        Process change events, adding a timeout so that multiple
        concerted value changes are processed at once rather than
        firing off multiple plot updates.
"""
self._queue.append((attr, old, new))
if not self._active:
self.plot.document.add_timeout_callback(self.process_on_change, 50)
self._active = True
def on_event(self, event):
"""
        Process bokeh UIEvents, adding a timeout so that multiple
        concerted events are processed at once rather than firing
        off multiple plot updates.
"""
        self._queue.append(event)
if not self._active:
self.plot.document.add_timeout_callback(self.process_on_event, 50)
self._active = True
def process_on_event(self):
"""
        Process queued bokeh UI events, triggering the corresponding streams.
"""
if not self._queue:
self._active = False
return
# Get unique event types in the queue
events = list(OrderedDict([(event.event_name, event)
for event in self._queue]).values())
self._queue = []
# Process event types
for event in events:
msg = {}
for attr, path in self.attributes.items():
model_obj = self.plot_handles.get(self.models[0])
msg[attr] = self.resolve_attr_spec(path, event, model_obj)
self.on_msg(msg)
self.plot.document.add_timeout_callback(self.process_on_event, 50)
def process_on_change(self):
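        """
        Process pending attribute change events, resolving the requested
        attributes and triggering the corresponding streams.
        """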
if not self._queue:
self._active = False
return
self._queue = []
msg = {}
for attr, path in self.attributes.items():
attr_path = path.split('.')
if attr_path[0] == 'cb_obj':
obj_handle = self.models[0]
path = '.'.join(self.models[:1]+attr_path[1:])
else:
obj_handle = attr_path[0]
cb_obj = self.plot_handles.get(obj_handle)
msg[attr] = self.resolve_attr_spec(path, cb_obj)
self.on_msg(msg)
self.plot.document.add_timeout_callback(self.process_on_change, 50)
def set_server_callback(self, handle):
"""
Set up on_change events for bokeh server interactions.
"""
if self.on_events:
for event in self.on_events:
handle.on_event(event, self.on_event)
elif self.on_changes:
for change in self.on_changes:
handle.on_change(change, self.on_change)
class Callback(CustomJSCallback, ServerCallback):
"""
Provides a baseclass to define callbacks, which return data from
bokeh model callbacks, events and attribute changes. The callback
then makes this data available to any streams attached to it.
The definition of a callback consists of a number of components:
* models : Defines which bokeh models the callback will be
attached on referencing the model by its key in
the plots handles, e.g. this could be the x_range,
y_range, plot, a plotting tool or any other
                    bokeh model.
* extra_models: Any additional models available in handles which
should be made available in the namespace of the
objects, e.g. to make a tool available to skip
checks.
* attributes : The attributes define which attributes to send
back to Python. They are defined as a dictionary
mapping between the name under which the variable
is made available to Python and the specification
of the attribute. The specification should start
with the variable name that is to be accessed and
the location of the attribute separated by
periods. All models defined by the models and
extra_models attributes can be addressed in this
way, e.g. to get the start of the x_range as 'x'
you can supply {'x': 'x_range.attributes.start'}.
Additionally certain handles additionally make the
cb_data and cb_obj variables available containing
additional information about the event.
* skip : Conditions when the Callback should be skipped
specified as a list of valid JS expressions, which
can reference models requested by the callback,
e.g. ['pan.attributes.active'] would skip the
callback if the pan tool is active.
* code : Defines any additional JS code to be executed,
which can modify the data object that is sent to
the backend.
* on_events : If the Callback should listen to bokeh events this
should declare the types of event as a list (optional)
* on_changes : If the Callback should listen to model attribute
changes on the defined ``models`` (optional)
If either on_events or on_changes are declared the Callback will
be registered using the on_event or on_change machinery, otherwise
it will be treated as a regular callback on the model. The
callback can also define a _process_msg method, which can modify
the data sent by the callback before it is passed to the streams.
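
    A minimal sketch (illustrative only; any stream wiring is hypothetical)::

        class PointerXExample(Callback):
            attributes = {'x': 'cb_obj.x'}   # value sent back to Python
            models = ['plot']                # attach to the plot model
            on_events = ['mousemove']        # listen for bokeh mousemove

            def _process_msg(self, msg):
                # Optionally post-process before streams receive the data
                return msg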
"""
def initialize(self):
handles = self._init_plot_handles()
for handle_name in self.models:
if handle_name not in handles:
warn_args = (handle_name, type(self.plot).__name__,
type(self).__name__)
print('%s handle not found on %s, cannot '
'attach %s callback' % warn_args)
continue
handle = handles[handle_name]
# Hash the plot handle with Callback type allowing multiple
# callbacks on one handle to be merged
cb_hash = (id(handle), id(type(self)))
if cb_hash in self._callbacks:
# Merge callbacks if another callback has already been attached
cb = self._callbacks[cb_hash]
cb.streams = list(set(cb.streams+self.streams))
for k, v in self.handle_ids.items():
cb.handle_ids[k].update(v)
continue
if self.plot.renderer.mode == 'server':
self.set_server_callback(handle)
else:
js_callback = self.get_customjs(handles)
self.set_customjs_callback(js_callback, handle)
self.callbacks.append(js_callback)
self._callbacks[cb_hash] = self
class PointerXYCallback(Callback):
"""
Returns the mouse x/y-position on mousemove event.
"""
attributes = {'x': 'cb_obj.x', 'y': 'cb_obj.y'}
models = ['plot']
extra_models= ['x_range', 'y_range']
on_events = ['mousemove']
# Clip x and y values to available axis range
code = """
if (x_range.type.endsWith('Range1d')) {
if (cb_obj.x < x_range.start) {
data['x'] = x_range.start }
else if (cb_obj.x > x_range.end) {
data['x'] = x_range.end }}
if (y_range.type.endsWith('Range1d')) {
if (cb_obj.y < y_range.start) {
data['y'] = y_range.start }
else if (cb_obj.y > y_range.end) {
data['y'] = y_range.end }}
"""
def _process_msg(self, msg):
x_range = self.plot.handles.get('x_range')
y_range = self.plot.handles.get('y_range')
if isinstance(x_range, FactorRange) and isinstance(msg.get('x'), (int, float)):
msg['x'] = x_range.factors[int(msg['x'])]
if isinstance(y_range, FactorRange) and isinstance(msg.get('y'), (int, float)):
msg['y'] = y_range.factors[int(msg['y'])]
xaxis = self.plot.handles.get('xaxis')
yaxis = self.plot.handles.get('yaxis')
if 'x' in msg and isinstance(xaxis, DatetimeAxis):
msg['x'] = convert_timestamp(msg['x'])
if 'y' in msg and isinstance(yaxis, DatetimeAxis):
msg['y'] = convert_timestamp(msg['y'])
return msg
class PointerXCallback(PointerXYCallback):
"""
Returns the mouse x-position on mousemove event.
"""
attributes = {'x': 'cb_obj.x'}
extra_models= ['x_range']
code = """
if (x_range.type.endsWith('Range1d')) {
if (cb_obj.x < x_range.start) {
data['x'] = x_range.start }
else if (cb_obj.x > x_range.end) {
data['x'] = x_range.end }}
"""
class PointerYCallback(PointerXYCallback):
"""
    Returns the mouse y-position on mousemove event.
"""
attributes = {'y': 'cb_obj.y'}
extra_models= ['y_range']
code = """
if (y_range.type.endsWith('Range1d')) {
if (cb_obj.y < y_range.start) {
data['y'] = y_range.start }
else if (cb_obj.y > y_range.end) {
data['y'] = y_range.end }}
"""
class DrawCallback(PointerXYCallback):
on_events = ['pan', 'panstart', 'panend']
models = ['plot']
extra_models=['pan', 'box_zoom', 'x_range', 'y_range']
skip = ['pan && pan.attributes.active', 'box_zoom && box_zoom.attributes.active']
attributes = {'x': 'cb_obj.x', 'y': 'cb_obj.y', 'event': 'cb_obj.event_name'}
def __init__(self, *args, **kwargs):
self.stroke_count = 0
super(DrawCallback, self).__init__(*args, **kwargs)
def _process_msg(self, msg):
event = msg.pop('event')
if event == 'panend':
self.stroke_count += 1
return dict(msg, stroke_count=self.stroke_count)
class TapCallback(PointerXYCallback):
"""
Returns the mouse x/y-position on tap event.
Note: As of bokeh 0.12.5, there is no way to distinguish the
individual tap events within a doubletap event.
"""
on_events = ['tap', 'doubletap']
class SingleTapCallback(PointerXYCallback):
"""
Returns the mouse x/y-position on tap event.
"""
on_events = ['tap']
class DoubleTapCallback(PointerXYCallback):
"""
Returns the mouse x/y-position on doubletap event.
"""
on_events = ['doubletap']
class MouseEnterCallback(PointerXYCallback):
"""
Returns the mouse x/y-position on mouseenter event, i.e. when
mouse enters the plot canvas.
"""
on_events = ['mouseenter']
class MouseLeaveCallback(PointerXYCallback):
"""
Returns the mouse x/y-position on mouseleave event, i.e. when
mouse leaves the plot canvas.
"""
on_events = ['mouseleave']
class RangeXYCallback(Callback):
"""
Returns the x/y-axis ranges of a plot.
"""
attributes = {'x0': 'x_range.attributes.start',
'x1': 'x_range.attributes.end',
'y0': 'y_range.attributes.start',
'y1': 'y_range.attributes.end'}
models = ['x_range', 'y_range']
on_changes = ['start', 'end']
def _process_msg(self, msg):
data = {}
if 'x0' in msg and 'x1' in msg:
x0, x1 = msg['x0'], msg['x1']
if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
x0 = convert_timestamp(x0)
x1 = convert_timestamp(x1)
if self.plot.invert_xaxis:
x0, x1 = x1, x0
data['x_range'] = (x0, x1)
if 'y0' in msg and 'y1' in msg:
y0, y1 = msg['y0'], msg['y1']
if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
y0 = convert_timestamp(y0)
y1 = convert_timestamp(y1)
if self.plot.invert_yaxis:
y0, y1 = y1, y0
data['y_range'] = (y0, y1)
return data
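# Editorial usage sketch: RangeXY is the stream that viewport-aware
# operations subscribe to so they can recompute when the user pans or zooms.
# A minimal, runnable example (assuming the bokeh backend is active):
#
#     import holoviews as hv
#     from holoviews.streams import RangeXY
#
#     def label(x_range, y_range):
#         return hv.Text(0, 0, 'x={} y={}'.format(x_range, y_range))
#
#     hv.DynamicMap(label, streams=[RangeXY()])
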
class RangeXCallback(RangeXYCallback):
"""
Returns the x-axis range of a plot.
"""
attributes = {'x0': 'x_range.attributes.start',
'x1': 'x_range.attributes.end'}
models = ['x_range']
class RangeYCallback(RangeXYCallback):
"""
Returns the y-axis range of a plot.
"""
attributes = {'y0': 'y_range.attributes.start',
'y1': 'y_range.attributes.end'}
models = ['y_range']
class PlotSizeCallback(Callback):
"""
Returns the actual width and height of a plot once the layout
solver has executed.
"""
models = ['plot']
attributes = {'width': 'cb_obj.inner_width',
'height': 'cb_obj.inner_height'}
on_changes = ['inner_width', 'inner_height']
def _process_msg(self, msg):
if msg.get('width') and msg.get('height'):
return msg
else:
return {}
class BoundsCallback(Callback):
"""
Returns the bounds of a box_select tool.
"""
attributes = {'x0': 'cb_obj.geometry.x0',
'x1': 'cb_obj.geometry.x1',
'y0': 'cb_obj.geometry.y0',
'y1': 'cb_obj.geometry.y1'}
models = ['plot']
extra_models = ['box_select']
on_events = ['selectiongeometry']
skip = ["cb_obj.geometry.type != 'rect'"]
def _process_msg(self, msg):
if all(c in msg for c in ['x0', 'y0', 'x1', 'y1']):
if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
msg['x0'] = convert_timestamp(msg['x0'])
msg['x1'] = convert_timestamp(msg['x1'])
if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
msg['y0'] = convert_timestamp(msg['y0'])
msg['y1'] = convert_timestamp(msg['y1'])
return {'bounds': (msg['x0'], msg['y0'], msg['x1'], msg['y1'])}
else:
return {}
class BoundsXCallback(Callback):
"""
Returns the bounds of a xbox_select tool.
"""
attributes = {'x0': 'cb_obj.geometry.x0', 'x1': 'cb_obj.geometry.x1'}
models = ['plot']
extra_models = ['xbox_select']
on_events = ['selectiongeometry']
skip = ["cb_obj.geometry.type != 'rect'"]
def _process_msg(self, msg):
if all(c in msg for c in ['x0', 'x1']):
if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
msg['x0'] = convert_timestamp(msg['x0'])
msg['x1'] = convert_timestamp(msg['x1'])
return {'boundsx': (msg['x0'], msg['x1'])}
else:
return {}
class BoundsYCallback(Callback):
"""
Returns the bounds of a ybox_select tool.
"""
attributes = {'y0': 'cb_obj.geometry.y0', 'y1': 'cb_obj.geometry.y1'}
models = ['plot']
extra_models = ['ybox_select']
on_events = ['selectiongeometry']
skip = ["cb_obj.geometry.type != 'rect'"]
def _process_msg(self, msg):
if all(c in msg for c in ['y0', 'y1']):
if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
msg['y0'] = convert_timestamp(msg['y0'])
msg['y1'] = convert_timestamp(msg['y1'])
return {'boundsy': (msg['y0'], msg['y1'])}
else:
return {}
class Selection1DCallback(Callback):
"""
Returns the current selection on a ColumnDataSource.
"""
attributes = {'index': 'cb_obj.selected.1d.indices'}
models = ['source']
on_changes = ['selected']
def _process_msg(self, msg):
if 'index' in msg:
return {'index': [int(v) for v in msg['index']]}
else:
return {}
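# Editorial usage sketch (assumes a HoloViews version where
# .opts(tools=['tap']) is valid; older versions spell the plot option
# differently):
#
#     import holoviews as hv
#     from holoviews.streams import Selection1D
#
#     scatter = hv.Scatter(range(10)).opts(tools=['tap'])
#     sel = Selection1D(source=scatter)
#     hv.DynamicMap(lambda index: scatter.iloc[index], streams=[sel])
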
class ResetCallback(Callback):
"""
Signals the Reset stream if an event has been triggered.
"""
models = ['plot']
on_events = ['reset']
def _process_msg(self, msg):
return {'reset': True}
callbacks = Stream._callbacks['bokeh']
callbacks[PointerXY] = PointerXYCallback
callbacks[PointerX] = PointerXCallback
callbacks[PointerY] = PointerYCallback
callbacks[Tap] = TapCallback
callbacks[SingleTap] = SingleTapCallback
callbacks[DoubleTap] = DoubleTapCallback
callbacks[MouseEnter] = MouseEnterCallback
callbacks[MouseLeave] = MouseLeaveCallback
callbacks[RangeXY] = RangeXYCallback
callbacks[RangeX] = RangeXCallback
callbacks[RangeY] = RangeYCallback
callbacks[Bounds] = BoundsCallback
callbacks[BoundsXY] = BoundsCallback
callbacks[BoundsX] = BoundsXCallback
callbacks[BoundsY] = BoundsYCallback
callbacks[Selection1D] = Selection1DCallback
callbacks[PlotSize] = PlotSizeCallback
callbacks[Draw] = DrawCallback
callbacks[PlotReset] = ResetCallback
# Aliases for deprecated streams
callbacks[PositionXY] = PointerXYCallback
callbacks[PositionX] = PointerXCallback
callbacks[PositionY] = PointerYCallback
| 1 | 19,704 | Not evident from looking at this diff but the queue is already being cleared four lines below. | holoviz-holoviews | py |
@@ -293,6 +293,16 @@ func (pool *TransactionPool) computeFeePerByte() uint64 {
// checkSufficientFee take a set of signed transactions and verifies that each transaction has
// sufficient fee to get into the transaction pool
func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
+ // Special case: the compact cert transaction, if issued from the
+ // special compact-cert-sender address, in a singleton group, pays
+ // no fee.
+ if len(txgroup) == 1 {
+ t := txgroup[0].Txn
+ if t.Type == protocol.CompactCertTx && t.Sender == transactions.CompactCertSender && t.Fee.IsZero() {
+ return nil
+ }
+ }
+
// get the current fee per byte
feePerByte := pool.computeFeePerByte()
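A hedged sketch of how this exemption could be exercised in a test; `makeTestPool` is a hypothetical fixture (not from the repository), and the usual `pools` package test imports are assumed:

func TestCompactCertPaysNoFee(t *testing.T) {
	pool := makeTestPool(t) // hypothetical fixture returning a *TransactionPool
	stxn := transactions.SignedTxn{
		Txn: transactions.Transaction{
			Type: protocol.CompactCertTx,
			Header: transactions.Header{
				Sender: transactions.CompactCertSender,
				// Fee is left at its zero value so that Fee.IsZero() holds.
			},
		},
	}
	// A singleton group from the compact-cert sender should bypass the
	// fee-per-byte check entirely.
	if err := pool.checkSufficientFee([]transactions.SignedTxn{stxn}); err != nil {
		t.Fatalf("expected zero-fee compact cert txn to pass: %v", err)
	}
}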
| 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package pools
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/condvar"
)
// A TransactionPool prepares valid blocks for proposal and caches
// validated transaction groups.
//
// At all times, a TransactionPool maintains a queue of transaction
// groups slated for proposal. TransactionPool.Remember adds a
// properly-signed and well-formed transaction group to this queue
// only if its fees are sufficiently high and its state changes are
// consistent with the prior transactions in the queue.
//
// TransactionPool.AssembleBlock constructs a valid block for
// proposal given a deadline.
type TransactionPool struct {
	// feePerByte is stored at the beginning of this struct to ensure it has a 64 bit aligned address. This is needed as it's being used
// with atomic operations which require 64 bit alignment on arm.
feePerByte uint64
// const
logProcessBlockStats bool
logAssembleStats bool
expFeeFactor uint64
txPoolMaxSize int
ledger *ledger.Ledger
mu deadlock.Mutex
cond sync.Cond
expiredTxCount map[basics.Round]int
pendingBlockEvaluator *ledger.BlockEvaluator
numPendingWholeBlocks basics.Round
feeThresholdMultiplier uint64
statusCache *statusCache
assemblyMu deadlock.Mutex
assemblyCond sync.Cond
assemblyDeadline time.Time
// assemblyRound indicates which round number we're currently waiting for or waited for last.
assemblyRound basics.Round
assemblyResults poolAsmResults
// pendingMu protects pendingTxGroups and pendingTxids
pendingMu deadlock.RWMutex
pendingTxGroups [][]transactions.SignedTxn
pendingVerifyParams [][]verify.Params
pendingTxids map[transactions.Txid]txPoolVerifyCacheVal
// Calls to remember() add transactions to rememberedTxGroups and
// rememberedTxids. Calling rememberCommit() adds them to the
// pendingTxGroups and pendingTxids. This allows us to batch the
// changes in OnNewBlock() without preventing a concurrent call
// to Pending() or Verified().
rememberedTxGroups [][]transactions.SignedTxn
rememberedVerifyParams [][]verify.Params
rememberedTxids map[transactions.Txid]txPoolVerifyCacheVal
}
// MakeTransactionPool makes a transaction pool.
func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPool {
if cfg.TxPoolExponentialIncreaseFactor < 1 {
cfg.TxPoolExponentialIncreaseFactor = 1
}
pool := TransactionPool{
pendingTxids: make(map[transactions.Txid]txPoolVerifyCacheVal),
rememberedTxids: make(map[transactions.Txid]txPoolVerifyCacheVal),
expiredTxCount: make(map[basics.Round]int),
ledger: ledger,
statusCache: makeStatusCache(cfg.TxPoolSize),
logProcessBlockStats: cfg.EnableProcessBlockStats,
logAssembleStats: cfg.EnableAssembleStats,
expFeeFactor: cfg.TxPoolExponentialIncreaseFactor,
txPoolMaxSize: cfg.TxPoolSize,
}
pool.cond.L = &pool.mu
pool.assemblyCond.L = &pool.assemblyMu
pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round))
return &pool
}
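// Editorial usage sketch (names are illustrative; cfg values are whatever
// the node loads):
//
//	cfg := config.GetDefaultLocal()
//	pool := MakeTransactionPool(ledgerInstance, cfg)
//	err := pool.RememberOne(signedTxn, verifyParams)
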
type txPoolVerifyCacheVal struct {
txn transactions.SignedTxn
params verify.Params
}
type poolAsmResults struct {
ok bool
blk *ledger.ValidatedBlock
stats telemetryspec.AssembleBlockMetrics
err error
	// roundStartedEvaluating is the round that we last attempted to evaluate. It's a good measure for
	// which round we started evaluating, but not a measure of whether the evaluation is complete.
roundStartedEvaluating basics.Round
}
// TODO I moved this number to be a constant in the module, we should consider putting it in the local config
const expiredHistory = 10
// timeoutOnNewBlock determines how long Test() and Remember() wait for
// OnNewBlock() to process a new block that appears to be in the ledger.
const timeoutOnNewBlock = time.Second
// assemblyWaitEps is the extra time AssembleBlock() waits past the
// deadline before giving up.
const assemblyWaitEps = 150 * time.Millisecond
// ErrStaleBlockAssemblyRequest is returned by AssembleBlock when the requested block number is older than the current transaction pool round,
// i.e. typically it means that we're trying to make a proposal for an older round than what the ledger is currently pointing at.
var ErrStaleBlockAssemblyRequest = fmt.Errorf("AssembleBlock: requested block assembly specified a round that is older than current transaction pool round")
// Reset resets the content of the transaction pool
func (pool *TransactionPool) Reset() {
pool.pendingTxids = make(map[transactions.Txid]txPoolVerifyCacheVal)
pool.pendingVerifyParams = nil
pool.pendingTxGroups = nil
pool.rememberedTxids = make(map[transactions.Txid]txPoolVerifyCacheVal)
pool.rememberedVerifyParams = nil
pool.rememberedTxGroups = nil
pool.expiredTxCount = make(map[basics.Round]int)
pool.numPendingWholeBlocks = 0
pool.pendingBlockEvaluator = nil
pool.statusCache.reset()
pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round))
}
// NumExpired returns the number of transactions that expired at the
// end of a round (only meaningful if cleanup has been called for that
// round).
func (pool *TransactionPool) NumExpired(round basics.Round) int {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.expiredTxCount[round]
}
// PendingTxIDs return the IDs of all pending transactions.
func (pool *TransactionPool) PendingTxIDs() []transactions.Txid {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
ids := make([]transactions.Txid, len(pool.pendingTxids))
i := 0
for txid := range pool.pendingTxids {
ids[i] = txid
i++
}
return ids
}
// Pending returns a list of transaction groups that should be proposed
// in the next block, in order.
func (pool *TransactionPool) Pending() [][]transactions.SignedTxn {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
	// note that this operation is safe for the sole reason that arrays in go are immutable:
	// if the underlying array needs to be expanded, the actual underlying array would need
	// to be reallocated.
return pool.pendingTxGroups
}
// rememberCommit() saves the changes added by remember to
// pendingTxGroups and pendingTxids. The caller is assumed to
// be holding pool.mu. flush indicates whether previous
// pendingTxGroups and pendingTxids should be flushed out and
// replaced altogether by rememberedTxGroups and rememberedTxids.
func (pool *TransactionPool) rememberCommit(flush bool) {
pool.pendingMu.Lock()
defer pool.pendingMu.Unlock()
if flush {
pool.pendingTxGroups = pool.rememberedTxGroups
pool.pendingVerifyParams = pool.rememberedVerifyParams
pool.pendingTxids = pool.rememberedTxids
} else {
pool.pendingTxGroups = append(pool.pendingTxGroups, pool.rememberedTxGroups...)
pool.pendingVerifyParams = append(pool.pendingVerifyParams, pool.rememberedVerifyParams...)
for txid, txn := range pool.rememberedTxids {
pool.pendingTxids[txid] = txn
}
}
pool.rememberedTxGroups = nil
pool.rememberedVerifyParams = nil
pool.rememberedTxids = make(map[transactions.Txid]txPoolVerifyCacheVal)
}
// PendingCount returns the number of transactions currently pending in the pool.
func (pool *TransactionPool) PendingCount() int {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
return pool.pendingCountNoLock()
}
// pendingCountNoLock is a helper for PendingCount that returns the number of
// transactions pending in the pool
func (pool *TransactionPool) pendingCountNoLock() int {
var count int
for _, txgroup := range pool.pendingTxGroups {
count += len(txgroup)
}
return count
}
// checkPendingQueueSize tests whether there is more room in the pending
// group transaction list. As long as we haven't surpassed the size limit, we
// should be good to go.
func (pool *TransactionPool) checkPendingQueueSize() error {
pendingSize := len(pool.Pending())
if pendingSize >= pool.txPoolMaxSize {
return fmt.Errorf("TransactionPool.Test: transaction pool have reached capacity")
}
return nil
}
// FeePerByte returns the current minimum microalgos per byte a transaction
// needs to pay in order to get into the pool.
func (pool *TransactionPool) FeePerByte() uint64 {
return atomic.LoadUint64(&pool.feePerByte)
}
// computeFeePerByte computes and returns the current minimum microalgos per byte a transaction
// needs to pay in order to get into the pool. It also updates the atomic counter that holds
// the current fee per byte
func (pool *TransactionPool) computeFeePerByte() uint64 {
// The baseline threshold fee per byte is 1, the smallest fee we can
// represent. This amounts to a fee of 100 for a 100-byte txn, which
// is well below MinTxnFee (1000). This means that, when the pool
// is not under load, the total MinFee dominates for small txns,
// but once the pool comes under load, the fee-per-byte will quickly
// come to dominate.
feePerByte := uint64(1)
// The threshold is multiplied by the feeThresholdMultiplier that
// tracks the load on the transaction pool over time. If the pool
// is mostly idle, feeThresholdMultiplier will be 0, and all txns
// are accepted (assuming the BlockEvaluator approves them, which
// requires a flat MinTxnFee).
feePerByte = feePerByte * pool.feeThresholdMultiplier
	// The feePerByte should be bumped to 1 to make the exponential
	// threshold growth valid.
if feePerByte == 0 && pool.numPendingWholeBlocks > 1 {
feePerByte = uint64(1)
}
// The threshold grows exponentially if there are multiple blocks
// pending in the pool.
// golang has no convenient integer exponentiation, so we just
// do this in a loop
for i := 0; i < int(pool.numPendingWholeBlocks)-1; i++ {
feePerByte *= pool.expFeeFactor
}
// Update the counter for fast reads
atomic.StoreUint64(&pool.feePerByte, feePerByte)
return feePerByte
}
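// Worked example (editorial, with assumed figures): if feeThresholdMultiplier
// is 4, expFeeFactor is 2 and numPendingWholeBlocks is 3, the loop above runs
// twice, so the function stores and returns 4 * 2 * 2 = 16 microalgos per byte.
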
// checkSufficientFee takes a set of signed transactions and verifies that each transaction has
// sufficient fee to get into the transaction pool
func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
// get the current fee per byte
feePerByte := pool.computeFeePerByte()
for _, t := range txgroup {
feeThreshold := feePerByte * uint64(t.GetEncodedLength())
if t.Txn.Fee.Raw < feeThreshold {
return fmt.Errorf("fee %d below threshold %d (%d per byte * %d bytes)",
t.Txn.Fee, feeThreshold, feePerByte, t.GetEncodedLength())
}
}
return nil
}
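// Numeric illustration (editorial): at 16 microalgos per byte, a 250-byte
// transaction must pay at least 16 * 250 = 4000 microalgos to pass this check,
// so a txn paying only the flat MinTxnFee of 1000 would be rejected under load.
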
// Test performs basic duplicate detection and well-formedness checks
// on a transaction group without storing the group.
func (pool *TransactionPool) Test(txgroup []transactions.SignedTxn) error {
if err := pool.checkPendingQueueSize(); err != nil {
return err
}
pool.mu.Lock()
defer pool.mu.Unlock()
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("Test: pendingBlockEvaluator is nil")
}
return pool.pendingBlockEvaluator.TestTransactionGroup(txgroup)
}
type poolIngestParams struct {
recomputing bool // if unset, perform fee checks and wait until ledger is caught up
stats *telemetryspec.AssembleBlockMetrics
}
// remember attempts to add a transaction group to the pool.
func (pool *TransactionPool) remember(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error {
params := poolIngestParams{
recomputing: false,
}
return pool.ingest(txgroup, verifyParams, params)
}
// add tries to add the transaction group to the pool, bypassing the fee
// priority checks.
func (pool *TransactionPool) add(txgroup []transactions.SignedTxn, verifyParams []verify.Params, stats *telemetryspec.AssembleBlockMetrics) error {
params := poolIngestParams{
recomputing: true,
stats: stats,
}
return pool.ingest(txgroup, verifyParams, params)
}
// ingest checks whether a transaction group could be remembered in the pool,
// and stores the group if valid.
//
// ingest assumes that pool.mu is locked. It might release the lock
// while it waits for OnNewBlock() to be called.
func (pool *TransactionPool) ingest(txgroup []transactions.SignedTxn, verifyParams []verify.Params, params poolIngestParams) error {
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.ingest: no pending block evaluator")
}
if !params.recomputing {
// Make sure that the latest block has been processed by OnNewBlock().
// If not, we might be in a race, so wait a little bit for OnNewBlock()
// to catch up to the ledger.
latest := pool.ledger.Latest()
waitExpires := time.Now().Add(timeoutOnNewBlock)
for pool.pendingBlockEvaluator.Round() <= latest && time.Now().Before(waitExpires) {
condvar.TimedWait(&pool.cond, timeoutOnNewBlock)
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.ingest: no pending block evaluator")
}
}
err := pool.checkSufficientFee(txgroup)
if err != nil {
return err
}
}
err := pool.addToPendingBlockEvaluator(txgroup, params.recomputing, params.stats)
if err != nil {
return err
}
pool.rememberedTxGroups = append(pool.rememberedTxGroups, txgroup)
pool.rememberedVerifyParams = append(pool.rememberedVerifyParams, verifyParams)
for i, t := range txgroup {
pool.rememberedTxids[t.ID()] = txPoolVerifyCacheVal{txn: t, params: verifyParams[i]}
}
return nil
}
// RememberOne stores the provided transaction.
// Precondition: Only RememberOne() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
func (pool *TransactionPool) RememberOne(t transactions.SignedTxn, verifyParams verify.Params) error {
return pool.Remember([]transactions.SignedTxn{t}, []verify.Params{verifyParams})
}
// Remember stores the provided transaction group.
// Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error {
if err := pool.checkPendingQueueSize(); err != nil {
return err
}
pool.mu.Lock()
defer pool.mu.Unlock()
err := pool.remember(txgroup, verifyParams)
if err != nil {
return fmt.Errorf("TransactionPool.Remember: %v", err)
}
pool.rememberCommit(false)
return nil
}
// Lookup returns the error associated with a transaction that used
// to be in the pool. If no status information is available (e.g., because
// it was too long ago, or the transaction committed successfully), then
// found is false. If the transaction is still in the pool, txErr is empty.
func (pool *TransactionPool) Lookup(txid transactions.Txid) (tx transactions.SignedTxn, txErr string, found bool) {
if pool == nil {
return transactions.SignedTxn{}, "", false
}
pool.mu.Lock()
defer pool.mu.Unlock()
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
cacheval, inPool := pool.pendingTxids[txid]
tx = cacheval.txn
if inPool {
return tx, "", true
}
return pool.statusCache.check(txid)
}
// Verified returns whether a given SignedTxn is already in the
// pool, and, since only verified transactions should be added
// to the pool, whether that transaction is verified (i.e., Verify
// returned success). This is used as an optimization to avoid
// re-checking signatures on transactions that we have already
// verified.
func (pool *TransactionPool) Verified(txn transactions.SignedTxn, params verify.Params) bool {
if pool == nil {
return false
}
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
cacheval, ok := pool.pendingTxids[txn.ID()]
if !ok {
return false
}
if cacheval.params != params {
return false
}
pendingSigTxn := cacheval.txn
return pendingSigTxn.Sig == txn.Sig && pendingSigTxn.Msig.Equal(txn.Msig) && pendingSigTxn.Lsig.Equal(&txn.Lsig) && (pendingSigTxn.AuthAddr == txn.AuthAddr)
}
// OnNewBlock excises transactions from the pool that are included in the specified Block or that have expired
func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledger.StateDelta) {
var stats telemetryspec.ProcessBlockMetrics
var knownCommitted uint
var unknownCommitted uint
commitedTxids := delta.Txids
if pool.logProcessBlockStats {
pool.pendingMu.RLock()
for txid := range commitedTxids {
if _, ok := pool.pendingTxids[txid]; ok {
knownCommitted++
} else {
unknownCommitted++
}
}
pool.pendingMu.RUnlock()
}
pool.mu.Lock()
defer pool.mu.Unlock()
defer pool.cond.Broadcast()
if pool.pendingBlockEvaluator == nil || block.Round() >= pool.pendingBlockEvaluator.Round() {
// Adjust the pool fee threshold. The rules are:
// - If there was less than one full block in the pool, reduce
// the multiplier by 2x. It will eventually go to 0, so that
// only the flat MinTxnFee matters if the pool is idle.
// - If there were less than two full blocks in the pool, keep
// the multiplier as-is.
// - If there were two or more full blocks in the pool, grow
// the multiplier by 2x (or increment by 1, if 0).
switch pool.numPendingWholeBlocks {
case 0:
pool.feeThresholdMultiplier = pool.feeThresholdMultiplier / pool.expFeeFactor
case 1:
// Keep the fee multiplier the same.
default:
if pool.feeThresholdMultiplier == 0 {
pool.feeThresholdMultiplier = 1
} else {
pool.feeThresholdMultiplier = pool.feeThresholdMultiplier * pool.expFeeFactor
}
}
// Recompute the pool by starting from the new latest block.
// This has the side-effect of discarding transactions that
// have been committed (or that are otherwise no longer valid).
stats = pool.recomputeBlockEvaluator(commitedTxids)
}
stats.KnownCommittedCount = knownCommitted
stats.UnknownCommittedCount = unknownCommitted
proto := config.Consensus[block.CurrentProtocol]
pool.expiredTxCount[block.Round()] = int(stats.ExpiredCount)
delete(pool.expiredTxCount, block.Round()-expiredHistory*basics.Round(proto.MaxTxnLife))
if pool.logProcessBlockStats {
var details struct {
Round uint64
}
details.Round = uint64(block.Round())
logging.Base().Metrics(telemetryspec.Transaction, stats, details)
}
}
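// Illustrative trace of the multiplier rules above (editorial, assuming
// expFeeFactor = 2): starting from feeThresholdMultiplier = 4, a round with no
// whole pending block halves it to 2, a round with exactly one leaves it at 2,
// and a round with two or more doubles it to 4 (or sets it to 1 if it had
// previously decayed to 0).
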
func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactions.SignedTxn, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
r := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks
for _, tx := range txgroup {
if tx.Txn.LastValid < r {
return transactions.TxnDeadError{
Round: r,
FirstValid: tx.Txn.FirstValid,
LastValid: tx.Txn.LastValid,
}
}
}
txgroupad := make([]transactions.SignedTxnWithAD, len(txgroup))
for i, tx := range txgroup {
txgroupad[i].SignedTxn = tx
}
err := pool.pendingBlockEvaluator.TransactionGroup(txgroupad)
if recomputing {
pool.assemblyMu.Lock()
defer pool.assemblyMu.Unlock()
if !pool.assemblyResults.ok {
if (err == ledger.ErrNoSpace || (pool.assemblyDeadline != time.Time{} && time.Now().After(pool.assemblyDeadline))) && (pool.assemblyRound <= pool.pendingBlockEvaluator.Round()) {
pool.assemblyResults.ok = true
if err == ledger.ErrNoSpace {
stats.StopReason = telemetryspec.AssembleBlockFull
} else {
stats.StopReason = telemetryspec.AssembleBlockTimeout
}
pool.assemblyResults.stats = *stats
lvb, gerr := pool.pendingBlockEvaluator.GenerateBlock()
if gerr != nil {
pool.assemblyResults.err = fmt.Errorf("could not generate block for %d: %v", pool.assemblyResults.roundStartedEvaluating, gerr)
} else {
pool.assemblyResults.blk = lvb
}
pool.assemblyCond.Broadcast()
}
}
}
return err
}
func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.SignedTxn, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
err := pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
if err == ledger.ErrNoSpace {
pool.numPendingWholeBlocks++
pool.pendingBlockEvaluator.ResetTxnBytes()
err = pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
}
return err
}
// recomputeBlockEvaluator constructs a new BlockEvaluator and feeds all
// in-pool transactions to it (removing any transactions that are rejected
// by the BlockEvaluator). Expects the pool.mu mutex to already be held.
func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]basics.Round) (stats telemetryspec.ProcessBlockMetrics) {
pool.pendingBlockEvaluator = nil
latest := pool.ledger.Latest()
prev, err := pool.ledger.BlockHdr(latest)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot get prev header for %d: %v",
latest, err)
return
}
// Process upgrade to see if we support the next protocol version
_, upgradeState, err := bookkeeping.ProcessUpgradeParams(prev)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: error processing upgrade params for next round: %v", err)
return
}
// Ensure we know about the next protocol version (MakeBlock will panic
// if we don't, and we would rather stall locally than panic)
_, ok := config.Consensus[upgradeState.CurrentProtocol]
if !ok {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: next protocol version %v is not supported", upgradeState.CurrentProtocol)
return
}
// Grab the transactions to be played through the new block evaluator
pool.pendingMu.RLock()
txgroups := pool.pendingTxGroups
verifyParams := pool.pendingVerifyParams
pendingCount := pool.pendingCountNoLock()
pool.pendingMu.RUnlock()
pool.assemblyMu.Lock()
pool.assemblyResults = poolAsmResults{
roundStartedEvaluating: prev.Round + basics.Round(1),
}
pool.assemblyMu.Unlock()
next := bookkeeping.MakeBlock(prev)
pool.numPendingWholeBlocks = 0
pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, pendingCount)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err)
return
}
var asmStats telemetryspec.AssembleBlockMetrics
asmStats.StartCount = len(txgroups)
asmStats.StopReason = telemetryspec.AssembleBlockEmpty
// Feed the transactions in order
for i, txgroup := range txgroups {
if len(txgroup) == 0 {
asmStats.InvalidCount++
continue
}
if _, alreadyCommitted := committedTxIds[txgroup[0].ID()]; alreadyCommitted {
asmStats.EarlyCommittedCount++
continue
}
err := pool.add(txgroup, verifyParams[i], &asmStats)
if err != nil {
for _, tx := range txgroup {
pool.statusCache.put(tx, err.Error())
}
switch err.(type) {
case ledger.TransactionInLedgerError:
asmStats.CommittedCount++
stats.RemovedInvalidCount++
case transactions.TxnDeadError:
asmStats.InvalidCount++
stats.ExpiredCount++
case transactions.MinFeeError:
asmStats.InvalidCount++
stats.RemovedInvalidCount++
logging.Base().Infof("Cannot re-add pending transaction to pool: %v", err)
default:
asmStats.InvalidCount++
stats.RemovedInvalidCount++
logging.Base().Warnf("Cannot re-add pending transaction to pool: %v", err)
}
}
}
pool.assemblyMu.Lock()
if !pool.assemblyResults.ok && pool.assemblyRound <= pool.pendingBlockEvaluator.Round() {
pool.assemblyResults.ok = true
pool.assemblyResults.stats = asmStats
lvb, err := pool.pendingBlockEvaluator.GenerateBlock()
if err != nil {
pool.assemblyResults.err = fmt.Errorf("could not generate block for %d (end): %v", pool.assemblyResults.roundStartedEvaluating, err)
} else {
pool.assemblyResults.blk = lvb
}
pool.assemblyCond.Broadcast()
}
pool.assemblyMu.Unlock()
pool.rememberCommit(true)
return
}
// AssembleBlock assembles a block for a given round, trying not to
// take longer than deadline to finish.
func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledger.ValidatedBlock, err error) {
var stats telemetryspec.AssembleBlockMetrics
if pool.logAssembleStats {
start := time.Now()
defer func() {
if err != nil {
return
}
// Measure time here because we want to know how close to deadline we are
dt := time.Now().Sub(start)
stats.Nanoseconds = dt.Nanoseconds()
payset := assembled.Block().Payset
if len(payset) != 0 {
totalFees := uint64(0)
for i, txib := range payset {
fee := txib.Txn.Fee.Raw
encodedLen := len(protocol.Encode(&txib))
stats.IncludedCount++
totalFees += fee
if i == 0 {
stats.MinFee = fee
stats.MaxFee = fee
stats.MinLength = encodedLen
stats.MaxLength = encodedLen
} else {
if fee < stats.MinFee {
stats.MinFee = fee
} else if fee > stats.MaxFee {
stats.MaxFee = fee
}
if encodedLen < stats.MinLength {
stats.MinLength = encodedLen
} else if encodedLen > stats.MaxLength {
stats.MaxLength = encodedLen
}
}
stats.TotalLength += uint64(encodedLen)
}
stats.AverageFee = totalFees / uint64(stats.IncludedCount)
}
var details struct {
Round uint64
}
details.Round = uint64(round)
logging.Base().Metrics(telemetryspec.Transaction, stats, details)
}()
}
pool.assemblyMu.Lock()
// if the transaction pool is more than two rounds behind, we don't want to wait.
if pool.assemblyResults.roundStartedEvaluating <= round.SubSaturate(2) {
logging.Base().Infof("AssembleBlock: requested round is more than a single round ahead of the transaction pool %d <= %d-2", pool.assemblyResults.roundStartedEvaluating, round)
stats.StopReason = telemetryspec.AssembleBlockEmpty
pool.assemblyMu.Unlock()
return pool.assembleEmptyBlock(round)
}
defer pool.assemblyMu.Unlock()
if pool.assemblyResults.roundStartedEvaluating > round {
		// we've already assembled a round in the future. Since we clearly won't go backward, it means
		// that the agreement is far behind us, so we're going to return here with an error code to let
		// the agreement know about it.
		// since the network is already ahead of us, there is no issue here in not generating a block (since the block would get discarded anyway)
logging.Base().Infof("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating)
return nil, ErrStaleBlockAssemblyRequest
}
pool.assemblyDeadline = deadline
pool.assemblyRound = round
for time.Now().Before(deadline) && (!pool.assemblyResults.ok || pool.assemblyResults.roundStartedEvaluating != round) {
condvar.TimedWait(&pool.assemblyCond, deadline.Sub(time.Now()))
}
if !pool.assemblyResults.ok {
		// we've passed the deadline, so we're either going to have a partial block, or we won't make it on time.
		// start preparing an empty block in case we also miss the extra time (assemblyWaitEps).
		// assembleEmptyBlock uses the database, so we want to unlock here and take the lock again later on.
pool.assemblyMu.Unlock()
emptyBlock, emptyBlockErr := pool.assembleEmptyBlock(round)
pool.assemblyMu.Lock()
if pool.assemblyResults.roundStartedEvaluating > round {
			// this case is expected to happen only if the transaction pool was able to construct *two* rounds during the time we were trying to assemble the empty block.
			// while this is extremely unlikely, we need to handle it. the handling is quite straightforward:
			// since the network is already ahead of us, there is no issue here in not generating a block (since the block would get discarded anyway)
logging.Base().Infof("AssembleBlock: requested round is behind transaction pool round after timing out %d < %d", round, pool.assemblyResults.roundStartedEvaluating)
return nil, ErrStaleBlockAssemblyRequest
}
deadline = deadline.Add(assemblyWaitEps)
for time.Now().Before(deadline) && (!pool.assemblyResults.ok || pool.assemblyResults.roundStartedEvaluating != round) {
condvar.TimedWait(&pool.assemblyCond, deadline.Sub(time.Now()))
}
// check to see if the extra time helped us to get a block.
if !pool.assemblyResults.ok {
			// it didn't. Lucky us - we already prepared an empty block, so we can return it right now.
logging.Base().Warnf("AssembleBlock: ran out of time for round %d", round)
stats.StopReason = telemetryspec.AssembleBlockTimeout
if emptyBlockErr != nil {
emptyBlockErr = fmt.Errorf("AssembleBlock: failed to construct empty block : %v", emptyBlockErr)
}
return emptyBlock, emptyBlockErr
}
}
pool.assemblyDeadline = time.Time{}
if pool.assemblyResults.err != nil {
return nil, fmt.Errorf("AssemblyBlock: encountered error for round %d: %v", round, pool.assemblyResults.err)
}
if pool.assemblyResults.roundStartedEvaluating > round {
		// this scenario should not happen unless the txpool is receiving the new blocks via OnNewBlock
		// with "jumps" between consecutive blocks (which is why it's a warning)
		// The "normal" use case is handled at the top of the function.
logging.Base().Warnf("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating)
return nil, ErrStaleBlockAssemblyRequest
} else if pool.assemblyResults.roundStartedEvaluating != round {
return nil, fmt.Errorf("AssembleBlock: assembled block round does not match: %d != %d",
pool.assemblyResults.roundStartedEvaluating, round)
}
stats = pool.assemblyResults.stats
return pool.assemblyResults.blk, nil
}
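// Editorial caller sketch (the deadline budget is illustrative, not a
// recommendation):
//
//	blk, err := pool.AssembleBlock(round, time.Now().Add(250*time.Millisecond))
//	if err == ErrStaleBlockAssemblyRequest {
//		// the pool is already past this round; drop the proposal attempt
//	}
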
// assembleEmptyBlock constructs a new block for the given round. Internally it uses ledger database
// calls, so callers need to be aware that it might take a while before it returns.
func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *ledger.ValidatedBlock, err error) {
prevRound := round - 1
prev, err := pool.ledger.BlockHdr(prevRound)
if err != nil {
err = fmt.Errorf("TransactionPool.assembleEmptyBlock: cannot get prev header for %d: %v", prevRound, err)
return nil, err
}
next := bookkeeping.MakeBlock(prev)
blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0)
if err != nil {
err = fmt.Errorf("TransactionPool.assembleEmptyBlock: cannot start evaluator for %d: %v", round, err)
return nil, err
}
return blockEval.GenerateBlock()
}
| 1 | 39,852 | Why is this a 'transaction' and not in the block header? | algorand-go-algorand | go |
@@ -2,12 +2,14 @@
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.IO;
+using System.Security.Cryptography.X509Certificates;
namespace Microsoft.AspNet.Server.Kestrel.Filter
{
public class ConnectionFilterContext
{
public ServerAddress Address { get; set; }
- public Stream Connection { get; set; }
+ public Stream Connection { get; set; }
+ public X509Certificate2 ClientCertificate { get; set; }
}
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.IO;
namespace Microsoft.AspNet.Server.Kestrel.Filter
{
public class ConnectionFilterContext
{
public ServerAddress Address { get; set; }
public Stream Connection { get; set; }
}
}
| 1 | 6,565 | This doesn't fit the abstraction level. Should we have a property bag for extra stuff? | aspnet-KestrelHttpServer | .cs |
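A minimal sketch of the property-bag shape the reviewer is suggesting, for comparison; the `Items` dictionary is hypothetical and not the API Kestrel actually shipped:

using System.Collections.Generic;
using System.IO;

namespace Microsoft.AspNet.Server.Kestrel.Filter
{
    public class ConnectionFilterContext
    {
        public ServerAddress Address { get; set; }
        public Stream Connection { get; set; }

        // Loosely-typed bag for per-connection extras (e.g. a client
        // certificate), keeping feature-specific properties off this type.
        public IDictionary<string, object> Items { get; } =
            new Dictionary<string, object>();
    }
}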