patch (string, 17 to 31.2k chars) | y (int64, value 1) | oldf (string, 0 to 2.21M chars) | idx (int64, value 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---
@@ -88,6 +88,12 @@ class TestHeaders:
headers = Headers(Host="foobarfoo.com", Accept="foo/bar")
replacements = headers.replace("foo", "bar", count=1)
assert replacements == 1
+ assert headers["Host"] == "barbarfoo.com"
+ assert headers["Accept"] == "foo/bar"
+ replacements = headers.replace("foo", "bar", count=2)
+ assert replacements == 2
+ assert headers["Host"] == "barbarbar.com"
+ assert headers["Accept"] == "bar/bar"
def test_parse_content_type(): | 1 | import collections
import pytest
from mitmproxy.net.http.headers import Headers, parse_content_type, assemble_content_type
class TestHeaders:
def _2host(self):
return Headers(
(
(b"Host", b"example.com"),
(b"host", b"example.org")
)
)
def test_init(self):
headers = Headers()
assert len(headers) == 0
headers = Headers([[b"Host", b"example.com"]])
assert len(headers) == 1
assert headers["Host"] == "example.com"
headers = Headers(Host="example.com")
assert len(headers) == 1
assert headers["Host"] == "example.com"
headers = Headers(
[[b"Host", b"invalid"]],
Host="example.com"
)
assert len(headers) == 1
assert headers["Host"] == "example.com"
headers = Headers(
[[b"Host", b"invalid"], [b"Accept", b"text/plain"]],
Host="example.com"
)
assert len(headers) == 2
assert headers["Host"] == "example.com"
assert headers["Accept"] == "text/plain"
with pytest.raises(TypeError):
Headers([[b"Host", u"not-bytes"]])
def test_set(self):
headers = Headers()
headers[u"foo"] = u"1"
headers[b"bar"] = b"2"
headers["baz"] = b"3"
with pytest.raises(TypeError):
headers["foobar"] = 42
assert len(headers) == 3
def test_bytes(self):
headers = Headers(Host="example.com")
assert bytes(headers) == b"Host: example.com\r\n"
headers = Headers([
[b"Host", b"example.com"],
[b"Accept", b"text/plain"]
])
assert bytes(headers) == b"Host: example.com\r\nAccept: text/plain\r\n"
headers = Headers()
assert bytes(headers) == b""
def test_replace_simple(self):
headers = Headers(Host="example.com", Accept="text/plain")
replacements = headers.replace("Host: ", "X-Host: ")
assert replacements == 1
assert headers["X-Host"] == "example.com"
assert "Host" not in headers
assert headers["Accept"] == "text/plain"
def test_replace_multi(self):
headers = self._2host()
headers.replace(r"Host: example.com", r"Host: example.de")
assert headers.get_all("Host") == ["example.de", "example.org"]
def test_replace_remove_spacer(self):
headers = Headers(Host="example.com")
replacements = headers.replace(r"Host: ", "X-Host ")
assert replacements == 0
assert headers["Host"] == "example.com"
def test_replace_with_count(self):
headers = Headers(Host="foobarfoo.com", Accept="foo/bar")
replacements = headers.replace("foo", "bar", count=1)
assert replacements == 1
def test_parse_content_type():
p = parse_content_type
assert p("text/html") == ("text", "html", {})
assert p("text") is None
v = p("text/html; charset=UTF-8")
assert v == ('text', 'html', {'charset': 'UTF-8'})
def test_assemble_content_type():
p = assemble_content_type
assert p("text", "html", {}) == "text/html"
assert p("text", "html", {"charset": "utf8"}) == "text/html; charset=utf8"
assert p("text", "html", collections.OrderedDict([("charset", "utf8"), ("foo", "bar")])) == "text/html; charset=utf8; foo=bar"
| 1 | 14,670 | This test does not really verify that we're doing the correct thing now, does it? It seems to pass with and without the patch. | mitmproxy-mitmproxy | py
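The reviewer's point above is that a regression test should fail on the old behaviour and pass on the new one. Below is a minimal sketch of what a discriminating test for a count-limited replace can look like, using a stand-in `replace_limited` helper built on `re.subn` rather than the actual mitmproxy `Headers.replace` implementation:

```python
import re


def replace_limited(text, pattern, repl, count):
    """Stand-in for a count-limited replace: returns the new text and the
    number of substitutions that were actually performed."""
    return re.subn(pattern, repl, text, count=count)


def test_count_limits_substitutions():
    # count=1 must rewrite only the first occurrence.
    text, n = replace_limited("foobarfoo.com", "foo", "bar", count=1)
    assert n == 1
    assert text == "barbarfoo.com"

    # count=2 also rewrites the second occurrence. Asserting the resulting
    # values, not just the returned count, is what makes the test fail when
    # the limit is not honoured.
    text, n = replace_limited("foobarfoo.com", "foo", "bar", count=2)
    assert n == 2
    assert text == "barbarbar.com"
```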
@@ -16,7 +16,13 @@ const BlocksTopic = "/fil/blocks"
const MessageTopic = "/fil/msgs"
// AddNewBlock processes a block on the local chain and publishes it to the network.
-func (node *Node) AddNewBlock(ctx context.Context, b *types.Block) error {
+func (node *Node) AddNewBlock(ctx context.Context, b *types.Block) (err error) {
+ ctx = log.Start(ctx, "Node.AddNewBlock")
+ log.SetTag(ctx, "block", b)
+ defer func() {
+ log.FinishWithErr(ctx, err)
+ }()
+
if _, err := node.ChainMgr.ProcessNewBlock(ctx, b); err != nil {
return err
} | 1 | package node
import (
"context"
"gx/ipfs/QmSFihvoND3eDaAYRCeLgLPt62yCPgMZs1NSZmKFEtJQQw/go-libp2p-floodsub"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
"github.com/filecoin-project/go-filecoin/types"
)
// BlocksTopic is the pubsub topic identifier on which new blocks are announced.
const BlocksTopic = "/fil/blocks"
// MessageTopic is the pubsub topic identifier on which new messages are announced.
const MessageTopic = "/fil/msgs"
// AddNewBlock processes a block on the local chain and publishes it to the network.
func (node *Node) AddNewBlock(ctx context.Context, b *types.Block) error {
if _, err := node.ChainMgr.ProcessNewBlock(ctx, b); err != nil {
return err
}
return node.PubSub.Publish(BlocksTopic, b.ToNode().RawData())
}
type floodSubProcessorFunc func(ctx context.Context, msg *floodsub.Message) error
func (node *Node) handleSubscription(ctx context.Context, f floodSubProcessorFunc, fname string, s *floodsub.Subscription, sname string) {
for {
pubSubMsg, err := s.Next(ctx)
if err != nil {
log.Errorf("%s.Next(): %s", sname, err)
return
}
if err := f(ctx, pubSubMsg); err != nil {
log.Errorf("%s(): %s", fname, err)
}
}
}
func (node *Node) processBlock(ctx context.Context, pubSubMsg *floodsub.Message) error {
// ignore messages from ourself
if pubSubMsg.GetFrom() == node.Host.ID() {
return nil
}
blk, err := types.DecodeBlock(pubSubMsg.GetData())
if err != nil {
return errors.Wrap(err, "got bad block data")
}
res, err := node.ChainMgr.ProcessNewBlock(ctx, blk)
if err != nil {
return errors.Wrap(err, "processing block from network")
}
log.Infof("message processed: %s", res)
return nil
}
func (node *Node) processMessage(ctx context.Context, pubSubMsg *floodsub.Message) error {
unmarshaled := &types.Message{}
if err := unmarshaled.Unmarshal(pubSubMsg.GetData()); err != nil {
return err
}
_, err := node.MsgPool.Add(unmarshaled)
return err
}
// AddNewMessage adds a new message to the pool and publishes it to the network.
func (node *Node) AddNewMessage(ctx context.Context, msg *types.Message) error {
if _, err := node.MsgPool.Add(msg); err != nil {
return err
}
msgdata, err := msg.Marshal()
if err != nil {
return err
}
return node.PubSub.Publish(MessageTopic, msgdata)
}
| 1 | 11,777 | should just be able to defer the call directly here too | filecoin-project-venus | go |
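For context on the comment above: whether the journal call can be deferred directly depends on when its arguments are evaluated. The following is a small, self-contained Go sketch (with a hypothetical `finish` function standing in for a call like `log.FinishWithErr(ctx, err)`) showing that a direct `defer` captures the named error's value at the point of the defer statement, while the closure form reads it at return time:

```go
package main

import "fmt"

// finish stands in for a journalling call such as log.FinishWithErr(ctx, err).
func finish(label string, err error) {
	fmt.Printf("%s: err=%v\n", label, err)
}

// deferDirect evaluates err when the defer statement executes, so finish
// always sees nil here even though the function returns an error.
func deferDirect() (err error) {
	defer finish("direct", err)
	err = fmt.Errorf("boom")
	return err
}

// deferClosure reads the named return value only when the deferred closure
// runs, so finish sees the error that is actually returned.
func deferClosure() (err error) {
	defer func() { finish("closure", err) }()
	err = fmt.Errorf("boom")
	return err
}

func main() {
	_ = deferDirect()  // prints "direct: err=<nil>"
	_ = deferClosure() // prints "closure: err=boom"
}
```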
@@ -916,7 +916,7 @@ class LayoutPlot(GenericLayoutPlot, CompositePlot):
# Create title handle
if self.show_title and len(self.coords) > 1:
- title = self.handles['fig'].suptitle('', **self._fontsize('title'))
+ title = self.handles['fig'].suptitle('', y=1.05, **self._fontsize('title'))
self.handles['title'] = title
return layout_subplots, layout_axes, collapsed_layout | 1 | from __future__ import division
from collections import defaultdict
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # noqa (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import (OrderedDict, HoloMap, AdjointLayout, NdLayout,
GridSpace, Element, CompositeOverlay, Element3D,
Empty, Collator)
from ...core.options import Store, Compositor
from ...core.util import int_to_roman, int_to_alpha, basestring
from ...core import traversal
from ..plot import DimensionedPlot, GenericLayoutPlot, GenericCompositePlot
from ..util import get_dynamic_mode, initialize_sampled
from .renderer import MPLRenderer
class MPLPlot(DimensionedPlot):
"""
An MPLPlot object draws a matplotlib figure object when called or
indexed but can also return a matplotlib animation object as
appropriate. MPLPlots take element objects such as Image, Contours
or Points as inputs and plots them in the appropriate format using
matplotlib. As HoloMaps are supported, all plots support animation
via the anim() method.
"""
renderer = MPLRenderer
sideplots = {}
fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
Alpha of the overall figure background.""")
fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
doc="""
The bounds of the overall figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
fig_inches = param.Parameter(default=4, doc="""
The overall matplotlib figure size in inches. May be set as
an integer in which case it will be used to autocompute a
size. Alternatively may be set with an explicit tuple or list,
in which case it will be applied directly after being scaled
by fig_size. If either the width or height is set to None,
it will be computed automatically.""")
fig_latex = param.Boolean(default=False, doc="""
Whether to use LaTeX text in the overall figure.""")
fig_rcparams = param.Dict(default={}, doc="""
matplotlib rc parameters to apply to the overall figure.""")
fig_size = param.Integer(default=100, bounds=(1, None), doc="""
Size relative to the supplied overall fig_inches in percent.""")
initial_hooks = param.HookList(default=[], doc="""
Optional list of hooks called before plotting the data onto
the axis. The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
final_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
finalize_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
sublabel_format = param.String(default=None, allow_None=True, doc="""
Allows labeling the subaxes in each plot with various formatters
including {Alpha}, {alpha}, {numeric} and {roman}.""")
sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
Position relative to the plot for placing the optional subfigure label.""")
sublabel_size = param.Number(default=18, doc="""
Size of optional subfigure label.""")
projection = param.ObjectSelector(default=None,
objects=['3d', 'polar', None], doc="""
The projection of the plot axis, default of None is equivalent to
2D plot, '3d' and 'polar' are also supported.""")
show_frame = param.Boolean(default=True, doc="""
Whether or not to show a complete frame around the plot.""")
_close_figures = True
def __init__(self, fig=None, axis=None, **params):
self._create_fig = True
super(MPLPlot, self).__init__(**params)
# List of handles to matplotlib objects for animation update
scale = self.fig_size/100.
if isinstance(self.fig_inches, (tuple, list)):
self.fig_inches = [None if i is None else i*scale
for i in self.fig_inches]
else:
self.fig_inches *= scale
fig, axis = self._init_axis(fig, axis)
self.handles['fig'] = fig
self.handles['axis'] = axis
if self.final_hooks and self.finalize_hooks:
self.warning('Set either final_hooks or deprecated '
'finalize_hooks, not both.')
self.finalize_hooks = self.final_hooks
def _init_axis(self, fig, axis):
"""
Return an axis which may need to be initialized from
a new figure.
"""
if not fig and self._create_fig:
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
with mpl.rc_context(rc=rc_params):
fig = plt.figure()
l, b, r, t = self.fig_bounds
inches = self.fig_inches
fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
fig.patch.set_alpha(self.fig_alpha)
if isinstance(inches, (tuple, list)):
inches = list(inches)
if inches[0] is None:
inches[0] = inches[1]
elif inches[1] is None:
inches[1] = inches[0]
fig.set_size_inches(list(inches))
else:
fig.set_size_inches([inches, inches])
axis = fig.add_subplot(111, projection=self.projection)
axis.set_aspect('auto')
return fig, axis
def _subplot_label(self, axis):
layout_num = self.layout_num if self.subplot else 1
if self.sublabel_format and not self.adjoined and layout_num > 0:
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
labels = {}
if '{Alpha}' in self.sublabel_format:
labels['Alpha'] = int_to_alpha(layout_num-1)
elif '{alpha}' in self.sublabel_format:
labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
elif '{numeric}' in self.sublabel_format:
labels['numeric'] = self.layout_num
elif '{Roman}' in self.sublabel_format:
labels['Roman'] = int_to_roman(layout_num)
elif '{roman}' in self.sublabel_format:
labels['roman'] = int_to_roman(layout_num).lower()
at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
bbox_to_anchor=self.sublabel_position, frameon=False,
prop=dict(size=self.sublabel_size, weight='bold'),
bbox_transform=axis.transAxes)
at.patch.set_visible(False)
axis.add_artist(at)
def _finalize_axis(self, key):
"""
General method to finalize the axis and plot.
"""
if 'title' in self.handles:
self.handles['title'].set_visible(self.show_title)
self.drawn = True
if self.subplot:
return self.handles['axis']
else:
fig = self.handles['fig']
if self._close_figures: plt.close(fig)
return fig
@property
def state(self):
return self.handles['fig']
def anim(self, start=0, stop=None, fps=30):
"""
Method to return a matplotlib animation. The start and stop
frames may be specified as well as the fps.
"""
figure = self.initialize_plot()
anim = animation.FuncAnimation(figure, self.update_frame,
frames=self.keys,
interval = 1000.0/fps)
# Close the figure handle
if self._close_figures: plt.close(figure)
return anim
def update(self, key):
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
mpl.rcParams.update(rc_params)
if len(self) == 1 and key == 0 and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
"""
CompositePlot provides a baseclass for plots that coordinate multiple
subplots to form a Layout.
"""
def update_frame(self, key, ranges=None):
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.update_frame(key, ranges=ranges)
axis = self.handles['axis']
self.update_handles(axis, self.layout, key, ranges)
class GridPlot(CompositePlot):
"""
Plot a group of elements in a grid layout based on a GridSpace element
object.
"""
aspect = param.Parameter(default='equal', doc="""
Aspect ratios on GridPlot should be automatically determined.""")
padding = param.Number(default=0.1, doc="""
The amount of padding as a fraction of the total Grid size""")
shared_xaxis = param.Boolean(default=False, doc="""
If enabled the x-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
shared_yaxis = param.Boolean(default=False, doc="""
If enabled the y-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
show_frame = param.Boolean(default=False, doc="""
Whether to draw a frame around the Grid.""")
show_legend = param.Boolean(default=False, doc="""
Legends add too much clutter in a grid and are disabled by default.""")
tick_format = param.String(default="%.2f", doc="""
Formatting string for the GridPlot ticklabels.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['bottom', 'top', None], doc="""
Whether and where to display the xaxis, supported options are
'bottom', 'top' and None.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', None], doc="""
Whether and where to display the yaxis, supported options are
'left', 'right' and None.""")
xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
def __init__(self, layout, axis=None, create_axes=True, ranges=None,
keys=None, dimensions=None, layout_num=1, **params):
if not isinstance(layout, GridSpace):
raise Exception("GridPlot only accepts GridSpace.")
self.layout = layout
self.cols, self.rows = layout.shape
self.layout_num = layout_num
extra_opts = self.lookup_options(layout, 'plot').options
if not keys or not dimensions:
dimensions, keys = traversal.unique_dimkeys(layout)
if 'uniform' not in params:
params['uniform'] = traversal.uniform(layout)
dynamic, sampled = get_dynamic_mode(layout)
if sampled:
initialize_sampled(layout, dimensions, keys[0])
super(GridPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(extra_opts, **params))
# Compute ranges layoutwise
grid_kwargs = {}
if axis is not None:
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
self.position = (l, b, w, h)
self.fig_inches = self._get_size()
self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis, ranges, create_axes)
def _get_size(self):
max_dim = max(self.layout.shape)
# Reduce plot size as GridSpace gets larger
shape_factor = 1. / max_dim
# Expand small grids to a sensible viewing size
expand_factor = 1 + (max_dim - 1) * 0.1
scale_factor = expand_factor * shape_factor
cols, rows = self.layout.shape
if isinstance(self.fig_inches, (tuple, list)):
fig_inches = list(self.fig_inches)
if fig_inches[0] is None:
fig_inches[0] = fig_inches[1] * (cols/rows)
if fig_inches[1] is None:
fig_inches[1] = fig_inches[0] * (rows/cols)
return fig_inches
else:
fig_inches = (self.fig_inches,)*2
return (scale_factor * cols * fig_inches[0],
scale_factor * rows * fig_inches[1])
def _create_subplots(self, layout, axis, ranges, create_axes):
layout = layout.map(Compositor.collapse_element, [CompositeOverlay],
clone=False)
norm_opts = self._deep_options(layout, 'norm', ['axiswise'], [Element])
axiswise = all(v.get('axiswise', False) for v in norm_opts.values())
if not ranges:
self.handles['fig'].set_size_inches(self.fig_inches)
subplots, subaxes = OrderedDict(), OrderedDict()
frame_ranges = self.compute_ranges(layout, None, ranges)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
r, c = (0, 0)
for coord in layout.keys(full_grid=True):
if not isinstance(coord, tuple): coord = (coord,)
view = layout.data.get(coord, None)
# Create subplot
if view is not None:
vtype = view.type if isinstance(view, HoloMap) else view.__class__
opts = self.lookup_options(view, 'plot').options
else:
continue
# Create axes
kwargs = {}
if create_axes:
threed = issubclass(vtype, Element3D)
subax = plt.subplot(self._layoutspec[r, c],
projection='3d' if threed else None)
if not axiswise and self.shared_xaxis and self.xaxis is not None:
self.xaxis = 'top'
if not axiswise and self.shared_yaxis and self.yaxis is not None:
self.yaxis = 'right'
# Disable subplot axes depending on shared axis options
# and the position in the grid
if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
if c == 0 and r != 0:
subax.xaxis.set_ticks_position('none')
kwargs['xaxis'] = 'bottom-bare'
if c != 0 and r == 0 and not layout.ndims == 1:
subax.yaxis.set_ticks_position('none')
kwargs['yaxis'] = 'left-bare'
if r != 0 and c != 0:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
if not self.shared_xaxis:
kwargs['xaxis'] = 'bottom-bare'
if not self.shared_yaxis:
kwargs['yaxis'] = 'left-bare'
else:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
subaxes[(r, c)] = subax
else:
subax = None
if issubclass(vtype, CompositeOverlay) and (c == self.cols - 1 and
r == self.rows//2):
kwargs['show_legend'] = self.show_legend
kwargs['legend_position'] = 'right'
# Create subplot
if view is not None:
params = dict(fig=self.handles['fig'], axis=subax,
dimensions=self.dimensions, show_title=False,
subplot=not create_axes, ranges=frame_ranges,
uniform=self.uniform, keys=self.keys,
show_legend=False)
plotting_class = Store.registry['matplotlib'][vtype]
subplot = plotting_class(view, **dict(opts, **dict(params, **kwargs)))
collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
subplots[(r, c)] = subplot
else:
subax.set_visible(False)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if create_axes:
self.handles['axis'] = self._layout_axis(layout, axis)
self._adjust_subplots(self.handles['axis'], subaxes)
return subplots, subaxes, collapsed_layout
def initialize_plot(self, ranges=None):
# Get the extent of the layout elements (not the whole layout)
key = self.keys[-1]
axis = self.handles['axis']
subplot_kwargs = dict()
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
if self.show_title:
title = axis.set_title(self._format_title(key),
**self._fontsize('title'))
self.handles['title'] = title
self._readjust_axes(axis)
self.drawn = True
if self.subplot: return self.handles['axis']
if self._close_figures: plt.close(self.handles['fig'])
return self.handles['fig']
def _readjust_axes(self, axis):
if self.subplot:
axis.set_position(self.position)
if self.aspect == 'equal':
axis.set_aspect(float(self.rows)/self.cols)
self.handles['fig'].canvas.draw()
self._adjust_subplots(self.handles['axis'], self.subaxes)
def update_handles(self, axis, view, key, ranges=None):
"""
Should be called by the update_frame class to update
any handles on the plot.
"""
if self.show_title:
title = axis.set_title(self._format_title(key),
**self._fontsize('title'))
self.handles['title'] = title
def _layout_axis(self, layout, axis):
fig = self.handles['fig']
axkwargs = {'gid': str(self.position)} if axis else {}
layout_axis = fig.add_subplot(1,1,1, **axkwargs)
if axis:
axis.set_visible(False)
layout_axis.set_position(self.position)
layout_axis.patch.set_visible(False)
tick_fontsize = self._fontsize('ticks','labelsize',common=False)
if tick_fontsize: layout_axis.tick_params(**tick_fontsize)
# Set labels
layout_axis.set_xlabel(str(layout.kdims[0]),
**self._fontsize('xlabel'))
if layout.ndims == 2:
layout_axis.set_ylabel(str(layout.kdims[1]),
**self._fontsize('ylabel'))
# Compute and set x- and y-ticks
dims = layout.kdims
keys = layout.keys()
if layout.ndims == 1:
dim1_keys = keys
dim2_keys = [0]
layout_axis.get_yaxis().set_visible(False)
else:
dim1_keys, dim2_keys = zip(*keys)
layout_axis.set_ylabel(str(dims[1]))
layout_axis.set_aspect(float(self.rows)/self.cols)
# Process ticks
plot_width = (1.0 - self.padding) / self.cols
border_width = self.padding / (self.cols-1)
xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
plot_height = (1.0 - self.padding) / self.rows
border_height = self.padding / (self.rows-1) if layout.ndims > 1 else 0
yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
layout_axis.set_xticks(xticks)
layout_axis.set_xticklabels(self._process_ticklabels(sorted(set(dim1_keys)), dims[0]))
for tick in layout_axis.get_xticklabels():
tick.set_rotation(self.xrotation)
ydim = dims[1] if layout.ndims > 1 else None
layout_axis.set_yticks(yticks)
layout_axis.set_yticklabels(self._process_ticklabels(sorted(set(dim2_keys)), ydim))
for tick in layout_axis.get_yticklabels():
tick.set_rotation(self.yrotation)
if not self.show_frame:
layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
axis = layout_axis
if self.xaxis is not None:
axis.xaxis.set_ticks_position(self.xaxis)
axis.xaxis.set_label_position(self.xaxis)
else:
axis.xaxis.set_visible(False)
if self.yaxis is not None:
axis.yaxis.set_ticks_position(self.yaxis)
axis.yaxis.set_label_position(self.yaxis)
else:
axis.yaxis.set_visible(False)
for pos in ['left', 'right', 'top', 'bottom']:
axis.spines[pos].set_visible(False)
return layout_axis
def _process_ticklabels(self, labels, dim):
formatted_labels = []
for k in labels:
if dim and dim.value_format:
k = dim.value_format(k)
elif not isinstance(k, (str, type(None))):
k = self.tick_format % k
elif k is None:
k = ''
formatted_labels.append(k)
return formatted_labels
def _adjust_subplots(self, axis, subaxes):
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
if self.padding:
width_padding = w/(1./self.padding)
height_padding = h/(1./self.padding)
else:
width_padding, height_padding = 0, 0
if self.cols == 1:
b_w = 0
else:
b_w = width_padding / (self.cols - 1)
if self.rows == 1:
b_h = 0
else:
b_h = height_padding / (self.rows - 1)
ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
r, c = (0, 0)
for ax in subaxes.values():
xpos = l + (c*ax_w) + (c * b_w)
ypos = b + (r*ax_h) + (r * b_h)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if not ax is None:
ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(CompositePlot):
"""
LayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
Initially, a LayoutPlot computes an appropriate layout based on
the number of Views in the AdjointLayout object it has been given, but
when embedded in a NdLayout, it can recompute the layout to
match the number of rows and columns as part of a larger grid.
"""
layout_dict = {'Single': ['main'],
'Dual': ['main', 'right'],
'Triple': ['top', None, 'main', 'right'],
'Embedded Dual': [None, 'main']}
def __init__(self, layout, layout_type, subaxes, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
# Type may be set to 'Embedded Dual' by a call to grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]
# The supplied (axes, view) objects as indexed by position
self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
def initialize_plot(self, ranges=None):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = self.layout.get(pos, None)
subplot = self.subplots.get(pos, None)
ax = self.subaxes.get(pos, None)
# If no view object or empty position, disable the axis
if None in [view, pos, subplot]:
ax.set_axis_off()
continue
subplot.initialize_plot(ranges=ranges)
self.adjust_positions()
self.drawn = True
def adjust_positions(self):
"""
Make adjustments to the positions of subplots (if available)
relative to the main plot axes as required.
This method is called by LayoutPlot after an initial pass
used to position all the Layouts together. This method allows
LayoutPlots to make final adjustments to the axis positions.
"""
checks = [self.view_positions, self.subaxes, self.subplots]
right = all('right' in check for check in checks)
top = all('top' in check for check in checks)
if not 'main' in self.subplots or not (top or right):
return
self.handles['fig'].canvas.draw()
main_ax = self.subplots['main'].handles['axis']
bbox = main_ax.get_position()
if right:
ax = self.subaxes['right']
subplot = self.subplots['right']
ax.set_position([bbox.x1 + bbox.width * subplot.border_size,
bbox.y0,
bbox.width * subplot.subplot_size, bbox.height])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
if top:
ax = self.subaxes['top']
subplot = self.subplots['top']
ax.set_position([bbox.x0,
bbox.y1 + bbox.height * subplot.border_size,
bbox.width, bbox.height * subplot.subplot_size])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
def update_frame(self, key, ranges=None):
for pos in self.view_positions:
subplot = self.subplots.get(pos)
if subplot is not None:
subplot.update_frame(key, ranges)
def __len__(self):
return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
"""
A LayoutPlot accepts either a Layout or a NdLayout and
displays the elements in a cartesian grid in scanline order.
"""
aspect_weight = param.Number(default=0, doc="""
Weighting of the individual aspects when computing the Layout
grid aspects and overall figure size.""")
fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
The bounds of the figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
tight = param.Boolean(default=False, doc="""
Tightly fit the axes in the layout within the fig_bounds
and tight_padding.""")
tight_padding = param.Parameter(default=3, doc="""
Integer or tuple specifying the padding in inches in a tight layout.""")
hspace = param.Number(default=0.5, doc="""
Specifies the space between horizontally adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
vspace = param.Number(default=0.1, doc="""
Specifies the space between vertically adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
fontsize = param.Parameter(default={'title':16}, allow_None=True)
def __init__(self, layout, **params):
super(LayoutPlot, self).__init__(layout=layout, **params)
self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
the grid indices needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
row_heightratios, col_widthratios = {}, {}
col_aspects, row_aspects = defaultdict(lambda: [0, 0]), defaultdict(lambda: [0, 0])
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((r, c), (None, None))
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = 1
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = plot_type.border_size + plot_type.subplot_size
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
col_aspect = [main_aspect, 1/(4/width_ratios[1])]
else:
width_ratios = [4]
col_aspect = [main_aspect, 0]
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = plot_type.border_size + plot_type.subplot_size
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
row_aspect = [1/(4/height_ratios[0]), 1./main_aspect]
hidx = 1
else:
height_ratios = [4]
row_aspect = [0, 1./main_aspect]
hidx = 0
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios[0] = (width_ratios[0] * main_aspect)
height_ratios[-1] = (height_ratios[-1] * 1./main_aspect)
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
if layout_shape[1] > row_heightratios.get(r, (0, None))[0]:
row_heightratios[r] = [layout_shape[1], height_ratios]
if height_ratios[hidx] > row_heightratios[r][1][hidx]:
row_heightratios[r][1][-1] = height_ratios[hidx]
if layout_shape[0] > col_widthratios.get(c, (0, None))[0]:
col_widthratios[c] = (layout_shape[0], width_ratios)
if width_ratios[0] > col_widthratios[c][1][0]:
col_widthratios[c][1][0] = width_ratios[0]
for i in range(2):
if col_aspect[i] > col_aspects.get(c, [0,0])[i]:
col_aspects[c][i] = col_aspect[i]
if row_aspect[i] > row_aspects.get(r, [0,0])[i]:
row_aspects[r][i] = row_aspect[i]
# In order of row/column collect the largest width and height ratios
height_ratios = [v[1] for k, v in sorted(row_heightratios.items())]
width_ratios = [v[1] for k, v in sorted(col_widthratios.items())]
col_aspect_ratios = [v for k, v in sorted(col_aspects.items())]
row_aspect_ratios = [v for k, v in sorted(row_aspects.items())]
# Compute the number of rows and cols
cols = np.sum([len(wr) for wr in width_ratios])
rows = np.sum([len(hr) for hr in height_ratios])
# Flatten the width and height ratio lists
wr_list = [wr for wrs in width_ratios for wr in wrs]
hr_list = [hr for hrs in height_ratios for hr in hrs]
# Compute and set the plot size if not explicitly supplied
col_ars = [ar for ars in col_aspect_ratios for ar in ars]
row_ars = [ar for ars in row_aspect_ratios for ar in ars]
width = len(col_ars[::2]) + sum(col_ars[1::2])
yscale = sum(col_ars)/sum(row_ars)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = len(width_ratios[c])
hsplits = len(height_ratios[r])
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
# Get the AdjoinLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if empty:
obj = AdjointLayout([])
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
# Create title handle
if self.show_title and len(self.coords) > 1:
title = self.handles['fig'].suptitle('', **self._fontsize('title'))
self.handles['title'] = title
return layout_subplots, layout_axes, collapsed_layout
def grid_situate(self, current_idx, layout_type, subgrid_width):
"""
Situate the current AdjointLayoutPlot in a LayoutPlot. The
LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
must be embedded. This enclosing layout is guaranteed to have
enough cells to display all the views.
Based on this enforced layout format, a starting index
supplied by LayoutPlot (indexing into a large gridspec
arrangement) is updated to the appropriate embedded value. It
will also return a list of gridspec indices associated with
all the required layout axes.
"""
# Set the layout configuration as situated in a NdLayout
if layout_type == 'Single':
start, inds = current_idx+1, [current_idx]
elif layout_type == 'Dual':
start, inds = current_idx+2, [current_idx, current_idx+1]
bottom_idx = current_idx + subgrid_width
if layout_type == 'Embedded Dual':
bottom = ((current_idx+1) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx)+1
start, inds = grid_idx, [current_idx, bottom_idx]
elif layout_type == 'Triple':
bottom = ((current_idx+2) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx) + 2
start, inds = grid_idx, [current_idx, current_idx+1,
bottom_idx, bottom_idx+1]
return start, inds
def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
projections = []
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
subplot_opts = dict(show_title=False, adjoined=layout)
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = layout.get(pos, None)
ax = axes.get(pos, None)
if view is None:
projections.append(None)
continue
# Determine projection type for plot
components = view.traverse(lambda x: x)
projs = ['3d' if isinstance(c, Element3D) else
self.lookup_options(c, 'plot').options.get('projection', None)
for c in components]
projs = [p for p in projs if p is not None]
if len(set(projs)) > 1:
raise Exception("A single axis may only be assigned one projection type")
elif projs:
projections.append(projs[0])
else:
projections.append(None)
if not create:
continue
# Customize plotopts depending on position.
plotopts = self.lookup_options(view, 'plot').options
# Options common for any subplot
override_opts = {}
sublabel_opts = {}
if pos == 'main':
own_params = self.get_param_values(onlychanged=True)
sublabel_opts = {k: v for k, v in own_params
if 'sublabel_' in k}
if not isinstance(view, GridSpace):
override_opts = dict(aspect='square')
elif pos == 'right':
right_opts = dict(invert_axes=True,
xaxis=None)
override_opts = dict(subplot_opts, **right_opts)
elif pos == 'top':
top_opts = dict(yaxis=None)
override_opts = dict(subplot_opts, **top_opts)
# Override the plotopts as required
plotopts = dict(sublabel_opts, **plotopts)
plotopts.update(override_opts, fig=self.handles['fig'])
vtype = view.type if isinstance(view, HoloMap) else view.__class__
if isinstance(view, GridSpace):
plotopts['create_axes'] = ax is not None
if pos == 'main':
plot_type = Store.registry['matplotlib'][vtype]
else:
plot_type = MPLPlot.sideplots[vtype]
num = num if len(self.coords) > 1 else 0
subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
**plotopts)
if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
adjoint_clone[pos] = subplots[pos].hmap
else:
adjoint_clone[pos] = subplots[pos].layout
return subplots, adjoint_clone, projections
def update_handles(self, axis, view, key, ranges=None):
"""
Should be called by the update_frame class to update
any handles on the plot.
"""
if self.show_title and 'title' in self.handles and len(self.coords) > 1:
self.handles['title'].set_text(self._format_title(key))
def initialize_plot(self):
axis = self.handles['axis']
self.update_handles(axis, None, self.keys[-1])
ranges = self.compute_ranges(self.layout, self.keys[-1], None)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges)
return self._finalize_axis(None)
class AdjoinedPlot(DimensionedPlot):
aspect = param.Parameter(default='auto', doc="""
Aspect ratios on SideHistogramPlot should be determined by the
AdjointLayoutPlot.""")
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0.25, doc="""
The size of the border expressed as a fraction of the main plot.""")
show_frame = param.Boolean(default=False)
show_title = param.Boolean(default=False, doc="""
Titles should be disabled on all SidePlots to avoid clutter.""")
subplot_size = param.Number(default=0.25, doc="""
The size of subplots expressed as a fraction of the main plot.""")
show_xlabel = param.Boolean(default=False, doc="""
Whether to show the x-label of the plot. Disabled by default
because plots are often too cramped to fit the title correctly.""")
| 1 | 14,259 | Is this a magic number? I assume this makes it look better. | holoviz-holoviews | py |
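The review above flags the hard-coded `y=1.05` offset passed to `suptitle`. One way to avoid a magic number is to expose the offset as a documented parameter; the sketch below is purely illustrative (the `title_y_offset` name is hypothetical and not part of the HoloViews API), reusing the same `param` and matplotlib machinery the file already imports:

```python
import param
from matplotlib import pyplot as plt


class TitledFigure(param.Parameterized):
    """Illustrative wrapper that makes the suptitle offset configurable."""

    title_y_offset = param.Number(default=1.05, doc="""
        Vertical position of the figure title in figure coordinates;
        values above 1.0 lift the title clear of the subplots.""")

    def make_figure(self, text):
        fig = plt.figure()
        fig.add_subplot(111)
        # The offset is now named, documented and overridable per instance
        # instead of being buried in the suptitle call.
        fig.suptitle(text, y=self.title_y_offset)
        return fig


fig = TitledFigure(title_y_offset=1.02).make_figure("Layout title")
```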
@@ -88,8 +88,8 @@ func (ob *Outbox) Send(ctx context.Context, from, to address.Address, value type
msgSendErrCt.Inc(ctx, 1)
}
ob.journal.Write("Send",
- "to", to.String(), "from", from.String(), "value", value.AsBigInt().Uint64(), "method", method,
- "gasPrice", gasPrice.AsBigInt().Uint64(), "gasLimit", uint64(gasLimit), "bcast", bcast,
+ "to", to.String(), "from", from.String(), "value", value.Int.Uint64(), "method", method,
+ "gasPrice", gasPrice.Int.Uint64(), "gasLimit", uint64(gasLimit), "bcast", bcast,
"params", params, "error", err, "cid", out.String())
}()
| 1 | package message
import (
"context"
"sync"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/journal"
"github.com/filecoin-project/go-filecoin/internal/pkg/metrics"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/abi"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor"
)
// Outbox validates and marshals messages for sending and maintains the outbound message queue.
// The code arrangement here is not quite right. We probably want to factor out the bits that
// build and sign a message from those that add to the local queue/pool and broadcast it.
// See discussion in
// https://github.com/filecoin-project/go-filecoin/pull/3178#discussion_r311593312
// and https://github.com/filecoin-project/go-filecoin/issues/3052#issuecomment-513643661
type Outbox struct {
// Signs messages
signer types.Signer
// Validates messages before sending them.
validator messageValidator
// Holds messages sent from this node but not yet mined.
queue *Queue
// Publishes a signed message to the network.
publisher publisher
// Maintains message queue in response to new tipsets.
policy QueuePolicy
chains chainProvider
actors actorProvider
// Protects the "next nonce" calculation to avoid collisions.
nonceLock sync.Mutex
journal journal.Writer
}
type messageValidator interface {
// Validate checks a message for validity.
Validate(ctx context.Context, msg *types.UnsignedMessage, fromActor *actor.Actor) error
}
type actorProvider interface {
// GetActorAt returns the actor state defined by the chain up to some tipset
GetActorAt(ctx context.Context, tipset block.TipSetKey, addr address.Address) (*actor.Actor, error)
}
type publisher interface {
Publish(ctx context.Context, message *types.SignedMessage, height uint64, bcast bool) error
}
var msgSendErrCt = metrics.NewInt64Counter("message_sender_error", "Number of errors encountered while sending a message")
// NewOutbox creates a new outbox
func NewOutbox(signer types.Signer, validator messageValidator, queue *Queue,
publisher publisher, policy QueuePolicy, chains chainProvider, actors actorProvider, jw journal.Writer) *Outbox {
return &Outbox{
signer: signer,
validator: validator,
queue: queue,
publisher: publisher,
policy: policy,
chains: chains,
actors: actors,
journal: jw,
}
}
// Queue returns the outbox's outbound message queue.
func (ob *Outbox) Queue() *Queue {
return ob.queue
}
// Send marshals and sends a message, retaining it in the outbound message queue.
// If bcast is true, the publisher broadcasts the message to the network at the current block height.
func (ob *Outbox) Send(ctx context.Context, from, to address.Address, value types.AttoFIL,
gasPrice types.AttoFIL, gasLimit types.GasUnits, bcast bool, method types.MethodID, params ...interface{}) (out cid.Cid, pubErrCh chan error, err error) {
defer func() {
if err != nil {
msgSendErrCt.Inc(ctx, 1)
}
ob.journal.Write("Send",
"to", to.String(), "from", from.String(), "value", value.AsBigInt().Uint64(), "method", method,
"gasPrice", gasPrice.AsBigInt().Uint64(), "gasLimit", uint64(gasLimit), "bcast", bcast,
"params", params, "error", err, "cid", out.String())
}()
encodedParams, err := abi.ToEncodedValues(params...)
if err != nil {
return cid.Undef, nil, errors.Wrap(err, "invalid params")
}
// Lock to avoid a race inspecting the actor state and message queue to calculate next nonce.
ob.nonceLock.Lock()
defer ob.nonceLock.Unlock()
head := ob.chains.GetHead()
fromActor, err := ob.actors.GetActorAt(ctx, head, from)
if err != nil {
return cid.Undef, nil, errors.Wrapf(err, "no actor at address %s", from)
}
nonce, err := nextNonce(fromActor, ob.queue, from)
if err != nil {
return cid.Undef, nil, errors.Wrapf(err, "failed calculating nonce for actor at %s", from)
}
rawMsg := types.NewMeteredMessage(from, to, nonce, value, method, encodedParams, gasPrice, gasLimit)
signed, err := types.NewSignedMessage(*rawMsg, ob.signer)
if err != nil {
return cid.Undef, nil, errors.Wrap(err, "failed to sign message")
}
// Slightly awkward: it would be better to validate before signing but the MeteredMessage construction
// is hidden inside NewSignedMessage.
err = ob.validator.Validate(ctx, &signed.Message, fromActor)
if err != nil {
return cid.Undef, nil, errors.Wrap(err, "invalid message")
}
return sendSignedMsg(ctx, ob, signed, bcast)
}
// SignedSend sends a signed message, retaining it in the outbound message queue.
// If bcast is true, the publisher broadcasts the message to the network at the current block height.
func (ob *Outbox) SignedSend(ctx context.Context, signed *types.SignedMessage, bcast bool) (out cid.Cid, pubErrCh chan error, err error) {
defer func() {
if err != nil {
msgSendErrCt.Inc(ctx, 1)
}
}()
return sendSignedMsg(ctx, ob, signed, bcast)
}
// sendSignedMsg adds the signed message to the pool and returns its cid
func sendSignedMsg(ctx context.Context, ob *Outbox, signed *types.SignedMessage, bcast bool) (cid.Cid, chan error, error) {
head := ob.chains.GetHead()
height, err := tipsetHeight(ob.chains, head)
if err != nil {
return cid.Undef, nil, errors.Wrap(err, "failed to get block height")
}
// Add to the local message queue/pool at the last possible moment before broadcasting to network.
if err := ob.queue.Enqueue(ctx, signed, height); err != nil {
return cid.Undef, nil, errors.Wrap(err, "failed to add message to outbound queue")
}
c, err := signed.Cid()
if err != nil {
return cid.Undef, nil, err
}
pubErrCh := make(chan error)
go func() {
err = ob.publisher.Publish(ctx, signed, height, bcast)
if err != nil {
log.Errorf("error: %s publishing message %s", err, c.String())
}
pubErrCh <- err
close(pubErrCh)
}()
return c, pubErrCh, nil
}
// HandleNewHead maintains the message queue in response to a new head tipset.
func (ob *Outbox) HandleNewHead(ctx context.Context, oldTips, newTips []block.TipSet) error {
return ob.policy.HandleNewHead(ctx, ob.queue, oldTips, newTips)
}
// nextNonce returns the next expected nonce value for an account actor. This is the larger
// of the actor's nonce value, or one greater than the largest nonce from the actor found in the message queue.
func nextNonce(act *actor.Actor, queue *Queue, address address.Address) (uint64, error) {
actorNonce, err := actor.NextNonce(act)
if err != nil {
return 0, err
}
poolNonce, found := queue.LargestNonce(address)
if found && poolNonce >= actorNonce {
return poolNonce + 1, nil
}
return actorNonce, nil
}
func tipsetHeight(provider chainProvider, key block.TipSetKey) (uint64, error) {
head, err := provider.GetTipSet(key)
if err != nil {
return 0, err
}
return head.Height()
}
| 1 | 22,887 | Would welcome a stringification method on big.Int in specs-actors | filecoin-project-venus | go |
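The review above asks for a stringification method on the big integer wrapper, so call sites would not have to reach into `value.Int` and truncate it with `Uint64()` just to log a value. A hypothetical sketch of what such a method can look like on a wrapper type (this is not the actual specs-actors API, only an illustration of the suggestion):

```go
package main

import (
	"fmt"
	"math/big"
)

// BigInt is a hypothetical wrapper in the style of types like AttoFIL,
// which embed a big integer value.
type BigInt struct {
	*big.Int
}

// String renders the full, untruncated value, so logging and journalling
// call sites can pass the wrapper directly.
func (b BigInt) String() string {
	if b.Int == nil {
		return "0"
	}
	return b.Int.String()
}

func main() {
	v := BigInt{new(big.Int).Mul(big.NewInt(1000000000), big.NewInt(1000000000))}
	// fmt picks up the String method: prints "value 1000000000000000000".
	fmt.Println("value", v)
}
```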
@@ -49,6 +49,8 @@ type (
WorkingSet interface {
// states and actions
RunActions(context.Context, uint64, []action.SealedEnvelope) (hash.Hash32B, map[hash.Hash32B]*action.Receipt, error)
+ RunAction(context.Context, action.SealedEnvelope) (*action.Receipt, error)
+ PersistBlockLevelInfo(blockHeight uint64) hash.Hash32B
Snapshot() int
Revert(int) error
Commit() error | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package factory
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/db/trie"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/state"
)
var (
stateDBMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_state_db",
Help: "IoTeX State DB",
},
[]string{"type"},
)
dbBatchSizelMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_db_batch_size",
Help: "DB batch size",
},
[]string{},
)
)
func init() {
prometheus.MustRegister(stateDBMtc)
prometheus.MustRegister(dbBatchSizelMtc)
}
type (
// WorkingSet defines an interface for working set of states changes
WorkingSet interface {
// states and actions
RunActions(context.Context, uint64, []action.SealedEnvelope) (hash.Hash32B, map[hash.Hash32B]*action.Receipt, error)
Snapshot() int
Revert(int) error
Commit() error
RootHash() hash.Hash32B
Digest() hash.Hash32B
Version() uint64
Height() uint64
// General state
State(hash.PKHash, interface{}) error
PutState(hash.PKHash, interface{}) error
DelState(pkHash hash.PKHash) error
GetDB() db.KVStore
GetCachedBatch() db.CachedBatch
}
// workingSet implements WorkingSet interface, tracks pending changes to account/contract in local cache
workingSet struct {
ver uint64
blkHeight uint64
accountTrie trie.Trie // global account state trie
trieRoots map[int]hash.Hash32B // root of trie at time of snapshot
cb db.CachedBatch // cached batch for pending writes
dao db.KVStore // the underlying DB for account/contract storage
actionHandlers []protocol.ActionHandler
}
)
// NewWorkingSet creates a new working set
func NewWorkingSet(
version uint64,
kv db.KVStore,
root hash.Hash32B,
actionHandlers []protocol.ActionHandler,
) (WorkingSet, error) {
ws := &workingSet{
ver: version,
trieRoots: make(map[int]hash.Hash32B),
cb: db.NewCachedBatch(),
dao: kv,
actionHandlers: actionHandlers,
}
dbForTrie, err := db.NewKVStoreForTrie(AccountKVNameSpace, ws.dao, db.CachedBatchOption(ws.cb))
if err != nil {
return nil, errors.Wrap(err, "failed to generate state trie db")
}
tr, err := trie.NewTrie(trie.KVStoreOption(dbForTrie), trie.RootHashOption(root[:]))
if err != nil {
return nil, errors.Wrap(err, "failed to generate state trie from config")
}
ws.accountTrie = tr
if err := ws.accountTrie.Start(context.Background()); err != nil {
return nil, errors.Wrapf(err, "failed to load state trie from root = %x", root)
}
return ws, nil
}
// RootHash returns the hash of the root node of the accountTrie
func (ws *workingSet) RootHash() hash.Hash32B {
return byteutil.BytesTo32B(ws.accountTrie.RootHash())
}
// Digest returns the delta state digest
func (ws *workingSet) Digest() hash.Hash32B { return hash.ZeroHash32B }
// Version returns the Version of this working set
func (ws *workingSet) Version() uint64 {
return ws.ver
}
// Height returns the Height of the block being worked on
func (ws *workingSet) Height() uint64 {
return ws.blkHeight
}
// RunActions runs actions in the block and track pending changes in working set
func (ws *workingSet) RunActions(
ctx context.Context,
blockHeight uint64,
elps []action.SealedEnvelope,
) (hash.Hash32B, map[hash.Hash32B]*action.Receipt, error) {
ws.blkHeight = blockHeight
// Handle actions
receipts := make(map[hash.Hash32B]*action.Receipt)
for _, elp := range elps {
for _, actionHandler := range ws.actionHandlers {
receipt, err := actionHandler.Handle(ctx, elp.Action(), ws)
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrapf(
err,
"error when action %x (nonce: %d) from %s mutates states",
elp.Hash(),
elp.Nonce(),
elp.SrcAddr(),
)
}
if receipt != nil {
receipts[elp.Hash()] = receipt
}
}
}
// Persist accountTrie's root hash
rootHash := ws.accountTrie.RootHash()
ws.cb.Put(AccountKVNameSpace, []byte(AccountTrieRootKey), rootHash[:], "failed to store accountTrie's root hash")
// Persist current chain Height
h := byteutil.Uint64ToBytes(blockHeight)
ws.cb.Put(AccountKVNameSpace, []byte(CurrentHeightKey), h, "failed to store accountTrie's current Height")
// Persist the historical accountTrie's root hash
ws.cb.Put(
AccountKVNameSpace,
[]byte(fmt.Sprintf("%s-%d", AccountTrieRootKey, blockHeight)),
rootHash[:],
"failed to store accountTrie's root hash",
)
return ws.RootHash(), receipts, nil
}
func (ws *workingSet) Snapshot() int {
s := ws.cb.Snapshot()
ws.trieRoots[s] = byteutil.BytesTo32B(ws.accountTrie.RootHash())
return s
}
func (ws *workingSet) Revert(snapshot int) error {
if err := ws.cb.Revert(snapshot); err != nil {
return err
}
root, ok := ws.trieRoots[snapshot]
if !ok {
// this should not happen, b/c we save the trie root on a successful return of Snapshot(), but check anyway
return errors.Wrapf(trie.ErrInvalidTrie, "failed to get trie root for snapshot = %d", snapshot)
}
return ws.accountTrie.SetRootHash(root[:])
}
// Commit persists all changes in RunActions() into the DB
func (ws *workingSet) Commit() error {
// Commit all changes in a batch
dbBatchSizelMtc.WithLabelValues().Set(float64(ws.cb.Size()))
if err := ws.dao.Commit(ws.cb); err != nil {
return errors.Wrap(err, "failed to Commit all changes to underlying DB in a batch")
}
ws.clear()
return nil
}
// GetDB returns the underlying DB for account/contract storage
func (ws *workingSet) GetDB() db.KVStore {
return ws.dao
}
// GetCachedBatch returns the cached batch for pending writes
func (ws *workingSet) GetCachedBatch() db.CachedBatch {
return ws.cb
}
// State pulls a state from DB
func (ws *workingSet) State(hash hash.PKHash, s interface{}) error {
stateDBMtc.WithLabelValues("get").Inc()
mstate, err := ws.accountTrie.Get(hash[:])
if errors.Cause(err) == trie.ErrNotExist {
return errors.Wrapf(state.ErrStateNotExist, "addrHash = %x", hash[:])
}
if err != nil {
return errors.Wrapf(err, "failed to get account of %x", hash)
}
return state.Deserialize(s, mstate)
}
// PutState puts a state into DB
func (ws *workingSet) PutState(pkHash hash.PKHash, s interface{}) error {
stateDBMtc.WithLabelValues("put").Inc()
ss, err := state.Serialize(s)
if err != nil {
return errors.Wrapf(err, "failed to convert account %v to bytes", s)
}
return ws.accountTrie.Upsert(pkHash[:], ss)
}
// DelState deletes a state from DB
func (ws *workingSet) DelState(pkHash hash.PKHash) error {
return ws.accountTrie.Delete(pkHash[:])
}
// clear removes all local changes after committing to trie
func (ws *workingSet) clear() {
ws.trieRoots = nil
ws.trieRoots = make(map[int]hash.Hash32B)
}
| 1 | 14,757 | PersistBlockLevelInfo -> UpdateBlockLevelInfo | iotexproject-iotex-core | go |
@@ -0,0 +1,10 @@
+_base_ = [
+ 'retinanet_pvt_t_fpn_1x_coco.py',
+]
+model = dict(
+ backbone=dict(
+ num_layers=[3, 4, 6, 3],
+ init_cfg=dict(
+ type='Pretrained',
+ checkpoint='https://github.com/whai362/PVT/'
+ 'releases/download/v2/pvt_small.pth'))) | 1 | 1 | 25,014 | Type is redundant since it is inherited. | open-mmlab-mmdetection | py |
|
@@ -94,6 +94,10 @@ public class TiTableInfo implements Serializable {
primaryKeyColumn = primaryKey;
}
+ public boolean isNotView() {
+ return this.viewInfo == null;
+ }
+
public boolean isView() {
return this.viewInfo != null;
} | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.meta;
import static java.util.Objects.requireNonNull;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableList;
import com.pingcap.tidb.tipb.TableInfo;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.meta.TiColumnInfo.InternalTypeHolder;
import com.pingcap.tikv.types.DataType;
import com.pingcap.tikv.types.DataTypeFactory;
import java.io.Serializable;
import java.util.List;
import java.util.stream.Collectors;
@JsonIgnoreProperties(ignoreUnknown = true)
public class TiTableInfo implements Serializable {
private final long id;
private final String name;
private final String charset;
private final String collate;
private final List<TiColumnInfo> columns;
private final List<TiIndexInfo> indices;
private final boolean pkIsHandle;
private final String comment;
private final long autoIncId;
private final long maxColumnId;
private final long maxIndexId;
private final long oldSchemaId;
private final long rowSize; // estimated row size
private final TiPartitionInfo partitionInfo;
private final TiColumnInfo primaryKeyColumn;
private final TiViewInfo viewInfo;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public TiTableInfo(
@JsonProperty("id") long id,
@JsonProperty("name") CIStr name,
@JsonProperty("charset") String charset,
@JsonProperty("collate") String collate,
@JsonProperty("pk_is_handle") boolean pkIsHandle,
@JsonProperty("cols") List<TiColumnInfo> columns,
@JsonProperty("index_info") List<TiIndexInfo> indices,
@JsonProperty("comment") String comment,
@JsonProperty("auto_inc_id") long autoIncId,
@JsonProperty("max_col_id") long maxColumnId,
@JsonProperty("max_idx_id") long maxIndexId,
@JsonProperty("old_schema_id") long oldSchemaId,
@JsonProperty("partition") TiPartitionInfo partitionInfo,
@JsonProperty("view") TiViewInfo viewInfo) {
this.id = id;
this.name = name.getL();
this.charset = charset;
this.collate = collate;
this.columns = ImmutableList.copyOf(requireNonNull(columns, "columns is null"));
    // TODO: Use more precise prediction according to types
this.rowSize = columns.stream().mapToLong(TiColumnInfo::getSize).sum();
this.pkIsHandle = pkIsHandle;
this.indices = indices != null ? ImmutableList.copyOf(indices) : ImmutableList.of();
this.indices.forEach(x -> x.calculateIndexSize(columns));
this.comment = comment;
this.autoIncId = autoIncId;
this.maxColumnId = maxColumnId;
this.maxIndexId = maxIndexId;
this.oldSchemaId = oldSchemaId;
this.partitionInfo = partitionInfo;
this.viewInfo = viewInfo;
TiColumnInfo primaryKey = null;
for (TiColumnInfo col : this.columns) {
if (col.isPrimaryKey()) {
primaryKey = col;
break;
}
}
primaryKeyColumn = primaryKey;
}
public boolean isView() {
return this.viewInfo != null;
}
// auto increment column must be a primary key column
public boolean hasAutoIncrementColumn() {
if (primaryKeyColumn != null) {
return primaryKeyColumn.isAutoIncrement();
}
return false;
}
// auto increment column must be a primary key column
public TiColumnInfo getAutoIncrementColInfo() {
if (hasAutoIncrementColumn()) {
return primaryKeyColumn;
}
return null;
}
public boolean isAutoIncColUnsigned() {
TiColumnInfo col = getAutoIncrementColInfo();
if (col == null) return false;
return col.getType().isUnsigned();
}
public long getId() {
return id;
}
public String getName() {
return name;
}
public String getCharset() {
return charset;
}
public String getCollate() {
return collate;
}
public List<TiColumnInfo> getColumns() {
return columns;
}
public long getEstimatedRowSizeInByte() {
return rowSize;
}
public TiColumnInfo getColumn(int offset) {
if (offset < 0 || offset >= columns.size()) {
throw new TiClientInternalException(String.format("Column offset %d out of bound", offset));
}
return columns.get(offset);
}
public boolean isPkHandle() {
return pkIsHandle;
}
public List<TiIndexInfo> getIndices() {
return indices;
}
public String getComment() {
return comment;
}
private long getAutoIncId() {
return autoIncId;
}
private long getMaxColumnId() {
return maxColumnId;
}
private long getMaxIndexId() {
return maxIndexId;
}
private long getOldSchemaId() {
return oldSchemaId;
}
public TiPartitionInfo getPartitionInfo() {
return partitionInfo;
}
TableInfo toProto() {
return TableInfo.newBuilder()
.setTableId(getId())
.addAllColumns(
getColumns().stream().map(col -> col.toProto(this)).collect(Collectors.toList()))
.build();
}
public boolean hasPrimaryKey() {
return primaryKeyColumn != null;
}
// Only Integer Column will be a PK column
// and there exists only one PK column
public TiColumnInfo getPKIsHandleColumn() {
if (isPkHandle()) {
for (TiColumnInfo col : getColumns()) {
if (col.isPrimaryKey()) {
return col;
}
}
}
return null;
}
private TiColumnInfo copyColumn(TiColumnInfo col) {
DataType type = col.getType();
InternalTypeHolder typeHolder = type.toTypeHolder();
typeHolder.setFlag(type.getFlag() & (~DataType.PriKeyFlag));
DataType newType = DataTypeFactory.of(typeHolder);
return new TiColumnInfo(
col.getId(),
col.getName(),
col.getOffset(),
newType,
col.getSchemaState(),
col.getOriginDefaultValue(),
col.getDefaultValue(),
col.getComment(),
col.getVersion(),
col.getGeneratedExprString())
.copyWithoutPrimaryKey();
}
public TiTableInfo copyTableWithRowId() {
if (!isPkHandle()) {
ImmutableList.Builder<TiColumnInfo> newColumns = ImmutableList.builder();
for (TiColumnInfo col : getColumns()) {
newColumns.add(copyColumn(col));
}
newColumns.add(TiColumnInfo.getRowIdColumn(getColumns().size()));
return new TiTableInfo(
getId(),
CIStr.newCIStr(getName()),
getCharset(),
getCollate(),
true,
newColumns.build(),
getIndices(),
getComment(),
getAutoIncId(),
getMaxColumnId(),
getMaxIndexId(),
getOldSchemaId(),
partitionInfo,
null);
} else {
return this;
}
}
@Override
public String toString() {
return toProto().toString();
}
public boolean isPartitionEnabled() {
if (partitionInfo == null) return false;
return partitionInfo.isEnable();
}
public boolean hasGeneratedColumn() {
for (TiColumnInfo col : getColumns()) {
if (col.isGeneratedColumn()) {
return true;
}
}
return false;
}
}
| 1 | 11,042 | better use `!isView()` so that we don't need to modify multiple lines in future. | pingcap-tispark | java |
@@ -43,6 +43,12 @@ module RSpec
extend RSpec::Core::Warnings
+ class << self
+ # Setters for shared global objects
+ # @api private
+ attr_writer :configuration, :world
+ end
+
# Used to ensure examples get reloaded and user configuration gets reset to
# defaults between multiple runs in the same process.
# | 1 | # rubocop:disable Style/GlobalVars
$_rspec_core_load_started_at = Time.now
# rubocop:enable Style/GlobalVars
require 'rbconfig'
require "rspec/support"
RSpec::Support.require_rspec_support "caller_filter"
RSpec::Support.define_optimized_require_for_rspec(:core) { |f| require_relative f }
%w[
version
warnings
flat_map
filter_manager
dsl
notifications
reporter
hooks
memoized_helpers
metadata
metadata_filter
pending
formatters
ordering
world
configuration
option_parser
configuration_options
runner
example
shared_example_group
example_group
].each { |name| RSpec::Support.require_rspec_core name }
# Namespace for all core RSpec code.
module RSpec
autoload :SharedContext, 'rspec/core/shared_context'
extend RSpec::Core::Warnings
# Used to ensure examples get reloaded and user configuration gets reset to
# defaults between multiple runs in the same process.
#
# Users must invoke this if they want to have the configuration reset when
# they use the runner multiple times within the same process. Users must deal
# themselves with re-configuration of RSpec before run.
def self.reset
@world = nil
@configuration = nil
end
# Used to ensure examples get reloaded between multiple runs in the same
# process and ensures user configuration is persisted.
#
# Users must invoke this if they want to clear all examples but preserve
# current configuration when they use the runner multiple times within the
# same process.
def self.clear_examples
world.reset
configuration.reporter.reset
configuration.start_time = ::RSpec::Core::Time.now
configuration.reset_filters
end
# Returns the global [Configuration](RSpec/Core/Configuration) object. While
# you _can_ use this method to access the configuration, the more common
# convention is to use [RSpec.configure](RSpec#configure-class_method).
#
# @example
# RSpec.configuration.drb_port = 1234
# @see RSpec.configure
# @see Core::Configuration
def self.configuration
@configuration ||= begin
config = RSpec::Core::Configuration.new
config.expose_dsl_globally = true
config
end
end
# Yields the global configuration to a block.
# @yield [Configuration] global configuration
#
# @example
# RSpec.configure do |config|
# config.add_formatter 'documentation'
# end
# @see Core::Configuration
def self.configure
yield configuration if block_given?
end
# The example being executed.
#
# The primary audience for this method is library authors who need access
# to the example currently being executed and also want to support all
# versions of RSpec 2 and 3.
#
# @example
#
# RSpec.configure do |c|
# # context.example is deprecated, but RSpec.current_example is not
# # available until RSpec 3.0.
# fetch_current_example = RSpec.respond_to?(:current_example) ?
# proc { RSpec.current_example } : proc { |context| context.example }
#
# c.before(:example) do
# example = fetch_current_example.call(self)
#
# # ...
# end
# end
#
def self.current_example
thread_local_metadata[:current_example]
end
# Set the current example being executed.
# @api private
def self.current_example=(example)
thread_local_metadata[:current_example] = example
end
# @private
# A single thread local variable so we don't excessively pollute that
# namespace.
def self.thread_local_metadata
Thread.current[:_rspec] ||= { :shared_example_group_inclusions => [] }
end
# @private
# Internal container for global non-configuration data.
def self.world
@world ||= RSpec::Core::World.new
end
# Namespace for the rspec-core code.
module Core
# @private
# This avoids issues with reporting time caused by examples that
# change the value/meaning of Time.now without properly restoring
# it.
class Time
class << self
define_method(:now, &::Time.method(:now))
end
end
# @private path to executable file.
def self.path_to_executable
@path_to_executable ||= File.expand_path('../../../exe/rspec', __FILE__)
end
end
# @private
MODULES_TO_AUTOLOAD = {
:Matchers => "rspec/expectations",
:Expectations => "rspec/expectations",
:Mocks => "rspec/mocks"
}
# @private
def self.const_missing(name)
# Load rspec-expectations when RSpec::Matchers is referenced. This allows
# people to define custom matchers (using `RSpec::Matchers.define`) before
# rspec-core has loaded rspec-expectations (since it delays the loading of
# it to allow users to configure a different assertion/expectation
# framework). `autoload` can't be used since it works with ruby's built-in
# require (e.g. for files that are available relative to a load path dir),
# but not with rubygems' extended require.
#
# As of rspec 2.14.1, we no longer require `rspec/mocks` and
# `rspec/expectations` when `rspec` is required, so we want
# to make them available as an autoload.
require MODULES_TO_AUTOLOAD.fetch(name) { return super }
::RSpec.const_get(name)
end
end
| 1 | 14,389 | I know you don't usually open up the singleton class, but I was getting a `private method called` error when trying to use `self.attr_writer` - any idea? | rspec-rspec-core | rb |
@@ -273,8 +273,13 @@ define(['apphost', 'globalize'], function (appHost, globalize) {
},
canRate: function (item) {
+ var itemType = item.Type;
+
+ if (item.Type === 'Program' || item.Type === 'Timer' || item.Type === 'SeriesTimer' || item.Type === 'CollectionFolder' || item.Type === 'UserView' || item.Type === 'Channel' || itemType === 'Season' || itemType === 'Studio') {
+ return false;
+ }
- if (item.Type === 'Program' || item.Type === 'Timer' || item.Type === 'SeriesTimer' || item.Type === 'CollectionFolder' || item.Type === 'UserView' || item.Type === 'Channel') {
+ if (!item.UserData) {
return false;
}
| 1 | define(['apphost', 'globalize'], function (appHost, globalize) {
'use strict';
function getDisplayName(item, options) {
if (!item) {
throw new Error("null item passed into getDisplayName");
}
options = options || {};
if (item.Type === 'Timer') {
item = item.ProgramInfo || item;
}
var name = ((item.Type === 'Program' || item.Type === 'Recording') && (item.IsSeries || item.EpisodeTitle) ? item.EpisodeTitle : item.Name) || '';
if (item.Type === "TvChannel") {
if (item.ChannelNumber) {
return item.ChannelNumber + ' ' + name;
}
return name;
}
if (item.Type === "Episode" && item.ParentIndexNumber === 0) {
name = globalize.translate('ValueSpecialEpisodeName', name);
} else if ((item.Type === "Episode" || item.Type === 'Program') && item.IndexNumber != null && item.ParentIndexNumber != null && options.includeIndexNumber !== false) {
var displayIndexNumber = item.IndexNumber;
var number = displayIndexNumber;
var nameSeparator = " - ";
if (options.includeParentInfo !== false) {
number = "S" + item.ParentIndexNumber + ":E" + number;
} else {
nameSeparator = ". ";
}
if (item.IndexNumberEnd) {
displayIndexNumber = item.IndexNumberEnd;
number += "-" + displayIndexNumber;
}
if (number) {
name = name ? (number + nameSeparator + name) : number;
}
}
return name;
}
function supportsAddingToCollection(item) {
var invalidTypes = ['Genre', 'MusicGenre', 'Studio', 'UserView', 'CollectionFolder', 'Audio', 'Program', 'Timer', 'SeriesTimer'];
if (item.Type === 'Recording') {
if (item.Status !== 'Completed') {
return false;
}
}
return !item.CollectionType && invalidTypes.indexOf(item.Type) === -1 && item.MediaType !== 'Photo' && !isLocalItem(item);
}
function supportsAddingToPlaylist(item) {
if (item.Type === 'Program') {
return false;
}
if (item.Type === 'TvChannel') {
return false;
}
if (item.Type === 'Timer') {
return false;
}
if (item.Type === 'SeriesTimer') {
return false;
}
if (item.MediaType === 'Photo') {
return false;
}
if (item.Type === 'Recording') {
if (item.Status !== 'Completed') {
return false;
}
}
if (isLocalItem(item)) {
return false;
}
if (item.CollectionType === 'livetv') {
return false;
}
return item.MediaType || item.IsFolder || item.Type === "Genre" || item.Type === "MusicGenre" || item.Type === "MusicArtist";
}
function canEdit(user, item) {
var itemType = item.Type;
if (itemType === "UserRootFolder" || itemType === "UserView") {
return false;
}
if (itemType === 'Program') {
return false;
}
if (itemType === 'Timer') {
return false;
}
if (itemType === 'SeriesTimer') {
return false;
}
if (item.Type === 'Recording') {
if (item.Status !== 'Completed') {
return false;
}
}
if (isLocalItem(item)) {
return false;
}
return user.Policy.IsAdministrator;
}
function isLocalItem(item) {
if (item && item.Id && item.Id.indexOf('local') === 0) {
return true;
}
return false;
}
return {
getDisplayName: getDisplayName,
supportsAddingToCollection: supportsAddingToCollection,
supportsAddingToPlaylist: supportsAddingToPlaylist,
isLocalItem: isLocalItem,
canIdentify: function (user, item) {
var itemType = item.Type;
if (itemType === "Movie" ||
itemType === "Trailer" ||
itemType === "Series" ||
itemType === "BoxSet" ||
itemType === "Person" ||
itemType === "Book" ||
itemType === "MusicAlbum" ||
itemType === "MusicArtist" ||
itemType === "MusicVideo") {
if (user.Policy.IsAdministrator) {
if (!isLocalItem(item)) {
return true;
}
}
}
return false;
},
canEdit: canEdit,
canEditImages: function (user, item) {
var itemType = item.Type;
if (item.MediaType === 'Photo') {
return false;
}
if (itemType === 'UserView') {
if (user.Policy.IsAdministrator) {
return true;
}
return false;
}
if (item.Type === 'Recording') {
if (item.Status !== 'Completed') {
return false;
}
}
return itemType !== 'Timer' && itemType !== 'SeriesTimer' && canEdit(user, item) && !isLocalItem(item);
},
canSync: function (user, item) {
if (user && !user.Policy.EnableContentDownloading) {
return false;
}
if (isLocalItem(item)) {
return false;
}
return item.SupportsSync;
},
canShare: function (item, user) {
if (item.Type === 'Program') {
return false;
}
if (item.Type === 'TvChannel') {
return false;
}
if (item.Type === 'Timer') {
return false;
}
if (item.Type === 'SeriesTimer') {
return false;
}
if (item.Type === 'Recording') {
if (item.Status !== 'Completed') {
return false;
}
}
if (isLocalItem(item)) {
return false;
}
return user.Policy.EnablePublicSharing && appHost.supports('sharing');
},
enableDateAddedDisplay: function (item) {
return !item.IsFolder && item.MediaType && item.Type !== 'Program' && item.Type !== 'TvChannel' && item.Type !== 'Trailer';
},
canMarkPlayed: function (item) {
if (item.Type === 'Program') {
return false;
}
if (item.MediaType === 'Video') {
if (item.Type !== 'TvChannel') {
return true;
}
}
else if (item.MediaType === 'Audio') {
if (item.Type === 'AudioPodcast') {
return true;
}
if (item.Type === 'AudioBook') {
return true;
}
}
if (item.Type === "Series" ||
item.Type === "Season" ||
item.Type === "BoxSet" ||
item.MediaType === "Book" ||
item.MediaType === "Recording") {
return true;
}
return false;
},
canRate: function (item) {
if (item.Type === 'Program' || item.Type === 'Timer' || item.Type === 'SeriesTimer' || item.Type === 'CollectionFolder' || item.Type === 'UserView' || item.Type === 'Channel') {
return false;
}
return true;
},
canConvert: function (item, user) {
if (!user.Policy.EnableMediaConversion) {
return false;
}
if (isLocalItem(item)) {
return false;
}
var mediaType = item.MediaType;
if (mediaType === 'Book' || mediaType === 'Photo' || mediaType === 'Audio') {
return false;
}
var collectionType = item.CollectionType;
if (collectionType === 'livetv') {
return false;
}
var type = item.Type;
if (type === 'Channel' || type === 'Person' || type === 'Year' || type === 'Program' || type === 'Timer' || type === 'SeriesTimer') {
return false;
}
if (item.LocationType === 'Virtual' && !item.IsFolder) {
return false;
}
if (item.IsPlaceHolder) {
return false;
}
return true;
},
canRefreshMetadata: function (item, user) {
if (user.Policy.IsAdministrator) {
var collectionType = item.CollectionType;
if (collectionType === 'livetv') {
return false;
}
if (item.Type !== 'Timer' && item.Type !== 'SeriesTimer' && item.Type !== 'Program' && item.Type !== 'TvChannel' && !(item.Type === 'Recording' && item.Status !== 'Completed')) {
if (!isLocalItem(item)) {
return true;
}
}
}
return false;
},
supportsMediaSourceSelection: function (item) {
if (item.MediaType !== 'Video') {
return false;
}
if (item.Type === 'TvChannel') {
return false;
}
if (!item.MediaSources || (item.MediaSources.length === 1 && item.MediaSources[0].Type === 'Placeholder')) {
return false;
}
if (item.EnableMediaSourceDisplay === false) {
return false;
}
if (item.EnableMediaSourceDisplay == null && item.SourceType && item.SourceType !== 'Library') {
return false;
}
return true;
}
};
});
| 1 | 12,124 | There's no need to create a new variable here, the other checks just use the item type directly. | jellyfin-jellyfin-web | js |
@@ -19,6 +19,14 @@ class Product < ActiveRecord::Base
where product_type: 'book'
end
+ def self.workshops
+ where product_type: 'workshop'
+ end
+
+ def self.videos
+ where product_type: 'video'
+ end
+
def self.ordered
order("name asc")
end | 1 | class Product < ActiveRecord::Base
has_many :purchases
has_many :downloads
has_many :classifications, as: :classifiable
has_many :topics, through: :classifications
has_many :videos
validates_presence_of :name, :sku, :individual_price, :company_price, :fulfillment_method
accepts_nested_attributes_for :downloads, :allow_destroy => true
has_attached_file :product_image, {
styles: { book: '230x300#', video: '153x100#' }
}.merge(PAPERCLIP_STORAGE_OPTIONS)
def self.active
where(active: true)
end
def self.books
where product_type: 'book'
end
def self.ordered
order("name asc")
end
def self.promoted(location)
where(promo_location: location).first
end
def to_param
"#{id}-#{name.parameterize}"
end
def product_type_symbol
self.product_type.split(" ")[0].downcase.to_sym
rescue
"book"
end
def image_url
raw_url = self.product_image.url(product_type_symbol)
product_image_file_name.nil? ? "/assets/#{raw_url}" : raw_url
end
def external?
fulfillment_method == 'external'
end
def self.videos
where product_type: 'video'
end
end
| 1 | 6,564 | Any other possible names? Feels like going with "workshop" for this product_type could increase the confusion between course/workshop in the codebase. | thoughtbot-upcase | rb |
@@ -114,7 +114,10 @@ const baseResolvers = {
return;
}
- yield fetchGetURLChannelsStore.actions.fetchGetURLChannels( accountID, clientID );
+ const { error } = yield fetchGetURLChannelsStore.actions.fetchGetURLChannels( accountID, clientID );
+ if ( error ) {
+ yield errorStoreActions.receiveError( error, 'getURLChannels', [ accountID, clientID ] );
+ }
},
};
| 1 | /**
* `modules/adsense` data store: URL channels.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import invariant from 'invariant';
/**
* Internal dependencies
*/
import API from 'googlesitekit-api';
import Data from 'googlesitekit-data';
import { STORE_NAME } from './constants';
import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store';
import { actions as errorStoreActions } from '../../../googlesitekit/data/create-error-store';
const fetchGetURLChannelsStore = createFetchStore( {
baseName: 'getURLChannels',
controlCallback: ( { accountID, clientID } ) => {
return API.get( 'modules', 'adsense', 'urlchannels', { accountID, clientID }, {
useCache: false,
} );
},
reducerCallback: ( state, urlchannels, { accountID, clientID } ) => {
return {
...state,
urlchannels: {
...state.urlchannels,
[ `${ accountID }::${ clientID }` ]: [ ...urlchannels ],
},
};
},
argsToParams: ( accountID, clientID ) => {
return { accountID, clientID };
},
validateParams: ( { accountID, clientID } = {} ) => {
invariant( accountID, 'accountID is required.' );
invariant( clientID, 'clientID is required.' );
},
} );
// Actions
const RESET_URLCHANNELS = 'RESET_URLCHANNELS';
const baseInitialState = {
urlchannels: {},
};
const baseActions = {
*resetURLChannels() {
const { dispatch } = yield Data.commonActions.getRegistry();
yield {
payload: {},
type: RESET_URLCHANNELS,
};
yield errorStoreActions.clearErrors( 'getURLChannels' );
return dispatch( STORE_NAME )
.invalidateResolutionForStoreSelector( 'getURLChannels' );
},
};
const baseReducer = ( state, { type } ) => {
switch ( type ) {
case RESET_URLCHANNELS: {
const {
siteStatus,
siteSetupComplete,
} = state.savedSettings || {};
return {
...state,
urlchannels: initialState.urlchannels,
settings: {
...( state.settings || {} ),
siteStatus,
siteSetupComplete,
},
};
}
default: {
return state;
}
}
};
const baseResolvers = {
*getURLChannels( accountID, clientID ) {
if ( undefined === accountID || undefined === clientID ) {
return;
}
const registry = yield Data.commonActions.getRegistry();
const existingURLChannels = registry.select( STORE_NAME ).getURLChannels( accountID, clientID );
if ( existingURLChannels ) {
return;
}
yield fetchGetURLChannelsStore.actions.fetchGetURLChannels( accountID, clientID );
},
};
const baseSelectors = {
/**
* Gets all Google AdSense URL channels for this account and client.
*
* @since 1.9.0
*
* @param {Object} state Data store's state.
* @param {string} accountID The AdSense Account ID to fetch URL channels for.
* @param {string} clientID The AdSense Client ID to fetch URL channels for.
* @return {(Array.<Object>|undefined)} An array of AdSense URL channels; `undefined` if not loaded.
*/
getURLChannels( state, accountID, clientID ) {
if ( undefined === accountID || undefined === clientID ) {
return undefined;
}
return state.urlchannels[ `${ accountID }::${ clientID }` ];
},
};
const store = Data.combineStores(
fetchGetURLChannelsStore,
{
initialState: baseInitialState,
actions: baseActions,
reducer: baseReducer,
resolvers: baseResolvers,
selectors: baseSelectors,
}
);
export const initialState = store.initialState;
export const actions = store.actions;
export const controls = store.controls;
export const reducer = store.reducer;
export const resolvers = store.resolvers;
export const selectors = store.selectors;
export default store;
| 1 | 36,633 | @eugene-manuilov Isn't this already taken care of by `fetchGetURLChannels` via `createFetchStore`? Why is the extra `receiveError` call needed here? (This was already in the IB, but just struck me while reviewing here.) | google-site-kit-wp | js |
@@ -22,6 +22,7 @@ import time
from concurrent import futures
import grpc
+import googlecloudprofiler
from google.cloud.forseti.common.util import logger
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Server program."""
# pylint: disable=line-too-long
import argparse
import os
import sys
import time
from concurrent import futures
import grpc
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.base.config import ServiceConfig
from google.cloud.forseti.services.explain.service import GrpcExplainerFactory
from google.cloud.forseti.services.inventory.service import GrpcInventoryFactory
from google.cloud.forseti.services.model.service import GrpcModellerFactory
from google.cloud.forseti.services.notifier.service import GrpcNotifierFactory
from google.cloud.forseti.services.scanner.service import GrpcScannerFactory
from google.cloud.forseti.services.server_config.service import GrpcServerConfigFactory
LOGGER = logger.get_logger(__name__)
SERVICE_MAP = {
'explain': GrpcExplainerFactory,
'inventory': GrpcInventoryFactory,
'scanner': GrpcScannerFactory,
'notifier': GrpcNotifierFactory,
'model': GrpcModellerFactory,
'server': GrpcServerConfigFactory
}
def serve(endpoint,
services,
forseti_db_connect_string,
config_file_path,
log_level,
enable_console_log,
max_workers=32,
wait_shutdown_secs=3):
"""Instantiate the services and serves them via gRPC.
Args:
endpoint (str): the server channel endpoint
services (list): services to register on the server
forseti_db_connect_string (str): Forseti database string
config_file_path (str): Path to Forseti configuration file.
log_level (str): Sets the threshold for Forseti's logger.
enable_console_log (bool): Enable console logging.
max_workers (int): maximum number of workers for the crawler
wait_shutdown_secs (int): seconds to wait before shutdown
Raises:
Exception: No services to start
"""
# Configuring log level for the application
logger.set_logger_level_from_config(log_level)
if enable_console_log:
logger.enable_console_log()
factories = []
for service in services:
factories.append(SERVICE_MAP[service])
if not factories:
raise Exception('No services to start.')
# Server config service is always started.
factories.append(SERVICE_MAP['server'])
config = ServiceConfig(
forseti_config_file_path=config_file_path,
forseti_db_connect_string=forseti_db_connect_string,
endpoint=endpoint)
is_config_updated, error_msg = config.update_configuration()
if not is_config_updated:
update_config_msg = (
'Please update the forseti_conf_server.yaml file on GCS '
'and reset the server VM.')
raise Exception(error_msg + ' ' + update_config_msg)
server = grpc.server(futures.ThreadPoolExecutor(max_workers))
for factory in factories:
factory(config).create_and_register_service(server)
server.add_insecure_port(endpoint)
server.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
server.stop(wait_shutdown_secs).wait()
return
def check_args(args):
"""Make sure the required args are present and valid.
The exit codes are arbitrary and just serve the purpose of facilitating
    distinction between the various error cases.
Args:
args (dict): the command line args
Returns:
tuple: 2-tuple with an exit code and error message.
"""
if not args['services']:
return (1, 'ERROR: please specify at least one service.')
if not args['config_file_path']:
return (2, 'ERROR: please specify the Forseti config file.')
if not os.path.isfile(args['config_file_path']):
return (3, 'ERROR: "%s" is not a file.' % args['config_file_path'])
if not os.access(args['config_file_path'], os.R_OK):
return(4, 'ERROR: "%s" is not readable.' % args['config_file_path'])
if not args['forseti_db']:
return(5, 'ERROR: please specify the Forseti database string.')
return (0, 'All good!')
# pylint: enable=too-many-locals
def main():
"""Run."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--endpoint',
default='[::]:50051',
help='Server endpoint')
parser.add_argument(
'--forseti_db',
help=('Forseti database string, formatted as '
'"mysql://<db_user>@<db_host>:<db_port>/<db_name>"'))
parser.add_argument(
'--config_file_path',
help='Path to Forseti configuration file.')
services = sorted(SERVICE_MAP.keys())
parser.add_argument(
'--services',
nargs='+',
choices=services,
help=('Forseti services i.e. at least one of: %s.' %
', '.join(services)))
parser.add_argument(
'--log_level',
default='info',
choices=['debug', 'info', 'warning', 'error'],
help='Sets the threshold for Forseti\'s logger.'
' Logging messages which are less severe'
' than the level you set will be ignored.')
parser.add_argument(
'--enable_console_log',
action='store_true',
help='Print log to console.')
args = vars(parser.parse_args())
exit_code, error_msg = check_args(args)
if exit_code:
sys.stderr.write('%s\n\n' % error_msg)
parser.print_usage()
sys.exit(exit_code)
serve(args['endpoint'],
args['services'],
args['forseti_db'],
args['config_file_path'],
args['log_level'],
args['enable_console_log'])
if __name__ == '__main__':
main()
| 1 | 34,627 | nit: should go before `grpc`? | forseti-security-forseti-security | py |
@@ -71,7 +71,7 @@ func init() {
}
func testWorkflow() *Workflow {
- ctx, cancel := context.WithCancel(context.Background())
+ cancel := make(chan struct{})
return &Workflow{
Name: testWf,
GCSPath: testGCSPath, | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"regexp"
"strings"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/option"
)
type mockStep struct {
runImpl func(*Workflow) error
validateImpl func(*Workflow) error
}
func (m *mockStep) run(w *Workflow) error {
if m.runImpl != nil {
return m.runImpl(w)
}
return nil
}
func (m *mockStep) validate(w *Workflow) error {
if m.validateImpl != nil {
return m.validateImpl(w)
}
return nil
}
var (
testGCEClient *compute.Client
testGCSClient *storage.Client
testGCSDNEVal = "dne"
testWf = "test-wf"
testProject = "test-project"
testZone = "test-zone"
testGCSPath = "gs://test-bucket"
)
func init() {
var err error
testGCEClient, err = newTestGCEClient()
if err != nil {
panic(err)
}
testGCSClient, err = newTestGCSClient()
if err != nil {
panic(err)
}
}
func testWorkflow() *Workflow {
ctx, cancel := context.WithCancel(context.Background())
return &Workflow{
Name: testWf,
GCSPath: testGCSPath,
Project: testProject,
Zone: testZone,
ComputeClient: testGCEClient,
StorageClient: testGCSClient,
Ctx: ctx,
Cancel: cancel,
diskRefs: &refMap{},
imageRefs: &refMap{},
instanceRefs: &refMap{},
logger: log.New(ioutil.Discard, "", 0),
}
}
func newTestGCEClient() (*compute.Client, error) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" && strings.Contains(r.URL.String(), fmt.Sprintf("/%s/zones/%s/instances/", testProject, testZone)) {
fmt.Fprintln(w, `{"Status":"TERMINATED","SelfLink":"link"}`)
} else if r.Method == "GET" && strings.Contains(r.URL.String(), fmt.Sprintf("/%s/zones/%s/machineTypes", testProject, testZone)) {
fmt.Fprintln(w, `{"Items":[{"Name": "foo-type"}]}`)
} else {
fmt.Fprintln(w, `{"Status":"DONE","SelfLink":"link"}`)
}
}))
return compute.NewClient(context.Background(), option.WithEndpoint(ts.URL), option.WithHTTPClient(http.DefaultClient))
}
func newTestGCSClient() (*storage.Client, error) {
nameRgx := regexp.MustCompile(`"name":"([^"].*)"`)
rewriteRgx := regexp.MustCompile("/b/([^/]+)/o/([^/]+)/rewriteTo/b/([^/]+)/o/([^?]+)")
uploadRgx := regexp.MustCompile("/b/([^/]+)/o?.*uploadType=multipart.*")
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u := r.URL.String()
m := r.Method
if match := uploadRgx.FindStringSubmatch(u); m == "POST" && match != nil {
body, _ := ioutil.ReadAll(r.Body)
n := nameRgx.FindStringSubmatch(string(body))[1]
fmt.Fprintf(w, `{"kind":"storage#object","bucket":"%s","name":"%s"}\n`, match[1], n)
} else if match := rewriteRgx.FindStringSubmatch(u); m == "POST" && match != nil {
if strings.Contains(match[1], testGCSDNEVal) || strings.Contains(match[2], testGCSDNEVal) {
w.WriteHeader(http.StatusNotFound)
}
o := fmt.Sprintf(`{"bucket":"%s","name":"%s"}`, match[3], match[4])
fmt.Fprintf(w, `{"kind": "storage#rewriteResponse", "done": true, "objectSize": "1", "totalBytesRewritten": "1", "resource": %s}\n`, o)
} else {
fmt.Println("got something else")
}
}))
return storage.NewClient(context.Background(), option.WithEndpoint(ts.URL), option.WithHTTPClient(http.DefaultClient))
}
| 1 | 6,318 | Why not using the Cancel? I thought cancelling a "parent" context would cascade to "children" contexts, cancelling them as well. In short, how does cancel work? | GoogleCloudPlatform-compute-image-tools | go |
@@ -28,10 +28,12 @@ func (w *contentTypeOverridingResponseWriter) overrideMimeType(
// by the frontend WebView.
ty := strings.ToLower(mimeType)
switch {
- // First reject anything containing javascript/xml/html.
+ // First anything textual as text/plain.
+ // Includes javascript, html, xml. (note that the type may be e.g. application/xhtml+xml)
case strings.Contains(ty, "javascript") ||
strings.Contains(ty, "xml") ||
- strings.Contains(ty, "html"):
+ strings.Contains(ty, "html") ||
+ strings.HasPrefix(ty, "text/"):
return "text/plain"
// Pass multimedia types through, and pdf too.
case strings.HasPrefix(ty, "audio/") || | 1 | // Copyright 2018 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libhttpserver
import (
"net/http"
"strings"
)
type contentTypeOverridingResponseWriter struct {
original http.ResponseWriter
}
var _ http.ResponseWriter = (*contentTypeOverridingResponseWriter)(nil)
func newContentTypeOverridingResponseWriter(
original http.ResponseWriter) *contentTypeOverridingResponseWriter {
return &contentTypeOverridingResponseWriter{
original: original,
}
}
func (w *contentTypeOverridingResponseWriter) overrideMimeType(
mimeType string) (newMimeType string) {
// Send text/plain for all HTML and JS files to avoid them being executed
// by the frontend WebView.
ty := strings.ToLower(mimeType)
switch {
// First reject anything containing javascript/xml/html.
case strings.Contains(ty, "javascript") ||
strings.Contains(ty, "xml") ||
strings.Contains(ty, "html"):
return "text/plain"
// Pass multimedia types through, and pdf too.
case strings.HasPrefix(ty, "audio/") ||
strings.HasPrefix(ty, "image/") ||
strings.HasPrefix(ty, "video/") ||
ty == "application/pdf":
return ty
// Otherwise text/plain.
default:
return "text/plain"
}
}
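// override rewrites any Content-Type already set on the response via overrideMimeType and adds X-Content-Type-Options: nosniff.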
func (w *contentTypeOverridingResponseWriter) override() {
t := w.original.Header().Get("Content-Type")
if len(t) > 0 {
w.original.Header().Set("Content-Type", w.overrideMimeType(t))
}
w.original.Header().Set("X-Content-Type-Options", "nosniff")
}
func (w *contentTypeOverridingResponseWriter) Header() http.Header {
return w.original.Header()
}
func (w *contentTypeOverridingResponseWriter) WriteHeader(statusCode int) {
w.override()
w.original.WriteHeader(statusCode)
}
func (w *contentTypeOverridingResponseWriter) Write(data []byte) (int, error) {
w.override()
return w.original.Write(data)
}
var additionalMimeTypes = map[string]string{
".go": "text/plain",
".py": "text/plain",
".zsh": "text/plain",
".fish": "text/plain",
".cs": "text/plain",
".rb": "text/plain",
".m": "text/plain",
".mm": "text/plain",
".swift": "text/plain",
".flow": "text/plain",
".php": "text/plain",
".pl": "text/plain",
".sh": "text/plain",
".js": "text/plain",
".json": "text/plain",
".sql": "text/plain",
".rs": "text/plain",
".xml": "text/plain",
".tex": "text/plain",
".pub": "text/plain",
}
| 1 | 20,189 | Could you explain why we default to binary now? It seems it's safer to default to text to avoid stuff getting executed or parsed by browsers accidentally. If we need to add support for particular types, we can add them explicitly right? | keybase-kbfs | go |
@@ -139,6 +139,8 @@ type Config struct {
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
+ OpenstackRegion string `config:"string;;local"`
+
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude string `config:"iface-list;kube-ipvs0"`
| 1 | // Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
)
var (
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude string `config:"iface-list;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"string;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
}
type ProtoPort struct {
Protocol string
Port uint16
}
// Load parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (c *Config) InterfaceExcludes() []string {
return strings.Split(c.InterfaceExclude, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
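// resolve recomputes the effective config by merging raw values from all sources in priority order, parsing and
// validating each known parameter; it reports whether the resolved raw values changed.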
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := apiconfig.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case.
if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
}
if !config.IpInIpEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("IPIP disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
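// loadParams builds the knownParams map by parsing the `config:` struct tags on Config's fields into typed parameter metadata.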
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
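// RawValues returns the raw (string) values that have been loaded into this Config.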
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
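// New returns a Config with every known parameter set to its default, loading the parameter
// metadata on first use and defaulting FelixHostname to the kernel hostname (falling back to
// the HOSTNAME environment variable if that lookup fails).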
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 16,662 | Bit worried about the prefix since this seem to be being used for non-Openstack data too (host endpoints). Should we just leave host endpoint status at the old path (or remove it since AIFAIK, it's not used anywhere)? | projectcalico-felix | c |
@@ -68,7 +68,7 @@ public class Timeline {
private static final Pattern TIMECODE_LINK_REGEX = Pattern.compile("antennapod://timecode/((\\d+))");
private static final String TIMECODE_LINK = "<a class=\"timecode\" href=\"antennapod://timecode/%d\">%s</a>";
- private static final Pattern TIMECODE_REGEX = Pattern.compile("\\b(?:(?:(([0-9][0-9])):))?(([0-9][0-9])):(([0-9][0-9]))\\b");
+ private static final Pattern TIMECODE_REGEX = Pattern.compile("\\b(?:(?:([01]?\\d|2[0-3]):)?([0-5]?\\d):)?([0-5]?\\d)\\b");
private static final Pattern LINE_BREAK_REGEX = Pattern.compile("<br */?>");
| 1 | package de.danoeh.antennapod.core.util.playback;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.support.annotation.ColorInt;
import android.support.annotation.NonNull;
import android.text.TextUtils;
import android.util.Log;
import android.util.TypedValue;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.core.util.ShownotesProvider;
/**
* Connects chapter information and shownotes of a shownotesProvider, for example by making it possible to use the
* shownotes to navigate to another position in the podcast or by highlighting certain parts of the shownotesProvider's
* shownotes.
* <p/>
* A timeline object needs a shownotesProvider from which the chapter information is retrieved and shownotes are generated.
*/
public class Timeline {
private static final String TAG = "Timeline";
private static final String WEBVIEW_STYLE = "@font-face { font-family: 'Roboto-Light'; src: url('file:///android_asset/Roboto-Light.ttf'); } * { color: %s; font-family: roboto-Light; font-size: 13pt; } a { font-style: normal; text-decoration: none; font-weight: normal; color: #00A8DF; } a.timecode { color: #669900; } img { display: block; margin: 10 auto; max-width: %s; height: auto; } body { margin: %dpx %dpx %dpx %dpx; }";
private ShownotesProvider shownotesProvider;
private final String noShownotesLabel;
private final String colorPrimaryString;
private final String colorSecondaryString;
private final int pageMargin;
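    /**
     * Creates a Timeline for the given shownotes provider and caches the theme's primary and
     * secondary text colors plus the page margin used to style the shownotes WebView.
     */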
public Timeline(Context context, ShownotesProvider shownotesProvider) {
if (shownotesProvider == null) {
throw new IllegalArgumentException("shownotesProvider = null");
}
this.shownotesProvider = shownotesProvider;
noShownotesLabel = context.getString(R.string.no_shownotes_label);
TypedArray res = context.getTheme().obtainStyledAttributes(new int[]{android.R.attr.textColorPrimary});
@ColorInt int col = res.getColor(0, 0);
colorPrimaryString = "rgba(" + Color.red(col) + "," + Color.green(col) + "," +
Color.blue(col) + "," + (Color.alpha(col) / 255.0) + ")";
res.recycle();
res = context.getTheme().obtainStyledAttributes(new int[]{android.R.attr.textColorSecondary});
col = res.getColor(0, 0);
colorSecondaryString = "rgba(" + Color.red(col) + "," + Color.green(col) + "," +
Color.blue(col) + "," + (Color.alpha(col) / 255.0) + ")";
res.recycle();
pageMargin = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 8,
context.getResources().getDisplayMetrics()
);
}
private static final Pattern TIMECODE_LINK_REGEX = Pattern.compile("antennapod://timecode/((\\d+))");
private static final String TIMECODE_LINK = "<a class=\"timecode\" href=\"antennapod://timecode/%d\">%s</a>";
private static final Pattern TIMECODE_REGEX = Pattern.compile("\\b(?:(?:(([0-9][0-9])):))?(([0-9][0-9])):(([0-9][0-9]))\\b");
private static final Pattern LINE_BREAK_REGEX = Pattern.compile("<br */?>");
/**
* Applies an app-specific CSS stylesheet and adds timecode links (optional).
* <p/>
* This method does NOT change the original shownotes string of the shownotesProvider object and it should
* also not be changed by the caller.
*
* @param addTimecodes True if this method should add timecode links
* @return The processed HTML string.
*/
public String processShownotes(final boolean addTimecodes) {
final Playable playable = (shownotesProvider instanceof Playable) ? (Playable) shownotesProvider : null;
// load shownotes
String shownotes;
try {
shownotes = shownotesProvider.loadShownotes().call();
} catch (Exception e) {
e.printStackTrace();
return null;
}
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html>" +
"<head>" +
"<style type='text/css'>" +
"html, body { margin: 0; padding: 0; width: 100%; height: 100%; } " +
"html { display: table; }" +
"body { display: table-cell; vertical-align: middle; text-align:center;" +
"-webkit-text-size-adjust: none; font-size: 87%; color: " + colorSecondaryString + ";} " +
"</style>" +
"</head>" +
"<body>" +
"<p>" + noShownotesLabel + "</p>" +
"</body>" +
"</html>";
Log.d(TAG, "shownotes: " + shownotes);
return shownotes;
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
// apply style
String styleStr = String.format(Locale.getDefault(), WEBVIEW_STYLE, colorPrimaryString, "100%",
pageMargin, pageMargin, pageMargin, pageMargin);
document.head().appendElement("style").attr("type", "text/css").text(styleStr);
// apply timecode links
if (addTimecodes) {
Elements elementsWithTimeCodes = document.body().getElementsMatchingOwnText(TIMECODE_REGEX);
Log.d(TAG, "Recognized " + elementsWithTimeCodes.size() + " timecodes");
for (Element element : elementsWithTimeCodes) {
Matcher matcherLong = TIMECODE_REGEX.matcher(element.html());
StringBuffer buffer = new StringBuffer();
while (matcherLong.find()) {
String h = matcherLong.group(1);
String group = matcherLong.group(0);
int time = (h != null) ? Converter.durationStringLongToMs(group) :
Converter.durationStringShortToMs(group);
String rep;
if (playable == null || playable.getDuration() > time) {
rep = String.format(Locale.getDefault(), TIMECODE_LINK, time, group);
} else {
rep = group;
}
matcherLong.appendReplacement(buffer, rep);
}
matcherLong.appendTail(buffer);
element.html(buffer.toString());
}
}
return document.toString();
}
/**
* Returns true if the given link is a timecode link.
*/
public static boolean isTimecodeLink(String link) {
return link != null && link.matches(TIMECODE_LINK_REGEX.pattern());
}
/**
* Returns the time in milliseconds that is attached to this link or -1
     * if the link is not a valid timecode link.
*/
public static int getTimecodeLinkTime(String link) {
if (isTimecodeLink(link)) {
Matcher m = TIMECODE_LINK_REGEX.matcher(link);
try {
if (m.find()) {
return Integer.parseInt(m.group(1));
}
} catch (NumberFormatException e) {
e.printStackTrace();
}
}
return -1;
}
public void setShownotesProvider(@NonNull ShownotesProvider shownotesProvider) {
this.shownotesProvider = shownotesProvider;
}
}
| 1 | 14,492 | This might be a silly question, but what happens to files with durations > 24 hours? | AntennaPod-AntennaPod | java |
@@ -92,7 +92,7 @@ namespace OpenTelemetry.Trace
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal static TracerProviderBuilder AddDiagnosticSourceInstrumentation<TInstrumentation>(
this TracerProviderBuilder tracerProviderBuilder,
- Func<ActivitySourceAdapter, TInstrumentation> instrumentationFactory)
+ Func<TInstrumentation> instrumentationFactory)
where TInstrumentation : class
{
if (instrumentationFactory == null) | 1 | // <copyright file="TracerProviderBuilderExtensions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Trace
{
public static class TracerProviderBuilderExtensions
{
/// <summary>
/// Sets sampler.
/// </summary>
/// <param name="tracerProviderBuilder">TracerProviderBuilder instance.</param>
/// <param name="sampler">Sampler instance.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
public static TracerProviderBuilder SetSampler(this TracerProviderBuilder tracerProviderBuilder, Sampler sampler)
{
if (tracerProviderBuilder is TracerProviderBuilderSdk tracerProviderBuilderSdk)
{
tracerProviderBuilderSdk.SetSampler(sampler);
}
return tracerProviderBuilder;
}
/// <summary>
/// Sets the <see cref="ResourceBuilder"/> from which the Resource associated with
/// this provider is built from. Overwrites currently set ResourceBuilder.
/// </summary>
/// <param name="tracerProviderBuilder">TracerProviderBuilder instance.</param>
/// <param name="resourceBuilder"><see cref="ResourceBuilder"/> from which Resource will be built.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
public static TracerProviderBuilder SetResourceBuilder(this TracerProviderBuilder tracerProviderBuilder, ResourceBuilder resourceBuilder)
{
if (tracerProviderBuilder is TracerProviderBuilderSdk tracerProviderBuilderSdk)
{
tracerProviderBuilderSdk.SetResourceBuilder(resourceBuilder);
}
return tracerProviderBuilder;
}
/// <summary>
/// Adds processor to the provider.
/// </summary>
/// <param name="tracerProviderBuilder">TracerProviderBuilder instance.</param>
/// <param name="processor">Activity processor to add.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
public static TracerProviderBuilder AddProcessor(this TracerProviderBuilder tracerProviderBuilder, BaseProcessor<Activity> processor)
{
if (tracerProviderBuilder is TracerProviderBuilderSdk tracerProviderBuilderSdk)
{
tracerProviderBuilderSdk.AddProcessor(processor);
}
return tracerProviderBuilder;
}
public static TracerProvider Build(this TracerProviderBuilder tracerProviderBuilder)
{
if (tracerProviderBuilder is TracerProviderBuilderSdk tracerProviderBuilderSdk)
{
return tracerProviderBuilderSdk.Build();
}
return null;
}
/// <summary>
/// Adds a DiagnosticSource based instrumentation.
        /// This is required for libraries which are already instrumented with
/// DiagnosticSource and Activity, without using ActivitySource.
/// </summary>
/// <typeparam name="TInstrumentation">Type of instrumentation class.</typeparam>
/// <param name="tracerProviderBuilder">TracerProviderBuilder instance.</param>
/// <param name="instrumentationFactory">Function that builds instrumentation.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal static TracerProviderBuilder AddDiagnosticSourceInstrumentation<TInstrumentation>(
this TracerProviderBuilder tracerProviderBuilder,
Func<ActivitySourceAdapter, TInstrumentation> instrumentationFactory)
where TInstrumentation : class
{
if (instrumentationFactory == null)
{
throw new ArgumentNullException(nameof(instrumentationFactory));
}
if (tracerProviderBuilder is TracerProviderBuilderSdk tracerProviderBuilderSdk)
{
tracerProviderBuilderSdk.AddDiagnosticSourceInstrumentation(instrumentationFactory);
}
return tracerProviderBuilder;
}
}
}
| 1 | 19,242 | `AddDiagnosticSourceInstrumentation` method can now be eliminated and simply use `AddInstrumentation` | open-telemetry-opentelemetry-dotnet | .cs |
@@ -78,9 +78,9 @@ describe Bolt::Project do
describe "with namespaced project names" do
let(:config) { { 'name' => 'puppetlabs-foo' } }
- it "strips namespace and hyphen" do
- project = Bolt::Project.new(config, pwd)
- expect(project.name).to eq('foo')
+ it "raises an error" do
+ expect { Bolt::Project.new(config, pwd).validate }
+ .to raise_error(/Invalid project name 'puppetlabs-foo' in bolt-project.yaml/)
end
end
end | 1 | # frozen_string_literal: true
require 'spec_helper'
require 'bolt/project'
describe Bolt::Project do
it "loads from system-wide config path if homedir expansion fails" do
allow(File).to receive(:expand_path).and_call_original
allow(File)
.to receive(:expand_path)
.with(File.join('~', '.puppetlabs', 'bolt'))
.and_raise(ArgumentError, "couldn't find login name -- expanding `~'")
project = Bolt::Project.default_project
# we have to call expand_path to ensure C:/ instead of C:\ on Windows
expect(project.path.to_s).to eq(File.expand_path(Bolt::Project.system_path))
end
describe "configuration" do
let(:pwd) { @tmpdir }
let(:config) { { 'tasks' => ['facts'] } }
around(:each) do |example|
Dir.mktmpdir("foo") do |tmpdir|
@tmpdir = Pathname.new(File.join(tmpdir, "validprojectname"))
FileUtils.mkdir_p(@tmpdir)
FileUtils.touch(@tmpdir + 'bolt-project.yaml')
example.run
end
end
it "loads config with defaults" do
project = Bolt::Project.new(config, pwd)
expect(project.tasks).to eq(config['tasks'])
expect(project.plans).to eq(nil)
end
context 'with bolt config values' do
let(:config) {
{
'concurrency' => 20,
'transport' => 'ssh',
'ssh' => {
'user' => 'blueberry'
}
}
}
it 'loads config' do
project = Bolt::Project.new(config, pwd)
expect(project.data['concurrency']).to eq(20)
end
it "ignores transport config" do
project = Bolt::Project.new(config, pwd)
expect(project.data.key?('ssh')).to be false
expect(project.data.key?('transport')).to be false
end
end
describe "with invalid tasks config" do
let(:config) { { 'tasks' => 'foo' } }
it "raises an error" do
expect { Bolt::Project.new(config, pwd).validate }
.to raise_error(/'tasks' in bolt-project.yaml must be an array/)
end
end
describe "with invalid name config" do
let(:config) { { 'name' => '_invalid' } }
it "raises an error" do
expect { Bolt::Project.new(config, pwd).validate }
.to raise_error(/Invalid project name '_invalid' in bolt-project.yaml/)
end
end
describe "with namespaced project names" do
let(:config) { { 'name' => 'puppetlabs-foo' } }
it "strips namespace and hyphen" do
project = Bolt::Project.new(config, pwd)
expect(project.name).to eq('foo')
end
end
end
describe "::find_boltdir" do
let(:boltdir_path) { @tmpdir + 'foo' + 'Boltdir' }
let(:project) { Bolt::Project.new({}, boltdir_path) }
around(:each) do |example|
Dir.mktmpdir do |tmpdir|
@tmpdir = Pathname.new(tmpdir)
FileUtils.mkdir_p(boltdir_path)
example.run
end
end
describe "when the project directory is named Boltdir" do
it 'finds project from inside project' do
pwd = boltdir_path
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
it 'finds project from the parent directory' do
pwd = boltdir_path.parent
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
it 'does not find project from the grandparent directory' do
pwd = boltdir_path.parent.parent
expect(Bolt::Project.find_boltdir(pwd)).not_to eq(project)
end
it 'finds the project from a sibling directory' do
pwd = boltdir_path.parent + 'bar'
FileUtils.mkdir_p(pwd)
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
it 'finds the project from a child directory' do
pwd = boltdir_path + 'baz'
FileUtils.mkdir_p(pwd)
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
end
describe "when using a control repo-style project" do
it 'uses the current directory if it has a bolt.yaml' do
pwd = @tmpdir
FileUtils.touch(pwd + 'bolt.yaml')
expect(Bolt::Project.find_boltdir(pwd)).to eq(Bolt::Project.new({}, pwd))
end
it 'ignores non-project children with bolt.yaml' do
pwd = @tmpdir
FileUtils.mkdir_p(pwd + 'bar')
FileUtils.touch(pwd + 'bar' + 'bolt.yaml')
expect(Bolt::Project.find_boltdir(pwd)).to eq(Bolt::Project.default_project)
end
it 'prefers a directory called Boltdir over the local directory' do
pwd = boltdir_path.parent
FileUtils.touch(pwd + 'bolt.yaml')
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
it 'prefers a directory called Boltdir over the parent directory' do
pwd = boltdir_path.parent + 'bar'
FileUtils.mkdir_p(pwd)
FileUtils.touch(boltdir_path.parent + 'bolt.yaml')
expect(Bolt::Project.find_boltdir(pwd)).to eq(project)
end
end
describe 'when setting a type' do
it 'sets type to embedded when a project is used' do
pwd = boltdir_path.parent
expect(Bolt::Project.find_boltdir(pwd).type).to eq('embedded')
end
it 'sets type to local when a bolt.yaml is used' do
pwd = @tmpdir
FileUtils.touch(pwd + 'bolt.yaml')
expect(Bolt::Project.find_boltdir(pwd).type).to eq('local')
end
it 'sets type to user when the default is used' do
pwd = @tmpdir
expect(Bolt::Project.find_boltdir(pwd).type).to eq('user')
end
end
it 'returns the default when no project is found' do
pwd = @tmpdir
expect(Bolt::Project.find_boltdir(pwd)).to eq(Bolt::Project.default_project)
end
end
end
| 1 | 15,094 | I think we also need to add `name:` to the bolt-project.yaml files in `spec/fixtures/projects`. | puppetlabs-bolt | rb |
@@ -3225,6 +3225,11 @@ func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
return nil
}
+// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
+// TLF's current unmerged branch and unmerged branch, between the
+// merge point for the branch and the current head. The returned MDs
+// are the same instances that are stored in the MD cache, so they
+// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []*RootMetadata, error) { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
"golang.org/x/net/context"
)
// mdReqType indicates whether an operation makes MD modifications or not
type mdReqType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReqType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
// A write request.
mdWrite
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
maxParallelBlockPuts = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Time between checks for dirty files to flush, in case Sync is
// never called.
secondsBetweenBackgroundFlushes = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * (512 << 10)
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead = 2
fboBlock = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch.  The goal with our locking strategy is to maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
head *RootMetadata
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
headLock leveledRWMutex // protects access to the MD
blocks folderBlockOps
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
//  - Operations that take mdWriterLock always need the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Set to true when we have staged, unmerged commits for this
// device. This means the device has forked from the main branch
// seen by other devices. Protected by mdWriterLock.
staged bool
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log logger.Logger
deferLog logger.Logger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
// rekeyWithPromptTimer tracks a timed function that will try to
// rekey with a paper key prompt, if enough time has passed.
// Protected by mdWriterLock
rekeyWithPromptTimer *time.Timer
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
nodeCache := newNodeCacheStandard(fb)
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
unrefCache: make(map[blockRef]*syncInfo),
deCache: make(map[blockRef]DirEntry),
deferredWrites: make(
[]func(context.Context, *lockState, *RootMetadata, path) error, 0),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: log,
deferLog: log.CloneWithAddedDepth(1),
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second)
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this TLF is identified and marks
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown() error {
if fbo.config.CheckStateOnShutdown() {
ctx := context.TODO()
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if fbo.getStaged(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() TlfID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
name string, public bool) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
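// addToFavorites asynchronously adds this TLF to the given favorites list; it is a
// no-op when no user is currently logged in.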
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == nil {
return errors.New("Can't add a favorite without a handle")
}
h := head.GetTlfHandle()
fav := h.toFavorite(created)
favorites.AddAsync(ctx, fav)
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == nil {
return errors.New("Can't delete a favorite without a handle")
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
// getStaged should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) getStaged(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.staged
}
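// getHead returns the current head MD for this folder, read under headLock.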
func (fbo *folderBranchOps) getHead(lState *lockState) *RootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.head
}
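// setStagedLocked records whether this device has forked onto an unmerged (staged)
// branch, and which branch ID it is on.  mdWriterLock must be held by the caller.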
func (fbo *folderBranchOps) setStagedLocked(
lState *lockState, staged bool, bid BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
fbo.staged = staged
fbo.bid = bid
if !staged {
fbo.status.setCRChains(nil, nil)
}
}
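// checkDataVersion verifies that the given block pointer's data version is within the
// range this client understands.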
func (fbo *folderBranchOps) checkDataVersion(p path, ptr BlockPointer) error {
if ptr.DataVer < FirstValidDataVer {
return InvalidDataVersionError{ptr.DataVer}
}
// TODO: migrate back to fbo.config.DataVersion
if ptr.DataVer > FilesWithHolesDataVer {
return NewDataVersionError{p, ptr.DataVer}
}
return nil
}
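// setHeadLocked stores the given MD as the new head (when its ID differs from the
// current head), kicks off conflict resolution or the update-registration goroutine
// when appropriate, and sends notifications when the TLF handle changes or the folder
// becomes readable.  Both mdWriterLock and headLock must be held.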
func (fbo *folderBranchOps) setHeadLocked(ctx context.Context,
lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == nil
var oldHandle *TlfHandle
wasReadable := false
if !isFirstHead {
wasReadable = fbo.head.IsReadable()
mdID, err := md.MetadataID(fbo.config)
if err != nil {
return err
}
headID, err := fbo.head.MetadataID(fbo.config)
if err != nil {
return err
}
if headID == mdID {
// only save this new MD if the MDID has changed
return nil
}
oldHandle = fbo.head.GetTlfHandle()
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision)
err := fbo.config.MDCache().Put(md)
if err != nil {
return err
}
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == Unmerged {
fbo.setStagedLocked(lState, true, md.BID)
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(md.Revision, MetadataRevisionUninitialized)
}
fbo.head = md
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
} else if h := fbo.head.GetTlfHandle(); !reflect.DeepEqual(oldHandle, h) {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldHandle.GetCanonicalName(), h.GetCanonicalName())
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, h)
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle().GetCanonicalName(), md.ID.IsPublic()))
}
return nil
}
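// identifyOnce runs identifies on the users in this TLF's handle, unless an identify
// has already been done recently; on success it records the identify time.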
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md *RootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone {
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
return nil
}
// if rtype == mdWrite || mdRekey, then mdWriterLock must be taken
func (fbo *folderBranchOps) getMDLocked(
ctx context.Context, lState *lockState, rtype mdReqType) (
md *RootMetadata, err error) {
defer func() {
if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md)
}()
md = fbo.getHead(lState)
if md != nil {
return md, nil
}
// Unless we're in mdWrite or mdRekey mode, we can't safely fetch
// the new MD without causing races, so bail.
if rtype != mdWrite && rtype != mdRekey {
return nil, MDWriteNeededInRequest{}
}
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
if err != nil {
return nil, err
}
if md == nil {
// no unmerged MDs for this device, so just get the current head
md, err = mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return nil, err
}
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
err = fbo.initMDLocked(ctx, lState, md)
if err != nil {
return nil, err
}
} else {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return nil, err
}
}
return md, err
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReqType) (*RootMetadata, error) {
md, err := fbo.getMDLocked(ctx, lState, rtype)
if err != nil {
return nil, err
}
if !md.ID.IsPublic() {
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsReader(uid) {
return nil, NewReadAccessError(md.GetTlfHandle(), username)
}
}
return md, nil
}
// getMDForFBM is a helper method for the folderBlockManager only.
func (fbo *folderBranchOps) getMDForFBM(ctx context.Context) (
*RootMetadata, error) {
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
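// getMDForWriteLocked returns a new successor of the current head MD that the caller
// can modify; it fails if the current user is not a writer of this TLF.  mdWriterLock
// must be held.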
func (fbo *folderBranchOps) getMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdWrite)
if err != nil {
return nil, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsWriter(uid) {
return nil,
NewWriteAccessError(md.GetTlfHandle(), username)
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into
// syncBlockAndCheckEmbedLocked or the changes will be lost.
newMd, err := md.MakeSuccessor(fbo.config, true)
if err != nil {
return nil, err
}
return newMd, nil
}
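// getMDForRekeyWriteLocked returns a successor of the current head MD for use in a
// rekey, along with whether the rekey bit was already set.  Readers are allowed
// through only if they won't modify the writer metadata.  mdWriterLock must be held.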
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (rmd *RootMetadata, wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdRekey)
if err != nil {
return nil, false, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, false, err
}
handle := md.GetTlfHandle()
// must be a reader or writer (it checks both.)
if !handle.IsReader(uid) {
return nil, false,
NewRekeyPermissionError(md.GetTlfHandle(), username)
}
newMd, err := md.MakeSuccessor(fbo.config, handle.IsWriter(uid))
if err != nil {
return nil, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() {
return nil, false,
NewRekeyPermissionError(handle, username)
}
return newMd, md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
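// initMDLocked creates the root directory block for a brand-new TLF, performs the
// initial rekey for private folders, writes the first metadata revision to the
// server, and sets it as the head.  mdWriterLock must be held.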
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// create a dblock since one doesn't exist yet
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
if !handle.IsWriter(uid) {
return NewWriteAccessError(handle, username)
}
newDblock := &DirBlock{
Children: make(map[string]DirEntry),
}
var expectedKeyGen KeyGen
var tlfCryptKey *TLFCryptKey
if md.ID.IsPublic() {
expectedKeyGen = PublicKeyGen
} else {
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return fmt.Errorf("Initial rekey unexpectedly not done for private TLF %v", md.ID)
}
expectedKeyGen = FirstValidKeyGen
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return InvalidKeyGenerationError{handle, keyGen}
}
info, plainSize, readyBlockData, err :=
fbo.blocks.ReadyBlock(ctx, md, newDblock, uid)
if err != nil {
return err
}
now := fbo.nowUnixNano()
md.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
md.AddOp(newCreateOp("", BlockPointer{}, Dir))
md.AddRefBlock(md.data.Dir.BlockInfo)
md.UnrefBytes = 0
if err = fbo.config.BlockOps().Put(ctx, md, info.BlockPointer,
readyBlockData); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
// finally, write out the new metadata
if err = fbo.config.MDOps().Put(ctx, md); err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != nil {
headID, _ := fbo.head.MetadataID(fbo.config)
return fmt.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.ID, headID)
}
	err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(md.ID, keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
err = errors.New("GetOrCreateRootNode is not supported by " +
"folderBranchOps")
return
}
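// checkNode ensures the given node belongs to this folder-branch.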
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// CheckForNewMDAndInit sees whether the given MD object has been
// initialized yet; if not, it does so.
func (fbo *folderBranchOps) CheckForNewMDAndInit(
ctx context.Context, md *RootMetadata) (created bool, err error) {
fbo.log.CDebugf(ctx, "CheckForNewMDAndInit, revision=%d (%s)",
md.Revision, md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v, created: %t", err, created)
}()
err = runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.ID, MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md)
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.data.Dir.Type == Dir {
// this MD is already initialized
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == nil {
err := fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
}
// Initialize if needed
created = true
return fbo.initMDLocked(ctx, lState, md)
})
if err != nil {
return false, err
}
return created, nil
}
// execMDReadNoIdentifyThenMDWrite first tries to execute the
// passed-in method in mdReadNoIdentify mode. If it fails with an
// MDWriteNeededInRequest error, it re-executes the method as in
// mdWrite mode. The passed-in method must note whether or not this
// is an mdWrite call.
//
// This must only be used by getRootNode().
func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite(
lState *lockState, f func(*lockState, mdReqType) error) error {
err := f(lState, mdReadNoIdentify)
// Redo as an MD write request if needed
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
err = f(lState, mdWrite)
}
return err
}
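// getRootNode returns the Node for this TLF's root directory, along with its entry
// info and handle, fetching (or initializing) the MD if necessary.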
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
// node may still be nil if we're unwinding
// from a panic.
fbo.deferLog.CDebugf(ctx, "Done: %v", node)
}
}()
lState := makeFBOLockState()
var md *RootMetadata
err = fbo.execMDReadNoIdentifyThenMDWrite(lState,
func(lState *lockState, rtype mdReqType) error {
md, err = fbo.getMDLocked(ctx, lState, rtype)
return err
})
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := md.isReadableOrError(ctx, fbo.config); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %p", dir.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done GetDirChildren: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md, dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
childPath := dirPath.ChildPathNoPtr(name)
de, err = fbo.blocks.GetDirtyEntry(ctx, lState, md, childPath)
if err != nil {
return err
}
if de.Type == Sym {
node = nil
} else {
err = fbo.checkDataVersion(childPath, de.BlockPointer)
if err != nil {
return err
}
node, err = fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md *RootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntry(ctx, lState, md, nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
}
return de, nil
}
var zeroPtr BlockPointer
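// blockState tracks a single block that is ready to be put to the block server,
// along with an optional callback to run once the put completes.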
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %p", node.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb})
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
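// readyBlockMultiple encrypts and encodes the given block, adds it to the block put
// state, and returns its new block info and plaintext size.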
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
md *RootMetadata, currBlock Block, uid keybase1.UID, bps *blockPutState) (
info BlockInfo, plainSize int, err error) {
info, plainSize, readyBlockData, err :=
fbo.blocks.ReadyBlock(ctx, md, currBlock, uid)
if err != nil {
return
}
bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
return
}
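// unembedBlockChanges encodes the given block changes into a separate file block
// (instead of embedding them in the MD), adds that block to the put state, and
// updates the MD's byte counts accordingly.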
func (fbo *folderBranchOps) unembedBlockChanges(
ctx context.Context, bps *blockPutState, md *RootMetadata,
changes *BlockChanges, uid keybase1.UID) (err error) {
buf, err := fbo.config.Codec().Encode(changes)
if err != nil {
return
}
block := NewFileBlock().(*FileBlock)
block.Contents = buf
info, _, err := fbo.readyBlockMultiple(ctx, md, block, uid, bps)
if err != nil {
return
}
md.data.cachedChanges = *changes
changes.Info = info
changes.Ops = nil
md.RefBytes += uint64(info.EncodedSize)
md.DiskUsage += uint64(info.EncodedSize)
return
}
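// localBcache is a local cache of dirty directory blocks, keyed by block pointer,
// used to carry modifications between successive syncBlock calls.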
type localBcache map[BlockPointer]*DirBlock
// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID. It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves. It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fbo *folderBranchOps) syncBlock(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
// now ready each dblock and write the DirEntry for the next one
// in the path
currBlock := newBlock
currName := name
newPath := path{
FolderBranch: dir.FolderBranch,
path: make([]pathNode, 0, len(dir.path)),
}
bps := newBlockPutState(len(dir.path))
refPath := dir.ChildPathNoPtr(name)
var newDe DirEntry
doSetTime := true
now := fbo.nowUnixNano()
for len(newPath.path) < len(dir.path)+1 {
info, plainSize, err :=
fbo.readyBlockMultiple(ctx, md, currBlock, uid, bps)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// prepend to path and setup next one
newPath.path = append([]pathNode{{info.BlockPointer, currName}},
newPath.path...)
// get the parent block
prevIdx := len(dir.path) - len(newPath.path)
var prevDblock *DirBlock
var de DirEntry
var nextName string
nextDoSetTime := false
if prevIdx < 0 {
// root dir, update the MD instead
de = md.data.Dir
} else {
prevDir := path{
FolderBranch: dir.FolderBranch,
path: dir.path[:prevIdx+1],
}
// First, check the localBcache, which could contain
// blocks that were modified across multiple calls to
// syncBlock.
var ok bool
prevDblock, ok = lbc[prevDir.tailPointer()]
if !ok {
// If the block isn't in the local bcache, we
// have to fetch it, possibly from the
// network. Directory blocks are only ever
// modified while holding mdWriterLock, so it's
// safe to fetch them one at a time.
prevDblock, err = fbo.blocks.GetDir(
ctx, lState, md, prevDir, blockWrite)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
// modify the direntry for currName; make one
// if it doesn't exist (which should only
// happen the first time around).
//
// TODO: Pull the creation out of here and
// into createEntryLocked().
if de, ok = prevDblock.Children[currName]; !ok {
// If this isn't the first time
// around, we have an error.
if len(newPath.path) > 1 {
return path{}, DirEntry{}, nil, NoSuchNameError{currName}
}
// If this is a file, the size should be 0. (TODO:
// Ensure this.) If this is a directory, the size will
// be filled in below. The times will be filled in
// below as well, since we should only be creating a
// new directory entry when doSetTime is true.
de = DirEntry{
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
},
}
// If we're creating a new directory entry, the
// parent's times must be set as well.
nextDoSetTime = true
}
currBlock = prevDblock
nextName = prevDir.tailName()
}
if de.Type == Dir {
// TODO: When we use indirect dir blocks,
// we'll have to calculate the size some other
// way.
de.Size = uint64(plainSize)
}
if prevIdx < 0 {
md.AddUpdate(md.data.Dir.BlockInfo, info)
} else if prevDe, ok := prevDblock.Children[currName]; ok {
md.AddUpdate(prevDe.BlockInfo, info)
} else {
// this is a new block
md.AddRefBlock(info)
}
if len(refPath.path) > 1 {
refPath = *refPath.parentPath()
}
de.BlockInfo = info
if doSetTime {
if mtime {
de.Mtime = now
}
if ctime {
de.Ctime = now
}
}
if !newDe.IsInitialized() {
newDe = de
}
if prevIdx < 0 {
md.data.Dir = de
} else {
prevDblock.Children[currName] = de
}
currName = nextName
// Stop before we get to the common ancestor; it will be taken care of
// on the next sync call
if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
// Put this back into the cache as dirty -- the next
// syncBlock call will ready it.
dblock, ok := currBlock.(*DirBlock)
if !ok {
return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
}
lbc[stopAt] = dblock
break
}
doSetTime = nextDoSetTime
}
return newPath, newDe, bps, nil
}
// syncBlockLocked calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name,
entryType, mtime, ctime, stopAt, lbc)
}
// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
return fbo.syncBlock(
ctx, lState, uid, md, newBlock, dir,
name, entryType, mtime, ctime, stopAt, lbc)
}
// entryType must not be Sym.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, lbc localBcache) (
path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return path{}, DirEntry{}, nil, err
}
newPath, newDe, bps, err := fbo.syncBlockLocked(
ctx, lState, uid, md, newBlock, dir, name, entryType, mtime,
ctime, stopAt, lbc)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// do the block changes need their own blocks?
bsplit := fbo.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes,
uid)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
return newPath, newDe, bps, nil
}
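// isRecoverableBlockError returns true if err is a block server
// error (block archived, deleted, or non-existent) from which the
// caller can recover by redoing the operation with fresh blocks.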
func isRecoverableBlockError(err error) bool {
_, isArchiveError := err.(BServerErrorBlockArchived)
_, isDeleteError := err.(BServerErrorBlockDeleted)
_, isRefError := err.(BServerErrorBlockNonExistent)
return isArchiveError || isDeleteError || isRefError
}
func isRetriableError(err error, retries int) bool {
recoverable := isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
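// doOneBlockPut puts a single block to the block server and then
// runs its synced callback, if any. On a recoverable error the
// affected (non-indirect) file block is reported on
// blocksToRemoveChan; any error is reported on errChan without
// blocking, so only the first error is kept.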
func (fbo *folderBranchOps) doOneBlockPut(ctx context.Context,
md *RootMetadata, blockState blockState,
errChan chan error, blocksToRemoveChan chan *FileBlock) {
err := fbo.config.BlockOps().
Put(ctx, md, blockState.blockPtr, blockState.readyBlockData)
if err == nil && blockState.syncedCb != nil {
err = blockState.syncedCb()
}
if err != nil {
if isRecoverableBlockError(err) {
fblock, ok := blockState.block.(*FileBlock)
if ok && !fblock.IsInd {
blocksToRemoveChan <- fblock
}
}
// one error causes everything else to cancel
select {
case errChan <- err:
default:
return
}
}
}
// doBlockPuts writes all the pending block puts to the cache and
// server. If the err returned by this function satisfies
// isRecoverableBlockError(err), the caller should retry its entire
// operation, starting from when the MD successor was created.
//
// Returns a slice of block pointers that resulted in recoverable
// errors and should be removed by the caller from any saved state.
func (fbo *folderBranchOps) doBlockPuts(ctx context.Context,
md *RootMetadata, bps blockPutState) ([]BlockPointer, error) {
errChan := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
blocks := make(chan blockState, len(bps.blockStates))
var wg sync.WaitGroup
numWorkers := len(bps.blockStates)
if numWorkers > maxParallelBlockPuts {
numWorkers = maxParallelBlockPuts
}
wg.Add(numWorkers)
// A channel to list any blocks that have been archived or
// deleted. Any of these will result in an error, so the maximum
// we'll get is the same as the number of workers.
blocksToRemoveChan := make(chan *FileBlock, numWorkers)
worker := func() {
defer wg.Done()
for blockState := range blocks {
fbo.doOneBlockPut(ctx, md, blockState, errChan, blocksToRemoveChan)
select {
// return early if the context has been canceled
case <-ctx.Done():
return
default:
}
}
}
for i := 0; i < numWorkers; i++ {
go worker()
}
for _, blockState := range bps.blockStates {
blocks <- blockState
}
close(blocks)
go func() {
wg.Wait()
close(errChan)
close(blocksToRemoveChan)
}()
err := <-errChan
var blocksToRemove []BlockPointer
if isRecoverableBlockError(err) {
bcache := fbo.config.BlockCache()
// Wait for all the outstanding puts to finish, to amortize
// the work of re-doing the put.
for fblock := range blocksToRemoveChan {
for i, bs := range bps.blockStates {
if bs.block == fblock {
// Let the caller know which blocks shouldn't be
// retried.
blocksToRemove = append(blocksToRemove,
bps.blockStates[i].blockPtr)
}
}
// Remove each problematic block from the cache so the
// redo can just make a new block instead.
if err := bcache.DeleteKnownPtr(fbo.id(), fblock); err != nil {
fbo.log.CWarningf(ctx, "Couldn't delete ptr for a block: %v",
err)
}
}
}
return blocksToRemove, err
}
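// finalizeBlocks caches every brand-new block in bps as a transient
// entry in the block cache, skipping blocks that were merely
// incref'd.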
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func (fbo *folderBranchOps) isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(MDServerErrorConflictRevision)
_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
_, isConditionFailed := err.(MDServerErrorConditionFailed)
_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping
}
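// finalizeMDWriteLocked writes the new metadata out to the MD
// server, switching to (or staying on) an unmerged branch if the put
// hits a revision conflict. On success it caches the new blocks,
// updates the head, archives the newly unreferenced blocks, and
// notifies observers of the batch of changes.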
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut, wasStaged := true, fbo.staged
mergedRev := MetadataRevisionUninitialized
if !fbo.staged {
// only do a normal Put if we're not already staged.
err = mdops.Put(ctx, md)
doUnmergedPut = fbo.isRevisionConflict(err)
if err != nil && !doUnmergedPut {
return err
}
// The first time we transition, our last known MD revision is
// the same (at least) as what we thought our new revision
// should be. Otherwise, just leave it at uninitialized and
// let the resolver sort it out.
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision
}
}
if doUnmergedPut {
// We're out of date, so put it as an unmerged MD.
var bid BranchID
if !wasStaged {
// new branch ID
crypto := fbo.config.Crypto()
if bid, err = crypto.MakeRandomBranchID(); err != nil {
return err
}
} else {
bid = fbo.bid
}
err := mdops.PutUnmerged(ctx, md, bid)
if err != nil {
// TODO: if this is a conflict error, we should try to
// fast-forward to the most recent revision after
// returning this error.
return err
}
fbo.setStagedLocked(lState, true, bid)
fbo.cr.Resolve(md.Revision, mergedRev)
} else {
if fbo.staged {
// If we were staged, prune all unmerged history now
err = fbo.config.MDServer().PruneBranch(ctx, fbo.id(), fbo.bid)
if err != nil {
return err
}
}
fbo.setStagedLocked(lState, false, NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.ID)
}
}
md.swapCachedBlockChanges()
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
// Archive the old, unref'd blocks
fbo.fbm.archiveUnrefBlocks(md)
fbo.notifyBatchLocked(ctx, lState, md)
return nil
}
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
err = fbo.config.MDOps().Put(ctx, md)
isConflict := fbo.isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// drop this block. we've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. we'll queue another rekey just in case. it should
// be safe as it's idempotent. we don't want any rekeys present
// in unmerged history or that will just make a mess.
fbo.config.RekeyQueue().Enqueue(md.ID)
return err
}
fbo.setStagedLocked(lState, false, NullBranchID)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.setHeadLocked(ctx, lState, md)
}
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *gcOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
if !fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
bps := newBlockPutState(1)
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid)
if err != nil {
return err
}
ptrsToDelete, err := fbo.doBlockPuts(ctx, md, *bps)
if err != nil {
return err
}
if len(ptrsToDelete) > 0 {
return fmt.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
}
// finally, write out the new metadata
err = fbo.config.MDOps().Put(ctx, md)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setStagedLocked(lState, false, NullBranchID)
md.swapCachedBlockChanges()
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, md)
return nil
}
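// syncBlockAndFinalizeLocked readies newBlock and its parent
// directories, puts all of the resulting blocks to the server, and
// finalizes the MD write, returning the new DirEntry for newBlock.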
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer) (de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newBlock, dir, name, entryType, mtime,
ctime, zeroPtr, nil)
if err != nil {
return DirEntry{}, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md, bps)
}
}()
_, err = fbo.doBlockPuts(ctx, md, *bps)
if err != nil {
return DirEntry{}, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps)
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func checkDisallowedPrefixes(name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
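// checkNewDirSize returns an error if adding an entry named newName
// to the directory at dirPath would push the directory's
// (approximate) size past the configured MaxDirBytes limit.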
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md *RootMetadata, dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType) (Node, DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(ctx, lState, md, dirPath, blockWrite)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(ctx, lState, md, dirPath, name); err != nil {
return nil, DirEntry{}, err
}
md.AddOp(newCreateOp(name, dirPath.tailPointer(), entryType))
// create new data block
var newBlock Block
// XXX: for now, put a unique ID in every new block, to make sure it
// has a unique block ID. This may not be needed once we have encryption.
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
de, err := fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, newBlock, dirPath, name, entryType,
true, true, zeroPtr)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
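// doMDWriteWithRetry runs fn under mdWriterLock, releasing the lock
// and retrying whenever fn fails with a recoverable block error, up
// to maxRetriesOnRecoverableErrors attempts.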
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
fbo.mdWriterLock.Unlock(lState)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
continue
} else if err != nil {
return err
}
return nil
}
}
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %p %s", dir.GetID(), path)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err := fbo.createEntryLocked(ctx, lState, dir, path, Dir)
n = node
ei = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return n, ei, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %p %s", dir.GetID(), path)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType)
n = node
ei = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return n, ei, nil
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(ctx, lState, md, dirPath, blockWrite)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md,
dirPath, fromName); err != nil {
return DirEntry{}, err
}
md.AddOp(newCreateOp(fromName, dirPath.tailPointer(), Sym))
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
dblock.Children[fromName] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *dirPath.parentPath(),
dirPath.tailName(), Dir, true, true, zeroPtr)
if err != nil {
return DirEntry{}, err
}
return dblock.Children[fromName], nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %p %s -> %s",
dir.GetID(), fromName, toPath)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
ei = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return ei, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntry(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, de DirEntry,
name string) error {
md.AddUnrefBlock(de.BlockInfo)
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, md, childPath)
if err != nil {
return NoSuchBlockError{de.ID}
}
for _, blockInfo := range blockInfos {
md.AddUnrefBlock(blockInfo)
}
}
return nil
}
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, name string) error {
fbo.mdWriterLock.AssertLocked(lState)
pblock, err := fbo.blocks.GetDir(ctx, lState, md, dir, blockWrite)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
md.AddOp(newRmOp(name, dir.tailPointer()))
err = fbo.unrefEntry(ctx, lState, md, dir, de, name)
if err != nil {
return err
}
// the actual unlink
delete(pblock.Children, name)
// sync the parent directory
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(),
Dir, true, true, zeroPtr)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
	pblock, err := fbo.blocks.GetDir(ctx, lState, md, dirPath, blockRead)
	if err != nil {
		return err
	}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDir(
ctx, lState, md, childPath, blockRead)
if err != nil {
return err
}
if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %p %s", dir.GetID(), dirName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, name)
})
}
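// renameLocked moves the entry oldName under oldParent to newName
// under newParent, syncing the affected directory blocks (sharing
// work at the common ancestor) and committing the result as a single
// MD write.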
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent path,
oldName string, newParent path, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename(
ctx, lState, md, oldParent, oldName, newParent, newName)
if err != nil {
return err
}
// does name exist?
if de, ok := newPBlock.Children[newName]; ok {
if de.Type == Dir {
fbo.log.CWarningf(ctx, "Renaming over a directory (%s/%s) is not "+
"allowed.", newParent, newName)
return NotFileError{newParent.ChildPathNoPtr(newName)}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName)
if err != nil {
return err
}
}
// only the ctime changes
newDe.Ctime = fbo.nowUnixNano()
newPBlock.Children[newName] = newDe
delete(oldPBlock.Children, oldName)
// find the common ancestor
var i int
found := false
// the root block will always be the same, so start at number 1
for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ {
if oldParent.path[i].ID != newParent.path[i].ID {
found = true
i--
break
}
}
if !found {
// if we couldn't find one, then the common ancestor is the
// last node in the shorter path
if len(oldParent.path) < len(newParent.path) {
i = len(oldParent.path) - 1
} else {
i = len(newParent.path) - 1
}
}
commonAncestor := oldParent.path[i].BlockPointer
oldIsCommon := oldParent.tailPointer() == commonAncestor
newIsCommon := newParent.tailPointer() == commonAncestor
newOldPath := path{FolderBranch: oldParent.FolderBranch}
var oldBps *blockPutState
if oldIsCommon {
if newIsCommon {
// if old and new are both the common ancestor, there is
// nothing to do (syncBlock will take care of everything)
} else {
// If the old one is common and the new one is
// not, then the last
// syncBlockAndCheckEmbedLocked call will need
// to access the old one.
lbc[oldParent.tailPointer()] = oldPBlock
}
} else {
if newIsCommon {
// If the new one is common, then the first
// syncBlockAndCheckEmbedLocked call will need to access
// it.
lbc[newParent.tailPointer()] = newPBlock
}
// The old one is not the common ancestor, so we need to sync it.
// TODO: optimize by pushing blocks from both paths in parallel
newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(),
Dir, true, true, commonAncestor, lbc)
if err != nil {
return err
}
}
newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(),
Dir, true, true, zeroPtr, lbc)
if err != nil {
return err
}
// newOldPath is really just a prefix now. A copy is necessary as an
// append could cause the new path to contain nodes from the old path.
newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...)
copy(newOldPath.path[:i+1], newNewPath.path[:i+1])
// merge and finalize the blockPutStates
if oldBps != nil {
newBps.mergeOtherBps(oldBps)
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md, newBps)
}
}()
_, err = fbo.doBlockPuts(ctx, md, *newBps)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps)
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %p/%s -> %p/%s", oldParent.GetID(),
oldName, newParent.GetID(), newName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// only works for paths within the same topdir
if oldParentPath.FolderBranch != newParentPath.FolderBranch {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParentPath, oldName,
newParentPath, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %p %d %d", file.GetID(), len(dest), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return err
}
bytesRead, err = fbo.blocks.Read(ctx, lState, md, filePath, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %p %d %d", file.GetID(), len(data), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(ctx, lState, md, file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %p %d", file.GetID(), size)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(ctx, lState, md, file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file path,
ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md, file)
if err != nil {
return
}
// If the file is a symlink, do nothing (to match ext4
// behavior).
if de.Type == Sym {
return
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
}
parentPath := file.parentPath()
md.AddOp(newSetAttrOp(file.tailName(), parentPath.tailPointer(), exAttr,
file.tailPointer()))
// If the type isn't File or Exec, there's nothing to do, but
// change the ctime anyway (to match ext4 behavior).
de.Ctime = fbo.nowUnixNano()
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr)
return err
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %p %t", file.GetID(), ex)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setExLocked(ctx, lState, filePath, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file path,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md, file)
if err != nil {
return err
}
parentPath := file.parentPath()
md.AddOp(newSetAttrOp(file.tailName(), parentPath.tailPointer(), mtimeAttr,
file.tailPointer()))
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr)
return err
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %p %v", file.GetID(), mtime)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
})
}
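// syncLocked flushes any dirty blocks for the given file to the
// block and MD servers as a single write. It returns whether the
// caller should still consider the file dirty afterwards.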
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
lState *lockState, file path) (stillDirty bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, nil
}
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who sync clean files on close
// would get an error.
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return true, err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file. TODO: Hook this in with the
// node cache GC logic to be perfectly accurate.
fbo.blocks.ClearCacheInfo(lState, file)
return true, nil
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return true, err
}
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
// Filled in by doBlockPuts below.
var blocksToRemove []BlockPointer
fblock, bps, lbc, syncState, err :=
fbo.blocks.StartSync(ctx, lState, md, uid, file)
defer func() {
fbo.blocks.CleanupSyncState(
ctx, lState, file, blocksToRemove, syncState, err)
}()
if err != nil {
return true, err
}
newPath, _, newBps, err :=
fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, fblock, *file.parentPath(),
file.tailName(), File, true, true, zeroPtr, lbc)
if err != nil {
return true, err
}
bps.mergeOtherBps(newBps)
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md, bps)
}
}()
blocksToRemove, err = fbo.doBlockPuts(ctx, md, *bps)
if err != nil {
return true, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps)
if err != nil {
return true, err
}
// At this point, all reads through the old path (i.e., file)
// see writes that happened since StartSync, whereas all reads
// through the new path (newPath) don't.
//
// TODO: This isn't completely correct, since reads that
// happen after a write should always see the new data.
//
// After FinishSync succeeds, then reads through both the old
// and the new paths will see the writes that happened during
// the sync.
return fbo.blocks.FinishSync(ctx, lState, file, newPath, md, syncState)
}
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
fbo.log.CDebugf(ctx, "Sync %p", file.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
defer func() {
lState := makeFBOLockState()
fbo.blocks.NotifyBlockedWrites(lState, err)
}()
var stillDirty bool
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
return err
})
if err != nil {
return err
}
if !stillDirty {
fbo.status.rmDirtyNode(file)
}
return nil
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
// Wait for conflict resolution to settle down, if necessary.
fbo.cr.Wait(ctx)
return fbo.status.getStatus(ctx)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md *RootMetadata) {
fbo.headLock.AssertLocked(lState)
lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
fbo.notifyOneOpLocked(ctx, lState, lastOp, md)
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md *RootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.AllUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache, []BlockPointer{ptr},
newPtrs, md)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
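// unlinkFromCache unlinks the child entry name under the given node
// from the node cache. Since the entry could be under any of the
// block pointers unreferenced by op, it tries each of them.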
func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer,
node Node, name string) error {
// The entry could be under any one of the unref'd blocks, and
// it's safe to perform this when the pointer isn't real, so just
// try them all to avoid the overhead of looking up the right
// pointer in the old version of the block.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childPath := p.ChildPathNoPtr(name)
// revert the parent pointer
childPath.path[len(childPath.path)-2].BlockPointer = oldDir
for _, ptr := range op.Unrefs() {
childPath.path[len(childPath.path)-1].BlockPointer = ptr
fbo.nodeCache.Unlink(ptr.ref(), childPath)
}
return nil
}
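// updatePointers moves node cache entries from each block pointer
// unreferenced by op to the corresponding newly-referenced pointer.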
func (fbo *folderBranchOps) updatePointers(op op) {
for _, update := range op.AllUpdates() {
oldRef := update.Unref.ref()
fbo.nodeCache.UpdatePointer(oldRef, update.Ref)
}
}
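// notifyOneOpLocked updates the node cache to reflect a single op
// from a committed MD update, and notifies registered observers
// about the nodes it affects.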
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md *RootMetadata) {
fbo.headLock.AssertLocked(lState)
fbo.updatePointers(op)
var changes []NodeChange
switch realOp := op.(type) {
default:
return
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %p",
realOp.NewName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %p",
realOp.OldName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
var newNodeID NodeID
if newNode != nil {
newNodeID = newNode.GetID()
}
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%p to %s/%p",
realOp.Renamed, realOp.OldName, oldNode.GetID(), realOp.NewName,
newNodeID)
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md)
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
// If new node exists as well, unlink any previously
// existing entry and move the node.
var unrefPtr BlockPointer
if oldNode != newNode {
unrefPtr = realOp.NewDir.Unref
} else {
unrefPtr = realOp.OldDir.Unref
}
err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
err = fbo.nodeCache.Move(realOp.Renamed.ref(), newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err)
return
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %p",
len(realOp.Writes), node.GetID())
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %p",
realOp.Attr, realOp.Name, node.GetID())
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md, p, realOp)
if err != nil {
// TODO: Log error?
return
}
if childNode == nil {
return
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *gcOp:
// Unreferenced blocks in a gcOp mean that we shouldn't cache
// them anymore
bcache := fbo.config.BlockCache()
for _, ptr := range realOp.Unrefs() {
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
}
fbo.observers.batchChanges(ctx, changes)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != nil {
return fbo.head.Revision
}
return MetadataRevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
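// reembedBlockChanges fetches and decodes the out-of-band block
// changes for any MD object whose ops were too large to embed,
// restoring the full op list into rmd.data.Changes.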
func (fbo *folderBranchOps) reembedBlockChanges(ctx context.Context,
lState *lockState, rmds []*RootMetadata) error {
// if any of the operations have unembedded block ops, fetch those
// now and fix them up. TODO: parallelize me.
for _, rmd := range rmds {
info := rmd.data.Changes.Info
if info.BlockPointer == zeroPtr {
continue
}
fblock, err := fbo.blocks.GetFileBlockForReading(ctx, lState, rmd,
info.BlockPointer, fbo.folderBranch.Branch, path{})
if err != nil {
return err
}
err = fbo.config.Codec().Decode(fblock.Contents, &rmd.data.Changes)
if err != nil {
return err
}
// The changes block pointer is an implicit ref block
rmd.data.Changes.Ops[0].AddRefBlock(info.BlockPointer)
rmd.data.cachedChanges.Info = info
}
return nil
}
// reembedForFBM is a helper method for the folderBlockManager only.
func (fbo *folderBranchOps) reembedForFBM(ctx context.Context,
rmds []*RootMetadata) error {
lState := makeFBOLockState()
return fbo.reembedBlockChanges(ctx, lState, rmds)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []*RootMetadata) error
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []*RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if fbo.staged {
if len(rmds) > 0 {
unmergedRev := MetadataRevisionUninitialized
if fbo.head != nil {
unmergedRev = fbo.head.Revision
}
fbo.cr.Resolve(unmergedRev, rmds[len(rmds)-1].Revision)
}
return errors.New("Ignoring MD updates while local updates are staged")
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.New("Ignoring MD updates while writes are dirty")
}
if err := fbo.reembedBlockChanges(ctx, lState, rmds); err != nil {
return err
}
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if rmd.Revision != fbo.getCurrMDRevisionLocked(lState)+1 {
return MDUpdateApplyError{rmd.Revision,
fbo.getCurrMDRevisionLocked(lState)}
}
if err := rmd.isReadableOrError(ctx, fbo.config); err != nil {
return err
}
err := fbo.setHeadLocked(ctx, lState, rmd)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
fbo.notifyOneOpLocked(ctx, lState, op, rmd)
}
}
return nil
}
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []*RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
if err := fbo.reembedBlockChanges(ctx, lState, rmds); err != nil {
return err
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
if rmd.Revision != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision,
fbo.getCurrMDRevisionLocked(lState)}
}
err := fbo.setHeadLocked(ctx, lState, rmd)
if err != nil {
return err
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
fbo.notifyOneOpLocked(ctx, lState, invertOpForLocalNotifications(ops[j]), rmd)
}
}
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []*RootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getCurrMDRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []*RootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
MetadataRevision, []*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setStagedLocked(lState, false, NullBranchID)
rmds, err := getMDRange(ctx, fbo.config, fbo.id(), NullBranchID,
currHead, currHead, Merged)
if err != nil {
return nil, err
}
if len(rmds) == 0 {
return nil, fmt.Errorf("Couldn't find the branch point %d", currHead)
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.setHeadLocked(ctx, lState, rmds[0])
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.AllUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
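// unstageLocked reverts all unmerged updates on the current branch,
// prunes the branch on the MD server, catches back up with the
// merged history, and records the abandoned block pointers in a
// resolutionOp via a final MD write.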
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasStaged := fbo.bid, fbo.staged
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
	// let the server know we no longer need this branch
if wasStaged {
err = fbo.config.MDServer().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
return fbo.finalizeMDWriteLocked(ctx, lState, md, &blockPutState{})
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if !fbo.getStaged(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
		// Launch unstaging in a new goroutine, because we don't want to
		// use the provided context: upper layers might ignore our
		// notifications if we do. But we still want to wait for the
		// context to cancel.
c := make(chan error, 1)
ctxWithTags := fbo.ctxWithFBOID(context.Background())
freshCtx, cancel := context.WithCancel(ctxWithTags)
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
if fbo.staged {
return errors.New("Can't rekey while staged.")
}
head := fbo.getHead(lState)
if head != nil {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(MDUpdateApplyError); !ok ||
applyErr.rev != applyErr.curr {
return err
}
}
}
md, rekeyWasSet, err := fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return err
}
if fbo.rekeyWithPromptTimer != nil {
if !promptPaper {
fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.")
} else if !md.IsRekeySet() {
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
// If the rekey bit isn't set, then some other device
// already took care of our request, and we can stop
// early. Note that if this FBO never registered for
// updates, then we might not yet have seen the update, in
// which case we'll still try to rekey but it will fail as
// a conflict.
fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+
"rekey bit was already unset.")
return nil
}
}
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return nil
}
// Clear the rekey bit if any.
md.Flags &= ^MetadataFlagRekey
md.clearLastRevision()
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
			// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError:
stillNeedsRekey = true
case NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
//
// Only ever set the timer once.
if fbo.rekeyWithPromptTimer == nil {
d := fbo.config.RekeyWithPromptWaitTime()
fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d)
fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt)
}
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(ctx, lState, md)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.ID, keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil {
fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed")
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
}
return nil
}
func (fbo *folderBranchOps) rekeyWithPrompt() {
var err error
ctx := ctxWithRandomID(context.Background(), CtxRekeyIDKey, CtxRekeyOpID,
fbo.log)
// Only give the user limited time to enter their paper key, so we
// don't wait around forever.
d := fbo.config.RekeyWithPromptWaitTime()
ctx, cancel := context.WithTimeout(ctx, d)
defer cancel()
fbo.log.CDebugf(ctx, "rekeyWithPrompt")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, true)
})
}
// Rekey rekeys the given folder.
func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf TlfID) (err error) {
fbo.log.CDebugf(ctx, "Rekey")
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, false)
})
}
func (fbo *folderBranchOps) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
if fbo.getStaged(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if fbo.getStaged(lState) {
return fmt.Errorf("Conflict resolution didn't take us out of " +
"staging.")
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) > 0 {
for _, ref := range dirtyRefs {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("Can't sync from server while dirty.")
}
if err := fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(MDUpdateApplyError); ok {
if applyErr.rev == applyErr.curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
return err
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
return fbo.fbm.waitForQuotaReclamations(ctx)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return ctxWithRandomID(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
var errShutdownHappened = errors.New("Shutdown happened")
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return errShutdownHappened
}
}
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
err = fbo.waitForAndProcessUpdates(newCtx, updateChan)
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getCurrMDRevision(lState)
fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(),
currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, updateChan <-chan error) (err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return err
}
return nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return ctx.Err()
}
case <-ctx.Done():
return ctx.Err()
}
}
}
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
ticker := time.NewTicker(betweenFlushes)
defer ticker.Stop()
lState := makeFBOLockState()
for {
select {
case <-ticker.C:
case <-fbo.forceSyncChan:
case <-fbo.shutdownChan:
return
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = context.WithValue(ctx, CtxBackgroundSyncKey, "1")
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
// Make sure this loop doesn't starve user requests for
// too long. But use the longer-timeout version in the
// actual Sync command, to avoid unnecessary errors.
shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
defer shortCancel()
for _, ref := range dirtyRefs {
select {
case <-shortCtx.Done():
fbo.log.CDebugf(ctx,
"Stopping background sync early due to timeout")
return nil
default:
}
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
err := fbo.Sync(longCtx, node)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
p := fbo.nodeCache.PathFromNode(node)
fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
"ref=%v, nodeID=%p, and path=%v: %v",
ref, node.GetID(), p, err)
}
}
return nil
})
}
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// Put the MD. If there's a conflict, abort the whole process and
// let CR restart itself.
err = fbo.config.MDOps().Put(ctx, md)
doUnmergedPut := fbo.isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
err = fbo.config.MDServer().PruneBranch(ctx, fbo.id(), fbo.bid)
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.ID)
}
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setStagedLocked(lState, false, NullBranchID)
// Archive the old, unref'd blocks
fbo.fbm.archiveUnrefBlocks(md)
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
fbo.notifyOneOpLocked(ctx, lState, op, md)
}
return nil
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
MetadataRevisionInitial)
if err != nil {
return TLFUpdateHistory{}, err
}
err = fbo.reembedBlockChanges(ctx, lState, rmds)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.ID.String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter]
if !ok {
name, err := fbo.config.KBPKI().
GetNormalizedUsername(ctx, rmd.LastModifyingWriter)
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision,
Date: time.Unix(0, rmd.data.Dir.Mtime),
Writer: writer,
LiveBytes: rmd.DiskUsage,
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.AllUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 11,456 | worth mentioning the same warning here as in `md_util.go`, I think. | keybase-kbfs | go |
@@ -134,8 +134,12 @@ func (p *Provisioner) GetNodeObjectFromHostName(hostName string) (*v1.Node, erro
Limit: 1,
}
nodeList, err := p.kubeClient.CoreV1().Nodes().List(listOptions)
- if err != nil {
- return nil, errors.Errorf("Unable to get the Node with the NodeHostName")
+ if err != nil || nodeList.Items == nil || len(nodeList.Items) == 0 {
+ // After the PV is created and node affinity is set
+ // based on kubernetes.io/hostname label, either:
+ // - hostname label changed on the node or
+ // - the node is deleted from the cluster.
+ return nil, errors.Errorf("Unable to get the Node with the NodeHostName [%s]", hostName)
}
return &nodeList.Items[0], nil
| 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/openebs/maya/pkg/alertlog"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
//pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
persistentvolume "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1"
)
// ProvisionHostPath is invoked by the Provisioner which expects a HostPath PV
// to be provisioned and a valid PV spec returned.
func (p *Provisioner) ProvisionHostPath(opts pvController.VolumeOptions, volumeConfig *VolumeConfig) (*v1.PersistentVolume, error) {
pvc := opts.PVC
nodeHostname := GetNodeHostname(opts.SelectedNode)
taints := GetTaints(opts.SelectedNode)
name := opts.PVName
stgType := volumeConfig.GetStorageType()
saName := getOpenEBSServiceAccountName()
path, err := volumeConfig.GetPath()
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "local.pv.provision.failure",
"msg", "Failed to provision Local PV",
"rname", opts.PVName,
"reason", "Unable to get volume config",
"storagetype", stgType,
)
return nil, err
}
klog.Infof("Creating volume %v at %v:%v", name, nodeHostname, path)
//Before using the path for local PV, make sure it is created.
initCmdsForPath := []string{"mkdir", "-m", "0777", "-p"}
podOpts := &HelperPodOptions{
cmdsForPath: initCmdsForPath,
name: name,
path: path,
nodeHostname: nodeHostname,
serviceAccountName: saName,
selectedNodeTaints: taints,
}
iErr := p.createInitPod(podOpts)
if iErr != nil {
klog.Infof("Initialize volume %v failed: %v", name, iErr)
alertlog.Logger.Errorw("",
"eventcode", "local.pv.provision.failure",
"msg", "Failed to provision Local PV",
"rname", opts.PVName,
"reason", "Volume initialization failed",
"storagetype", stgType,
)
return nil, iErr
}
// VolumeMode will always be specified as Filesystem for host path volume,
// and the value passed in from the PVC spec will be ignored.
fs := v1.PersistentVolumeFilesystem
// It is possible that the HostPath doesn't already exist on the node.
// Set the Local PV to create it.
//hostPathType := v1.HostPathDirectoryOrCreate
// TODO initialize the Labels and annotations
// Use annotations to specify the context using which the PV was created.
//volAnnotations := make(map[string]string)
//volAnnotations[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType
//fstype := casVolume.Spec.FSType
labels := make(map[string]string)
labels[string(mconfig.CASTypeKey)] = "local-" + stgType
//labels[string(v1alpha1.StorageClassKey)] = *className
//TODO Change the following to a builder pattern
pvObj, err := persistentvolume.NewBuilder().
WithName(name).
WithLabels(labels).
WithReclaimPolicy(opts.PersistentVolumeReclaimPolicy).
WithAccessModes(pvc.Spec.AccessModes).
WithVolumeMode(fs).
WithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]).
WithLocalHostDirectory(path).
WithNodeAffinity(nodeHostname).
Build()
if err != nil {
alertlog.Logger.Errorw("",
"eventcode", "local.pv.provision.failure",
"msg", "Failed to provision Local PV",
"rname", opts.PVName,
"reason", "failed to build persistent volume",
"storagetype", stgType,
)
return nil, err
}
alertlog.Logger.Infow("",
"eventcode", "local.pv.provision.success",
"msg", "Successfully provisioned Local PV",
"rname", opts.PVName,
"storagetype", stgType,
)
return pvObj, nil
}
// GetNodeObjectFromHostName returns the Node Object with matching NodeHostName.
func (p *Provisioner) GetNodeObjectFromHostName(hostName string) (*v1.Node, error) {
labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{persistentvolume.KeyNode: hostName}}
listOptions := metav1.ListOptions{
LabelSelector: labels.Set(labelSelector.MatchLabels).String(),
Limit: 1,
}
nodeList, err := p.kubeClient.CoreV1().Nodes().List(listOptions)
if err != nil {
return nil, errors.Errorf("Unable to get the Node with the NodeHostName")
}
return &nodeList.Items[0], nil
}
// DeleteHostPath is invoked by the PVC controller to perform clean-up
// activities before deleting the PV object. If reclaim policy is
// set to not-retain, then this function will create a helper pod
// to delete the host path from the node.
func (p *Provisioner) DeleteHostPath(pv *v1.PersistentVolume) (err error) {
defer func() {
err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
}()
saName := getOpenEBSServiceAccountName()
//Determine the path and node of the Local PV.
pvObj := persistentvolume.NewForAPIObject(pv)
path := pvObj.GetPath()
if path == "" {
return errors.Errorf("no HostPath set")
}
hostname := pvObj.GetAffinitedNodeHostname()
if hostname == "" {
return errors.Errorf("cannot find affinited node hostname")
}
alertlog.Logger.Infof("Get the Node Object from hostName: %v", hostname)
//Get the node Object once again to get updated Taints.
nodeObject, err := p.GetNodeObjectFromHostName(hostname)
if err != nil {
return err
}
taints := GetTaints(nodeObject)
//Initiate clean up only when reclaim policy is not retain.
klog.Infof("Deleting volume %v at %v:%v", pv.Name, hostname, path)
cleanupCmdsForPath := []string{"rm", "-rf"}
podOpts := &HelperPodOptions{
cmdsForPath: cleanupCmdsForPath,
name: pv.Name,
path: path,
nodeHostname: hostname,
serviceAccountName: saName,
selectedNodeTaints: taints,
}
if err := p.createCleanupPod(podOpts); err != nil {
return errors.Wrapf(err, "clean up volume %v failed", pv.Name)
}
return nil
}
| 1 | 18,411 | Do we need both the checks, for `Items` not nil and `len(Items)` | openebs-maya | go |
@@ -349,6 +349,14 @@ ActiveRecord::Schema.define(version: 20160927192531) do
add_index "tags", ["name"], name: "index_tags_on_name", unique: true, using: :btree
+ create_table "test_client_requests", force: :cascade do |t|
+ t.decimal "amount"
+ t.string "project_title"
+ t.integer "approving_official_id"
+ t.datetime "created_at"
+ t.datetime "updated_at"
+ end
+
create_table "user_delegates", force: :cascade do |t|
t.integer "assigner_id"
t.integer "assignee_id" | 1 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20160927192531) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
create_table "active_admin_comments", force: :cascade do |t|
t.string "namespace"
t.text "body"
t.string "resource_id", null: false
t.string "resource_type", null: false
t.integer "author_id"
t.string "author_type"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "active_admin_comments", ["author_type", "author_id"], name: "index_active_admin_comments_on_author_type_and_author_id", using: :btree
add_index "active_admin_comments", ["namespace"], name: "index_active_admin_comments_on_namespace", using: :btree
add_index "active_admin_comments", ["resource_type", "resource_id"], name: "index_active_admin_comments_on_resource_type_and_resource_id", using: :btree
create_table "ahoy_events", id: :uuid, default: nil, force: :cascade do |t|
t.uuid "visit_id"
t.integer "user_id"
t.string "name"
t.json "properties"
t.datetime "time"
end
add_index "ahoy_events", ["time"], name: "index_ahoy_events_on_time", using: :btree
add_index "ahoy_events", ["user_id"], name: "index_ahoy_events_on_user_id", using: :btree
add_index "ahoy_events", ["visit_id"], name: "index_ahoy_events_on_visit_id", using: :btree
create_table "ahoy_messages", force: :cascade do |t|
t.string "token"
t.text "to"
t.integer "user_id"
t.string "user_type"
t.string "mailer"
t.text "subject"
t.text "content"
t.datetime "sent_at"
t.datetime "opened_at"
t.datetime "clicked_at"
end
add_index "ahoy_messages", ["token"], name: "index_ahoy_messages_on_token", using: :btree
add_index "ahoy_messages", ["user_id", "user_type"], name: "index_ahoy_messages_on_user_id_and_user_type", using: :btree
create_table "api_tokens", force: :cascade do |t|
t.string "access_token"
t.datetime "expires_at"
t.datetime "created_at"
t.datetime "updated_at"
t.datetime "used_at"
t.integer "step_id"
end
add_index "api_tokens", ["access_token"], name: "index_api_tokens_on_access_token", unique: true, using: :btree
create_table "attachments", force: :cascade do |t|
t.string "file_file_name"
t.string "file_content_type"
t.integer "file_file_size"
t.datetime "file_updated_at"
t.integer "proposal_id"
t.integer "user_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "blazer_audits", force: :cascade do |t|
t.integer "user_id"
t.integer "query_id"
t.text "statement"
t.string "data_source"
t.datetime "created_at"
end
create_table "blazer_checks", force: :cascade do |t|
t.integer "query_id"
t.string "state"
t.text "emails"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "blazer_dashboard_queries", force: :cascade do |t|
t.integer "dashboard_id"
t.integer "query_id"
t.integer "position"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "blazer_dashboards", force: :cascade do |t|
t.text "name"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "blazer_queries", force: :cascade do |t|
t.integer "creator_id"
t.string "name"
t.text "description"
t.text "statement"
t.string "data_source"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "comments", force: :cascade do |t|
t.text "comment_text"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "user_id"
t.integer "proposal_id"
t.boolean "update_comment"
t.uuid "visit_id"
end
add_index "comments", ["proposal_id"], name: "index_comments_on_proposal_id", using: :btree
create_table "delayed_jobs", force: :cascade do |t|
t.integer "priority", default: 0, null: false
t.integer "attempts", default: 0, null: false
t.text "handler", null: false
t.text "last_error"
t.datetime "run_at"
t.datetime "locked_at"
t.datetime "failed_at"
t.string "locked_by"
t.string "queue"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "delayed_jobs", ["priority", "run_at"], name: "delayed_jobs_priority", using: :btree
create_table "gsa18f_events", force: :cascade do |t|
t.string "duty_station"
t.integer "supervisor_id"
t.string "title_of_event"
t.string "event_provider"
t.string "purpose"
t.string "justification"
t.string "link"
t.string "instructions"
t.string "nfs_form"
t.decimal "cost_per_unit"
t.decimal "estimated_travel_expenses"
t.date "start_date"
t.date "end_date"
t.datetime "updated_at"
t.datetime "created_at"
t.text "type_of_event"
t.boolean "free_event", default: false
t.boolean "travel_required", default: false
end
create_table "gsa18f_procurements", force: :cascade do |t|
t.text "office"
t.text "justification", default: "", null: false
t.text "link_to_product", default: "", null: false
t.integer "quantity"
t.datetime "date_requested"
t.text "additional_info"
t.decimal "cost_per_unit"
t.text "product_name_and_description"
t.boolean "recurring", default: false, null: false
t.string "recurring_interval", default: "Daily"
t.integer "recurring_length"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "urgency"
t.integer "purchase_type", null: false
t.boolean "is_tock_billable"
t.string "tock_project"
t.string "pegasys_document_number"
t.boolean "client_billed"
end
create_table "ncr_organizations", force: :cascade do |t|
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "code", null: false
t.string "name", default: "", null: false
end
create_table "ncr_work_orders", force: :cascade do |t|
t.decimal "amount"
t.string "expense_type"
t.string "vendor"
t.boolean "not_to_exceed", default: false, null: false
t.string "building_number"
t.boolean "emergency", default: false, null: false
t.string "rwa_number"
t.string "work_order_code"
t.string "project_title"
t.text "description"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "direct_pay", default: false, null: false
t.string "cl_number"
t.string "function_code"
t.string "soc_code"
t.integer "ncr_organization_id"
t.integer "approving_official_id"
end
add_index "ncr_work_orders", ["ncr_organization_id"], name: "index_ncr_work_orders_on_ncr_organization_id", using: :btree
create_table "oauth_access_grants", force: :cascade do |t|
t.integer "resource_owner_id", null: false
t.integer "application_id", null: false
t.string "token", null: false
t.integer "expires_in", null: false
t.text "redirect_uri", null: false
t.datetime "created_at", null: false
t.datetime "revoked_at"
t.string "scopes"
end
add_index "oauth_access_grants", ["token"], name: "index_oauth_access_grants_on_token", unique: true, using: :btree
create_table "oauth_access_tokens", force: :cascade do |t|
t.integer "resource_owner_id"
t.integer "application_id"
t.string "token", null: false
t.string "refresh_token"
t.integer "expires_in"
t.datetime "revoked_at"
t.datetime "created_at", null: false
t.string "scopes"
end
add_index "oauth_access_tokens", ["refresh_token"], name: "index_oauth_access_tokens_on_refresh_token", unique: true, using: :btree
add_index "oauth_access_tokens", ["token"], name: "index_oauth_access_tokens_on_token", unique: true, using: :btree
create_table "oauth_applications", force: :cascade do |t|
t.string "name", null: false
t.string "uid", null: false
t.string "secret", null: false
t.text "redirect_uri", null: false
t.string "scopes", default: "", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.integer "owner_id"
t.string "owner_type"
end
add_index "oauth_applications", ["owner_id", "owner_type"], name: "index_oauth_applications_on_owner_id_and_owner_type", using: :btree
add_index "oauth_applications", ["uid"], name: "index_oauth_applications_on_uid", unique: true, using: :btree
create_table "proposal_roles", force: :cascade do |t|
t.integer "role_id", null: false
t.integer "user_id", null: false
t.integer "proposal_id", null: false
end
add_index "proposal_roles", ["role_id", "user_id", "proposal_id"], name: "index_proposal_roles_on_role_id_and_user_id_and_proposal_id", unique: true, using: :btree
create_table "proposals", force: :cascade do |t|
t.string "status"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "client_data_id"
t.string "client_data_type"
t.integer "requester_id"
t.string "public_id", limit: 255
t.uuid "visit_id"
end
add_index "proposals", ["client_data_type", "client_data_id"], name: "index_proposals_on_client_data_type_and_client_data_id", using: :btree
create_table "reports", force: :cascade do |t|
t.string "name", null: false
t.json "query", null: false
t.boolean "shared", default: false
t.integer "user_id", null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.uuid "visit_id"
end
create_table "roles", force: :cascade do |t|
t.string "name"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "roles", ["name"], name: "roles_name_idx", unique: true, using: :btree
create_table "scheduled_reports", force: :cascade do |t|
t.string "name", null: false
t.integer "frequency", default: 0, null: false
t.integer "user_id", null: false
t.integer "report_id", null: false
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "steps", force: :cascade do |t|
t.integer "user_id"
t.string "status"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "position"
t.integer "proposal_id"
t.datetime "completed_at"
t.string "type"
t.integer "parent_id"
t.integer "min_children_needed"
t.integer "completer_id"
end
add_index "steps", ["completer_id"], name: "index_steps_on_completer_id", using: :btree
add_index "steps", ["user_id", "proposal_id"], name: "steps_user_proposal_idx", unique: true, using: :btree
create_table "taggings", force: :cascade do |t|
t.integer "tag_id"
t.integer "taggable_id"
t.string "taggable_type"
t.integer "tagger_id"
t.string "tagger_type"
t.string "context", limit: 128
t.datetime "created_at"
end
add_index "taggings", ["tag_id", "taggable_id", "taggable_type", "context", "tagger_id", "tagger_type"], name: "taggings_idx", unique: true, using: :btree
add_index "taggings", ["taggable_id", "taggable_type", "context"], name: "index_taggings_on_taggable_id_and_taggable_type_and_context", using: :btree
create_table "tags", force: :cascade do |t|
t.string "name"
t.integer "taggings_count", default: 0
end
add_index "tags", ["name"], name: "index_tags_on_name", unique: true, using: :btree
create_table "user_delegates", force: :cascade do |t|
t.integer "assigner_id"
t.integer "assignee_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "user_roles", force: :cascade do |t|
t.integer "user_id", null: false
t.integer "role_id", null: false
end
add_index "user_roles", ["user_id", "role_id"], name: "index_user_roles_on_user_id_and_role_id", unique: true, using: :btree
create_table "users", force: :cascade do |t|
t.string "email_address"
t.string "first_name"
t.string "last_name"
t.datetime "created_at"
t.datetime "updated_at"
t.string "client_slug"
t.boolean "active", default: true
t.string "timezone", limit: 255, default: "Eastern Time (US & Canada)"
t.string "new_features_date"
t.text "list_view_config"
end
create_table "versions", force: :cascade do |t|
t.string "item_type", null: false
t.integer "item_id", null: false
t.string "event", null: false
t.string "whodunnit"
t.text "object"
t.datetime "created_at"
end
add_index "versions", ["item_type", "item_id"], name: "index_versions_on_item_type_and_item_id", using: :btree
create_table "visits", id: :uuid, default: nil, force: :cascade do |t|
t.uuid "visitor_id"
t.string "ip"
t.text "user_agent"
t.text "referrer"
t.text "landing_page"
t.integer "user_id"
t.string "referring_domain"
t.string "search_keyword"
t.string "browser"
t.string "os"
t.string "device_type"
t.integer "screen_height"
t.integer "screen_width"
t.string "country"
t.string "region"
t.string "city"
t.string "postal_code"
t.decimal "latitude"
t.decimal "longitude"
t.string "utm_source"
t.string "utm_medium"
t.string "utm_term"
t.string "utm_content"
t.string "utm_campaign"
t.datetime "started_at"
end
add_index "visits", ["user_id"], name: "index_visits_on_user_id", using: :btree
add_foreign_key "attachments", "proposals", name: "proposal_id_fkey"
add_foreign_key "attachments", "users", name: "user_id_fkey"
add_foreign_key "comments", "proposals", name: "proposal_id_fkey"
add_foreign_key "comments", "users", name: "user_id_fkey"
add_foreign_key "comments", "visits"
add_foreign_key "proposal_roles", "proposals", name: "proposal_id_fkey"
add_foreign_key "proposal_roles", "roles", name: "role_id_fkey"
add_foreign_key "proposal_roles", "users", name: "user_id_fkey"
add_foreign_key "proposals", "users", column: "requester_id", name: "requester_id_fkey"
add_foreign_key "proposals", "visits"
add_foreign_key "reports", "visits"
add_foreign_key "scheduled_reports", "reports"
add_foreign_key "scheduled_reports", "users"
add_foreign_key "steps", "proposals", name: "proposal_id_fkey", on_delete: :cascade
add_foreign_key "steps", "steps", column: "parent_id", name: "parent_id_fkey", on_delete: :cascade
add_foreign_key "steps", "users", column: "completer_id", name: "completer_id_fkey"
add_foreign_key "steps", "users", name: "user_id_fkey"
add_foreign_key "user_delegates", "users", column: "assignee_id", name: "assignee_id_fkey"
add_foreign_key "user_delegates", "users", column: "assigner_id", name: "assigner_id_fkey"
add_foreign_key "user_roles", "roles", name: "role_id_fkey"
add_foreign_key "user_roles", "users", name: "user_id_fkey"
end
| 1 | 17,963 | Do you know what this is coming from? It keeps getting deleted/created @nickbristow | 18F-C2 | rb |
@@ -63,11 +63,7 @@ public class FlinkCatalogFactory implements CatalogFactory {
public static final String ICEBERG_CATALOG_TYPE_HADOOP = "hadoop";
public static final String ICEBERG_CATALOG_TYPE_HIVE = "hive";
- public static final String HIVE_URI = "uri";
- public static final String HIVE_CLIENT_POOL_SIZE = "clients";
public static final String HIVE_CONF_DIR = "hive-conf-dir";
- public static final String WAREHOUSE_LOCATION = "warehouse";
-
public static final String DEFAULT_DATABASE = "default-database";
public static final String BASE_NAMESPACE = "base-namespace";
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.runtime.util.HadoopUtils;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.descriptors.CatalogDescriptorValidator;
import org.apache.flink.table.factories.CatalogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.base.Splitter;
import org.apache.iceberg.relocated.com.google.common.base.Strings;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
/**
* A Flink Catalog factory implementation that creates {@link FlinkCatalog}.
* <p>
* This supports the following catalog configuration options:
* <ul>
* <li><tt>type</tt> - Flink catalog factory key, should be "iceberg"</li>
* <li><tt>catalog-type</tt> - iceberg catalog type, "hive" or "hadoop"</li>
* <li><tt>uri</tt> - the Hive Metastore URI (Hive catalog only)</li>
* <li><tt>clients</tt> - the Hive Client Pool Size (Hive catalog only)</li>
* <li><tt>warehouse</tt> - the warehouse path (Hadoop catalog only)</li>
* <li><tt>default-database</tt> - a database name to use as the default</li>
* <li><tt>base-namespace</tt> - a base namespace as the prefix for all databases (Hadoop catalog only)</li>
* </ul>
* <p>
* To use a custom catalog that is not a Hive or Hadoop catalog, extend this class and override
* {@link #createCatalogLoader(String, Map, Configuration)}.
*/
public class FlinkCatalogFactory implements CatalogFactory {
// Can not just use "type", it conflicts with CATALOG_TYPE.
public static final String ICEBERG_CATALOG_TYPE = "catalog-type";
public static final String ICEBERG_CATALOG_TYPE_HADOOP = "hadoop";
public static final String ICEBERG_CATALOG_TYPE_HIVE = "hive";
public static final String HIVE_URI = "uri";
public static final String HIVE_CLIENT_POOL_SIZE = "clients";
public static final String HIVE_CONF_DIR = "hive-conf-dir";
public static final String WAREHOUSE_LOCATION = "warehouse";
public static final String DEFAULT_DATABASE = "default-database";
public static final String BASE_NAMESPACE = "base-namespace";
/**
* Create an Iceberg {@link org.apache.iceberg.catalog.Catalog} loader to be used by this Flink catalog adapter.
*
* @param name Flink's catalog name
* @param properties Flink's catalog properties
* @param hadoopConf Hadoop configuration for catalog
* @return an Iceberg catalog loader
*/
protected CatalogLoader createCatalogLoader(String name, Map<String, String> properties, Configuration hadoopConf) {
String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
if (catalogImpl != null) {
return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
}
String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
switch (catalogType.toLowerCase(Locale.ENGLISH)) {
case ICEBERG_CATALOG_TYPE_HIVE:
        // The values of the properties 'uri', 'warehouse' and 'hive-conf-dir' are allowed to be null; in that case
        // they fall back to the values parsed from the Hadoop configuration loaded from the classpath.
String uri = properties.get(HIVE_URI);
String warehouse = properties.get(WAREHOUSE_LOCATION);
int clientPoolSize = Integer.parseInt(properties.getOrDefault(HIVE_CLIENT_POOL_SIZE, "2"));
String hiveConfDir = properties.get(HIVE_CONF_DIR);
Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir);
return CatalogLoader.hive(name, newHadoopConf, uri, warehouse, clientPoolSize);
case ICEBERG_CATALOG_TYPE_HADOOP:
String warehouseLocation = properties.get(WAREHOUSE_LOCATION);
return CatalogLoader.hadoop(name, hadoopConf, warehouseLocation);
default:
throw new UnsupportedOperationException("Unknown catalog type: " + catalogType);
}
}
@Override
public Map<String, String> requiredContext() {
Map<String, String> context = Maps.newHashMap();
context.put(CatalogDescriptorValidator.CATALOG_TYPE, "iceberg");
context.put(CatalogDescriptorValidator.CATALOG_PROPERTY_VERSION, "1");
return context;
}
@Override
public List<String> supportedProperties() {
List<String> properties = Lists.newArrayList();
properties.add(ICEBERG_CATALOG_TYPE);
properties.add(HIVE_URI);
properties.add(HIVE_CLIENT_POOL_SIZE);
properties.add(HIVE_CONF_DIR);
properties.add(WAREHOUSE_LOCATION);
properties.add(DEFAULT_DATABASE);
properties.add(BASE_NAMESPACE);
return properties;
}
@Override
public Catalog createCatalog(String name, Map<String, String> properties) {
return createCatalog(name, properties, clusterHadoopConf());
}
protected Catalog createCatalog(String name, Map<String, String> properties, Configuration hadoopConf) {
CatalogLoader catalogLoader = createCatalogLoader(name, properties, hadoopConf);
String defaultDatabase = properties.getOrDefault(DEFAULT_DATABASE, "default");
String[] baseNamespace = properties.containsKey(BASE_NAMESPACE) ?
Splitter.on('.').splitToList(properties.get(BASE_NAMESPACE)).toArray(new String[0]) :
new String[0];
boolean cacheEnabled = Boolean.parseBoolean(properties.getOrDefault("cache-enabled", "true"));
return new FlinkCatalog(name, defaultDatabase, baseNamespace, catalogLoader, cacheEnabled);
}
private static Configuration mergeHiveConf(Configuration hadoopConf, String hiveConfDir) {
Configuration newConf = new Configuration(hadoopConf);
if (!Strings.isNullOrEmpty(hiveConfDir)) {
Preconditions.checkState(Files.exists(Paths.get(hiveConfDir, "hive-site.xml")),
"There should be a hive-site.xml file under the directory %s", hiveConfDir);
newConf.addResource(new Path(hiveConfDir, "hive-site.xml"));
} else {
      // If the hive-site.xml path isn't provided explicitly, it will try to load the resource from the classpath.
      // If the configuration file still can't be loaded, HiveCatalog will throw an exception.
URL configFile = CatalogLoader.class.getClassLoader().getResource("hive-site.xml");
if (configFile != null) {
newConf.addResource(configFile);
}
}
return newConf;
}
public static Configuration clusterHadoopConf() {
return HadoopUtils.getHadoopConfiguration(GlobalConfiguration.loadConfiguration());
}
}
| 1 | 26,606 | Nit: leaving these in place would have reduce the number of files that this needed to touch, and avoided a possible problem removing public fields. I don't think it's worth blocking for this change, but we like to keep patches as small as possible by not breaking references like these. | apache-iceberg | java |
@@ -444,6 +444,7 @@ class AdminController extends Controller
*/
protected function prePersistEntity($entity)
{
+        @trigger_error(sprintf('The %s is deprecated since EasyAdmin 1.x and will be removed in version 2.0. Use persistEntity instead.', __METHOD__), E_USER_DEPRECATED);
}
/** | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace EasyCorp\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Exception\ForeignKeyConstraintViolationException;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\QueryBuilder;
use EasyCorp\Bundle\EasyAdminBundle\Event\EasyAdminEvents;
use EasyCorp\Bundle\EasyAdminBundle\Exception\EntityRemoveException;
use EasyCorp\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
use EasyCorp\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException;
use EasyCorp\Bundle\EasyAdminBundle\Exception\UndefinedEntityException;
use EasyCorp\Bundle\EasyAdminBundle\Form\Util\LegacyFormHelper;
use Pagerfanta\Pagerfanta;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
/**
* The controller used to render all the default EasyAdmin actions.
*
* @author Javier Eguiluz <[email protected]>
*/
class AdminController extends Controller
{
/** @var array The full configuration of the entire backend */
protected $config;
/** @var array The full configuration of the current entity */
protected $entity = array();
/** @var Request The instance of the current Symfony request */
protected $request;
/** @var EntityManager The Doctrine entity manager for the current entity */
protected $em;
/**
* @Route("/", name="easyadmin")
* @Route("/", name="admin")
*
* The 'admin' route is deprecated since version 1.8.0 and it will be removed in 2.0.
*
* @param Request $request
*
* @return RedirectResponse|Response
*/
public function indexAction(Request $request)
{
$this->initialize($request);
if (null === $request->query->get('entity')) {
return $this->redirectToBackendHomepage();
}
$action = $request->query->get('action', 'list');
if (!$this->isActionAllowed($action)) {
throw new ForbiddenActionException(array('action' => $action, 'entity_name' => $this->entity['name']));
}
return $this->executeDynamicMethod($action.'<EntityName>Action');
}
/**
* Utility method which initializes the configuration of the entity on which
* the user is performing the action.
*
* @param Request $request
*/
protected function initialize(Request $request)
{
$this->dispatch(EasyAdminEvents::PRE_INITIALIZE);
$this->config = $this->get('easyadmin.config.manager')->getBackendConfig();
if (0 === count($this->config['entities'])) {
throw new NoEntitiesConfiguredException();
}
// this condition happens when accessing the backend homepage and before
// redirecting to the default page set as the homepage
if (null === $entityName = $request->query->get('entity')) {
return;
}
if (!array_key_exists($entityName, $this->config['entities'])) {
throw new UndefinedEntityException(array('entity_name' => $entityName));
}
$this->entity = $this->get('easyadmin.config.manager')->getEntityConfiguration($entityName);
$action = $request->query->get('action', 'list');
if (!$request->query->has('sortField')) {
$sortField = isset($this->entity[$action]['sort']['field']) ? $this->entity[$action]['sort']['field'] : $this->entity['primary_key_field_name'];
$request->query->set('sortField', $sortField);
}
if (!$request->query->has('sortDirection')) {
$sortDirection = isset($this->entity[$action]['sort']['direction']) ? $this->entity[$action]['sort']['direction'] : 'DESC';
$request->query->set('sortDirection', $sortDirection);
}
$this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']);
$this->request = $request;
$this->dispatch(EasyAdminEvents::POST_INITIALIZE);
}
protected function dispatch($eventName, array $arguments = array())
{
$arguments = array_replace(array(
'config' => $this->config,
'em' => $this->em,
'entity' => $this->entity,
'request' => $this->request,
), $arguments);
$subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity'];
$event = new GenericEvent($subject, $arguments);
$this->get('event_dispatcher')->dispatch($eventName, $event);
}
/**
* The method that returns the values displayed by an autocomplete field
* based on the user's input.
*
* @return JsonResponse
*/
protected function autocompleteAction()
{
$results = $this->get('easyadmin.autocomplete')->find(
$this->request->query->get('entity'),
$this->request->query->get('query'),
$this->request->query->get('page', 1)
);
return new JsonResponse($results);
}
/**
* The method that is executed when the user performs a 'list' action on an entity.
*
* @return Response
*/
protected function listAction()
{
$this->dispatch(EasyAdminEvents::PRE_LIST);
$fields = $this->entity['list']['fields'];
$paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->entity['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'), $this->entity['list']['dql_filter']);
$this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator));
$parameters = array(
'paginator' => $paginator,
'fields' => $fields,
'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(),
);
return $this->executeDynamicMethod('render<EntityName>Template', array('list', $this->entity['templates']['list'], $parameters));
}
/**
* The method that is executed when the user performs a 'edit' action on an entity.
*
* @return Response|RedirectResponse
*/
protected function editAction()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
if ($this->request->isXmlHttpRequest() && $property = $this->request->query->get('property')) {
$newValue = 'true' === mb_strtolower($this->request->query->get('newValue'));
$fieldsMetadata = $this->entity['list']['fields'];
if (!isset($fieldsMetadata[$property]) || 'toggle' !== $fieldsMetadata[$property]['dataType']) {
throw new \RuntimeException(sprintf('The type of the "%s" property is not "toggle".', $property));
}
$this->updateEntityProperty($entity, $property, $newValue);
// cast to integer instead of string to avoid sending empty responses for 'false'
return new Response((int) $newValue);
}
$fields = $this->entity['edit']['fields'];
$editForm = $this->executeDynamicMethod('create<EntityName>EditForm', array($entity, $fields));
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$editForm->handleRequest($this->request);
if ($editForm->isSubmitted() && $editForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity));
$this->executeDynamicMethod('preUpdate<EntityName>Entity', array($entity));
$this->executeDynamicMethod('update<EntityName>Entity', array($entity));
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity));
return $this->redirectToReferrer();
}
$this->dispatch(EasyAdminEvents::POST_EDIT);
$parameters = array(
'form' => $editForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
'delete_form' => $deleteForm->createView(),
);
return $this->executeDynamicMethod('render<EntityName>Template', array('edit', $this->entity['templates']['edit'], $parameters));
}
/**
* The method that is executed when the user performs a 'show' action on an entity.
*
* @return Response
*/
protected function showAction()
{
$this->dispatch(EasyAdminEvents::PRE_SHOW);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['show']['fields'];
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$this->dispatch(EasyAdminEvents::POST_SHOW, array(
'deleteForm' => $deleteForm,
'fields' => $fields,
'entity' => $entity,
));
$parameters = array(
'entity' => $entity,
'fields' => $fields,
'delete_form' => $deleteForm->createView(),
);
return $this->executeDynamicMethod('render<EntityName>Template', array('show', $this->entity['templates']['show'], $parameters));
}
/**
* The method that is executed when the user performs a 'new' action on an entity.
*
* @return Response|RedirectResponse
*/
protected function newAction()
{
$this->dispatch(EasyAdminEvents::PRE_NEW);
$entity = $this->executeDynamicMethod('createNew<EntityName>Entity');
$easyadmin = $this->request->attributes->get('easyadmin');
$easyadmin['item'] = $entity;
$this->request->attributes->set('easyadmin', $easyadmin);
$fields = $this->entity['new']['fields'];
$newForm = $this->executeDynamicMethod('create<EntityName>NewForm', array($entity, $fields));
$newForm->handleRequest($this->request);
if ($newForm->isSubmitted() && $newForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity));
$this->executeDynamicMethod('prePersist<EntityName>Entity', array($entity));
$this->executeDynamicMethod('persist<EntityName>Entity', array($entity));
$this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity));
return $this->redirectToReferrer();
}
$this->dispatch(EasyAdminEvents::POST_NEW, array(
'entity_fields' => $fields,
'form' => $newForm,
'entity' => $entity,
));
$parameters = array(
'form' => $newForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
);
return $this->executeDynamicMethod('render<EntityName>Template', array('new', $this->entity['templates']['new'], $parameters));
}
/**
* The method that is executed when the user performs a 'delete' action to
* remove any entity.
*
* @return RedirectResponse
*/
protected function deleteAction()
{
$this->dispatch(EasyAdminEvents::PRE_DELETE);
if ('DELETE' !== $this->request->getMethod()) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$id = $this->request->query->get('id');
$form = $this->createDeleteForm($this->entity['name'], $id);
$form->handleRequest($this->request);
if ($form->isSubmitted() && $form->isValid()) {
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity));
$this->executeDynamicMethod('preRemove<EntityName>Entity', array($entity));
try {
$this->executeDynamicMethod('remove<EntityName>Entity', array($entity));
} catch (ForeignKeyConstraintViolationException $e) {
throw new EntityRemoveException(array('entity_name' => $this->entity['name'], 'message' => $e->getMessage()));
}
$this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity));
}
$this->dispatch(EasyAdminEvents::POST_DELETE);
return $this->redirectToReferrer();
}
/**
* The method that is executed when the user performs a query on an entity.
*
* @return Response
*/
protected function searchAction()
{
$this->dispatch(EasyAdminEvents::PRE_SEARCH);
$query = trim($this->request->query->get('query'));
// if the search query is empty, redirect to the 'list' action
if ('' === $query) {
$queryParameters = array_replace($this->request->query->all(), array('action' => 'list', 'query' => null));
$queryParameters = array_filter($queryParameters);
return $this->redirect($this->get('router')->generate('easyadmin', $queryParameters));
}
$searchableFields = $this->entity['search']['fields'];
$paginator = $this->findBy(
$this->entity['class'],
$query,
$searchableFields,
$this->request->query->get('page', 1),
$this->entity['list']['max_results'],
isset($this->entity['search']['sort']['field']) ? $this->entity['search']['sort']['field'] : $this->request->query->get('sortField'),
isset($this->entity['search']['sort']['direction']) ? $this->entity['search']['sort']['direction'] : $this->request->query->get('sortDirection'),
$this->entity['search']['dql_filter']
);
$fields = $this->entity['list']['fields'];
$this->dispatch(EasyAdminEvents::POST_SEARCH, array(
'fields' => $fields,
'paginator' => $paginator,
));
$parameters = array(
'paginator' => $paginator,
'fields' => $fields,
'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(),
);
return $this->executeDynamicMethod('render<EntityName>Template', array('search', $this->entity['templates']['list'], $parameters));
}
/**
* It updates the value of some property of some entity to the new given value.
*
* @param mixed $entity The instance of the entity to modify
* @param string $property The name of the property to change
* @param bool $value The new value of the property
*
* @throws \RuntimeException
*/
protected function updateEntityProperty($entity, $property, $value)
{
$entityConfig = $this->entity;
// the method_exists() check is needed because Symfony 2.3 doesn't have isWritable() method
if (method_exists($this->get('easy_admin.property_accessor'), 'isWritable') && !$this->get('easy_admin.property_accessor')->isWritable($entity, $property)) {
throw new \RuntimeException(sprintf('The "%s" property of the "%s" entity is not writable.', $property, $entityConfig['name']));
}
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $value));
$this->get('easy_admin.property_accessor')->setValue($entity, $property, $value);
$this->executeDynamicMethod('preUpdate<EntityName>Entity', array($entity));
$this->em->persist($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $value));
$this->dispatch(EasyAdminEvents::POST_EDIT);
}
/**
* Creates a new object of the current managed entity.
* This method is mostly here for override convenience, because it allows
     * the user to use their own method to customize the entity instantiation.
*
* @return object
*/
protected function createNewEntity()
{
$entityFullyQualifiedClassName = $this->entity['class'];
return new $entityFullyQualifiedClassName();
}
/**
* Allows applications to modify the entity associated with the item being
* created before persisting it.
*
* @param object $entity
*/
protected function prePersistEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* created while persisting it.
*
* @param object $entity
*/
protected function persistEntity($entity)
{
$this->em->persist($entity);
$this->em->flush();
}
/**
* Allows applications to modify the entity associated with the item being
* edited before persisting it.
*
* @param object $entity
*/
protected function preUpdateEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* edited before updating it.
*
* @param object $entity
*/
protected function updateEntity($entity)
{
$this->em->flush();
}
/**
* Allows applications to modify the entity associated with the item being
* deleted before removing it.
*
* @param object $entity
*/
protected function preRemoveEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* deleted before removing it.
*
* @param object $entity
*/
protected function removeEntity($entity)
{
$this->em->remove($entity);
$this->em->flush();
}
/**
* Performs a database query to get all the records related to the given
* entity. It supports pagination and field sorting.
*
* @param string $entityClass
* @param int $page
* @param int $maxPerPage
* @param string|null $sortField
* @param string|null $sortDirection
* @param string|null $dqlFilter
*
* @return Pagerfanta The paginated query results
*/
protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null, $dqlFilter = null)
{
if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) {
$sortDirection = 'DESC';
}
$queryBuilder = $this->executeDynamicMethod('create<EntityName>ListQueryBuilder', array($entityClass, $sortDirection, $sortField, $dqlFilter));
$this->dispatch(EasyAdminEvents::POST_LIST_QUERY_BUILDER, array(
'query_builder' => $queryBuilder,
'sort_field' => $sortField,
'sort_direction' => $sortDirection,
));
return $this->get('easyadmin.paginator')->createOrmPaginator($queryBuilder, $page, $maxPerPage);
}
/**
* Creates Query Builder instance for all the records.
*
* @param string $entityClass
* @param string $sortDirection
* @param string|null $sortField
* @param string|null $dqlFilter
*
* @return QueryBuilder The Query Builder instance
*/
protected function createListQueryBuilder($entityClass, $sortDirection, $sortField = null, $dqlFilter = null)
{
return $this->get('easyadmin.query_builder')->createListQueryBuilder($this->entity, $sortField, $sortDirection, $dqlFilter);
}
/**
* Performs a database query based on the search query provided by the user.
* It supports pagination and field sorting.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
* @param int $page
* @param int $maxPerPage
* @param string|null $sortField
* @param string|null $sortDirection
* @param string|null $dqlFilter
*
* @return Pagerfanta The paginated query results
*/
protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null, $dqlFilter = null)
{
$queryBuilder = $this->executeDynamicMethod('create<EntityName>SearchQueryBuilder', array($entityClass, $searchQuery, $searchableFields, $sortField, $sortDirection, $dqlFilter));
$this->dispatch(EasyAdminEvents::POST_SEARCH_QUERY_BUILDER, array(
'query_builder' => $queryBuilder,
'search_query' => $searchQuery,
'searchable_fields' => $searchableFields,
));
return $this->get('easyadmin.paginator')->createOrmPaginator($queryBuilder, $page, $maxPerPage);
}
/**
* Creates Query Builder instance for search query.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
* @param string|null $sortField
* @param string|null $sortDirection
* @param string|null $dqlFilter
*
* @return QueryBuilder The Query Builder instance
*/
protected function createSearchQueryBuilder($entityClass, $searchQuery, array $searchableFields, $sortField = null, $sortDirection = null, $dqlFilter = null)
{
return $this->get('easyadmin.query_builder')->createSearchQueryBuilder($this->entity, $searchQuery, $sortField, $sortDirection, $dqlFilter);
}
/**
* Creates the form used to edit an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createEditForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'edit');
}
/**
* Creates the form used to create an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createNewForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'new');
}
/**
* Creates the form builder of the form used to create or edit the given entity.
*
* @param object $entity
* @param string $view The name of the view where this form is used ('new' or 'edit')
*
* @return FormBuilder
*/
protected function createEntityFormBuilder($entity, $view)
{
$formOptions = $this->executeDynamicMethod('get<EntityName>EntityFormOptions', array($entity, $view));
return $this->get('form.factory')->createNamedBuilder(mb_strtolower($this->entity['name']), LegacyFormHelper::getType('easyadmin'), $entity, $formOptions);
}
/**
* Retrieves the list of form options before sending them to the form builder.
* This allows adding dynamic logic to the default form options.
*
* @param object $entity
* @param string $view
*
* @return array
*/
protected function getEntityFormOptions($entity, $view)
{
$formOptions = $this->entity[$view]['form_options'];
$formOptions['entity'] = $this->entity['name'];
$formOptions['view'] = $view;
return $formOptions;
}
/**
* Creates the form object used to create or edit the given entity.
*
* @param object $entity
* @param array $entityProperties
* @param string $view
*
* @return FormInterface
*
* @throws \Exception
*/
protected function createEntityForm($entity, array $entityProperties, $view)
{
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) {
$form = $this->{$customMethodName}($entity, $entityProperties, $view);
if (!$form instanceof FormInterface) {
throw new \UnexpectedValueException(sprintf(
'The "%s" method must return a FormInterface, "%s" given.',
$customMethodName, is_object($form) ? get_class($form) : gettype($form)
));
}
return $form;
}
$formBuilder = $this->executeDynamicMethod('create<EntityName>EntityFormBuilder', array($entity, $view));
if (!$formBuilder instanceof FormBuilderInterface) {
throw new \UnexpectedValueException(sprintf(
'The "%s" method must return a FormBuilderInterface, "%s" given.',
'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder)
));
}
return $formBuilder->getForm();
}
/**
* Creates the form used to delete an entity. It must be a form because
     * the deletion of the entity is always performed with the 'DELETE' HTTP method,
     * which requires a form to work in current browsers.
*
* @param string $entityName
* @param int|string $entityId When reusing the delete form for multiple entities, a pattern string is passed instead of an integer
*
* @return Form|FormInterface
*/
protected function createDeleteForm($entityName, $entityId)
{
/** @var FormBuilder $formBuilder */
$formBuilder = $this->get('form.factory')->createNamedBuilder('delete_form')
->setAction($this->generateUrl('easyadmin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId)))
->setMethod('DELETE')
;
$formBuilder->add('submit', LegacyFormHelper::getType('submit'), array('label' => 'delete_modal.action', 'translation_domain' => 'EasyAdminBundle'));
// needed to avoid submitting empty delete forms (see issue #1409)
$formBuilder->add('_easyadmin_delete_flag', LegacyFormHelper::getType('hidden'), array('data' => '1'));
return $formBuilder->getForm();
}
/**
* Utility method that checks if the given action is allowed for
* the current entity.
*
* @param string $actionName
*
* @return bool
*/
protected function isActionAllowed($actionName)
{
return false === in_array($actionName, $this->entity['disabled_actions'], true);
}
/**
* Utility shortcut to render an error when the requested action is not allowed
* for the given entity.
*
* @param string $action
*
* @deprecated Use the ForbiddenException instead of this method
*
* @return Response
*/
protected function renderForbiddenActionError($action)
{
return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), new Response('', 403));
}
/**
* Given a method name pattern, it looks for the customized version of that
* method (based on the entity name) and executes it. If the custom method
* does not exist, it executes the regular method.
*
* For example:
* executeDynamicMethod('create<EntityName>Entity') and the entity name is 'User'
* if 'createUserEntity()' exists, execute it; otherwise execute 'createEntity()'
*
* @param string $methodNamePattern The pattern of the method name (dynamic parts are enclosed with <> angle brackets)
* @param array $arguments The arguments passed to the executed method
*
* @return mixed
*/
protected function executeDynamicMethod($methodNamePattern, array $arguments = array())
{
$methodName = str_replace('<EntityName>', $this->entity['name'], $methodNamePattern);
if (!is_callable(array($this, $methodName))) {
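            // fall back to the generic method when there is no entity-specific override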
$methodName = str_replace('<EntityName>', '', $methodNamePattern);
}
return call_user_func_array(array($this, $methodName), $arguments);
}
/**
* Generates the backend homepage and redirects to it.
*/
protected function redirectToBackendHomepage()
{
$homepageConfig = $this->config['homepage'];
$url = isset($homepageConfig['url'])
? $homepageConfig['url']
: $this->get('router')->generate($homepageConfig['route'], $homepageConfig['params']);
return $this->redirect($url);
}
/**
* It renders the main CSS applied to the backend design. This controller
     * allows generating dynamic CSS files that use variables without the need
* to set up a CSS preprocessing toolchain.
*
* @deprecated The CSS styles are no longer rendered at runtime but preprocessed during container compilation. Use the $container['easyadmin.config']['_internal']['custom_css'] variable instead
*/
public function renderCssAction()
{
}
/**
* @return RedirectResponse
*/
protected function redirectToReferrer()
{
$referrerUrl = $this->request->query->get('referer', '');
if (!empty($referrerUrl)) {
return $this->redirect(urldecode($referrerUrl));
}
if ($this->isActionAllowed('list')) {
return $this->redirect($this->generateUrl('easyadmin', array(
'action' => 'list', 'entity' => $this->entity['name'],
)));
}
return $this->redirectToBackendHomepage();
}
/**
* Used to add/modify/remove parameters before passing them to the Twig template.
* Instead of defining a render method per action (list, show, search, etc.) use
* the $actionName argument to discriminate between actions.
*
* @param string $actionName The name of the current action (list, show, new, etc.)
* @param string $templatePath The path of the Twig template to render
* @param array $parameters The parameters passed to the template
*
* @return Response
*/
protected function renderTemplate($actionName, $templatePath, array $parameters = array())
{
return $this->render($templatePath, $parameters);
}
}
class_alias('EasyCorp\Bundle\EasyAdminBundle\Controller\AdminController', 'JavierEguiluz\Bundle\EasyAdminBundle\Controller\AdminController', false);
| 1 | 11,342 | `The %s is deprecated...` -> `The %s method is deprecated...` | EasyCorp-EasyAdminBundle | php |
@@ -19,6 +19,9 @@ import (
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
)
+// TODO: If this is gonna stay, it should move to specs-actors
+const BlockMessageLimit = 512
+
// Block is a block in the blockchain.
type Block struct {
// control field for encoding struct as an array | 1 | package block
import (
"encoding/json"
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
fbig "github.com/filecoin-project/specs-actors/actors/abi/big"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
node "github.com/ipfs/go-ipld-format"
"github.com/filecoin-project/go-filecoin/internal/pkg/constants"
"github.com/filecoin-project/go-filecoin/internal/pkg/crypto"
"github.com/filecoin-project/go-filecoin/internal/pkg/drand"
e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
)
// Block is a block in the blockchain.
type Block struct {
// control field for encoding struct as an array
_ struct{} `cbor:",toarray"`
// Miner is the address of the miner actor that mined this block.
Miner address.Address `json:"miner"`
// Ticket is the ticket submitted with this block.
Ticket Ticket `json:"ticket"`
// ElectionProof is the vrf proof giving this block's miner authoring rights
ElectionProof *crypto.ElectionProof
// BeaconEntries contain the verifiable oracle randomness used to elect
// this block's author leader
BeaconEntries []*drand.Entry
// PoStProofs are the winning post proofs
PoStProofs []PoStProof `json:"PoStProofs"`
// Parents is the set of parents this block was based on. Typically one,
// but can be several in the case where there were multiple winning ticket-
// holders for an epoch.
Parents TipSetKey `json:"parents"`
// ParentWeight is the aggregate chain weight of the parent set.
ParentWeight fbig.Int `json:"parentWeight"`
// Height is the chain height of this block.
Height abi.ChainEpoch `json:"height"`
// StateRoot is the CID of the root of the state tree after application of the messages in the parent tipset
// to the parent tipset's state root.
StateRoot e.Cid `json:"stateRoot,omitempty"`
// MessageReceipts is a list of receipts corresponding to the application of the messages in the parent tipset
// to the parent tipset's state root (corresponding to this block's StateRoot).
MessageReceipts e.Cid `json:"messageReceipts,omitempty"`
// Messages is the set of messages included in this block
Messages e.Cid `json:"messages,omitempty"`
// The aggregate signature of all BLS signed messages in the block
BLSAggregateSig *crypto.Signature `json:"blsAggregateSig"`
// The timestamp, in seconds since the Unix epoch, at which this block was created.
Timestamp uint64 `json:"timestamp"`
// The signature of the miner's worker key over the block
BlockSig *crypto.Signature `json:"blocksig"`
// ForkSignaling is extra data used by miners to communicate
ForkSignaling uint64
cachedCid cid.Cid
cachedBytes []byte
}
// IndexMessagesField is the message field position in the encoded block
const IndexMessagesField = 10
// IndexParentsField is the parents field position in the encoded block
const IndexParentsField = 5
// Cid returns the content id of this block.
func (b *Block) Cid() cid.Cid {
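	// compute the CID lazily, caching both the encoded bytes and the resulting CID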
if b.cachedCid == cid.Undef {
if b.cachedBytes == nil {
bytes, err := encoding.Encode(b)
if err != nil {
panic(err)
}
b.cachedBytes = bytes
}
c, err := constants.DefaultCidBuilder.Sum(b.cachedBytes)
if err != nil {
panic(err)
}
b.cachedCid = c
}
return b.cachedCid
}
// ToNode converts the Block to an IPLD node.
func (b *Block) ToNode() node.Node {
data, err := encoding.Encode(b)
if err != nil {
panic(err)
}
c, err := constants.DefaultCidBuilder.Sum(data)
if err != nil {
panic(err)
}
blk, err := blocks.NewBlockWithCid(data, c)
if err != nil {
panic(err)
}
node, err := cbor.DecodeBlock(blk)
if err != nil {
panic(err)
}
return node
}
func (b *Block) String() string {
errStr := "(error encoding Block)"
cid := b.Cid()
js, err := json.MarshalIndent(b, "", " ")
if err != nil {
return errStr
}
return fmt.Sprintf("Block cid=[%v]: %s", cid, string(js))
}
// DecodeBlock decodes raw cbor bytes into a Block.
func DecodeBlock(b []byte) (*Block, error) {
var out Block
if err := encoding.Decode(b, &out); err != nil {
return nil, err
}
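	// keep the raw bytes so Cid() can reuse them instead of re-encoding the block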
out.cachedBytes = b
return &out, nil
}
// Equals returns true if the Block is equal to other.
func (b *Block) Equals(other *Block) bool {
return b.Cid().Equals(other.Cid())
}
// SignatureData returns the block's bytes with a null signature field for
// signature creation and verification
func (b *Block) SignatureData() []byte {
tmp := &Block{
Miner: b.Miner,
Ticket: b.Ticket,
ElectionProof: b.ElectionProof,
Parents: b.Parents,
ParentWeight: b.ParentWeight,
Height: b.Height,
Messages: b.Messages,
StateRoot: b.StateRoot,
MessageReceipts: b.MessageReceipts,
PoStProofs: b.PoStProofs,
BeaconEntries: b.BeaconEntries,
Timestamp: b.Timestamp,
BLSAggregateSig: b.BLSAggregateSig,
ForkSignaling: b.ForkSignaling,
// BlockSig omitted
}
return tmp.ToNode().RawData()
}
| 1 | 23,749 | No, it wouldn't go there because that code won't reference or enforce it. Here is ok for now. | filecoin-project-venus | go |
@@ -127,7 +127,7 @@ namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Client
}
catch (Exception exception)
{
- var completeArgs = new TestRunCompleteEventArgs(null, false, true, exception, new Collection<AttachmentSet>(), TimeSpan.Zero);
+ var completeArgs = new TestRunCompleteEventArgs(null, false, false, exception, new Collection<AttachmentSet>(), TimeSpan.Zero);
eventHandler.HandleLogMessage(TestMessageLevel.Error, exception.Message);
eventHandler.HandleTestRunComplete(completeArgs, null, null, null);
} | 1 | // Copyright (c) Microsoft. All rights reserved.
namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Client
{
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
using Microsoft.VisualStudio.TestPlatform.Common.ExtensionFramework;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Interfaces;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine.ClientProtocol;
using Constants = Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Constants;
/// <summary>
/// Orchestrates test execution operations for the engine communicating with the client.
/// </summary>
internal class ProxyExecutionManager : ProxyOperationManager, IProxyExecutionManager
{
private readonly ITestHostManager testHostManager;
#region Constructors
/// <summary>
/// Initializes a new instance of the <see cref="ProxyExecutionManager"/> class.
/// </summary>
/// <param name="testHostManager">Test host manager for this proxy.</param>
public ProxyExecutionManager(ITestHostManager testHostManager) : this(new TestRequestSender(), testHostManager, Constants.ClientConnectionTimeout)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="ProxyExecutionManager"/> class.
/// Constructor with Dependency injection. Used for unit testing.
/// </summary>
/// <param name="requestSender">Request Sender instance</param>
/// <param name="testHostManager">Test host manager instance</param>
/// <param name="clientConnectionTimeout">The client Connection Timeout</param>
internal ProxyExecutionManager(ITestRequestSender requestSender, ITestHostManager testHostManager, int clientConnectionTimeout)
: base(requestSender, testHostManager, clientConnectionTimeout)
{
this.testHostManager = testHostManager;
}
#endregion
#region IProxyExecutionManager implementation.
/// <summary>
        /// Ensures that the Execution component of the engine is ready for execution, usually by loading extensions.
/// </summary>
public virtual void Initialize()
{
if (this.testHostManager.Shared)
{
// Shared test hosts don't require test source information to launch. Start them early
                // to allow failing fast.
EqtTrace.Verbose("ProxyExecutionManager: Test host is shared. SetupChannel it early.");
this.InitializeExtensions(Enumerable.Empty<string>());
}
}
/// <summary>
/// Starts the test run
/// </summary>
/// <param name="testRunCriteria"> The settings/options for the test run. </param>
/// <param name="eventHandler"> EventHandler for handling execution events from Engine. </param>
/// <returns> The process id of the runner executing tests. </returns>
public virtual int StartTestRun(TestRunCriteria testRunCriteria, ITestRunEventsHandler eventHandler)
{
try
{
if (!this.testHostManager.Shared)
{
// Non shared test host requires test source information to launch. Provide the sources
// information and create the channel.
EqtTrace.Verbose("ProxyExecutionManager: Test host is non shared. Lazy initialize.");
var testSources = testRunCriteria.Sources;
// If the test execution is with a test filter, group them by sources
if (testRunCriteria.HasSpecificTests)
{
testSources = testRunCriteria.Tests.GroupBy(tc => tc.Source).Select(g => g.Key);
}
this.InitializeExtensions(testSources);
}
this.SetupChannel(testRunCriteria.Sources);
var executionContext = new TestExecutionContext(
testRunCriteria.FrequencyOfRunStatsChangeEvent,
testRunCriteria.RunStatsChangeEventTimeout,
inIsolation: false,
keepAlive: testRunCriteria.KeepAlive,
isDataCollectionEnabled: false,
areTestCaseLevelEventsRequired: false,
hasTestRun: true,
isDebug: (testRunCriteria.TestHostLauncher != null && testRunCriteria.TestHostLauncher.IsDebug),
testCaseFilter: testRunCriteria.TestCaseFilter);
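                // Send either source-based or test-case-based run criteria to the test host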
if (testRunCriteria.HasSpecificSources)
{
var runRequest = new TestRunCriteriaWithSources(
testRunCriteria.AdapterSourceMap,
testRunCriteria.TestRunSettings,
executionContext);
this.RequestSender.StartTestRun(runRequest, eventHandler);
}
else
{
var runRequest = new TestRunCriteriaWithTests(
testRunCriteria.Tests,
testRunCriteria.TestRunSettings,
executionContext);
this.RequestSender.StartTestRun(runRequest, eventHandler);
}
}
catch (Exception exception)
{
var completeArgs = new TestRunCompleteEventArgs(null, false, true, exception, new Collection<AttachmentSet>(), TimeSpan.Zero);
eventHandler.HandleLogMessage(TestMessageLevel.Error, exception.Message);
eventHandler.HandleTestRunComplete(completeArgs, null, null, null);
}
return 0;
}
/// <summary>
/// Cancels the test run.
/// </summary>
public virtual void Cancel()
{
this.RequestSender.SendTestRunCancel();
}
/// <summary>
/// Aborts the test run.
/// </summary>
public void Abort()
{
this.RequestSender.SendTestRunAbort();
}
#endregion
private void InitializeExtensions(IEnumerable<string> sources)
{
var sourceList = sources.ToList();
var extensions = this.testHostManager.GetTestPlatformExtensions(sourceList).ToList();
if (TestPluginCache.Instance.PathToAdditionalExtensions != null)
{
extensions.AddRange(TestPluginCache.Instance.PathToAdditionalExtensions);
}
// Only send this if needed.
if (extensions.Count > 0)
{
this.SetupChannel(sourceList);
this.RequestSender.InitializeExecution(extensions, TestPluginCache.Instance.LoadOnlyWellKnownExtensions);
}
}
}
} | 1 | 11,370 | How will we show that test run aborted (because of a crash) if we don't set aborted to true? | microsoft-vstest | .cs |
@@ -18,8 +18,8 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Sockets.Internal
{
internal sealed class SocketConnection : TransportConnection
{
- private const int MinAllocBufferSize = 2048;
- public readonly static bool IsWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
+ private static readonly int MinAllocBufferSize = KestrelMemoryPool.MinimumSegmentSize / 2;
+ private static readonly bool IsWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
private readonly Socket _socket;
private readonly PipeScheduler _scheduler; | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Buffers;
using System.Diagnostics;
using System.IO;
using System.IO.Pipelines;
using System.Net;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
using Microsoft.Extensions.Logging;
namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Sockets.Internal
{
internal sealed class SocketConnection : TransportConnection
{
private const int MinAllocBufferSize = 2048;
public readonly static bool IsWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
private readonly Socket _socket;
private readonly PipeScheduler _scheduler;
private readonly ISocketsTrace _trace;
private readonly SocketReceiver _receiver;
private readonly SocketSender _sender;
private volatile bool _aborted;
internal SocketConnection(Socket socket, MemoryPool<byte> memoryPool, PipeScheduler scheduler, ISocketsTrace trace)
{
Debug.Assert(socket != null);
Debug.Assert(memoryPool != null);
Debug.Assert(trace != null);
_socket = socket;
MemoryPool = memoryPool;
_scheduler = scheduler;
_trace = trace;
var localEndPoint = (IPEndPoint)_socket.LocalEndPoint;
var remoteEndPoint = (IPEndPoint)_socket.RemoteEndPoint;
LocalAddress = localEndPoint.Address;
LocalPort = localEndPoint.Port;
RemoteAddress = remoteEndPoint.Address;
RemotePort = remoteEndPoint.Port;
// On *nix platforms, Sockets already dispatches to the ThreadPool.
var awaiterScheduler = IsWindows ? _scheduler : PipeScheduler.Inline;
_receiver = new SocketReceiver(_socket, awaiterScheduler);
_sender = new SocketSender(_socket, awaiterScheduler);
}
public override MemoryPool<byte> MemoryPool { get; }
public override PipeScheduler InputWriterScheduler => _scheduler;
public override PipeScheduler OutputReaderScheduler => _scheduler;
public async Task StartAsync(IConnectionDispatcher connectionDispatcher)
{
Exception sendError = null;
try
{
connectionDispatcher.OnConnection(this);
// Spawn send and receive logic
Task receiveTask = DoReceive();
Task<Exception> sendTask = DoSend();
                // If the sending task completes first, close the receiving side.
                // We don't need to do this in the other direction because Kestrel
                // will trigger the output closing once the input is complete.
if (await Task.WhenAny(receiveTask, sendTask) == sendTask)
{
// Tell the reader it's being aborted
_socket.Dispose();
}
// Now wait for both to complete
await receiveTask;
sendError = await sendTask;
                // Dispose the socket (should be a no-op if already called)
_socket.Dispose();
}
catch (Exception ex)
{
_trace.LogError(0, ex, $"Unexpected exception in {nameof(SocketConnection)}.{nameof(StartAsync)}.");
}
finally
{
// Complete the output after disposing the socket
Output.Complete(sendError);
}
}
private async Task DoReceive()
{
Exception error = null;
try
{
await ProcessReceives();
}
catch (SocketException ex) when (ex.SocketErrorCode == SocketError.ConnectionReset)
{
error = new ConnectionResetException(ex.Message, ex);
_trace.ConnectionReset(ConnectionId);
}
catch (SocketException ex) when (ex.SocketErrorCode == SocketError.OperationAborted ||
ex.SocketErrorCode == SocketError.ConnectionAborted ||
ex.SocketErrorCode == SocketError.Interrupted ||
ex.SocketErrorCode == SocketError.InvalidArgument)
{
if (!_aborted)
{
// Calling Dispose after ReceiveAsync can cause an "InvalidArgument" error on *nix.
error = new ConnectionAbortedException();
_trace.ConnectionError(ConnectionId, error);
}
}
catch (ObjectDisposedException)
{
if (!_aborted)
{
error = new ConnectionAbortedException();
_trace.ConnectionError(ConnectionId, error);
}
}
catch (IOException ex)
{
error = ex;
_trace.ConnectionError(ConnectionId, error);
}
catch (Exception ex)
{
error = new IOException(ex.Message, ex);
_trace.ConnectionError(ConnectionId, error);
}
finally
{
if (_aborted)
{
error = error ?? new ConnectionAbortedException();
}
Input.Complete(error);
}
}
private async Task ProcessReceives()
{
while (true)
{
// Ensure we have some reasonable amount of buffer space
var buffer = Input.GetMemory(MinAllocBufferSize);
var bytesReceived = await _receiver.ReceiveAsync(buffer);
if (bytesReceived == 0)
{
// FIN
_trace.ConnectionReadFin(ConnectionId);
break;
}
Input.Advance(bytesReceived);
var flushTask = Input.FlushAsync();
if (!flushTask.IsCompleted)
{
_trace.ConnectionPause(ConnectionId);
await flushTask;
_trace.ConnectionResume(ConnectionId);
}
var result = flushTask.GetAwaiter().GetResult();
if (result.IsCompleted)
{
                    // Pipe consumer is shut down, so we stop writing
break;
}
}
}
private async Task<Exception> DoSend()
{
Exception error = null;
try
{
await ProcessSends();
}
catch (SocketException ex) when (ex.SocketErrorCode == SocketError.OperationAborted)
{
error = null;
}
catch (ObjectDisposedException)
{
error = null;
}
catch (IOException ex)
{
error = ex;
}
catch (Exception ex)
{
error = new IOException(ex.Message, ex);
}
finally
{
// Make sure to close the connection only after the _aborted flag is set.
// Without this, the RequestsCanBeAbortedMidRead test will sometimes fail when
// a BadHttpRequestException is thrown instead of a TaskCanceledException.
_aborted = true;
_trace.ConnectionWriteFin(ConnectionId);
_socket.Shutdown(SocketShutdown.Both);
}
return error;
}
private async Task ProcessSends()
{
while (true)
{
// Wait for data to write from the pipe producer
var result = await Output.ReadAsync();
var buffer = result.Buffer;
if (result.IsCanceled)
{
break;
}
var end = buffer.End;
var isCompleted = result.IsCompleted;
if (!buffer.IsEmpty)
{
await _sender.SendAsync(buffer);
}
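                // Everything up to 'end' has been sent, so mark it as consumed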
Output.AdvanceTo(end);
if (isCompleted)
{
break;
}
}
}
}
}
| 1 | 15,406 | Nit: Might as well make this same change to AdaptedPipeline. | aspnet-KestrelHttpServer | .cs |
@@ -42,12 +42,13 @@ public class IcebergTimestampWithZoneObjectInspector extends AbstractPrimitiveJa
@Override
public OffsetDateTime convert(Object o) {
- return o == null ? null : OffsetDateTime.ofInstant(((Timestamp) o).toInstant(), ZoneOffset.UTC);
+ return o == null ? null : OffsetDateTime.of(((Timestamp) o).toLocalDateTime(), ZoneOffset.UTC);
}
@Override
public Timestamp getPrimitiveJavaObject(Object o) {
- return o == null ? null : Timestamp.from(((OffsetDateTime) o).toInstant());
+ return o == null ? null :
+ Timestamp.valueOf(((OffsetDateTime) o).withOffsetSameInstant(ZoneOffset.UTC).toLocalDateTime());
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.mr.hive.serde.objectinspector;
import java.sql.Timestamp;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveJavaObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
public class IcebergTimestampWithZoneObjectInspector extends AbstractPrimitiveJavaObjectInspector
implements TimestampObjectInspector, WriteObjectInspector {
private static final IcebergTimestampWithZoneObjectInspector INSTANCE = new IcebergTimestampWithZoneObjectInspector();
public static IcebergTimestampWithZoneObjectInspector get() {
return INSTANCE;
}
private IcebergTimestampWithZoneObjectInspector() {
super(TypeInfoFactory.timestampTypeInfo);
}
@Override
public OffsetDateTime convert(Object o) {
return o == null ? null : OffsetDateTime.ofInstant(((Timestamp) o).toInstant(), ZoneOffset.UTC);
}
@Override
public Timestamp getPrimitiveJavaObject(Object o) {
return o == null ? null : Timestamp.from(((OffsetDateTime) o).toInstant());
}
@Override
public TimestampWritable getPrimitiveWritableObject(Object o) {
Timestamp ts = getPrimitiveJavaObject(o);
return ts == null ? null : new TimestampWritable(ts);
}
@Override
public Object copyObject(Object o) {
if (o instanceof Timestamp) {
Timestamp ts = (Timestamp) o;
Timestamp copy = new Timestamp(ts.getTime());
copy.setNanos(ts.getNanos());
return copy;
} else if (o instanceof OffsetDateTime) {
OffsetDateTime odt = (OffsetDateTime) o;
return OffsetDateTime.ofInstant(odt.toInstant(), odt.getOffset());
} else {
return o;
}
}
}
| 1 | 34,260 | Just to clarify: I see that only the hive2 withZone object inspector is changed. Does that mean that the predicate pushdown problem only occurred on hive2? | apache-iceberg | java |
@@ -11,7 +11,7 @@ module Bolt
class ModuleInstaller
class Specs
class ForgeSpec
- NAME_REGEX = %r{\A[a-z][a-z0-9_]*[-/](?<name>[a-z][a-z0-9_]*)\z}.freeze
+ NAME_REGEX = %r{\A[a-zA-Z0-9]+[-/](?<name>[a-z][a-z0-9_]*)\z}.freeze
REQUIRED_KEYS = Set.new(%w[name]).freeze
KNOWN_KEYS = Set.new(%w[name version_requirement]).freeze
| 1 | # frozen_string_literal: true
require 'semantic_puppet'
require 'set'
require 'bolt/error'
# This class represents a Forge module specification.
#
module Bolt
class ModuleInstaller
class Specs
class ForgeSpec
NAME_REGEX = %r{\A[a-z][a-z0-9_]*[-/](?<name>[a-z][a-z0-9_]*)\z}.freeze
REQUIRED_KEYS = Set.new(%w[name]).freeze
KNOWN_KEYS = Set.new(%w[name version_requirement]).freeze
attr_reader :full_name, :name, :semantic_version, :type
def initialize(init_hash)
@full_name, @name = parse_name(init_hash['name'])
@version_requirement, @semantic_version = parse_version_requirement(init_hash['version_requirement'])
@type = :forge
end
def self.implements?(hash)
KNOWN_KEYS.superset?(hash.keys.to_set) && REQUIRED_KEYS.subset?(hash.keys.to_set)
end
# Formats the full name and extracts the module name.
#
private def parse_name(name)
unless (match = name.match(NAME_REGEX))
raise Bolt::ValidationError,
"Invalid name for Forge module specification: #{name}. Name must match "\
"'owner/name', must start with a lowercase letter, and may only include "\
"lowercase letters, digits, and underscores."
end
[name.tr('-', '/'), match[:name]]
end
# Parses the version into a Semantic Puppet version range.
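        # Defaults to accepting any version ('>= 0') when none is specified.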
#
private def parse_version_requirement(version_requirement)
[version_requirement, SemanticPuppet::VersionRange.parse(version_requirement || '>= 0')]
rescue StandardError
raise Bolt::ValidationError,
"Invalid version requirement for Forge module specification #{@full_name}: "\
"#{version_requirement.inspect}"
end
# Returns true if the specification is satisfied by the module.
#
def satisfied_by?(mod)
@type == mod.type &&
@full_name == mod.full_name &&
!mod.version.nil? &&
@semantic_version.cover?(mod.version)
end
# Returns a hash matching the module spec in bolt-project.yaml
#
def to_hash
{
'name' => @full_name,
'version_requirement' => @version_requirement
}.compact
end
# Creates a PuppetfileResolver::Puppetfile::ForgeModule object, which is
# used to generate a graph of resolved modules.
#
def to_resolver_module
require 'puppetfile-resolver'
PuppetfileResolver::Puppetfile::ForgeModule.new(@full_name).tap do |mod|
mod.version = @version_requirement
end
end
end
end
end
end
| 1 | 16,725 | Based on username requirements for forge.puppet.com - only letters and digits permitted. | puppetlabs-bolt | rb |
@@ -73,11 +73,6 @@ public class StructLikeWrapper {
return false;
}
- int len = struct.size();
- if (len != that.struct.size()) {
- return false;
- }
-
return comparator.compare(this.struct, that.struct) == 0;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.util;
import java.util.Comparator;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.JavaHash;
import org.apache.iceberg.types.Types;
/**
* Wrapper to adapt StructLike for use in maps and sets by implementing equals and hashCode.
*/
public class StructLikeWrapper {
public static StructLikeWrapper forType(Types.StructType struct) {
return new StructLikeWrapper(struct);
}
private final Comparator<StructLike> comparator;
private final JavaHash<StructLike> structHash;
private Integer hashCode;
private StructLike struct;
private StructLikeWrapper(Types.StructType type) {
this.comparator = Comparators.forType(type);
this.structHash = JavaHash.forType(type);
this.hashCode = null;
}
public StructLikeWrapper set(StructLike newStruct) {
this.struct = newStruct;
this.hashCode = null;
return this;
}
public StructLike get() {
return struct;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (!(other instanceof StructLikeWrapper)) {
return false;
}
StructLikeWrapper that = (StructLikeWrapper) other;
if (this.struct == that.struct) {
return true;
}
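    // exactly one of the two structs is null here, so they cannot be equal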
if (this.struct == null ^ that.struct == null) {
return false;
}
int len = struct.size();
if (len != that.struct.size()) {
return false;
}
return comparator.compare(this.struct, that.struct) == 0;
}
@Override
public int hashCode() {
if (hashCode == null) {
this.hashCode = structHash.hash(struct);
}
return hashCode;
}
}
| 1 | 23,412 | Was this removed to ignore the extra columns coming from the file projection? | apache-iceberg | java |
@@ -1,3 +1,5 @@
+// +build !windows
+
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logutils
import (
"io"
"log/syslog"
"os"
"path"
"github.com/mipearson/rfw"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/libcalico-go/lib/logutils"
)
var (
counterDroppedLogs = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_logs_dropped",
Help: "Number of logs dropped because the output stream was blocked.",
})
counterLogErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_log_errors",
Help: "Number of errors encountered while logging.",
})
)
func init() {
prometheus.MustRegister(
counterDroppedLogs,
counterLogErrors,
)
}
const logQueueSize = 100
// ConfigureEarlyLogging installs our logging adapters, and enables early logging to screen
// if it is enabled by either the FELIX_EARLYLOGSEVERITYSCREEN or FELIX_LOGSEVERITYSCREEN
// environment variable.
func ConfigureEarlyLogging() {
// Replace logrus' formatter with a custom one using our time format,
// shared with the Python code.
log.SetFormatter(&logutils.Formatter{})
// Install a hook that adds file/line no information.
log.AddHook(&logutils.ContextHook{})
// First try the early-only environment variable. Since the normal
// config processing doesn't know about that variable, normal config
// will override it once it's loaded.
rawLogLevel := os.Getenv("FELIX_EARLYLOGSEVERITYSCREEN")
if rawLogLevel == "" {
// Early-only flag not set, look for the normal config-owned
// variable.
rawLogLevel = os.Getenv("FELIX_LOGSEVERITYSCREEN")
}
// Default to logging errors.
logLevelScreen := log.ErrorLevel
if rawLogLevel != "" {
parsedLevel, err := log.ParseLevel(rawLogLevel)
if err == nil {
logLevelScreen = parsedLevel
} else {
log.WithError(err).Error("Failed to parse early log level, defaulting to error.")
}
}
log.SetLevel(logLevelScreen)
log.Infof("Early screen log level set to %v", logLevelScreen)
}
// ConfigureLogging uses the resolved configuration to complete the logging
// configuration. It creates hooks for the relevant logging targets and
// attaches them to logrus.
func ConfigureLogging(configParams *config.Config) {
// Parse the log levels, defaulting to panic if in doubt.
logLevelScreen := logutils.SafeParseLogLevel(configParams.LogSeverityScreen)
logLevelFile := logutils.SafeParseLogLevel(configParams.LogSeverityFile)
logLevelSyslog := logutils.SafeParseLogLevel(configParams.LogSeveritySys)
// Work out the most verbose level that is being logged.
mostVerboseLevel := logLevelScreen
if logLevelFile > mostVerboseLevel {
mostVerboseLevel = logLevelFile
}
if logLevelSyslog > mostVerboseLevel {
		mostVerboseLevel = logLevelSyslog
}
// Disable all more-verbose levels using the global setting, this ensures that debug logs
// are filtered out as early as possible.
log.SetLevel(mostVerboseLevel)
// Screen target.
var dests []*logutils.Destination
if configParams.LogSeverityScreen != "" {
screenDest := logutils.NewStreamDestination(
logLevelScreen,
os.Stderr,
make(chan logutils.QueuedLog, logQueueSize),
configParams.DebugDisableLogDropping,
counterLogErrors,
)
dests = append(dests, screenDest)
}
// File target. We record any errors so we can log them out below after finishing set-up
// of the logger.
var fileDirErr, fileOpenErr error
if configParams.LogSeverityFile != "" && configParams.LogFilePath != "" {
fileDirErr = os.MkdirAll(path.Dir(configParams.LogFilePath), 0755)
var rotAwareFile io.Writer
rotAwareFile, fileOpenErr = rfw.Open(configParams.LogFilePath, 0644)
if fileDirErr == nil && fileOpenErr == nil {
fileDest := logutils.NewStreamDestination(
logLevelFile,
rotAwareFile,
make(chan logutils.QueuedLog, logQueueSize),
configParams.DebugDisableLogDropping,
counterLogErrors,
)
dests = append(dests, fileDest)
}
}
// Syslog target. Again, we record the error if we fail to connect to syslog.
var sysErr error
if configParams.LogSeveritySys != "" {
// Set net/addr to "" so we connect to the system syslog server rather
// than a remote one.
net := ""
addr := ""
// The priority parameter is a combination of facility and default
// severity. We want to log with the standard LOG_USER facility; the
// severity is actually irrelevant because the hook always overrides
// it.
priority := syslog.LOG_USER | syslog.LOG_INFO
tag := "calico-felix"
		// assign (rather than redeclare) sysErr so the deferred error logging
		// below can report a failed syslog connection
		var w *syslog.Writer
		w, sysErr = syslog.Dial(net, addr, priority, tag)
		if sysErr == nil {
syslogDest := logutils.NewSyslogDestination(
logLevelSyslog,
w,
make(chan logutils.QueuedLog, logQueueSize),
configParams.DebugDisableLogDropping,
counterLogErrors,
)
dests = append(dests, syslogDest)
}
}
hook := logutils.NewBackgroundHook(logutils.FilterLevels(mostVerboseLevel), logLevelSyslog, dests, counterDroppedLogs)
hook.Start()
log.AddHook(hook)
// Disable logrus' default output, which only supports a single destination. We use the
// hook above to fan out logs to multiple destinations.
log.SetOutput(&logutils.NullWriter{})
// Since we push our logs onto a second thread via a channel, we can disable the
// Logger's built-in mutex completely.
log.StandardLogger().SetNoLock()
// Do any deferred error logging.
if fileDirErr != nil {
log.WithError(fileDirErr).WithField("file", configParams.LogFilePath).
Fatal("Failed to create log file directory.")
}
if fileOpenErr != nil {
log.WithError(fileOpenErr).WithField("file", configParams.LogFilePath).
Fatal("Failed to open log file.")
}
if sysErr != nil {
// We don't bail out if we can't connect to syslog because our default is to try to
// connect but it's very common for syslog to be disabled when we're run in a
// container.
log.WithError(sysErr).Error(
"Failed to connect to syslog. To prevent this error, either set config " +
"parameter LogSeveritySys=none or configure a local syslog service.")
}
}
| 1 | 15,963 | Please can you pull out the shared function into a shared file? I think that'd be pretty easy to do for this module. I think you could: - pull out a function `configureSyslog` that is implemented on Linux, but stubbed on Windows - pull out a function `openLogFile` that is implemented differently on each - share everything else | projectcalico-felix | c |
@@ -26,6 +26,6 @@ public class NotificationServiceFactoryImpl implements NotificationServiceFactor
@Override
public NotificationService create() {
- return new EmailNotificationService();
+ return new EmailNotificationService(new AWSEmailProvider());
}
} | 1 | /*
* Copyright 2019 Oath Holdings Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.common.server.notification.impl;
import com.yahoo.athenz.common.server.notification.NotificationService;
import com.yahoo.athenz.common.server.notification.NotificationServiceFactory;
/*
* This is a reference implementation.
*/
public class NotificationServiceFactoryImpl implements NotificationServiceFactory {
@Override
public NotificationService create() {
return new EmailNotificationService();
}
}
| 1 | 4,931 | Instead of hardcoding the AWSEmailProvider here, it should come from properties, so that it can be replaced with another EmailProvider for ZMS vs ZTS | AthenZ-athenz | java |
@@ -0,0 +1,10 @@
+require_relative '../config/environment'
+
+login = ARGV[0] || :admin_user
+password = ARGV[1] || :admin_password
+email = ARGV[2] || '[email protected]'
+
+account = Account.create!(login: login, level: 10, activated_at: Time.current,
+ email: email, email_confirmation: email,
+ password: password, password_confirmation: password)
+ManualVerification.create!(account: account, auth_id: account.login) | 1 | 1 | 9,003 | +1 for creating this script to help people get boot strapped | blackducksoftware-ohloh-ui | rb |
|
@@ -7,6 +7,7 @@ export const ASYNC_RENDER = 3;
export const ATTR_KEY = '__preactattr_';
+export const ELT_KEY_PREFIX = 'preact-';
// DOM properties that should NOT have "px" added when numeric
export const IS_NON_DIMENSIONAL = /acit|ex(?:s|g|n|p|$)|rph|ows|mnc|ntw|ine[ch]|zoo|^ord/i; | 1 | // render modes
export const NO_RENDER = 0;
export const SYNC_RENDER = 1;
export const FORCE_RENDER = 2;
export const ASYNC_RENDER = 3;
export const ATTR_KEY = '__preactattr_';
// DOM properties that should NOT have "px" added when numeric
export const IS_NON_DIMENSIONAL = /acit|ex(?:s|g|n|p|$)|rph|ows|mnc|ntw|ine[ch]|zoo|^ord/i;
| 1 | 11,457 | Do you need the prefix? | preactjs-preact | js |
@@ -10558,7 +10558,12 @@ short CmpSeabaseDDL::unregisterNativeTable
objectNamePart.data(),
objType
);
-
+
+ // drop comment text
+ retcode = deleteFromTextTable(&cliInterface,
+ objUID,
+ ComTextType::COM_OBJECT_COMMENT_TEXT,
+ 0);
return 0;
}
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: CmpSeabaseDDLtable.cpp
* Description: Implements ddl operations for Seabase tables.
*
*
* Created: 6/30/2013
* Language: C++
*
*
*****************************************************************************
*/
#include "CmpSeabaseDDLincludes.h"
#include "CmpSeabaseDDLauth.h"
#include "ElemDDLColDefault.h"
#include "NumericType.h"
#include "ComUser.h"
#include "keycolumns.h"
#include "ElemDDLColRef.h"
#include "ElemDDLColName.h"
#include "CmpDDLCatErrorCodes.h"
#include "Globals.h"
#include "CmpMain.h"
#include "Context.h"
#include "PrivMgrCommands.h"
#include "PrivMgrRoles.h"
#include "PrivMgrComponentPrivileges.h"
#include "RelExeUtil.h"
#include "TrafDDLdesc.h"
// defined in CmpDescribe.cpp
extern short CmpDescribeSeabaseTable (
const CorrName &dtName,
short type, // 1, invoke. 2, showddl. 3, createLike
char* &outbuf,
ULng32 &outbuflen,
CollHeap *heap,
const char * pkeyStr = NULL,
NABoolean withPartns = FALSE,
NABoolean withoutSalt = FALSE,
NABoolean withoutDivisioning = FALSE,
NABoolean withoutRowFormat = FALSE,
NABoolean withoutLobColumns = FALSE,
UInt32 columnLengthLimit = UINT_MAX,
NABoolean noTrailingSemi = FALSE,
// used to add,rem,alter column definition from col list.
// valid for 'createLike' mode.
// Used for 'alter add/drop/alter col'.
char * colName = NULL,
short ada = 0, // 0,add. 1,drop. 2,alter
const NAColumn * nacol = NULL,
const NAType * natype = NULL,
Space *inSpace = NULL);
short CmpDescribeHiveTable (
const CorrName &dtName,
short type, // 1, invoke. 2, showddl. 3, createLike
char* &outbuf,
ULng32 &outbuflen,
CollHeap *heap,
UInt32 columnLengthLimit = UINT_MAX);
// type: 1, invoke. 2, showddl. 3, create_like
extern short cmpDisplayColumn(const NAColumn *nac,
char * inColName,
const NAType *inNAT,
short displayType,
Space *inSpace,
char * buf,
Lng32 &ii,
NABoolean namesOnly,
NABoolean &identityCol,
NABoolean isExternalTable,
NABoolean isAlignedRowFormat,
UInt32 columnLengthLimit,
NAList<const NAColumn *> * truncatedColumnList);
extern short cmpDisplayPrimaryKey(const NAColumnArray & naColArr,
Lng32 numKeys,
NABoolean displaySystemCols,
Space &space, char * buf,
NABoolean displayCompact,
NABoolean displayAscDesc,
NABoolean displayParens);
static bool checkSpecifiedPrivs(
ElemDDLPrivActArray & privActsArray,
const char * externalObjectName,
ComObjectType objectType,
NATable * naTable,
std::vector<PrivType> & objectPrivs,
std::vector<ColPrivSpec> & colPrivs);
static bool ElmPrivToPrivType(
OperatorTypeEnum elmPriv,
PrivType & privType,
bool forRevoke = false);
static bool hasValue(
const std::vector<ColPrivSpec> & container,
PrivType value);
static bool hasValue(
const std::vector<PrivType> & container,
PrivType value);
static bool isMDGrantRevokeOK(
const std::vector<PrivType> & objectPrivs,
const std::vector<ColPrivSpec> & colPrivs,
bool isGrant);
static bool isValidPrivTypeForObject(
ComObjectType objectType,
PrivType privType);
void CmpSeabaseDDL::convertVirtTableColumnInfoToDescStruct(
const ComTdbVirtTableColumnInfo * colInfo,
const ComObjectName * objectName,
TrafDesc * column_desc)
{
char * col_name = new(STMTHEAP) char[strlen(colInfo->colName) + 1];
strcpy(col_name, colInfo->colName);
column_desc->columnsDesc()->colname = col_name;
column_desc->columnsDesc()->colnumber = colInfo->colNumber;
column_desc->columnsDesc()->datatype = colInfo->datatype;
column_desc->columnsDesc()->length = colInfo->length;
if (!(DFS2REC::isInterval(colInfo->datatype)))
column_desc->columnsDesc()->scale = colInfo->scale;
else
column_desc->columnsDesc()->scale = 0;
column_desc->columnsDesc()->precision = colInfo->precision;
column_desc->columnsDesc()->datetimestart = (rec_datetime_field) colInfo->dtStart;
column_desc->columnsDesc()->datetimeend = (rec_datetime_field) colInfo->dtEnd;
if (DFS2REC::isDateTime(colInfo->datatype) || DFS2REC::isInterval(colInfo->datatype))
column_desc->columnsDesc()->datetimefractprec = colInfo->scale;
else
column_desc->columnsDesc()->datetimefractprec = 0;
if (DFS2REC::isInterval(colInfo->datatype))
column_desc->columnsDesc()->intervalleadingprec = colInfo->precision;
else
column_desc->columnsDesc()->intervalleadingprec = 0 ;
column_desc->columnsDesc()->setNullable(colInfo->nullable);
column_desc->columnsDesc()->setUpshifted(colInfo->upshifted);
column_desc->columnsDesc()->character_set = (CharInfo::CharSet) colInfo->charset;
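  // map the column class enum to the single-character class code used in descriptors ('U' user, 'S' system)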
switch (colInfo->columnClass)
{
case COM_USER_COLUMN:
column_desc->columnsDesc()->colclass = 'U';
break;
case COM_SYSTEM_COLUMN:
column_desc->columnsDesc()->colclass = 'S';
break;
default:
CMPASSERT(0);
}
column_desc->columnsDesc()->setDefaultClass(colInfo->defaultClass);
column_desc->columnsDesc()->colFlags = colInfo->colFlags;
column_desc->columnsDesc()->pictureText =
(char *)STMTHEAP->allocateMemory(340);
NAType::convertTypeToText(column_desc->columnsDesc()->pictureText, //OUT
column_desc->columnsDesc()->datatype,
column_desc->columnsDesc()->length,
column_desc->columnsDesc()->precision,
column_desc->columnsDesc()->scale,
column_desc->columnsDesc()->datetimeStart(),
column_desc->columnsDesc()->datetimeEnd(),
column_desc->columnsDesc()->datetimefractprec,
column_desc->columnsDesc()->intervalleadingprec,
column_desc->columnsDesc()->isUpshifted(),
column_desc->columnsDesc()->isCaseInsensitive(),
(CharInfo::CharSet)column_desc->columnsDesc()->character_set,
(CharInfo::Collation) 1, // default collation
NULL, // displayDataType
0); // displayCaseSpecific
column_desc->columnsDesc()->offset = -1; // not present in colInfo
column_desc->columnsDesc()->setCaseInsensitive(FALSE); // not present in colInfo
column_desc->columnsDesc()->encoding_charset = (CharInfo::CharSet) column_desc->columnsDesc()->character_set ; // not present in colInfo so we go with the column's charset here.
column_desc->columnsDesc()->collation_sequence = (CharInfo::Collation)1; // not present in colInfo, so we go with default collation here (used in buildEncodeTree for some error handling)
column_desc->columnsDesc()->defaultvalue = NULL ; // not present in colInfo
column_desc->columnsDesc()->computed_column_text = NULL; // not present in colInfo
}
TrafDesc * CmpSeabaseDDL::convertVirtTableColumnInfoArrayToDescStructs(
const ComObjectName * objectName,
const ComTdbVirtTableColumnInfo * colInfoArray,
Lng32 numCols)
{
TrafDesc * prev_column_desc = NULL;
TrafDesc * first_column_desc = NULL;
for (Int32 i = 0; i < numCols; i++)
{
const ComTdbVirtTableColumnInfo* colInfo = &(colInfoArray[i]);
// TrafAllocateDDLdesc() requires that HEAP (STMTHEAP)
// be used for operator new herein
TrafDesc * column_desc = TrafAllocateDDLdesc(DESC_COLUMNS_TYPE, NULL);
if (prev_column_desc != NULL)
prev_column_desc->next = column_desc;
else
first_column_desc = column_desc;
prev_column_desc = column_desc;
convertVirtTableColumnInfoToDescStruct(colInfo, objectName, column_desc);
}
return first_column_desc;
}
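// Convert an array of ComTdbVirtTableKeyInfo entries into a linked list of
// key TrafDesc structs. Each key desc records the table column number, the
// key sequence number and the ordering. Returns the head of the list.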
TrafDesc * CmpSeabaseDDL::convertVirtTableKeyInfoArrayToDescStructs(
const ComTdbVirtTableKeyInfo *keyInfoArray,
const ComTdbVirtTableColumnInfo *colInfoArray,
Lng32 numKeys)
{
TrafDesc * prev_key_desc = NULL;
TrafDesc * first_key_desc = NULL;
for (Int32 i = 0; i < numKeys; i++)
{
const ComTdbVirtTableColumnInfo * colInfo = &(colInfoArray[keyInfoArray[i].tableColNum]);
TrafDesc * key_desc = TrafAllocateDDLdesc(DESC_KEYS_TYPE, NULL);
if (prev_key_desc != NULL)
prev_key_desc->next = key_desc;
else
first_key_desc = key_desc;
prev_key_desc = key_desc;
key_desc->keysDesc()->tablecolnumber = keyInfoArray[i].tableColNum;
key_desc->keysDesc()->keyseqnumber = i;
key_desc->keysDesc()->setDescending(keyInfoArray[i].ordering != 0 ? TRUE : FALSE);
}
return first_key_desc;
}
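// ----------------------------------------------------------------------------
// Method: createSeabaseTableLike
//
// Processes CREATE TABLE ... LIKE: the source table (Trafodion or Hive) is
// described in 'createlike' mode and the returned text fragments are used to
// build a CREATE TABLE statement for the target table, which is then executed
// through the cli. Options that are currently not supported with LIKE
// (PRIMARY KEY/STORE BY, PARTITION BY, DIVISION BY, HBASE table options) are
// rejected with error 3111.
//
// any error detected is added to the diags area
// ----------------------------------------------------------------------------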
void CmpSeabaseDDL::createSeabaseTableLike(ExeCliInterface * cliInterface,
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
ComObjectName tgtTableName(createTableNode->getTableName(), COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tgtTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString extTgtTableName = tgtTableName.getExternalName(TRUE);
ComObjectName srcTableName(createTableNode->getLikeSourceTableName(), COM_TABLE_NAME);
srcTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
NABoolean srcSchNameSpecified =
(NOT createTableNode->getOrigLikeSourceTableName().getSchemaName().isNull());
NAString srcCatNamePart = srcTableName.getCatalogNamePartAsAnsiString();
NAString srcSchNamePart = srcTableName.getSchemaNamePartAsAnsiString(TRUE);
NAString srcObjNamePart = srcTableName.getObjectNamePartAsAnsiString(TRUE);
NAString extSrcTableName = srcTableName.getExternalName(TRUE);
NAString srcTabName = srcTableName.getExternalName(TRUE);
retcode = lookForTableInMD(cliInterface,
srcCatNamePart, srcSchNamePart, srcObjNamePart,
srcSchNameSpecified, FALSE,
srcTableName, srcTabName, extSrcTableName);
if (retcode < 0)
{
processReturn();
return;
}
CorrName cn(srcObjNamePart,
STMTHEAP,
srcSchNamePart,
srcCatNamePart);
ElemDDLColRefArray &keyArray =
(createTableNode->getIsConstraintPKSpecified() ?
createTableNode->getPrimaryKeyColRefArray() :
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION ?
createTableNode->getKeyColumnArray() :
createTableNode->getPrimaryKeyColRefArray()));
NAString keyClause;
if ((keyArray.entries() > 0) &&
((createTableNode->getIsConstraintPKSpecified()) ||
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION)))
{
if (createTableNode->getIsConstraintPKSpecified())
keyClause = " primary key ( ";
else if (createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION)
keyClause = " store by ( ";
for (CollIndex i = 0; i < keyArray.entries(); i++)
{
NAString colName = keyArray[i]->getColumnName();
// Generate a delimited identifier if source colName is delimited
// Launchpad bug: 1383531
colName/*InExternalFormat*/ = ToAnsiIdentifier (colName/*InInternalFormat*/);
keyClause += colName;
if (i < (keyArray.entries() - 1))
keyClause += ", ";
}
keyClause += ")";
}
// Check for other common options that are currently not supported
// with CREATE TABLE LIKE. Those could all be passed into
// CmpDescribeSeabaseTable as strings if we wanted to support them.
if (NOT keyClause.isNull())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("PRIMARY KEY/STORE BY");
return;
}
if (createTableNode->isPartitionSpecified() ||
createTableNode->isPartitionBySpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("PARTITION BY");
return;
}
if (createTableNode->isDivisionClauseSpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("DIVISION BY");
return;
}
if (createTableNode->isHbaseOptionsSpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("HBASE table options");
return;
}
ParDDLLikeOptsCreateTable &likeOptions = createTableNode->getLikeOptions();
char * buf = NULL;
ULng32 buflen = 0;
if (srcCatNamePart == HIVE_SYSTEM_CATALOG)
retcode = CmpDescribeHiveTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
likeOptions.getIsLikeOptColumnLengthLimit());
else
retcode = CmpDescribeSeabaseTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
NULL,
likeOptions.getIsWithHorizontalPartitions(),
likeOptions.getIsWithoutSalt(),
likeOptions.getIsWithoutDivision(),
likeOptions.getIsWithoutRowFormat(),
likeOptions.getIsWithoutLobColumns(),
likeOptions.getIsLikeOptColumnLengthLimit(),
TRUE);
if (retcode)
return;
NAString query = "create table ";
query += extTgtTableName;
query += " ";
NABoolean done = FALSE;
Lng32 curPos = 0;
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
query += frag;
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
if (NOT keyClause.isNull())
{
// add the keyClause
query += keyClause;
}
const NAString * saltClause = likeOptions.getSaltClause();
if (saltClause)
{
query += saltClause->data();
}
// send any user CQDs down
Lng32 retCode = sendAllControls(FALSE, FALSE, TRUE);
Lng32 cliRC = 0;
cliRC = cliInterface->executeImmediate((char*)query.data());
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
return;
}
// ----------------------------------------------------------------------------
// Method: createSeabaseTableExternal
//
// This method creates a Trafodion table that represents a Hive or HBase table
//
// in:
//    cliInterface - reference to the cli execution structure
// createTableNode - representation of the CREATE TABLE statement
// tgtTableName - the Trafodion external table name to create
// srcTableName - the native source table
//
// returns:  0 - successful, -1 - error
//
// any error detected is added to the diags area
// ----------------------------------------------------------------------------
short CmpSeabaseDDL::createSeabaseTableExternal(
ExeCliInterface &cliInterface,
StmtDDLCreateTable * createTableNode,
const ComObjectName &tgtTableName,
const ComObjectName &srcTableName)
{
Int32 retcode = 0;
NABoolean isHive = tgtTableName.isExternalHive();
// go create the schema - if it does not already exist.
NAString createSchemaStmt ("CREATE SCHEMA IF NOT EXISTS ");
createSchemaStmt += tgtTableName.getCatalogNamePartAsAnsiString();
createSchemaStmt += ".";
createSchemaStmt += tgtTableName.getSchemaNamePartAsAnsiString();
if (isAuthorizationEnabled())
{
createSchemaStmt += " AUTHORIZATION ";
createSchemaStmt += (isHive) ? DB__HIVEROLE : DB__HBASEROLE;
}
Lng32 cliRC = cliInterface.executeImmediate((char*)createSchemaStmt.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
const NAString catalogNamePart = tgtTableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tgtTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tgtTableName.getObjectNamePartAsAnsiString(TRUE);
// Make sure current user has privileges
Int32 objectOwnerID = SUPER_USER;
Int32 schemaOwnerID = SUPER_USER;
ComSchemaClass schemaClass;
retcode = verifyDDLCreateOperationAuthorized(&cliInterface,
SQLOperation::CREATE_TABLE,
catalogNamePart,
schemaNamePart,
schemaClass,
objectOwnerID,
schemaOwnerID);
if (retcode != 0)
{
handleDDLCreateAuthorizationError(retcode,catalogNamePart,schemaNamePart);
return -1;
}
if (createTableNode->mapToHbaseTable())
return 0;
const NAString extTgtTableName = tgtTableName.getExternalName(TRUE);
const NAString srcCatNamePart = srcTableName.getCatalogNamePartAsAnsiString();
const NAString srcSchNamePart = srcTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString srcObjNamePart = srcTableName.getObjectNamePartAsAnsiString(TRUE);
CorrName cnSrc(srcObjNamePart, STMTHEAP, srcSchNamePart, srcCatNamePart);
// build the structures needed to create the table
// tableInfo contains data inserted into OBJECTS and TABLES
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = NULL;
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = 0;
tableInfo->isAudited = 1;
tableInfo->validDef = 1;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->numSaltPartns = 0;
tableInfo->rowFormat = COM_UNKNOWN_FORMAT_TYPE;
if (isHive)
{
tableInfo->objectFlags = SEABASE_OBJECT_IS_EXTERNAL_HIVE;
}
else
{
tableInfo->objectFlags = SEABASE_OBJECT_IS_EXTERNAL_HBASE;
if (createTableNode->isImplicitExternal())
tableInfo->objectFlags |= SEABASE_OBJECT_IS_IMPLICIT_EXTERNAL;
}
tableInfo->tablesFlags = 0;
if (isAuthorizationEnabled())
{
if (tgtTableName.isExternalHive())
{
tableInfo->objOwnerID = HIVE_ROLE_ID;
tableInfo->schemaOwnerID = HIVE_ROLE_ID;
}
else
{
tableInfo->objOwnerID = HBASE_ROLE_ID;
tableInfo->schemaOwnerID = HBASE_ROLE_ID;
}
}
else
{
tableInfo->objOwnerID = SUPER_USER;
tableInfo->schemaOwnerID = SUPER_USER;
}
// Column information
Lng32 datatype, length, precision, scale, dtStart, dtEnd, nullable, upshifted;
NAString charset;
CharInfo::Collation collationSequence = CharInfo::DefaultCollation;
ULng32 hbaseColFlags;
NABoolean alignedFormat = FALSE;
Lng32 serializedOption = -1;
Int32 numCols = 0;
ComTdbVirtTableColumnInfo * colInfoArray = NULL;
ElemDDLColDefArray &colArray = createTableNode->getColDefArray();
ElemDDLColRefArray &keyArray =
(createTableNode->getIsConstraintPKSpecified() ?
createTableNode->getPrimaryKeyColRefArray() :
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION ?
createTableNode->getKeyColumnArray() :
createTableNode->getPrimaryKeyColRefArray()));
// Get a description of the source table
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
NATable *naTable = bindWA.getNATable(cnSrc);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cnSrc.getExposedNameAsAnsiString());
return -1;
}
if ((naTable->isHiveTable()) &&
(naTable->getViewText()) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags()
<< DgSqlCode(-3242)
<< DgString0("Cannot create external table on a native Hive view.");
return -1;
}
// cqd HIVE_USE_EXT_TABLE_ATTRS:
// if OFF, col or key attrs cannot be specified during ext table creation.
// if ON, col attrs could be specified.
// if ALL, col and key attrs could be specified
NABoolean extTableAttrsSpecified = FALSE;
if (colArray.entries() > 0)
{
if (CmpCommon::getDefault(HIVE_USE_EXT_TABLE_ATTRS) == DF_OFF)
{
*CmpCommon::diags()
<< DgSqlCode(-3242)
<< DgString0("Cannot specify column attributes for external tables.");
return -1;
}
extTableAttrsSpecified = TRUE;
CmpSeabaseDDL::setMDflags
(tableInfo->tablesFlags, MD_TABLES_HIVE_EXT_COL_ATTRS);
}
if (keyArray.entries() > 0)
{
if (CmpCommon::getDefault(HIVE_USE_EXT_TABLE_ATTRS) != DF_ALL)
{
*CmpCommon::diags()
<< DgSqlCode(-3242)
<< DgString0("Cannot specify key attribute for external tables.");
return -1;
}
extTableAttrsSpecified = TRUE;
CmpSeabaseDDL::setMDflags
(tableInfo->tablesFlags, MD_TABLES_HIVE_EXT_KEY_ATTRS);
}
// convert column array from NATable into a ComTdbVirtTableColumnInfo struct
NAColumnArray naColArray;
const NAColumnArray &origColArray = naTable->getNAColumnArray();
for (CollIndex c=0; c<origColArray.entries(); c++)
naColArray.insert(origColArray[c]);
numCols = naColArray.entries();
// make sure all columns specified in colArray are part of naColArray
if (colArray.entries() > 0)
{
for (CollIndex colIndex = 0; colIndex < colArray.entries(); colIndex++)
{
const ElemDDLColDef *edcd = colArray[colIndex];
if (naColArray.getColumnPosition((NAString&)edcd->getColumnName()) < 0)
{
// not found. return error.
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(ToAnsiIdentifier(edcd->getColumnName()));
return -1;
}
}
}
colInfoArray = new(STMTHEAP) ComTdbVirtTableColumnInfo[numCols];
for (CollIndex index = 0; index < numCols; index++)
{
const NAColumn *naCol = naColArray[index];
const NAType * type = naCol->getType();
// if colArray has been specified, then look for this column in
// that array and use the type specified there.
Int32 colIndex = -1;
if ((colArray.entries() > 0) &&
((colIndex = colArray.getColumnIndex(naCol->getColName())) >= 0))
{
ElemDDLColDef *edcd = colArray[colIndex];
type = edcd->getColumnDataType();
}
// call: CmpSeabaseDDL::getTypeInfo to get column details
retcode = getTypeInfo(type, alignedFormat, serializedOption,
datatype, length, precision, scale, dtStart, dtEnd, upshifted, nullable,
charset, collationSequence, hbaseColFlags);
if (retcode)
return -1;
if (length > CmpCommon::getDefaultNumeric(TRAF_MAX_CHARACTER_COL_LENGTH))
{
*CmpCommon::diags()
<< DgSqlCode(-4247)
<< DgInt0(length)
<< DgInt1(CmpCommon::getDefaultNumeric(TRAF_MAX_CHARACTER_COL_LENGTH))
<< DgString0(naCol->getColName().data());
return -1;
}
colInfoArray[index].colName = naCol->getColName().data();
colInfoArray[index].colNumber = index;
colInfoArray[index].columnClass = COM_USER_COLUMN;
colInfoArray[index].datatype = datatype;
colInfoArray[index].length = length;
colInfoArray[index].nullable = nullable;
colInfoArray[index].charset = (SQLCHARSET_CODE)CharInfo::getCharSetEnum(charset);
colInfoArray[index].precision = precision;
colInfoArray[index].scale = scale;
colInfoArray[index].dtStart = dtStart;
colInfoArray[index].dtEnd = dtEnd;
colInfoArray[index].upshifted = upshifted;
colInfoArray[index].colHeading = NULL;
colInfoArray[index].hbaseColFlags = naCol->getHbaseColFlags();
colInfoArray[index].defaultClass = COM_NULL_DEFAULT;
colInfoArray[index].defVal = NULL;
colInfoArray[index].hbaseColFam = naCol->getHbaseColFam();
colInfoArray[index].hbaseColQual = naCol->getHbaseColQual();
strcpy(colInfoArray[index].paramDirection, COM_UNKNOWN_PARAM_DIRECTION_LIT);
colInfoArray[index].isOptional = FALSE;
colInfoArray[index].colFlags = 0;
}
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
Lng32 numKeys = 0;
numKeys = keyArray.entries();
if (numKeys > 0)
{
if (isHive)
{
*CmpCommon::diags()
<< DgSqlCode(-4222)
<< DgString0("\"PRIMARY KEY on external hive table\"");
return -1;
}
keyInfoArray = new(STMTHEAP) ComTdbVirtTableKeyInfo[numKeys];
if (buildKeyInfoArray(NULL, (NAColumnArray*)&naColArray, &keyArray,
colInfoArray, keyInfoArray, TRUE))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTgtTableName);
return -1;
}
}
NABoolean registerHiveObject =
(cnSrc.isHive() &&
CmpCommon::getDefault(HIVE_NO_REGISTER_OBJECTS) == DF_OFF);
// if source table is a hive table and not already registered, register
// it in traf metadata
if (registerHiveObject)
{
char buf[2000];
str_sprintf(buf, "register internal hive table if not exists %s.%s.%s",
srcCatNamePart.data(),
srcSchNamePart.data(),
srcObjNamePart.data());
Lng32 cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
} // registerHiveObject
Int64 objUID = -1;
cliRC = 0;
// Update traf MD tables with info about this table.
// But do not insert priv info if object is to be registered.
// That will happen during hive object registration.
if (updateSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
COM_NO_LIT,
tableInfo,
numCols,
colInfoArray,
0 /*numKeys*/,
NULL /*keyInfoArray*/,
0, NULL,
objUID /*returns generated UID*/,
(NOT registerHiveObject)))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTgtTableName);
return -1;
}
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTgtTableName);
return -1;
}
// remove cached definition - this code exists in other create stmts;
// is it required?
CorrName cnTgt(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cnTgt,
ComQiScope::REMOVE_MINE_ONLY,
COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns(), FALSE);
return 0;
}
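// Generate the ANSI name of a primary key constraint. If the ADD CONSTRAINT
// node does not specify a constraint name, a random internal name is derived
// from the table's object name and qualified with the catalog and schema;
// otherwise the user-specified constraint name is returned in pkeyName.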
short CmpSeabaseDDL::genPKeyName(StmtDDLAddConstraintPK *addPKNode,
const char * catName,
const char * schName,
const char * objName,
NAString &pkeyName)
{
ComObjectName tableName( (addPKNode ? addPKNode->getTableName() : " "), COM_TABLE_NAME);
ElemDDLConstraintPK *constraintNode =
(addPKNode ? (addPKNode->getConstraint())->castToElemDDLConstraintPK() : NULL);
ComString specifiedConstraint;
ComString constraintName;
if( !constraintNode || (constraintNode->getConstraintName().isNull()))
{
specifiedConstraint.append( catName);
specifiedConstraint.append(".");
specifiedConstraint.append("\"");
specifiedConstraint.append( schName);
specifiedConstraint.append("\"");
specifiedConstraint.append(".");
ComString oName = "\"";
oName += objName;
oName += "\"";
Lng32 status = ToInternalIdentifier ( oName // in/out - from external- to internal-format
, TRUE // in - NABoolean upCaseInternalNameIfRegularIdent
, TRUE // in - NABoolean acceptCircumflex
);
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet()
, /*internalFormat*/oName // in - const ComString &
, /*internalFormat*/constraintName // out - ComString &
, STMTHEAP
);
// Generate a delimited identifier if objectName was delimited
constraintName/*InExternalFormat*/ = ToAnsiIdentifier (constraintName/*InInternalFormat*/);
specifiedConstraint.append(constraintName);
}
else
{
specifiedConstraint = constraintNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
}
pkeyName = specifiedConstraint;
return 0;
}
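// ----------------------------------------------------------------------------
// Method: updatePKeyInfo
//
// Adds metadata for a primary key constraint: a row in OBJECTS for the
// constraint object, a row in TABLE_CONSTRAINTS linking it to the table
// (with the PKEY_NOT_SERIALIZED flag when applicable), and one row in KEYS
// per key column. The generated constraint UID and the table UID are
// returned through outPkeyUID and outTableUID.
//
// returns:  0 - successful, -1 - error (diags area is set)
// ----------------------------------------------------------------------------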
short CmpSeabaseDDL::updatePKeyInfo(
StmtDDLAddConstraintPK *addPKNode,
const char * catName,
const char * schName,
const char * objName,
const Int32 ownerID,
const Int32 schemaOwnerID,
Lng32 numKeys,
Int64 * outPkeyUID,
Int64 * outTableUID,
const ComTdbVirtTableKeyInfo * keyInfoArray,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
// update primary key constraint info
NAString pkeyStr;
if (genPKeyName(addPKNode, catName, schName, objName, pkeyStr))
{
return -1;
}
Int64 createTime = NA_JulianTimestamp();
ComUID comUID;
comUID.make_UID();
Int64 pkeyUID = comUID.get_value();
if (outPkeyUID)
*outPkeyUID = pkeyUID;
ComObjectName pkeyName(pkeyStr);
const NAString catalogNamePart = pkeyName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = pkeyName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = pkeyName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
str_sprintf(buf, "insert into %s.\"%s\".%s values ('%s', '%s', '%s', '%s', %ld, %ld, %ld, '%s', '%s', %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), quotedSchName.data(), quotedObjName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT,
pkeyUID,
createTime,
createTime,
" ",
COM_NO_LIT,
ownerID,
schemaOwnerID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_OBJECTS);
return -1;
}
Int64 tableUID =
getObjectUID(cliInterface,
catName, schName, objName,
COM_BASE_TABLE_OBJECT_LIT);
if (outTableUID)
*outTableUID = tableUID;
Int64 validatedTime = NA_JulianTimestamp();
Int64 flags = 0;
// if pkey is specified to be not_serialized, then set it.
// if this is an hbase mapped table and pkey serialization is not specified,
// then set to not_serialized.
NABoolean notSerializedPK = FALSE;
if ((addPKNode->getAlterTableAction()->castToElemDDLConstraintPK()->ser() ==
ComPkeySerialization::COM_NOT_SERIALIZED) ||
(ComIsHbaseMappedSchemaName(schName) &&
(addPKNode->getAlterTableAction()->castToElemDDLConstraintPK()->ser() ==
ComPkeySerialization::COM_SER_NOT_SPECIFIED)))
notSerializedPK = TRUE;
if (notSerializedPK)
{
CmpSeabaseDDL::setMDflags
(flags, CmpSeabaseDDL::MD_TABLE_CONSTRAINTS_PKEY_NOT_SERIALIZED_FLG);
}
Int64 indexUID = 0;
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, %ld, '%s', '%s', '%s', '%s', '%s', '%s', %ld, %d, %ld, %ld )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
tableUID, pkeyUID,
COM_PRIMARY_KEY_CONSTRAINT_LIT,
COM_NO_LIT,
COM_NO_LIT,
COM_NO_LIT,
COM_YES_LIT,
COM_YES_LIT,
validatedTime,
numKeys,
indexUID,
flags);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_TABLE_CONSTRAINTS);
return -1;
}
if (keyInfoArray)
{
for (Lng32 i = 0; i < numKeys; i++)
{
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, '%s', %d, %d, %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
pkeyUID,
keyInfoArray[i].colName,
i+1,
keyInfoArray[i].tableColNum,
0,
0);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_KEYS);
return -1;
}
}
}
return 0;
}
// ----------------------------------------------------------------------------
// Method: getPKeyInfoForTable
//
// This method reads the metadata to get the primary key constraint name and UID
// for a table.
//
// Params:
// In: catName, schName, objName describing the table
// In: cliInterface - pointer to the cli handle
// Out: constrName and constrUID
//
// Returns 0 if found, -1 otherwise
// ComDiags is set up with error
// ----------------------------------------------------------------------------
short CmpSeabaseDDL::getPKeyInfoForTable (
const char *catName,
const char *schName,
const char *objName,
ExeCliInterface *cliInterface,
NAString &constrName,
Int64 &constrUID)
{
char query[4000];
constrUID = -1;
// get constraint info
str_sprintf(query, "select O.object_name, C.constraint_uid "
"from %s.\"%s\".%s O, %s.\"%s\".%s C "
"where O.object_uid = C.constraint_uid "
" and C.constraint_type = '%s' and C.table_uid = "
" (select object_uid from %s.\"%s\".%s "
" where catalog_name = '%s' "
" and schema_name = '%s' "
" and object_name = '%s')",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
COM_PRIMARY_KEY_CONSTRAINT_LIT,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catName, schName, objName);
Queue * constrInfoQueue = NULL;
Lng32 cliRC = cliInterface->fetchAllRows(constrInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
assert (constrInfoQueue->numEntries() == 1);
constrInfoQueue->position();
OutputInfo * vi = (OutputInfo*)constrInfoQueue->getNext();
char * pConstrName = (char*)vi->get(0);
constrName = pConstrName;
constrUID = *(Int64*)vi->get(1);
return 0;
}
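// ----------------------------------------------------------------------------
// Method: constraintErrorChecks
//
// Validates an ADD CONSTRAINT request: verifies that no check, unique or
// referential constraint with the same name exists on this table or in
// metadata. For unique, referential and primary key constraints it also
// verifies that each key column exists in the table, is not a system column,
// is not a LOB column and is not listed more than once. For unique
// constraints, an error is returned if a unique or primary key constraint
// is already defined on the same set of columns.
//
// returns:  0 - successful, -1 - error (diags area is set)
// ----------------------------------------------------------------------------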
short CmpSeabaseDDL::constraintErrorChecks(
ExeCliInterface * cliInterface,
StmtDDLAddConstraint *addConstrNode,
NATable * naTable,
ComConstraintType ct,
NAList<NAString> &keyColList)
{
const NAString &addConstrName = addConstrNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
// make sure that there is no other constraint on this table with this name.
NABoolean foundConstr = FALSE;
const CheckConstraintList &checkList = naTable->getCheckConstraints();
for (Int32 i = 0; i < checkList.entries(); i++)
{
CheckConstraint *checkConstr = (CheckConstraint*)checkList[i];
const NAString &tableConstrName =
checkConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
if (NOT foundConstr)
{
const AbstractRIConstraintList &ariList = naTable->getUniqueConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = (AbstractRIConstraint*)ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
}
if (NOT foundConstr)
{
const AbstractRIConstraintList &ariList = naTable->getRefConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = (AbstractRIConstraint*)ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
}
if (NOT foundConstr)
{
const NAString &constrCatName = addConstrNode->
getConstraintNameAsQualifiedName().getCatalogName();
const NAString &constrSchName = addConstrNode->
getConstraintNameAsQualifiedName().getSchemaName();
const NAString &constrObjName = addConstrNode->
getConstraintNameAsQualifiedName().getObjectName();
// check to see if this constraint was defined on some other table and
// exists in metadata
Lng32 retcode = existsInSeabaseMDTable(cliInterface,
constrCatName, constrSchName, constrObjName,
COM_UNKNOWN_OBJECT, FALSE, FALSE);
if (retcode == 1) // exists
{
foundConstr = TRUE;
}
}
if (foundConstr)
{
*CmpCommon::diags()
<< DgSqlCode(-1043)
<< DgConstraintName(addConstrName);
processReturn();
return -1;
}
if ((ct == COM_UNIQUE_CONSTRAINT) ||
(ct == COM_FOREIGN_KEY_CONSTRAINT) ||
(ct == COM_PRIMARY_KEY_CONSTRAINT))
{
const NAColumnArray & naColArray = naTable->getNAColumnArray();
// Now process each column defined in the parseColList to see if
// it exists in the table column list and it isn't a duplicate.
NABitVector seenIt;
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColList.entries(); i++)
{
NAColumn * nac = naColArray.getColumn(keyColList[i]);
if (! nac)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
return -1;
}
if (nac->isSystemColumn())
{
*CmpCommon::diags() << DgSqlCode((ct == COM_FOREIGN_KEY_CONSTRAINT) ?
-CAT_SYSTEM_COL_NOT_ALLOWED_IN_RI_CNSTRNT :
-CAT_SYSTEM_COL_NOT_ALLOWED_IN_UNIQUE_CNSTRNT)
<< DgColumnName(ToAnsiIdentifier(keyColList[i]))
<< DgTableName(addConstrNode->getTableName());
return -1;
}
// If column is a LOB column, return an error
Lng32 datatype = nac->getType()->getFSDatatype();
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COL_CANNOT_BE_INDEX_OR_KEY)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
processReturn();
return -1;
}
Lng32 colNumber = nac->getPosition();
// If the column has already been found, return error
if( seenIt.contains(colNumber))
{
*CmpCommon::diags() << DgSqlCode(-CAT_REDUNDANT_COLUMN_REF_PK)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
return -1;
}
seenIt.setBit(colNumber);
}
if (ct == COM_UNIQUE_CONSTRAINT)
{
// Compare the column list from parse tree to the unique and primary
// key constraints already defined for the table. The new unique
// constraint list must be distinct.  The columns of an existing constraint
// do not have to be in the same order as the columns of the new constraint.
//
if (naTable->getCorrespondingConstraint(keyColList,
TRUE, // unique constraint
NULL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_UNIQUE_CONSTRAINT_ON_SAME_COL);
return -1;
}
}
}
return 0;
}
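// Generate the ANSI name of a constraint from a StmtDDLAddConstraint node.
// If no constraint name was specified, a random internal name is derived
// from the table's object name and qualified with the table's catalog and
// schema; otherwise the user-specified name is returned in uniqueName.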
short CmpSeabaseDDL::genUniqueName(StmtDDLAddConstraint *addUniqueNode,
NAString &uniqueName)
{
ComObjectName tableName( addUniqueNode->getTableName(), COM_TABLE_NAME);
ElemDDLConstraint *constraintNode =
(addUniqueNode->getConstraint())->castToElemDDLConstraint();
ComString specifiedConstraint;
ComString constraintName;
if( constraintNode->getConstraintName().isNull() )
{
specifiedConstraint.append( tableName.getCatalogNamePartAsAnsiString() );
specifiedConstraint.append(".");
specifiedConstraint.append( tableName.getSchemaNamePartAsAnsiString() );
specifiedConstraint.append(".");
ComString oName = tableName.getObjectNamePartAsAnsiString() ;
Lng32 status = ToInternalIdentifier ( oName // in/out - from external- to internal-format
, TRUE // in - NABoolean upCaseInternalNameIfRegularIdent
, TRUE // in - NABoolean acceptCircumflex
);
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet()
, /*internalFormat*/oName // in - const ComString &
, /*internalFormat*/constraintName // out - ComString &
, STMTHEAP
);
// Generate a delimited identifier if objectName was delimited
constraintName/*InExternalFormat*/ = ToAnsiIdentifier (constraintName/*InInternalFormat*/);
specifiedConstraint.append(constraintName);
}
else
{
specifiedConstraint = constraintNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
}
uniqueName = specifiedConstraint;
return 0;
}
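// ----------------------------------------------------------------------------
// Method: updateConstraintMD
//
// Adds metadata for a unique, foreign key or check constraint: a row in
// OBJECTS for the constraint object, a row in TABLE_CONSTRAINTS linking it
// to the table (recording whether the constraint is enforced), and one row
// in KEYS per key column with its ordering.
//
// returns:  0 - successful, -1 - error (diags area is set)
// ----------------------------------------------------------------------------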
short CmpSeabaseDDL::updateConstraintMD(
NAList<NAString> &keyColList,
NAList<NAString> &keyColOrderList,
NAString &uniqueStr,
Int64 tableUID,
Int64 constrUID,
NATable * naTable,
ComConstraintType ct,
NABoolean enforced,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
const NAColumnArray & naColArray = naTable->getNAColumnArray();
Int64 createTime = NA_JulianTimestamp();
ComObjectName uniqueName(uniqueStr);
const NAString catalogNamePart = uniqueName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = uniqueName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = uniqueName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
str_sprintf(buf, "insert into %s.\"%s\".%s values ('%s', '%s', '%s', '%s', %ld, %ld, %ld, '%s', '%s', %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), quotedSchName.data(), quotedObjName.data(),
((ct == COM_UNIQUE_CONSTRAINT) ? COM_UNIQUE_CONSTRAINT_OBJECT_LIT :
((ct == COM_FOREIGN_KEY_CONSTRAINT) ? COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT : COM_CHECK_CONSTRAINT_OBJECT_LIT)),
constrUID,
createTime,
createTime,
" ",
COM_NO_LIT,
naTable->getOwner(),
naTable->getSchemaOwner());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
Int64 indexUID = 0;
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, %ld, '%s', '%s', '%s', '%s', '%s', '%s', %ld, %d, %ld, 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
tableUID, constrUID,
((ct == COM_UNIQUE_CONSTRAINT) ? COM_UNIQUE_CONSTRAINT_LIT :
((ct == COM_FOREIGN_KEY_CONSTRAINT) ? COM_FOREIGN_KEY_CONSTRAINT_LIT : COM_CHECK_CONSTRAINT_LIT)),
COM_NO_LIT,
COM_NO_LIT,
COM_NO_LIT,
(enforced ? COM_YES_LIT : COM_NO_LIT),
COM_YES_LIT,
createTime,
keyColList.entries(),
indexUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
for (Lng32 i = 0; i < keyColList.entries(); i++)
{
NAColumn * nac = naColArray.getColumn(keyColList[i]);
Lng32 colNumber = nac->getPosition();
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, '%s', %d, %d, %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
constrUID,
keyColList[i].data(),
i+1,
colNumber,
(keyColOrderList[i] == "DESC" ? 1 : 0),
0);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
}
return 0;
}
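// Record the relationship between a referential constraint and the unique
// constraint it references: a row is inserted into REF_CONSTRAINTS for the
// referencing (ring) constraint and a row into UNIQUE_REF_CONSTR_USAGE for
// the referenced (refd) constraint.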
short CmpSeabaseDDL::updateRIConstraintMD(
Int64 ringConstrUID,
Int64 refdConstrUID,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, %ld, '%s', '%s', '%s', 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_REF_CONSTRAINTS,
ringConstrUID, refdConstrUID,
COM_FULL_MATCH_OPTION_LIT,
COM_RESTRICT_UPDATE_RULE_LIT,
COM_RESTRICT_DELETE_RULE_LIT);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
str_sprintf(buf, "insert into %s.\"%s\".%s values (%ld, %ld, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_UNIQUE_REF_CONSTR_USAGE,
refdConstrUID, ringConstrUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
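// ----------------------------------------------------------------------------
// Method: updateIndexInfo
//
// Determines whether an index must be created to enforce a unique or
// referential constraint. If no existing index or primary key covers the
// constraint columns, an index is created (optionally 'no populate') and
// marked as implicit in the INDEXES metadata table. TABLE_CONSTRAINTS is
// then updated with the UID of the index that enforces the constraint.
//
// returns:  0 - successful, -1 - error (diags area is set)
// ----------------------------------------------------------------------------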
short CmpSeabaseDDL::updateIndexInfo(
NAList<NAString> &ringKeyColList,
NAList<NAString> &ringKeyColOrderList,
NAList<NAString> &refdKeyColList,
NAString &uniqueStr,
Int64 constrUID,
const char * catName,
const char * schName,
const char * objName,
NATable * naTable,
NABoolean isUnique, // TRUE: uniq constr. FALSE: ref constr.
NABoolean noPopulate,
NABoolean isEnforced,
NABoolean sameSequenceOfCols,
ExeCliInterface *cliInterface)
{
// Now we need to determine if an index has to be created for
// the unique or ref constraint.
NABoolean createIndex = TRUE;
NAString existingIndexName;
if (naTable->getCorrespondingIndex(ringKeyColList,
TRUE, // explicit index only
isUnique, //TRUE, look for unique index.
TRUE, //isUnique, //TRUE, look for primary key.
(NOT isUnique), // TRUE, look for any index or pkey
TRUE, // exclude system computed cols like salt, division
sameSequenceOfCols,
&existingIndexName))
createIndex = FALSE;
// if constraint is not to be enforced, then do not create an index.
if (createIndex && (NOT isEnforced))
return 0;
ComObjectName indexName(createIndex ? uniqueStr : existingIndexName);
const NAString catalogNamePart = indexName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = indexName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = indexName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
char buf[5000];
Lng32 cliRC;
Int64 tableUID = naTable->objectUid().castToInt64();
if (createIndex)
{
NAString keyColNameStr;
for (CollIndex i = 0; i < ringKeyColList.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += ringKeyColList[i];
keyColNameStr += "\" ";
keyColNameStr += ringKeyColOrderList[i];
if (i+1 < ringKeyColList.entries())
keyColNameStr += ", ";
}
char noPopStr[100];
if (noPopulate)
strcpy(noPopStr, " no populate ");
else
strcpy(noPopStr, " ");
if (isUnique)
str_sprintf(buf, "create unique index \"%s\" on \"%s\".\"%s\".\"%s\" ( %s ) %s",
quotedObjName.data(),
catName, schName, objName,
keyColNameStr.data(),
noPopStr);
else
str_sprintf(buf, "create index \"%s\" on \"%s\".\"%s\".\"%s\" ( %s ) %s",
quotedObjName.data(),
catName, schName, objName,
keyColNameStr.data(),
noPopStr);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// update indexes table and mark this index as an implicit index.
str_sprintf(buf, "update %s.\"%s\".%s set is_explicit = 0 where base_table_uid = %ld and index_uid = (select object_uid from %s.\"%s\".%s where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = 'IX') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
tableUID,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catName, schemaNamePart.data(), objectNamePart.data());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
if (noPopulate)
{
if (updateObjectValidDef(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_INDEX_OBJECT_LIT,
COM_YES_LIT))
{
return -1;
}
}
}
// update table_constraints table with the uid of this index.
Int64 indexUID =
getObjectUID(cliInterface,
catName, schemaNamePart, objectNamePart,
COM_INDEX_OBJECT_LIT);
if (indexUID < 0)
{
// primary key. Clear diags area since getObjectUID sets up diags entry.
CmpCommon::diags()->clear();
}
str_sprintf(buf, "update %s.\"%s\".%s set index_uid = %ld where table_uid = %ld and constraint_uid = %ld and constraint_type = '%s'",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
indexUID,
tableUID, constrUID,
(isUnique ? COM_UNIQUE_CONSTRAINT_LIT : COM_FOREIGN_KEY_CONSTRAINT_LIT));
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
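// ----------------------------------------------------------------------------
// Method: setupAndErrorChecks
//
// Common setup and validation for DDL operations on an existing object:
// applies name defaults, converts a native name to its Trafodion form for
// external tables, rejects reserved metadata schemas, verifies that the
// object exists in metadata, optionally fetches the NATable and checks that
// the current user is authorized, and rejects volatile tables and HBase
// mapped tables when the operation does not support them.
//
// returns:  0 - successful, -1 - error (diags area is set)
// ----------------------------------------------------------------------------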
short CmpSeabaseDDL::setupAndErrorChecks
(NAString &tabName, QualifiedName &origTableName,
NAString &currCatName, NAString &currSchName,
NAString &catalogNamePart, NAString &schemaNamePart, NAString &objectNamePart,
NAString &extTableName, NAString &extNameForHbase,
CorrName &cn,
NATable* *naTable,
NABoolean volTabSupported,
NABoolean hbaseMapSupported,
ExeCliInterface *cliInterface,
const ComObjectType objectType,
SQLOperation operation,
NABoolean isExternal)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
NABoolean schNameSpecified = (NOT origTableName.getSchemaName().isNull());
if (isExternal)
{
// Convert the native name to its Trafodion form
tabName = ComConvertNativeNameToTrafName
(tableName.getCatalogNamePartAsAnsiString(),
tableName.getSchemaNamePartAsAnsiString(),
tableName.getObjectNamePartAsAnsiString());
ComObjectName adjustedName(tabName, COM_TABLE_NAME);
tableName = adjustedName;
}
catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
extTableName = tableName.getExternalName(TRUE);
extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return -1;
}
retcode = lookForTableInMD(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
schNameSpecified, FALSE,
tableName, tabName, extTableName,
objectType);
if (retcode < 0)
{
processReturn();
return -1;
}
if (retcode == 0) // doesn't exist
{
if (objectType == COM_BASE_TABLE_OBJECT)
*CmpCommon::diags() << DgSqlCode(-1127)
<< DgTableName(extTableName);
else
*CmpCommon::diags() << DgSqlCode(-1389)
<< DgString0(extTableName);
processReturn();
return -1;
}
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
if (naTable)
{
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
cn = CorrName(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
*naTable = bindWA.getNATableInternal(cn);
if (*naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return -1;
}
objectOwnerID = (*naTable)->getOwner();
schemaOwnerID = (*naTable)->getSchemaOwner();
// Make sure the user has the privilege to perform this operation
if (!isDDLOperationAuthorized(operation,
objectOwnerID, schemaOwnerID))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return -1;
}
// return an error if the operation is on a volatile table and volatile tables are not supported
if ((NOT volTabSupported) && (naTable && (*naTable)->isVolatileTable()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT);
processReturn ();
return -1;
}
if ((NOT hbaseMapSupported) && (naTable && (*naTable)->isHbaseMapTable()))
{
// not supported
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("This feature not available for an HBase mapped table.");
processReturn();
return -1;
}
}
return 0;
}
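// Helper to restore the HBASE_SERIALIZATION default to its saved value
// (hbVal) if it was turned off earlier for an HBase mapped table.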
static void resetHbaseSerialization(NABoolean hbaseSerialization,
NAString &hbVal)
{
if (hbaseSerialization)
{
ActiveSchemaDB()->getDefaults().validateAndInsert
("hbase_serialization", hbVal, FALSE);
}
}
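// ----------------------------------------------------------------------------
// Method: createSeabaseTable2
//
// Processes a CREATE TABLE statement: handles external (Hive) and HBase
// mapped tables, CREATE TABLE LIKE, the implicit SYSKEY/ROW_ID primary key,
// salting and divisioning columns, and builds the column and key info
// arrays used to create the table and its metadata.
// ----------------------------------------------------------------------------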
// RETURN: -1, no need to cleanup. -2, caller needs to call cleanup.
// 0, all ok.
short CmpSeabaseDDL::createSeabaseTable2(
ExeCliInterface &cliInterface,
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName,
NABoolean isCompound,
Int64 &outObjUID)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
// external table to be mapped to an existing hbase table
NABoolean hbaseMapFormat = FALSE;
// format of data in hbase mapped table: native or string.
// See ComRowFormat in common/ComSmallDefs.h for details.
NABoolean hbaseMappedDataFormatIsString = FALSE;
// Make some additional checks if creating an external hive table
ComObjectName *srcTableName = NULL;
if ((createTableNode->isExternal()) &&
(NOT createTableNode->mapToHbaseTable()))
{
// The schema name of the target table, if specified, must match the
// schema name of the source table
NAString origSchemaName =
createTableNode->getOrigTableNameAsQualifiedName().getSchemaName();
srcTableName = new(STMTHEAP) ComObjectName
(createTableNode->getLikeSourceTableName(), COM_TABLE_NAME);
srcTableName->applyDefaults(currCatAnsiName, currSchAnsiName);
if (srcTableName->getCatalogNamePartAsAnsiString() == HBASE_SYSTEM_CATALOG)
{
*CmpCommon::diags()
<< DgSqlCode(-3242)
<< DgString0("Cannot create external table on a native HBase table without the MAP TO option.");
return -1;
}
// Convert the native table name to its trafodion name
NAString tabName = ComConvertNativeNameToTrafName
(srcTableName->getCatalogNamePartAsAnsiString(),
srcTableName->getSchemaNamePartAsAnsiString(),
tableName.getObjectNamePartAsAnsiString());
ComObjectName adjustedName(tabName, COM_TABLE_NAME);
NAString type = "HIVE";
tableName = adjustedName;
// Verify that the name with prepending is not too long
if (tableName.getSchemaNamePartAsAnsiString(TRUE).length() >
ComMAX_ANSI_IDENTIFIER_INTERNAL_LEN)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_SCHEMA_NAME_TOO_LONG)
<< DgString0(type.data())
<< DgTableName(tableName.getSchemaNamePartAsAnsiString(FALSE))
<< DgInt0(ComMAX_ANSI_IDENTIFIER_INTERNAL_LEN - sizeof(HIVE_EXT_SCHEMA_PREFIX));
return -1;
}
if ((origSchemaName.length() > 0)&&
(origSchemaName != srcTableName->getSchemaNamePart().getExternalName()))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_NAME_MISMATCH)
<< DgString0 (type.data())
<< DgTableName(origSchemaName)
<< DgString1((srcTableName->getSchemaNamePart().getExternalName()));
return -1;
}
// For now the object name of the target table must match the
// object name of the source table
if (tableName.getObjectNamePart().getExternalName() !=
srcTableName->getObjectNamePart().getExternalName())
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_NAME_MISMATCH)
<< DgString0 (type.data())
<< DgTableName(tableName.getObjectNamePart().getExternalName())
<< DgString1((srcTableName->getObjectNamePart().getExternalName()));
return -1;
}
} // external hive table
// Make some additional checks if creating an external hbase mapped table
if ((createTableNode->isExternal()) &&
(createTableNode->mapToHbaseTable()))
{
if (CmpCommon::getDefault(TRAF_HBASE_MAPPED_TABLES) == DF_OFF)
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("HBase mapped tables not supported.");
return -1;
}
srcTableName = new(STMTHEAP) ComObjectName
(createTableNode->getLikeSourceTableName(), COM_TABLE_NAME);
ComAnsiNamePart hbCat(HBASE_SYSTEM_CATALOG);
ComAnsiNamePart hbSch(HBASE_SYSTEM_SCHEMA);
srcTableName->applyDefaults(hbCat, hbSch);
hbaseMapFormat = TRUE;
hbaseMappedDataFormatIsString =
createTableNode->isHbaseDataFormatString();
ComAnsiNamePart trafCat(TRAFODION_SYSCAT_LIT);
ComAnsiNamePart trafSch(NAString(HBASE_EXT_MAP_SCHEMA),
ComAnsiNamePart::INTERNAL_FORMAT);
tableName.setCatalogNamePart(trafCat);
tableName.setSchemaNamePart(trafSch);
// For now the object name of the target table must match the
// object name of the source table
if (tableName.getObjectNamePart().getExternalName() !=
srcTableName->getObjectNamePart().getExternalName())
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_NAME_MISMATCH)
<< DgString0 ("HBASE")
<< DgTableName(tableName.getObjectNamePart().getExternalName())
<< DgString1((srcTableName->getObjectNamePart().getExternalName()));
return -1;
}
} // external hbase mapped table
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase =
(hbaseMapFormat ? "" :
catalogNamePart + "." + schemaNamePart + "." + objectNamePart);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return -1;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CREATE_TABLE_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_UNKNOWN_OBJECT, FALSE);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 1) // already exists
{
if (NOT createTableNode->createIfNotExists())
{
if (createTableNode->isVolatile())
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(extTableName);
}
deallocEHI(ehi);
processReturn();
return -1;
}
// If creating an external table, go perform operation
if (createTableNode->isExternal())
{
retcode = createSeabaseTableExternal
(cliInterface, createTableNode, tableName, *srcTableName);
if (retcode != 0 && CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("creating external HIVE table");
if (NOT hbaseMapFormat)
{
deallocEHI(ehi);
processReturn();
return retcode;
}
}
// make sure the table to be mapped exists
if (hbaseMapFormat)
{
HbaseStr hbaseTable;
hbaseTable.val = (char*)objectNamePart.data();
hbaseTable.len = objectNamePart.length();
if (ehi->exists(hbaseTable) == 0) // does not exist in hbase
{
*CmpCommon::diags() << DgSqlCode(-4260)
<< DgString0(objectNamePart);
deallocEHI(ehi);
processReturn();
return -1;
}
}
ElemDDLColDefArray &colArray = createTableNode->getColDefArray();
ElemDDLColRefArray &keyArray =
(createTableNode->getIsConstraintPKSpecified() ?
createTableNode->getPrimaryKeyColRefArray() :
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION ?
createTableNode->getKeyColumnArray() :
createTableNode->getPrimaryKeyColRefArray()));
if ((NOT ((createTableNode->isExternal()) &&
(createTableNode->mapToHbaseTable()))) &&
((createTableNode->getIsConstraintPKSpecified()) &&
(createTableNode->getAddConstraintPK()) &&
(createTableNode->getAddConstraintPK()->getAlterTableAction()->castToElemDDLConstraintPK()->notSerialized())))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("NOT SERIALIZED option cannot be specified for primary key of this table.");
return -1;
}
Int32 objectOwnerID = SUPER_USER;
Int32 schemaOwnerID = SUPER_USER;
ComSchemaClass schemaClass;
retcode = verifyDDLCreateOperationAuthorized(&cliInterface,
SQLOperation::CREATE_TABLE,
catalogNamePart,
schemaNamePart,
schemaClass,
objectOwnerID,
schemaOwnerID);
if (retcode != 0)
{
handleDDLCreateAuthorizationError(retcode,catalogNamePart,schemaNamePart);
deallocEHI(ehi);
processReturn();
return -1;
}
// If the schema name specified is an external HIVE or HBase schema name,
// users cannot create tables in it.
if (ComIsTrafodionExternalSchemaName(schemaNamePart) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) &&
(NOT hbaseMapFormat))
{
// error.
*SqlParser_Diags << DgSqlCode(-CAT_CREATE_TABLE_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName.data());
return -1;
}
if (createTableNode->getIsLikeOptionSpecified())
{
createSeabaseTableLike(&cliInterface,
createTableNode, currCatName, currSchName);
deallocEHI(ehi);
processReturn();
return -1;
}
// For shared schemas, histogram tables should be owned by the schema owner,
// not the first user to run UPDATE STATISTICS in the schema.
if (schemaClass == COM_SCHEMA_CLASS_SHARED && isHistogramTable(objectNamePart))
objectOwnerID = schemaOwnerID;
// check if a reserved column name (such as SYSKEY) is specified as a column name.
for (Lng32 i = 0; i < colArray.entries(); i++)
{
if ((CmpCommon::getDefault(TRAF_ALLOW_RESERVED_COLNAMES) == DF_OFF) &&
(ComTrafReservedColName(colArray[i]->getColumnName())))
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_COLUMN_NAME)
<< DgString0(colArray[i]->getColumnName());
deallocEHI(ehi);
processReturn();
return -1;
}
}
NABoolean implicitPK = FALSE;
NAString syskeyColName("SYSKEY");
SQLLargeInt * syskeyType = new(STMTHEAP) SQLLargeInt(STMTHEAP, TRUE, FALSE);
ElemDDLColDef syskeyColDef(NULL, &syskeyColName, syskeyType, NULL,
STMTHEAP);
ElemDDLColRef edcr("SYSKEY", COM_ASCENDING_ORDER);
syskeyColDef.setColumnClass(COM_SYSTEM_COLUMN);
NAString hbRowIdColName("ROW_ID");
SQLVarChar * hbRowIdType = new(STMTHEAP) SQLVarChar(STMTHEAP, 1000, FALSE);
ElemDDLColDef hbRowIdColDef(NULL, &hbRowIdColName, hbRowIdType, NULL,
STMTHEAP);
ElemDDLColRef hbcr("ROW_ID", COM_ASCENDING_ORDER);
hbRowIdColDef.setColumnClass(COM_SYSTEM_COLUMN);
CollIndex numSysCols = 0;
CollIndex numSaltCols = 0;
CollIndex numDivCols = 0;
if (((createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION) &&
(NOT createTableNode->getIsConstraintPKSpecified())) ||
(keyArray.entries() == 0))
{
if (hbaseMapFormat)
{
// for now, return error if pkey is not specified.
*CmpCommon::diags() << DgSqlCode(-4259);
deallocEHI(ehi);
processReturn();
return -1;
colArray.insertAt(0, &hbRowIdColDef);
keyArray.insert(&hbcr);
}
else
{
colArray.insertAt(0, &syskeyColDef);
keyArray.insert(&edcr);
}
implicitPK = TRUE;
numSysCols++;
}
int numSaltPartns = 0; // # of "_SALT_" values
int numSplits = 0; // # of initial region splits
Lng32 numSaltPartnsFromCQD =
CmpCommon::getDefaultNumeric(TRAF_NUM_OF_SALT_PARTNS);
if ((createTableNode->getSaltOptions()) ||
((numSaltPartnsFromCQD > 0) &&
(NOT implicitPK)))
{
if (hbaseMapFormat)
{
// salt option not supported on hbase map table
*CmpCommon::diags() << DgSqlCode(-4259);
deallocEHI(ehi);
processReturn();
return -1;
}
// add a system column SALT INTEGER NOT NULL with a computed
// default value HASH2PARTFUNC(<salting cols> FOR <num salt partitions>)
ElemDDLSaltOptionsClause * saltOptions = createTableNode->getSaltOptions();
ElemDDLColRefArray *saltArray = createTableNode->getSaltColRefArray();
NAString saltExprText("HASH2PARTFUNC(");
NABoolean firstSaltCol = TRUE;
char numSaltPartnsStr[20];
if (saltArray == NULL || saltArray->entries() == 0)
{
// if no salting columns are specified, use all key columns
saltArray = &keyArray;
}
else
{
// Validate that salting columns refer to real key columns
for (CollIndex s=0; s<saltArray->entries(); s++)
if (keyArray.getColumnIndex((*saltArray)[s]->getColumnName()) < 0)
{
*CmpCommon::diags() << DgSqlCode(-1195)
<< DgColumnName((*saltArray)[s]->getColumnName());
deallocEHI(ehi);
processReturn();
return -1;
}
}
for (CollIndex i=0; i<saltArray->entries(); i++)
{
const NAString &colName = (*saltArray)[i]->getColumnName();
ComAnsiNamePart cnp(colName, ComAnsiNamePart::INTERNAL_FORMAT);
Lng32 colIx = colArray.getColumnIndex(colName);
if (colIx < 0)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(colName);
deallocEHI(ehi);
processReturn();
return -1;
}
NAType *colType = colArray[colIx]->getColumnDataType();
NAString typeText;
short rc = colType->getMyTypeAsText(&typeText, FALSE);
// don't include SYSKEY in the list of salt columns
if (colName != "SYSKEY")
{
if (firstSaltCol)
firstSaltCol = FALSE;
else
saltExprText += ",";
saltExprText += "CAST(";
if (NOT cnp.isDelimitedIdentifier())
saltExprText += "\"";
saltExprText += cnp.getExternalName();
if (NOT cnp.isDelimitedIdentifier())
saltExprText += "\"";
saltExprText += " AS ";
saltExprText += typeText;
if (!colType->supportsSQLnull())
saltExprText += " NOT NULL";
saltExprText += ")";
if (colType->getTypeQualifier() == NA_NUMERIC_TYPE &&
!(((NumericType *) colType)->isExact()))
{
*CmpCommon::diags() << DgSqlCode(-1120);
deallocEHI(ehi);
processReturn();
return -1;
}
}
else if (saltArray != &keyArray || saltArray->entries() == 1)
{
// SYSKEY was explicitly specified in salt column or is the only column,
// this is an error
*CmpCommon::diags() << DgSqlCode(-1195)
<< DgColumnName((*saltArray)[i]->getColumnName());
deallocEHI(ehi);
processReturn();
return -1;
}
}
numSaltPartns =
(saltOptions ? saltOptions->getNumPartitions() : numSaltPartnsFromCQD);
saltExprText += " FOR ";
sprintf(numSaltPartnsStr,"%d", numSaltPartns);
saltExprText += numSaltPartnsStr;
saltExprText += ")";
if (numSaltPartns <= 1 || numSaltPartns > 1024)
{
// number of salt partitions is out of bounds
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_NUM_OF_SALT_PARTNS)
<< DgInt0(2)
<< DgInt1(1024);
deallocEHI(ehi);
processReturn();
return -1;
}
NAString saltColName(ElemDDLSaltOptionsClause::getSaltSysColName());
SQLInt * saltType = new(STMTHEAP) SQLInt(STMTHEAP, FALSE, FALSE);
ElemDDLColDefault *saltDef =
new(STMTHEAP) ElemDDLColDefault(
ElemDDLColDefault::COL_COMPUTED_DEFAULT);
saltDef->setComputedDefaultExpr(saltExprText);
ElemDDLColDef * saltColDef =
new(STMTHEAP) ElemDDLColDef(NULL, &saltColName, saltType, saltDef,
STMTHEAP);
ElemDDLColRef * edcrs =
new(STMTHEAP) ElemDDLColRef(saltColName, COM_ASCENDING_ORDER);
saltColDef->setColumnClass(COM_SYSTEM_COLUMN);
// add this new salt column at the end
// and also as key column 0
colArray.insert(saltColDef);
keyArray.insertAt(0, edcrs);
numSysCols++;
numSaltCols++;
numSplits = numSaltPartns - 1;
}
// is hbase data stored in varchar format
if (hbaseMapFormat && hbaseMappedDataFormatIsString)
{
// cannot specify serialized primary key
if (createTableNode->getAddConstraintPK()->getAlterTableAction()->
castToElemDDLConstraintPK()->serialized())
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("SERIALIZED option cannot be specified for primary key of this table.");
return -1;
}
// must have only one varchar primary key col
if (keyArray.entries() > 1)
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Only one column can be specified as the primary key of this table.");
return -1;
}
Lng32 tableColNum =
(Lng32)colArray.getColumnIndex(keyArray[0]->getColumnName());
NAType *colType = colArray[tableColNum]->getColumnDataType();
if (NOT DFS2REC::isAnyVarChar(colType->getFSDatatype()))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Primary key column must be specified as varchar datatype for this table.");
return -1;
}
}
// create table in seabase
ParDDLFileAttrsCreateTable &fileAttribs =
createTableNode->getFileAttributes();
NABoolean alignedFormat = FALSE;
if (fileAttribs.isRowFormatSpecified() == TRUE)
{
if (fileAttribs.getRowFormat() == ElemDDLFileAttrRowFormat::eALIGNED)
{
alignedFormat = TRUE;
}
}
else if(CmpCommon::getDefault(TRAF_ALIGNED_ROW_FORMAT) == DF_ON)
{
if ( NOT isSeabaseReservedSchema(tableName))
{
// aligned format default does not apply to hbase map tables
if (NOT hbaseMapFormat)
alignedFormat = TRUE;
}
}
if (hbaseMapFormat && alignedFormat)
{
// not supported
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Aligned format cannot be specified for an HBase mapped table.");
deallocEHI(ehi);
processReturn();
return -1;
}
const NAString &defaultColFam = fileAttribs.getColFam();
// allow nullable clustering key or unique constraints based on the
// CQD settings. If a volatile table is being created and cqd
// VOLATILE_TABLE_FIND_SUITABLE_KEY is ON, then allow it.
// If ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT is set, then allow it.
NABoolean allowNullableUniqueConstr = FALSE;
if (((CmpCommon::getDefault(VOLATILE_TABLE_FIND_SUITABLE_KEY) != DF_OFF) &&
(createTableNode->isVolatile())) ||
(CmpCommon::getDefault(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT) == DF_ON))
allowNullableUniqueConstr = TRUE;
int numIterationsToCompleteColumnList = 1;
Lng32 numCols = 0;
Lng32 numKeys = 0;
ComTdbVirtTableColumnInfo * colInfoArray = NULL;
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
Lng32 identityColPos = -1;
std::vector<NAString> userColFamVec;
std::vector<NAString> trafColFamVec;
// if hbase map format, turn off global serialization default.
NABoolean hbaseSerialization = FALSE;
NAString hbVal;
if (hbaseMapFormat)
{
if (CmpCommon::getDefault(HBASE_SERIALIZATION) == DF_ON)
{
NAString value("OFF");
hbVal = "ON";
ActiveSchemaDB()->getDefaults().validateAndInsert(
"hbase_serialization", value, FALSE);
hbaseSerialization = TRUE;
}
}
// build colInfoArray and keyInfoArray; this may take two
// iterations if we need to add a divisioning column
for (int iter=0; iter < numIterationsToCompleteColumnList; iter++)
{
numCols = colArray.entries();
numKeys = keyArray.entries();
colInfoArray = new(STMTHEAP) ComTdbVirtTableColumnInfo[numCols];
keyInfoArray = new(STMTHEAP) ComTdbVirtTableKeyInfo[numKeys];
if (buildColInfoArray(COM_BASE_TABLE_OBJECT,
FALSE, // not a metadata, histogram or repository object
&colArray, colInfoArray, implicitPK,
alignedFormat, &identityColPos,
(hbaseMapFormat ? NULL : &userColFamVec),
&trafColFamVec,
defaultColFam.data()))
{
resetHbaseSerialization(hbaseSerialization, hbVal);
processReturn();
return -1;
}
if (buildKeyInfoArray(&colArray, NULL,
&keyArray, colInfoArray, keyInfoArray,
allowNullableUniqueConstr))
{
resetHbaseSerialization(hbaseSerialization, hbVal);
processReturn();
return -1;
}
if (iter == 0 && createTableNode->isDivisionClauseSpecified())
{
// We need the colArray to be able to bind the divisioning
// expression, check it and compute its type. Once we have the
// type, we will add a divisioning column of that type and
// also add that column to the key. Then we will need to go
// through this loop once more and create the updated colArray.
numIterationsToCompleteColumnList = 2;
NAColumnArray *naColArrayForBindingDivExpr = new(STMTHEAP) NAColumnArray(STMTHEAP);
NAColumnArray *keyColArrayForBindingDivExpr = new(STMTHEAP) NAColumnArray(STMTHEAP);
ItemExprList * divExpr = createTableNode->getDivisionExprList();
ElemDDLColRefArray *divColNamesFromDDL = createTableNode->getDivisionColRefArray();
CmpSeabaseDDL::convertColAndKeyInfoArrays(
numCols,
colInfoArray,
numKeys,
keyInfoArray,
naColArrayForBindingDivExpr,
keyColArrayForBindingDivExpr);
for (CollIndex d=0; d<divExpr->entries(); d++)
{
NABoolean exceptionOccurred = FALSE;
ComColumnOrdering divKeyOrdering = COM_ASCENDING_ORDER;
ItemExpr *boundDivExpr =
bindDivisionExprAtDDLTime((*divExpr)[d],
keyColArrayForBindingDivExpr,
STMTHEAP);
if (!boundDivExpr)
{
resetHbaseSerialization(hbaseSerialization, hbVal);
processReturn();
return -1;
}
if (boundDivExpr->getOperatorType() == ITM_INVERSE)
{
divKeyOrdering = COM_DESCENDING_ORDER;
boundDivExpr = boundDivExpr->child(0);
if (boundDivExpr->getOperatorType() == ITM_INVERSE)
{
// in rare cases we could have two inverse operators
                  // stacked on top of each other, indicating ascending order
divKeyOrdering = COM_ASCENDING_ORDER;
boundDivExpr = boundDivExpr->child(0);
}
}
try
{
// put this into a try/catch block because it could throw
// an exception when type synthesis fails and that would leave
// the transaction begun by the DDL operation in limbo
boundDivExpr->synthTypeAndValueId();
}
catch (...)
{
// diags area should be set, if not, set it
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode(-4243)
<< DgString0("(expression with unknown type)");
exceptionOccurred = TRUE;
}
if (exceptionOccurred ||
boundDivExpr->getValueId() == NULL_VALUE_ID)
{
resetHbaseSerialization(hbaseSerialization, hbVal);
processReturn();
return -1;
}
if (validateDivisionByExprForDDL(boundDivExpr))
{
resetHbaseSerialization(hbaseSerialization, hbVal);
processReturn();
return -1;
}
// Add a divisioning column to the list of columns and the key
char buf[16];
snprintf(buf, sizeof(buf), "_DIVISION_%d_", d+1);
NAString divColName(buf);
// if the division column name was specified in the DDL, use that instead
if (divColNamesFromDDL && divColNamesFromDDL->entries() > d)
divColName = (*divColNamesFromDDL)[d]->getColumnName();
NAType * divColType =
boundDivExpr->getValueId().getType().newCopy(STMTHEAP);
ElemDDLColDefault *divColDefault =
new(STMTHEAP) ElemDDLColDefault(
ElemDDLColDefault::COL_COMPUTED_DEFAULT);
NAString divExprText;
boundDivExpr->unparse(divExprText, PARSER_PHASE, COMPUTED_COLUMN_FORMAT);
divColDefault->setComputedDefaultExpr(divExprText);
ElemDDLColDef * divColDef =
new(STMTHEAP) ElemDDLColDef(NULL, &divColName, divColType, divColDefault,
STMTHEAP);
ElemDDLColRef * edcrs =
new(STMTHEAP) ElemDDLColRef(divColName, divKeyOrdering);
divColDef->setColumnClass(COM_SYSTEM_COLUMN);
divColDef->setDivisionColumnFlag(TRUE);
divColDef->setDivisionColumnSequenceNumber(d);
// add this new divisioning column to the end of the row
// and also to the key, right after any existing salt and divisioning columns
colArray.insert(divColDef);
keyArray.insertAt(numSaltCols+numDivCols, edcrs);
numSysCols++;
numDivCols++;
}
}
} // iterate 1 or 2 times to get all columns, including divisioning columns
if (hbaseSerialization)
{
ActiveSchemaDB()->getDefaults().validateAndInsert
("hbase_serialization", hbVal, FALSE);
}
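  // compute the total encoded length of the clustering key and
  // reject any LOB columns that appear in the key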
Int32 keyLength = 0;
for(CollIndex i = 0; i < keyArray.entries(); i++)
{
const NAString &colName = keyArray[i]->getColumnName();
Lng32 colIx = colArray.getColumnIndex(colName);
if (colIx < 0)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(colName);
deallocEHI(ehi);
processReturn();
return -1;
}
NAType *colType = colArray[colIx]->getColumnDataType();
if (colType->getFSDatatype() == REC_BLOB || colType->getFSDatatype() == REC_CLOB)
//Cannot allow LOB in primary or clustering key
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COL_CANNOT_BE_INDEX_OR_KEY)
<< DgColumnName(colName);
deallocEHI(ehi);
processReturn();
return -1;
}
keyLength += colType->getEncodedKeyLength();
}
//check the key length
if(keyLength > MAX_HBASE_ROWKEY_LEN )
{
*CmpCommon::diags() << DgSqlCode(-CAT_ROWKEY_LEN_TOO_LARGE)
<< DgInt0(keyLength)
<< DgInt1(MAX_HBASE_ROWKEY_LEN);
deallocEHI(ehi);
processReturn();
return -1;
}
if (hbaseMapFormat)
{
for(CollIndex i = 0; i < colArray.entries(); i++)
{
const NAString colName = colInfoArray[i].colName;
Lng32 colIx = keyArray.getColumnIndex(colName);
if (colIx < 0) // not a key col
{
if (colInfoArray[i].defaultClass != COM_NULL_DEFAULT)
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Non-key columns of an HBase mapped table must be nullable with default value of NULL.");
deallocEHI(ehi);
processReturn();
return -1;
}
// must have a default value if not nullable
if (! colInfoArray[i].nullable)
{
if (colInfoArray[i].defaultClass == COM_NO_DEFAULT)
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Non-key non-nullable columns of an HBase mapped table must have a default value.");
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
} // for
}
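  // if region splits were requested (salted table), build the encoded
  // HBase split keys from the column and key descriptors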
char ** encodedKeysBuffer = NULL;
if (numSplits > 0) {
TrafDesc * colDescs =
convertVirtTableColumnInfoArrayToDescStructs(&tableName,
colInfoArray,
numCols) ;
TrafDesc * keyDescs =
convertVirtTableKeyInfoArrayToDescStructs(keyInfoArray,
colInfoArray,
numKeys) ;
if (createEncodedKeysBuffer(encodedKeysBuffer/*out*/,
numSplits /*out*/,
colDescs, keyDescs,
numSaltPartns,
numSplits,
NULL,
numKeys,
keyLength, FALSE))
{
processReturn();
return -1;
}
}
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = NULL;
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = 0;
tableInfo->isAudited = (fileAttribs.getIsAudit() ? 1 : 0);
tableInfo->validDef = 1;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = 0;
tableInfo->tablesFlags = 0;
if (fileAttribs.isOwnerSpecified())
{
      // Fixed bug: if the BY clause specified an unregistered user, the object
      // owner was set to 0 in the metadata. Once 0, the table could not be dropped.
NAString owner = fileAttribs.getOwner();
Int16 retcode = (ComUser::getUserIDFromUserName(owner.data(),objectOwnerID));
if (retcode == FENOTFOUND)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(owner.data());
processReturn();
return -1;
}
else if (retcode != FEOK)
{
*CmpCommon::diags() << DgSqlCode (-CAT_INTERNAL_EXCEPTION_ERROR)
<< DgString0(__FILE__)
<< DgInt0(__LINE__)
<< DgString1("verifying grantee");
processReturn();
return -1;
}
if (schemaClass == COM_SCHEMA_CLASS_PRIVATE &&
objectOwnerID != schemaOwnerID)
{
*CmpCommon::diags() << DgSqlCode(-CAT_BY_CLAUSE_IN_PRIVATE_SCHEMA);
deallocEHI(ehi);
processReturn();
return -1;
}
}
tableInfo->objOwnerID = objectOwnerID;
tableInfo->schemaOwnerID = schemaOwnerID;
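  // numSaltPartns stored in metadata is the number of region splits plus one;
  // zero indicates an unsalted table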
tableInfo->numSaltPartns = (numSplits > 0 ? numSplits+1 : 0);
if (hbaseMapFormat && hbaseMappedDataFormatIsString)
tableInfo->rowFormat = COM_HBASE_STR_FORMAT_TYPE;
else if (alignedFormat)
tableInfo->rowFormat = COM_ALIGNED_FORMAT_TYPE;
else
tableInfo->rowFormat = COM_HBASE_FORMAT_TYPE;
NAList<HbaseCreateOption*> hbaseCreateOptions(STMTHEAP);
NAString hco;
short retVal = setupHbaseOptions(createTableNode->getHbaseOptionsClause(),
numSplits, extTableName,
hbaseCreateOptions, hco);
if (retVal)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (alignedFormat)
{
hco += "ROW_FORMAT=>ALIGNED ";
}
tableInfo->hbaseCreateOptions = (hco.isNull() ? NULL : hco.data());
tableInfo->defaultColFam = NULL;
tableInfo->allColFams = NULL;
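  // an internal query may request that the table be created with a specific
  // object UID, supplied through the TRAF_CREATE_TABLE_WITH_UID default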
Int64 objUID = -1;
if (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
const char* v = ActiveSchemaDB()->getDefaults().
getValue(TRAF_CREATE_TABLE_WITH_UID);
if ((v) and (strlen(v) > 0))
{
objUID = str_atoi(v, strlen(v));
}
}
if (updateSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
COM_NO_LIT,
tableInfo,
numCols,
colInfoArray,
numKeys,
keyInfoArray,
0, NULL,
objUID))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
outObjUID = objUID;
// update TEXT table with column families.
// Column families are stored separated by a blank space character.
NAString allColFams;
NABoolean addToTextTab = FALSE;
if (defaultColFam != SEABASE_DEFAULT_COL_FAMILY)
addToTextTab = TRUE;
else if (userColFamVec.size() > 1)
addToTextTab = TRUE;
else if ((userColFamVec.size() == 1) && (userColFamVec[0] != SEABASE_DEFAULT_COL_FAMILY))
addToTextTab = TRUE;
if (addToTextTab)
{
allColFams = defaultColFam + " ";
for (int i = 0; i < userColFamVec.size(); i++)
{
allColFams += userColFamVec[i];
allColFams += " ";
}
cliRC = updateTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0,
allColFams);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (createTableNode->getAddConstraintPK())
{
if (updatePKeyInfo(createTableNode->getAddConstraintPK(),
catalogNamePart, schemaNamePart, objectNamePart,
objectOwnerID,
schemaOwnerID,
keyArray.entries(),
NULL,
NULL,
keyInfoArray,
&cliInterface))
{
return -1;
}
}
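  // if the table has an IDENTITY column, create the internal sequence
  // generator that supplies its values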
if (identityColPos >= 0)
{
ElemDDLColDef *colDef = colArray[identityColPos];
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart, colDef->getColumnName(),
seqName);
if (colDef->getSGOptions())
{
colDef->getSGOptions()->setFSDataType((ComFSDataType)colDef->getColumnDataType()->getFSDatatype());
if (colDef->getSGOptions()->validate(2/*identity*/))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
SequenceGeneratorAttributes sga;
colDef->getSGOptions()->genSGA(sga);
NAString idOptions;
sga.display(NULL, &idOptions, TRUE);
char buf[4000];
str_sprintf(buf, "create internal sequence %s.\"%s\".\"%s\" %s",
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
idOptions.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_MINE_ONLY, COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns(), FALSE);
// update datatype for this sequence
str_sprintf(buf, "update %s.\"%s\".%s set fs_data_type = %d where seq_type = '%s' and seq_uid = (select object_uid from %s.\"%s\".\"%s\" where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = '%s') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_SEQ_GEN,
colDef->getColumnDataType()->getFSDatatype(),
COM_INTERNAL_SG_LIT,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
COM_SEQUENCE_GENERATOR_OBJECT_LIT);
Int64 rowsAffected = 0;
cliRC = cliInterface.executeImmediate(buf, NULL, NULL, FALSE, &rowsAffected);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
}
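  // create the underlying HBase table now that the metadata has been updated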
NABoolean ddlXns = createTableNode->ddlXns();
if (NOT extNameForHbase.isNull())
{
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
if (createHbaseTable(ehi, &hbaseTable, trafColFamVec,
&hbaseCreateOptions,
numSplits, keyLength,
encodedKeysBuffer,
FALSE, ddlXns
) == -1)
{
deallocEHI(ehi);
processReturn();
return -2;
}
}
// if this table has lob columns, create the lob files
short *lobNumList = new (STMTHEAP) short[numCols];
short *lobTypList = new (STMTHEAP) short[numCols];
char **lobLocList = new (STMTHEAP) char*[numCols];
char **lobColNameList = new (STMTHEAP) char*[numCols];
Lng32 j = 0;
Int64 lobMaxSize = CmpCommon::getDefaultNumeric(LOB_MAX_SIZE)*1024*1024;
for (Int32 i = 0; i < colArray.entries(); i++)
{
ElemDDLColDef *column = colArray[i];
Lng32 datatype = column->getColumnDataType()->getFSDatatype();
if ((datatype == REC_BLOB) ||
(datatype == REC_CLOB))
{
lobNumList[j] = i; //column->getColumnNumber();
lobTypList[j] = (short)(column->getLobStorage());
char * loc = new (STMTHEAP) char[1024];
const char* f = ActiveSchemaDB()->getDefaults().
getValue(LOB_STORAGE_FILE_DIR);
strcpy(loc, f);
lobLocList[j] = loc;
char *colname = new (STMTHEAP) char[256];
strcpy(colname,column->getColumnName());
lobColNameList[j] = colname;
j++;
}
}
char lobHdfsServer[256] ; // max length determined by dfs.namenode.fs-limits.max-component-length(255)
memset(lobHdfsServer,0,256);
strncpy(lobHdfsServer,CmpCommon::getDefaultString(LOB_HDFS_SERVER),sizeof(lobHdfsServer)-1);
Int32 lobHdfsPort = (Lng32)CmpCommon::getDefaultNumeric(LOB_HDFS_PORT);
if (j > 0)
{
Int32 rc = sendAllControls(FALSE, FALSE, TRUE);
//if the table is a volatile table return an error
if (createTableNode->isVolatile())
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_LOB_COLUMN_IN_VOLATILE_TABLE)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComString newSchName = "\"";
newSchName += catalogNamePart;
newSchName.append("\".\"");
newSchName.append(schemaNamePart);
newSchName += "\"";
NABoolean lobTrace=FALSE;
if (getenv("TRACE_LOB_ACTIONS"))
lobTrace=TRUE;
rc = SQL_EXEC_LOBddlInterface((char*)newSchName.data(),
newSchName.length(),
objUID,
j,
LOB_CLI_CREATE,
lobNumList,
lobTypList,
lobLocList,
lobColNameList,
lobHdfsServer,
lobHdfsPort,
lobMaxSize,
lobTrace);
if (rc < 0)
{
// retrieve the cli diags here.
CmpCommon::diags()->mergeAfter(*(GetCliGlobals()->currContext()->getDiagsArea()));
*CmpCommon::diags() << DgSqlCode(-CAT_CREATE_OBJECT_ERROR)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
// if not a compound create, update valid def to true.
if (NOT ((createTableNode->getAddConstraintUniqueArray().entries() > 0) ||
(createTableNode->getAddConstraintRIArray().entries() > 0) ||
(createTableNode->getAddConstraintCheckArray().entries() > 0)))
{
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
if (NOT isCompound)
{
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_MINE_ONLY,
COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns(), FALSE);
}
processReturn();
return 0;
label_error:
if (hbaseSerialization)
{
ActiveSchemaDB()->getDefaults().validateAndInsert
("hbase_serialization", hbVal, FALSE);
}
return -1;
} // createSeabaseTable2
void CmpSeabaseDDL::createSeabaseTable(
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName,
NABoolean isCompound,
Int64 *retObjUID)
{
NABoolean xnWasStartedHere = FALSE;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
Int64 objUID = 0;
short rc =
createSeabaseTable2(cliInterface, createTableNode, currCatName, currSchName,
isCompound, objUID);
if ((CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) &&
(rc < 0))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
if (rc == -2) // cleanup before returning error..
{
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns());
}
return;
}
if (retObjUID)
*retObjUID = objUID;
if (NOT isCompound)
{
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
processReturn();
return;
}
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return;
}
void CmpSeabaseDDL::addConstraints(
ComObjectName &tableName,
ComAnsiNamePart &currCatAnsiName,
ComAnsiNamePart &currSchAnsiName,
StmtDDLNode * ddlNode,
StmtDDLAddConstraintPK * pkConstr,
StmtDDLAddConstraintUniqueArray &uniqueConstrArr,
StmtDDLAddConstraintRIArray &riConstrArr,
StmtDDLAddConstraintCheckArray &checkConstrArr,
NABoolean isCompound)
{
Lng32 cliRC = 0;
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
char buf[5000];
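  // add the primary key constraint, if one was passed in, by executing an
  // internal ALTER TABLE ... ADD CONSTRAINT ... UNIQUE statement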
if (pkConstr)
{
StmtDDLAddConstraintUnique *uniqConstr = pkConstr;
NAString uniqueName;
genUniqueName(uniqConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
ElemDDLConstraintUnique *constraintNode =
( uniqConstr->getConstraint() )->castToElemDDLConstraintUnique();
ElemDDLColRefArray &keyColumnArray = constraintNode->getKeyColumnArray();
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColumnArray.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += keyColumnArray[i]->getColumnName();
keyColNameStr += "\"";
if (keyColumnArray[i]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColNameStr += "DESC";
else
keyColNameStr += "ASC";
if (i+1 < keyColumnArray.entries())
keyColNameStr += ", ";
}
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" unique (%s)",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
keyColNameStr.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
}
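  // add any UNIQUE constraints the same way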
if (uniqueConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < uniqueConstrArr.entries(); i++)
{
StmtDDLAddConstraintUnique *uniqConstr =
uniqueConstrArr[i];
NAString uniqueName;
genUniqueName(uniqConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
ElemDDLConstraintUnique *constraintNode =
( uniqConstr->getConstraint() )->castToElemDDLConstraintUnique();
ElemDDLColRefArray &keyColumnArray = constraintNode->getKeyColumnArray();
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColumnArray.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += keyColumnArray[i]->getColumnName();
keyColNameStr += "\"";
if (keyColumnArray[i]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColNameStr += "DESC";
else
keyColNameStr += "ASC";
if (i+1 < keyColumnArray.entries())
keyColNameStr += ", ";
}
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" unique (%s)",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
keyColNameStr.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
} // for
} // if
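  // add referential integrity (foreign key) constraints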
if (riConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < riConstrArr.entries(); i++)
{
StmtDDLAddConstraintRI *refConstr = riConstrArr[i];
ComObjectName refdTableName(refConstr->getReferencedTableName(), COM_TABLE_NAME);
refdTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString refdCatNamePart = refdTableName.getCatalogNamePartAsAnsiString();
const NAString refdSchNamePart = refdTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString refdObjNamePart = refdTableName.getObjectNamePartAsAnsiString(TRUE);
NAString uniqueName;
genUniqueName(refConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
const NAString &addConstrName = constrName.getExternalName();
ElemDDLConstraintRI *constraintNode =
( refConstr->getConstraint() )->castToElemDDLConstraintRI();
ElemDDLColNameArray &ringColumnArray = constraintNode->getReferencingColumns();
NAString ringColNameStr;
for (CollIndex i = 0; i < ringColumnArray.entries(); i++)
{
ringColNameStr += "\"";
ringColNameStr += ringColumnArray[i]->getColumnName();
ringColNameStr += "\"";
if (i+1 < ringColumnArray.entries())
ringColNameStr += ", ";
}
ElemDDLColNameArray &refdColumnArray = constraintNode->getReferencedColumns();
NAString refdColNameStr;
if (refdColumnArray.entries() > 0)
refdColNameStr = "(";
for (CollIndex i = 0; i < refdColumnArray.entries(); i++)
{
refdColNameStr += "\"";
refdColNameStr += refdColumnArray[i]->getColumnName();
refdColNameStr += "\"";
if (i+1 < refdColumnArray.entries())
refdColNameStr += ", ";
}
if (refdColumnArray.entries() > 0)
refdColNameStr += ")";
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" foreign key (%s) references \"%s\".\"%s\".\"%s\" %s %s",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
ringColNameStr.data(),
refdCatNamePart.data(), refdSchNamePart.data(), refdObjNamePart.data(),
(refdColumnArray.entries() > 0 ? refdColNameStr.data() : " "),
(NOT constraintNode->isEnforced() ? " not enforced " : ""));
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
}
if (NOT isCompound)
{
CorrName cn2(refdObjNamePart.data(),
STMTHEAP,
refdSchNamePart.data(),
refdCatNamePart.data());
// remove natable for the table being referenced
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn2,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
ddlNode->ddlXns(), FALSE);
}
if (cliRC < 0)
goto label_return;
if (NOT constraintNode->isEnforced())
{
*CmpCommon::diags()
<< DgSqlCode(1313)
<< DgString0(addConstrName);
}
} // for
} // if
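  // add check constraints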
if (checkConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < checkConstrArr.entries(); i++)
{
StmtDDLAddConstraintCheck *checkConstr = checkConstrArr[i];
NAString uniqueName;
genUniqueName(checkConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
NAString constrText;
getCheckConstraintText(checkConstr, constrText);
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" check %s",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
constrText.data()
);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
}
}
label_return:
if (NOT isCompound)
{
// remove NATable cache entries for this table
CorrName cn(objectNamePart.data(),
STMTHEAP,
schemaNamePart.data(),
catalogNamePart.data());
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
ddlNode->ddlXns(), FALSE);
}
return;
}
void CmpSeabaseDDL::createSeabaseTableCompound(
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
NABoolean xnWasStartedHere = FALSE;
Int64 objUID = 0;
if ((createTableNode->isVolatile()) &&
((createTableNode->getAddConstraintUniqueArray().entries() > 0) ||
(createTableNode->getAddConstraintRIArray().entries() > 0) ||
(createTableNode->getAddConstraintCheckArray().entries() > 0)))
{
*CmpCommon::diags() << DgSqlCode(-1283);
processReturn();
goto label_error;
}
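  // create the base table first; isCompound = TRUE defers marking the object
  // valid until the constraints below have been added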
createSeabaseTable(createTableNode, currCatName, currSchName, TRUE, &objUID);
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
{
return;
}
cliRC = cliInterface.holdAndSetCQD("TRAF_NO_CONSTR_VALIDATION", "ON");
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error;
}
addConstraints(tableName, currCatAnsiName, currSchAnsiName,
createTableNode,
NULL,
createTableNode->getAddConstraintUniqueArray(),
createTableNode->getAddConstraintRIArray(),
createTableNode->getAddConstraintCheckArray(),
TRUE);
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
{
if (cliInterface.statusXn() == 0) // xn in progress
{
rollbackXn(&cliInterface);
}
*CmpCommon::diags() << DgSqlCode(-1029)
<< DgTableName(extTableName);
processReturn();
goto label_error;
}
cliRC = cliInterface.restoreCQD("traf_no_constr_validation");
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
goto label_error;
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
goto label_error;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
goto label_error;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
{
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns(), FALSE);
}
return;
label_error:
cliRC = cliInterface.restoreCQD("traf_no_constr_validation");
if (NOT createTableNode->isVolatile())
{
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
createTableNode->ddlXns());
return;
}
}
// return: 0, does not exist. 1, exists. -1, error.
short CmpSeabaseDDL::lookForTableInMD(
ExeCliInterface *cliInterface,
NAString &catNamePart, NAString &schNamePart, NAString &objNamePart,
NABoolean schNameSpecified, NABoolean hbaseMapSpecified,
ComObjectName &tableName, NAString &tabName, NAString &extTableName,
const ComObjectType objectType)
{
short retcode = 0;
if ((schNamePart == HBASE_EXT_MAP_SCHEMA) &&
(! Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-4261)
<< DgSchemaName(schNamePart);
return -1;
}
if (NOT hbaseMapSpecified)
retcode = existsInSeabaseMDTable
(cliInterface,
catNamePart, schNamePart, objNamePart,
objectType, //COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
return -1; // error
}
if (retcode == 1)
return 1; // exists
if ((retcode == 0) && // does not exist
(NOT schNameSpecified))
{
// if explicit schema name was not specified,
// check to see if this is an hbase mapped table.
retcode = existsInSeabaseMDTable
(cliInterface,
catNamePart, HBASE_EXT_MAP_SCHEMA, objNamePart,
objectType, //COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
return -1; // error
}
if (retcode != 0) // exists
{
schNamePart = HBASE_EXT_MAP_SCHEMA;
ComAnsiNamePart mapSchAnsiName
(schNamePart, ComAnsiNamePart::INTERNAL_FORMAT);
tableName.setSchemaNamePart(mapSchAnsiName);
extTableName = tableName.getExternalName(TRUE);
tabName = tableName.getExternalName();
return 1; // exists
}
}
return 0; // does not exist
}
// RETURN: -1, no need to clean up. -2, caller needs to call cleanup.
// 0, all ok.
short CmpSeabaseDDL::dropSeabaseTable2(
ExeCliInterface *cliInterface,
StmtDDLDropTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
NAString tabName = (NAString&)dropTableNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComObjectName volTabName;
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
NABoolean schNameSpecified =
(NOT dropTableNode->getOrigTableNameAsQualifiedName().getSchemaName().isNull());
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
if (dropTableNode->isExternal())
{
// Convert the native name to its Trafodion form
tabName = ComConvertNativeNameToTrafName
(tableName.getCatalogNamePartAsAnsiString(),
tableName.getSchemaNamePartAsAnsiString(),
tableName.getObjectNamePartAsAnsiString());
ComObjectName adjustedName(tabName, COM_TABLE_NAME);
tableName = adjustedName;
}
NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
// allowExternalTables: true to allow an NATable entry to be created for an external table
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return -1;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_USER_CANNOT_DROP_SMD_TABLE)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
NABoolean isVolatile = FALSE;
if ((dropTableNode->isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()))
{
volTabName = tableName;
isVolatile = TRUE;
}
if ((NOT dropTableNode->isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()))
{
// updateVolatileQualifiedName qualifies the object name with a
// volatile catalog and schema name (if a volatile schema exists)
QualifiedName *qn =
CmpCommon::context()->sqlSession()->
updateVolatileQualifiedName
(dropTableNode->getOrigTableNameAsQualifiedName().getObjectName());
      // we don't believe a null pointer can be returned here, but check anyway
if (qn == NULL)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_DROP_OBJECT)
<< DgTableName(dropTableNode->getOrigTableNameAsQualifiedName().
getQualifiedNameAsAnsiString(TRUE));
deallocEHI(ehi);
processReturn();
return -1;
}
volTabName = qn->getQualifiedNameAsAnsiString();
volTabName.applyDefaults(currCatAnsiName, currSchAnsiName);
NAString vtCatNamePart = volTabName.getCatalogNamePartAsAnsiString();
NAString vtSchNamePart = volTabName.getSchemaNamePartAsAnsiString(TRUE);
NAString vtObjNamePart = volTabName.getObjectNamePartAsAnsiString(TRUE);
retcode = existsInSeabaseMDTable(cliInterface,
vtCatNamePart, vtSchNamePart, vtObjNamePart,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 1)
{
// table found in volatile schema
// Validate volatile table name.
if (CmpCommon::context()->sqlSession()->
validateVolatileQualifiedName
(dropTableNode->getOrigTableNameAsQualifiedName()))
{
// Valid volatile table. Drop it.
tabName = volTabName.getExternalName(TRUE);
catalogNamePart = vtCatNamePart;
schemaNamePart = vtSchNamePart;
objectNamePart = vtObjNamePart;
isVolatile = TRUE;
}
else
{
// volatile table found but the name is not a valid
// volatile name. Look for the input name in the regular
// schema.
// But first clear the diags area.
CmpCommon::diags()->clear();
}
}
else
{
CmpCommon::diags()->clear();
}
}
retcode = lookForTableInMD(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
schNameSpecified,
(dropTableNode->isExternal() &&
(schemaNamePart == HBASE_EXT_MAP_SCHEMA)),
tableName, tabName, extTableName);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 0) // does not exist
{
if (NOT dropTableNode->dropIfExists())
{
CmpCommon::diags()->clear();
if (isVolatile)
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
}
deallocEHI(ehi);
processReturn();
return -1;
}
// if this table does not exist in hbase but exists in metadata, return error.
// This is an internal inconsistency which needs to be fixed by running cleanup.
  // If this is an external (native HIVE or HBASE) table, then skip this check.
if (!isSeabaseExternalSchema(catalogNamePart, schemaNamePart))
{
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
if ((NOT isVolatile)&& (ehi->exists(hbaseTable) == 0)) // does not exist in hbase
{
*CmpCommon::diags() << DgSqlCode(-4254)
<< DgString0(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
// Check to see if the user has the authority to drop the table
ComObjectName verifyName;
if (isVolatile)
verifyName = volTabName;
else
verifyName = tableName;
if (CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_OFF)
ActiveSchemaDB()->getNATableDB()->useCache();
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
Set_SqlParser_Flags(ALLOW_VOLATILE_SCHEMA_IN_TABLE_NAME);
CorrName cn(objectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
if (dropTableNode->isExternal())
bindWA.setExternalTableDrop(TRUE);
NATable *naTable = bindWA.getNATableInternal(cn, TRUE, NULL, TRUE);
bindWA.setExternalTableDrop(FALSE);
  // Restore parser flags settings to what they originally were
  Set_SqlParser_Flags (savedParserFlags);

  if (naTable == NULL || bindWA.errStatus())
{
if (NOT dropTableNode->dropIfExists())
{
if (isVolatile)
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
}
else
CmpCommon::diags()->clear();
deallocEHI(ehi);
processReturn();
return -1;
    }

  // naTable is known to be valid at this point, so it is now safe to dereference
  const NAColumnArray &nacolArr = naTable->getNAColumnArray();
if ((dropTableNode->isVolatile()) &&
(NOT CmpCommon::context()->sqlSession()->isValidVolatileSchemaName(schemaNamePart)))
{
*CmpCommon::diags() << DgSqlCode(-1279);
deallocEHI(ehi);
processReturn();
return -1;
}
Int64 objUID = naTable->objectUid().castToInt64();
// Make sure user has necessary privileges to perform drop
if (!isDDLOperationAuthorized(SQLOperation::DROP_TABLE,
naTable->getOwner(),
naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return -1;
}
Queue * usingViewsQueue = NULL;
if (dropTableNode->getDropBehavior() == COM_RESTRICT_DROP_BEHAVIOR)
{
NAString usingObjName;
cliRC = getUsingObject(cliInterface, objUID, usingObjName);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (cliRC != 100) // found an object
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEPENDENT_VIEW_EXISTS)
<< DgTableName(usingObjName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
else if (dropTableNode->getDropBehavior() == COM_CASCADE_DROP_BEHAVIOR)
{
cliRC = getAllUsingViews(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
usingViewsQueue);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
const AbstractRIConstraintList &uniqueList = naTable->getUniqueConstraints();
// return error if cascade is not specified and a referential constraint exists on
// any of the unique constraints.
if (dropTableNode->getDropBehavior() == COM_RESTRICT_DROP_BEHAVIOR)
{
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
if (uniqConstr->hasRefConstraintsReferencingMe())
{
const ComplementaryRIConstraint * rc = uniqConstr->getRefConstraintReferencingMe(0);
if (rc->getTableName() != naTable->getTableName())
{
              const NAString &constrName =
                rc->getConstraintName().getObjectName();
*CmpCommon::diags() << DgSqlCode(-1059)
<< DgConstraintName(constrName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
}
// Drop referencing objects
char query[4000];
// drop the views.
  // usingViewsQueue contains them in ascending order of their create
// time. Drop them from last to first.
if (usingViewsQueue)
{
for (int idx = usingViewsQueue->numEntries()-1; idx >= 0; idx--)
{
OutputInfo * vi = (OutputInfo*)usingViewsQueue->get(idx);
char * viewName = vi->get(0);
if (dropOneTableorView(*cliInterface,viewName,COM_VIEW_OBJECT,false))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
// drop all referential constraints referencing me.
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
      // We will only reach here if the cascade option is specified.
// drop all constraints referencing me.
if (uniqConstr->hasRefConstraintsReferencingMe())
{
for (Lng32 j = 0; j < uniqConstr->getNumRefConstraintsReferencingMe(); j++)
{
const ComplementaryRIConstraint * rc =
uniqConstr->getRefConstraintReferencingMe(j);
str_sprintf(query, "alter table \"%s\".\"%s\".\"%s\" drop constraint \"%s\".\"%s\".\"%s\"",
rc->getTableName().getCatalogName().data(),
rc->getTableName().getSchemaName().data(),
rc->getTableName().getObjectName().data(),
rc->getConstraintName().getCatalogName().data(),
rc->getConstraintName().getSchemaName().data(),
rc->getConstraintName().getObjectName().data());
cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -2;
}
} // for
} // if
} // for
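  // remove metadata entries for all unique and primary key constraints
  // defined on this table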
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
const NAString& constrCatName =
uniqConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
uniqConstr->getConstraintName().getSchemaName();
NAString constrObjName =
(NAString) uniqConstr->getConstraintName().getObjectName();
// Get the constraint UID
Int64 constrUID = -1;
      // If the table being dropped is from a metadata schema, set up
      // a UniqueConstraint entry for the table being dropped describing its
// primary key. This is temporary until metadata is changed to create
// primary keys with a known name.
if (isSeabasePrivMgrMD(catalogNamePart, schemaNamePart) ||
isSeabaseMD(catalogNamePart, schemaNamePart, objectNamePart))
{
assert (uniqueList.entries() == 1);
assert (uniqueList[0]->getOperatorType() == ITM_UNIQUE_CONSTRAINT);
UniqueConstraint * uniqConstr = (UniqueConstraint*)uniqueList[0];
assert (uniqConstr->isPrimaryKeyConstraint());
NAString adjustedConstrName;
if (getPKeyInfoForTable (catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
cliInterface,
constrObjName,
constrUID) == -1)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
// Read the metadata to get the constraint UID
else
{
constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
(uniqConstr->isPrimaryKeyConstraint() ?
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT :
COM_UNIQUE_CONSTRAINT_OBJECT_LIT));
if (constrUID == -1)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
0,
constrUID,
0,
constrCatName,
constrSchName,
constrObjName,
(uniqConstr->isPrimaryKeyConstraint() ?
COM_PRIMARY_KEY_CONSTRAINT_OBJECT :
COM_UNIQUE_CONSTRAINT_OBJECT)))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
// drop all referential constraints from metadata
const AbstractRIConstraintList &refList = naTable->getRefConstraints();
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
// if self referencing constraint, then it was already dropped as part of
// dropping 'ri constraints referencing me' earlier.
if (refConstr->selfRef())
continue;
const NAString& constrCatName =
refConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
refConstr->getConstraintName().getSchemaName();
const NAString& constrObjName =
refConstr->getConstraintName().getObjectName();
Int64 constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT);
if (constrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
NATable *otherNaTable = NULL;
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
otherNaTable = bindWA.getNATable(otherCN);
if (otherNaTable == NULL || bindWA.errStatus())
{
deallocEHI(ehi);
processReturn();
return -1;
}
AbstractRIConstraint * otherConstr =
refConstr->findConstraint(&bindWA, refConstr->getUniqueConstraintReferencedByMe());
const NAString& otherSchName =
otherConstr->getConstraintName().getSchemaName();
const NAString& otherConstrName =
otherConstr->getConstraintName().getObjectName();
Int64 otherConstrUID = getObjectUID(cliInterface,
constrCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_UNIQUE_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
CmpCommon::diags()->clear();
otherConstrUID = getObjectUID(cliInterface,
constrCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
otherNaTable->objectUid().castToInt64(),
constrUID,
otherConstrUID,
constrCatName,
constrSchName,
constrObjName,
COM_REFERENTIAL_CONSTRAINT_OBJECT))
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (updateObjectRedefTime
(cliInterface,
otherNaTable->getTableName().getCatalogName(),
otherNaTable->getTableName().getSchemaName(),
otherNaTable->getTableName().getObjectName(),
COM_BASE_TABLE_OBJECT_LIT, -1,
otherNaTable->objectUid().castToInt64()))
{
processReturn();
deallocEHI(ehi);
return -1;
}
}
// drop all check constraints from metadata if 'no check' is not specified.
if (NOT (dropTableNode->getDropBehavior() == COM_NO_CHECK_DROP_BEHAVIOR))
{
const CheckConstraintList &checkList = naTable->getCheckConstraints();
for (Int32 i = 0; i < checkList.entries(); i++)
{
CheckConstraint *checkConstr = checkList[i];
const NAString& constrCatName =
checkConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
checkConstr->getConstraintName().getSchemaName();
const NAString& constrObjName =
checkConstr->getConstraintName().getObjectName();
Int64 constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
COM_CHECK_CONSTRAINT_OBJECT_LIT);
if (constrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
0,
constrUID,
0,
constrCatName,
constrSchName,
constrObjName,
COM_CHECK_CONSTRAINT_OBJECT))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
const NAFileSetList &indexList = naTable->getIndexList();
// first drop all index objects from metadata.
Queue * indexInfoQueue = NULL;
if (getAllIndexes(cliInterface, objUID, TRUE, indexInfoQueue))
{
deallocEHI(ehi);
processReturn();
return -1;
}
SQL_QIKEY *qiKeys = new (STMTHEAP) SQL_QIKEY[indexInfoQueue->numEntries()];
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
NAString idxCatName = (char*)vi->get(0);
NAString idxSchName = (char*)vi->get(1);
NAString idxObjName = (char*)vi->get(2);
      // set up a qiKey for this index; later we will remove the
      // index cache entries from concurrent processes
Int64 objUID = *(Int64*)vi->get(3);
qiKeys[idx].ddlObjectUID = objUID;
qiKeys[idx].operation[0] = 'O';
qiKeys[idx].operation[1] = 'R';
NAString qCatName = "\"" + idxCatName + "\"";
NAString qSchName = "\"" + idxSchName + "\"";
NAString qObjName = "\"" + idxObjName + "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString ansiName = coName.getExternalName(TRUE);
if (dropSeabaseObject(ehi, ansiName,
idxCatName, idxSchName, COM_INDEX_OBJECT,
dropTableNode->ddlXns(),
TRUE, FALSE))
{
NADELETEBASIC (qiKeys, STMTHEAP);
deallocEHI(ehi);
processReturn();
return -1;
}
} // for
// Remove index entries from other processes cache
// Fix for bug 1396774 & bug 1396746
if (indexInfoQueue->numEntries() > 0)
SQL_EXEC_SetSecInvalidKeys(indexInfoQueue->numEntries(), qiKeys);
NADELETEBASIC (qiKeys, STMTHEAP);
// if there is an identity column, drop sequence corresponding to it.
NABoolean found = FALSE;
Lng32 idPos = 0;
NAColumn *col = NULL;
while ((NOT found) && (idPos < naTable->getColumnCount()))
{
col = naTable->getNAColumnArray()[idPos];
if (col->isIdentityColumn())
{
found = TRUE;
continue;
}
idPos++;
}
if (found)
{
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart, col->getColName(),
seqName);
char buf[4000];
str_sprintf(buf, "drop sequence %s.\"%s\".\"%s\"",
catalogNamePart.data(), schemaNamePart.data(), seqName.data());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
}
// drop SB_HISTOGRAMS and SB_HISTOGRAM_INTERVALS entries, if any
// if the table that we are dropping itself is not a SB_HISTOGRAMS
  // or SB_HISTOGRAM_INTERVALS table or a sampling table
// TBD: need to change once we start updating statistics for external
// tables
if (! (tableName.isExternalHive() || tableName.isExternalHbase()) )
{
if (objectNamePart != "SB_HISTOGRAMS" &&
objectNamePart != "SB_HISTOGRAM_INTERVALS" &&
objectNamePart != "SB_PERSISTENT_SAMPLES" &&
strncmp(objectNamePart.data(),TRAF_SAMPLE_PREFIX,sizeof(TRAF_SAMPLE_PREFIX)) != 0)
{
if (dropSeabaseStats(cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objUID))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
// if metadata drop succeeds, drop indexes from hbase.
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
NAString idxCatName = (char*)vi->get(0);
NAString idxSchName = (char*)vi->get(1);
NAString idxObjName = (char*)vi->get(2);
NAString qCatName = "\"" + idxCatName + "\"";
NAString qSchName = "\"" + idxSchName + "\"";
NAString qObjName = "\"" + idxObjName + "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString ansiName = coName.getExternalName(TRUE);
if (dropSeabaseObject(ehi, ansiName,
idxCatName, idxSchName, COM_INDEX_OBJECT,
dropTableNode->ddlXns(),
FALSE, TRUE))
{
deallocEHI(ehi);
processReturn();
return -2;
}
CorrName cni(qObjName, STMTHEAP, qSchName, qCatName);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cni,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_INDEX_OBJECT,
dropTableNode->ddlXns(), FALSE);
cni.setSpecialType(ExtendedQualName::INDEX_TABLE);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cni,
ComQiScope::REMOVE_MINE_ONLY, COM_INDEX_OBJECT,
dropTableNode->ddlXns(), FALSE);
} // for
// If blob/clob columns are present, drop all the dependent files.
Lng32 numCols = nacolArr.entries();
// if this table has lob columns, drop the lob files
short *lobNumList = new (STMTHEAP) short[numCols];
short *lobTypList = new (STMTHEAP) short[numCols];
char **lobLocList = new (STMTHEAP) char*[numCols];
char lobHdfsServer[256] ; // max length determined by dfs.namenode.fs-limits.max-component-length(255)
memset(lobHdfsServer,0,256);
strncpy(lobHdfsServer,CmpCommon::getDefaultString(LOB_HDFS_SERVER),sizeof(lobHdfsServer)-1);
Int32 lobHdfsPort = (Lng32)CmpCommon::getDefaultNumeric(LOB_HDFS_PORT);
Lng32 j = 0;
for (Int32 i = 0; i < nacolArr.entries(); i++)
{
NAColumn *naColumn = nacolArr[i];
Lng32 datatype = naColumn->getType()->getFSDatatype();
if ((datatype == REC_BLOB) ||
(datatype == REC_CLOB))
{
lobNumList[j] = i; //column->getColumnNumber();
lobTypList[j] =
(short)(naColumn->lobStorageType() == Lob_Invalid_Storage
? Lob_HDFS_File : naColumn->lobStorageType());
// lobTypList[j] = (short)
// CmpCommon::getDefaultNumeric(LOB_STORAGE_TYPE);
char * loc = new (STMTHEAP) char[1024];
const char* f = ActiveSchemaDB()->getDefaults().
getValue(LOB_STORAGE_FILE_DIR);
strcpy(loc, f);
lobLocList[j] = loc;
j++;
}
}
if (j > 0)
{
Int32 rc = sendAllControls(FALSE, FALSE, TRUE);
Int64 objUID = getObjectUID(cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComString newSchName = "\"";
newSchName += catalogNamePart;
newSchName.append("\".\"");
newSchName.append(schemaNamePart);
newSchName += "\"";
NABoolean lobTrace = FALSE;
if (getenv("TRACE_LOB_ACTIONS"))
lobTrace=TRUE;
rc = SQL_EXEC_LOBddlInterface((char*)newSchName.data(),
newSchName.length(),
objUID,
j,
LOB_CLI_DROP,
lobNumList,
lobTypList,
lobLocList,NULL,lobHdfsServer, lobHdfsPort,0,lobTrace);
if (rc < 0)
{
// retrieve the cli diags here.
CmpCommon::diags()->mergeAfter( *(GetCliGlobals()->currContext()->getDiagsArea()));
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_DROP_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
//Finally drop the table
NABoolean dropFromMD = TRUE;
NABoolean dropFromHbase = (NOT tableName.isExternalHbase());
if (dropTableNode->getDropBehavior() == COM_NO_CHECK_DROP_BEHAVIOR)
dropFromHbase = FALSE;
if (dropSeabaseObject(ehi, tabName,
currCatName, currSchName, COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(),
dropFromMD, dropFromHbase))
{
deallocEHI(ehi);
processReturn();
return -2;
}
deallocEHI(ehi);
processReturn();
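  // remove cached NATable entries for the dropped table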
CorrName cn2(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn2,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
// if hive external table, remove hive entry from NATable cache as well
if ((dropTableNode->isExternal()) &&
(dropTableNode->getTableNameAsQualifiedName().isHive()))
{
CorrName hcn(dropTableNode->getTableNameAsQualifiedName());
ActiveSchemaDB()->getNATableDB()->removeNATable
(hcn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
}
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
ActiveSchemaDB()->getNATableDB()->removeNATable
(otherCN,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
}
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
UniqueConstraint * uniqConstr = (UniqueConstraint*)uniqueList[i];
      // We will only reach here if the cascade option is specified.
// drop all constraints referencing me.
if (uniqConstr->hasRefConstraintsReferencingMe())
{
for (Lng32 j = 0; j < uniqConstr->getNumRefConstraintsReferencingMe(); j++)
{
const ComplementaryRIConstraint * rc =
uniqConstr->getRefConstraintReferencingMe(j);
// remove this ref constr entry from natable cache
CorrName cnr(rc->getTableName().getObjectName().data(), STMTHEAP,
rc->getTableName().getSchemaName().data(),
rc->getTableName().getCatalogName().data());
ActiveSchemaDB()->getNATableDB()->removeNATable
(cnr,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
} // for
} // if
} // for
return 0;
}
void CmpSeabaseDDL::dropSeabaseTable(
StmtDDLDropTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
NABoolean xnWasStartedHere = FALSE;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
short rc =
dropSeabaseTable2(&cliInterface, dropTableNode, currCatName, currSchName);
if ((CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) &&
(rc < 0))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
if (rc == -2) // cleanup before returning error..
{
ComObjectName tableName(dropTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns());
}
return;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return;
}
short CmpSeabaseDDL::invalidateStats(Int64 tableUID)
{
SQL_QIKEY qiKey;
strncpy(qiKey.operation,COM_QI_STATS_UPDATED_LIT,sizeof(qiKey.operation));
qiKey.ddlObjectUID = tableUID;
return SQL_EXEC_SetSecInvalidKeys(1, &qiKey);
}
void CmpSeabaseDDL::renameSeabaseTable(
StmtDDLAlterTableRename * renameTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
// variables needed to find identity column (have to be declared before
// the first goto or C++ will moan because of the initializers)
NABoolean found = FALSE;
Lng32 idPos = 0;
NAColumn *col = NULL;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = renameTableNode->getTableName();
NAString catalogNamePart;
NAString schemaNamePart;
NAString objectNamePart;
NAString extTableName;
NAString extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
renameTableNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
ComObjectName newTableName(renameTableNode->getNewNameAsAnsiString());
newTableName.applyDefaults(catalogNamePart, schemaNamePart);
const NAString newObjectNamePart = newTableName.getObjectNamePartAsAnsiString(TRUE);
const NAString newExtTableName = newTableName.getExternalName(TRUE);
const NAString newExtNameForHbase = catalogNamePart + "." + schemaNamePart + "." + newObjectNamePart;
CorrName newcn(newObjectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
NATable *newNaTable = bindWA.getNATable(newcn);
if (newNaTable != NULL && (NOT bindWA.errStatus()))
{
// an object already exists with the new name
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(newExtTableName);
processReturn();
return;
}
else if (newNaTable == NULL &&
bindWA.errStatus() &&
(!CmpCommon::diags()->contains(-4082) || CmpCommon::diags()->getNumber() > 1))
{
// there is some error other than the usual -4082, object
// does not exist
// If there is also -4082 error, remove that as it is misleading
// to the user. The user would see, "new name does not exist"
// and wonder, what is wrong with that?
for (CollIndex i = CmpCommon::diags()->returnIndex(-4082);
i != NULL_COLL_INDEX;
i = CmpCommon::diags()->returnIndex(-4082))
{
CmpCommon::diags()->deleteError(i);
}
if (CmpCommon::diags()->getNumber() > 0) // still anything there?
{
processReturn(); // error is already in the diags
return;
}
}
CmpCommon::diags()->clear();
// cannot rename a view
if (naTable->getViewText())
{
*CmpCommon::diags()
<< DgSqlCode(-1427)
<< DgString0("Reason: Operation not allowed on a view.");
processReturn();
return;
}
// cascade option not supported
if (renameTableNode->isCascade())
{
*CmpCommon::diags() << DgSqlCode(-1427)
<< DgString0("Reason: Cascade option not supported.");
processReturn();
return;
}
const CheckConstraintList &checkList = naTable->getCheckConstraints();
if (checkList.entries() > 0)
{
*CmpCommon::diags()
<< DgSqlCode(-1427)
<< DgString0("Reason: Operation not allowed if check constraints are present. Drop the constraints and recreate them after rename.");
processReturn();
return;
}
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
if (objUID < 0)
{
processReturn();
return;
}
if (!renameTableNode->skipViewCheck())
{
// cannot rename if views are using this table
Queue * usingViewsQueue = NULL;
cliRC = getUsingViews(&cliInterface, objUID, usingViewsQueue);
if (cliRC < 0)
{
processReturn();
return;
}
if (usingViewsQueue->numEntries() > 0)
{
*CmpCommon::diags() << DgSqlCode(-1427)
<< DgString0("Reason: Operation not allowed if dependent views exist. Drop the views and recreate them after rename.");
processReturn();
return;
}
}
// this operation cannot be done if a transaction is already in progress.
if (xnInProgress(&cliInterface))
{
*CmpCommon::diags() << DgSqlCode(-20125)
<< DgString0("This ALTER");
processReturn();
return;
}
NABoolean ddlXns = renameTableNode->ddlXns();
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
HbaseStr newHbaseTable;
newHbaseTable.val = (char*)newExtNameForHbase.data();
newHbaseTable.len = newExtNameForHbase.length();
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
NABoolean xnWasStartedHere = FALSE;
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
cliRC = updateObjectName(&cliInterface,
objUID,
catalogNamePart.data(), schemaNamePart.data(),
newObjectNamePart.data());
if (cliRC < 0)
{
processReturn();
goto label_error_2;
}
// if there is an identity column, rename the sequence corresponding to it
found = FALSE;
while ((NOT found) && (idPos < naTable->getColumnCount()))
{
col = naTable->getNAColumnArray()[idPos];
if (col->isIdentityColumn())
{
found = TRUE;
continue;
}
idPos++;
}
if (found)
{
NAString oldSeqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart, col->getColName(),
oldSeqName);
NAString newSeqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, newObjectNamePart, col->getColName(),
newSeqName);
Int64 seqUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
oldSeqName.data(),
COM_SEQUENCE_GENERATOR_OBJECT_LIT);
if (seqUID < 0)
{
processReturn();
goto label_error_2;
}
cliRC = updateObjectName(&cliInterface,
seqUID,
catalogNamePart.data(), schemaNamePart.data(),
newSeqName.data());
if (cliRC < 0)
{
processReturn();
goto label_error_2;
}
}
// rename the underlying hbase object
retcode = ehi->copy(hbaseTable, newHbaseTable);
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::copy()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr());
processReturn();
cliRC = -1;
goto label_error;
}
retcode = dropHbaseTable(ehi, &hbaseTable, FALSE, ddlXns);
if (retcode < 0)
{
cliRC = -1;
goto label_error;
}
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, newObjectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID);
if (cliRC < 0)
{
processReturn();
goto label_error;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
renameTableNode->ddlXns(), FALSE);
deallocEHI(ehi);
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return;
label_error: // come here after HBase copy
retcode = dropHbaseTable(ehi, &newHbaseTable, FALSE, FALSE);
label_error_2: // come here after beginXnIfNotInProgress but before HBase copy
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
deallocEHI(ehi);
return;
}
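// alterSeabaseTableStoredDesc: handles an ALTER TABLE request on the
// stored descriptor. Depending on the request type it generates the
// descriptor, deletes it, enables or disables its use, or checks it,
// and then invalidates the cached NATable entry.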
void CmpSeabaseDDL::alterSeabaseTableStoredDesc(
StmtDDLAlterTableStoredDesc * alterStoredDesc,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(alterStoredDesc->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SMD_CANNOT_BE_ALTERED)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
if (CmpCommon::context()->sqlSession()->volatileSchemaInUse())
{
QualifiedName *qn =
CmpCommon::context()->sqlSession()->
updateVolatileQualifiedName
(alterStoredDesc->getTableNameAsQualifiedName().getObjectName());
if (qn == NULL)
{
*CmpCommon::diags()
<< DgSqlCode(-1427);
processReturn();
return;
}
ComObjectName volTabName (qn->getQualifiedNameAsAnsiString());
volTabName.applyDefaults(currCatAnsiName, currSchAnsiName);
NAString vtCatNamePart = volTabName.getCatalogNamePartAsAnsiString();
NAString vtSchNamePart = volTabName.getSchemaNamePartAsAnsiString(TRUE);
NAString vtObjNamePart = volTabName.getObjectNamePartAsAnsiString(TRUE);
retcode = existsInSeabaseMDTable(&cliInterface,
vtCatNamePart, vtSchNamePart, vtObjNamePart,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
{
processReturn();
return;
}
if (retcode == 1)
{
// table found in volatile schema. cannot alter it.
*CmpCommon::diags()
<< DgSqlCode(-3242)
<< DgString0("Operation not allowed on volatile tables.");
processReturn();
return;
}
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(objectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
CmpCommon::diags()->clear();
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
processReturn();
return;
}
// Make sure user has the privilege to perform the alter
if (alterStoredDesc->getType() != StmtDDLAlterTableStoredDesc::CHECK)
{
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
}
Int64 objUID = naTable->objectUid().castToInt64();
if (alterStoredDesc->getType() == StmtDDLAlterTableStoredDesc::GENERATE)
{
cliRC =
updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT,
-1, objUID, TRUE);
if (cliRC < 0)
{
processReturn ();
return;
}
}
else if (alterStoredDesc->getType() == StmtDDLAlterTableStoredDesc::DELETE)
{
cliRC = deleteFromTextTable
(&cliInterface, objUID, COM_STORED_DESC_TEXT, 0);
if (cliRC < 0)
{
processReturn ();
return;
}
Int64 flags = MD_OBJECTS_STORED_DESC | MD_OBJECTS_DISABLE_STORED_DESC;
cliRC = updateObjectFlags(&cliInterface, objUID, flags, TRUE);
if (cliRC < 0)
{
processReturn ();
return;
}
}
else if (alterStoredDesc->getType() == StmtDDLAlterTableStoredDesc::ENABLE)
{
Int64 flags = MD_OBJECTS_DISABLE_STORED_DESC;
cliRC = updateObjectFlags(&cliInterface, objUID, flags, TRUE);
if (cliRC < 0)
{
processReturn ();
return;
}
}
else if (alterStoredDesc->getType() == StmtDDLAlterTableStoredDesc::DISABLE)
{
Int64 flags = MD_OBJECTS_DISABLE_STORED_DESC;
cliRC = updateObjectFlags(&cliInterface, objUID, flags, FALSE);
if (cliRC < 0)
{
processReturn ();
return;
}
}
else if (alterStoredDesc->getType() == StmtDDLAlterTableStoredDesc::CHECK)
{
checkAndGetStoredObjectDesc(&cliInterface, objUID, NULL);
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterStoredDesc->ddlXns(), FALSE);
return;
}
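// alterSeabaseTableHBaseOptions: handles an ALTER TABLE request that
// changes HBase options. Updates the options stored in the metadata, asks
// HBase to alter the table accordingly, updates the redefinition time and
// invalidates the cached NATable entry.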
void CmpSeabaseDDL::alterSeabaseTableHBaseOptions(
StmtDDLAlterTableHBaseOptions * hbaseOptionsNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = hbaseOptionsNode->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
hbaseOptionsNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
CmpCommon::diags()->clear();
// Get the object UID so we can update the metadata
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
if (objUID < 0)
{
processReturn();
return;
}
// update HBase options in the metadata
ElemDDLHbaseOptions * edhbo = hbaseOptionsNode->getHBaseOptions();
short result = updateHbaseOptionsInMetadata(&cliInterface,objUID,edhbo);
if (result < 0)
{
processReturn();
return;
}
// tell HBase to change the options
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
result = alterHbaseTable(ehi,
&hbaseTable,
naTable->allColFams(),
&(edhbo->getHbaseOptions()),
hbaseOptionsNode->ddlXns());
if (result < 0)
{
processReturn();
deallocEHI(ehi);
return;
}
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return;
}
// invalidate cached NATable info on this table for all users
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
hbaseOptionsNode->ddlXns(), FALSE);
deallocEHI(ehi);
return;
}
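// createSeabaseTableLike2: helper for CREATE TABLE LIKE. Uses
// CmpDescribeSeabaseTable to generate the DDL text of the source table,
// turns it into a "create table <likeTableName> ..." statement and
// executes it.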
short CmpSeabaseDDL::createSeabaseTableLike2(
CorrName &cn,
const NAString &likeTableName,
NABoolean withPartns,
NABoolean withoutSalt,
NABoolean withoutDivision,
NABoolean withoutRowFormat)
{
Lng32 retcode = 0;
char * buf = NULL;
ULng32 buflen = 0;
retcode = CmpDescribeSeabaseTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
NULL,
withPartns, withoutSalt, withoutDivision,
withoutRowFormat,
FALSE, // include LOB columns (if any)
UINT_MAX,
TRUE);
if (retcode)
return -1;
NAString query = "create table ";
query += likeTableName;
query += " ";
NABoolean done = FALSE;
Lng32 curPos = 0;
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
query += frag;
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
query += ";";
// send any user CQDs down
Lng32 retCode = sendAllControls(FALSE, FALSE, TRUE);
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Lng32 cliRC = 0;
cliRC = cliInterface.executeImmediate((char*)query.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
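// cloneHbaseTable: copies the underlying HBase table srcTable to
// clonedTable. If no ExpHbaseInterface is passed in, one is allocated
// and released here.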
short CmpSeabaseDDL::cloneHbaseTable(
const NAString &srcTable, const NAString &clonedTable,
ExpHbaseInterface * inEHI)
{
HbaseStr hbaseTable;
hbaseTable.val = (char*)srcTable.data();
hbaseTable.len = srcTable.length();
HbaseStr clonedHbaseTable;
clonedHbaseTable.val = (char*)clonedTable.data();
clonedHbaseTable.len = clonedTable.length();
ExpHbaseInterface * ehi = (inEHI ? inEHI : allocEHI());
if (ehi == NULL) {
processReturn();
return -1;
}
// copy hbaseTable as clonedHbaseTable
if (ehi->copy(hbaseTable, clonedHbaseTable, TRUE))
{
if (! inEHI)
deallocEHI(ehi);
processReturn();
return -1;
}
if (! inEHI)
deallocEHI(ehi);
return 0;
}
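// cloneSeabaseTable: makes a copy of a table. If withCreate is set, the
// clone is created with CREATE TABLE LIKE and its column classes are made
// to match the source (to preserve added/altered columns); otherwise the
// existing clone is truncated. The clone is then populated with
// "upsert using load ... select * from <source>".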
short CmpSeabaseDDL::cloneSeabaseTable(
const NAString &srcTableNameStr,
Int64 srcTableUID,
const NAString &clonedTableNameStr,
const NATable * naTable,
ExpHbaseInterface * inEHI,
ExeCliInterface * cliInterface,
NABoolean withCreate)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ComObjectName srcTableName(srcTableNameStr, COM_TABLE_NAME);
const NAString srcCatNamePart = srcTableName.getCatalogNamePartAsAnsiString();
const NAString srcSchNamePart = srcTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString srcObjNamePart = srcTableName.getObjectNamePartAsAnsiString(TRUE);
CorrName srcCN(srcObjNamePart, STMTHEAP, srcSchNamePart, srcCatNamePart);
ComObjectName clonedTableName(clonedTableNameStr, COM_TABLE_NAME);
const NAString clonedCatNamePart = clonedTableName.getCatalogNamePartAsAnsiString();
const NAString clonedSchNamePart = clonedTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString clonedObjNamePart = clonedTableName.getObjectNamePartAsAnsiString(TRUE);
char buf[2000];
if (withCreate)
{
retcode = createSeabaseTableLike2(srcCN, clonedTableNameStr,
FALSE, TRUE, TRUE);
if (retcode)
return -1;
Int64 clonedTableUID =
getObjectUID
(cliInterface,
clonedCatNamePart.data(),
clonedSchNamePart.data(),
clonedObjNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
// if there are added or altered columns in the source table, then the cloned
// table's metadata needs to reflect that.
// Update the metadata and set the cloned column class to be the same as the source.
str_sprintf(buf, "merge into %s.\"%s\".%s using (select column_name, column_class from %s.\"%s\".%s where object_uid = %ld) x on (object_uid = %ld and column_name = x.column_name) when matched then update set column_class = x.column_class;",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
srcTableUID,
clonedTableUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
}
if (NOT withCreate)
{
// truncate cloned(tgt) table before upsert
if (truncateHbaseTable(clonedCatNamePart,
clonedSchNamePart,
clonedObjNamePart,
(NATable*)naTable, inEHI))
{
return -1;
}
}
NAString quotedSrcCatName;
ToQuotedString(quotedSrcCatName,
NAString(srcCN.getQualifiedNameObj().getCatalogName()), FALSE);
NAString quotedSrcSchName;
ToQuotedString(quotedSrcSchName,
NAString(srcCN.getQualifiedNameObj().getSchemaName()), FALSE);
NAString quotedSrcObjName;
ToQuotedString(quotedSrcObjName,
NAString(srcCN.getQualifiedNameObj().getObjectName()), FALSE);
str_sprintf(buf, "upsert using load into %s select * from %s.%s.%s",
clonedTableNameStr.data(),
quotedSrcCatName.data(),
quotedSrcSchName.data(),
quotedSrcObjName.data());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
return 0;
}
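// alterSeabaseTableAddColumn: implements ALTER TABLE ... ADD COLUMN.
// Validates the column definition (default clause, reserved names, no LOB
// columns), inserts the new column into the COLUMNS metadata, adds a new
// HBase column family if needed, invalidates the cached NATable entry and
// adds any constraints specified along with the column.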
void CmpSeabaseDDL::alterSeabaseTableAddColumn(
StmtDDLAlterTableAddColumn * alterAddColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterAddColNode->getTableName();
NAString catalogNamePart;
NAString schemaNamePart;
NAString objectNamePart;
NAString extTableName;
NAString extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterAddColNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, TRUE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
ElemDDLColDefArray ColDefArray = alterAddColNode->getColDefArray();
ElemDDLColDef *pColDef = ColDefArray[0];
// Do not allow a NOT NULL constraint without a default clause.
// Do not allow DEFAULT NULL together with NOT NULL.
if (pColDef->getIsConstraintNotNullSpecified())
{
if (pColDef->getDefaultClauseStatus() != ElemDDLColDef::DEFAULT_CLAUSE_SPEC)
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEFAULT_REQUIRED);
processReturn();
return;
}
ConstValue *pDefVal = (ConstValue *)pColDef->getDefaultValueExpr();
if ((pDefVal) &&
(pDefVal->origOpType() != ITM_CURRENT_USER) &&
(pDefVal->origOpType() != ITM_CURRENT_TIMESTAMP) &&
(pDefVal->origOpType() != ITM_UNIX_TIMESTAMP) &&
(pDefVal->origOpType() != ITM_UNIQUE_ID) &&
(pDefVal->origOpType() != ITM_CAST))
{
if (pDefVal->isNull())
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_BE_DEFAULT_NULL_AND_NOT_NULL);
processReturn();
return;
}
}
}
// Do not allow NO DEFAULT
if (pColDef->getDefaultClauseStatus() == ElemDDLColDef::NO_DEFAULT_CLAUSE_SPEC)
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEFAULT_REQUIRED);
processReturn();
return;
}
if (pColDef->getSGOptions())
{
*CmpCommon::diags() << DgSqlCode(-1514);
processReturn();
return;
}
char query[4000];
NAString colFamily;
NAString colName;
Lng32 datatype, length, precision, scale, dt_start, dt_end, nullable, upshifted;
ComColumnClass colClass;
ComColumnDefaultClass defaultClass;
NAString charset, defVal;
NAString heading;
ULng32 hbaseColFlags;
Int64 colFlags;
LobsStorage lobStorage;
// if this is an hbase map format table, temporarily turn the global
// hbase_serialization default off (it is restored after getColInfo).
NABoolean hbaseSerialization = FALSE;
NAString hbVal;
if (naTable->isHbaseMapTable())
{
if (CmpCommon::getDefault(HBASE_SERIALIZATION) == DF_ON)
{
NAString value("OFF");
hbVal = "ON";
ActiveSchemaDB()->getDefaults().validateAndInsert(
"hbase_serialization", value, FALSE);
hbaseSerialization = TRUE;
}
}
retcode = getColInfo(pColDef,
FALSE, // not a metadata, histogram or repository column
colFamily,
colName,
naTable->isSQLMXAlignedTable(),
datatype, length, precision, scale, dt_start, dt_end, upshifted, nullable,
charset, colClass, defaultClass, defVal, heading, lobStorage, hbaseColFlags, colFlags);
if (hbaseSerialization)
{
ActiveSchemaDB()->getDefaults().validateAndInsert
("hbase_serialization", hbVal, FALSE);
}
if (retcode)
return;
if ((CmpCommon::getDefault(TRAF_ALLOW_RESERVED_COLNAMES) == DF_OFF) &&
(ComTrafReservedColName(colName)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_COLUMN_NAME)
<< DgString0(colName);
deallocEHI(ehi);
processReturn();
return;
}
if (colFamily.isNull())
{
colFamily = naTable->defaultColFam();
}
NABoolean addFam = FALSE;
NAString trafColFam;
if ((colFamily == SEABASE_DEFAULT_COL_FAMILY) ||
(naTable->isHbaseMapTable()))
trafColFam = colFamily;
else
{
CollIndex idx = naTable->allColFams().index(colFamily);
if (idx == NULL_COLL_INDEX) // doesn't exist, add it
{
idx = naTable->allColFams().entries();
addFam = TRUE;
}
genTrafColFam(idx, trafColFam);
}
const NAColumn * nacol = nacolArr.getColumn(colName);
if (nacol)
{
// column exists. Error or return, depending on 'if not exists' option.
if (NOT alterAddColNode->addIfNotExists())
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_COLUMNS)
<< DgColumnName(colName);
}
processReturn();
return;
}
// If column is a LOB column, error
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
char * col_name = new(STMTHEAP) char[colName.length() + 1];
strcpy(col_name, (char*)colName.data());
ULng32 maxColQual = nacolArr.getMaxTrafHbaseColQualifier();
NAString quotedHeading;
if (NOT heading.isNull())
{
ToQuotedString(quotedHeading, heading, FALSE);
}
NAString quotedDefVal;
if (NOT defVal.isNull())
{
ToQuotedString(quotedDefVal, defVal, FALSE);
}
Int64 objUID = naTable->objectUid().castToInt64();
Int32 newColNum = naTable->getColumnCount();
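// For non-aligned tables, computed columns (such as the salt or division
// columns) remain at the end of the column list. Shift their column
// numbers (and the corresponding KEYS, TEXT and index key entries) up by
// one so that the added user column is numbered before them.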
for (Int32 cc = nacolArr.entries()-1; cc >= 0; cc--)
{
const NAColumn *nac = nacolArr[cc];
if ((NOT naTable->isSQLMXAlignedTable()) &&
(nac->isComputedColumn()))
{
str_sprintf(query, "update %s.\"%s\".%s set column_number = column_number + 1 where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
nac->getPosition());
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
str_sprintf(query, "update %s.\"%s\".%s set column_number = column_number + 1 where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
objUID,
nac->getPosition());
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
str_sprintf(query, "update %s.\"%s\".%s set sub_id = sub_id + 1 where text_uid = %ld and text_type = %d and sub_id = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TEXT,
objUID,
COM_COMPUTED_COL_TEXT,
nac->getPosition());
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// keys for indexes refer to base table column numbers.
// modify them so they now refer to the new column numbers.
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
const NAFileSet * naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const QualifiedName &indexName = naFS->getFileSetName();
str_sprintf(query, "update %s.\"%s\".%s set column_number = column_number + 1 where column_number = %d and object_uid = (select object_uid from %s.\"%s\".%s where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = 'IX') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
nac->getPosition(),
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
indexName.getCatalogName().data(),
indexName.getSchemaName().data(),
indexName.getObjectName().data());
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_return;
}
} // for
} // secondary indexes present
newColNum--;
}
}
str_sprintf(query, "insert into %s.\"%s\".%s values (%ld, '%s', %d, '%s', %d, '%s', %d, %d, %d, %d, %d, '%s', %d, %d, '%s', %d, '%s', '%s', '%s', '%u', '%s', '%s', %ld )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
col_name,
newColNum, //naTable->getColumnCount(),
COM_ADDED_USER_COLUMN_LIT,
datatype,
getAnsiTypeStrFromFSType(datatype),
length,
precision,
scale,
dt_start,
dt_end,
(upshifted ? "Y" : "N"),
hbaseColFlags,
nullable,
(char*)charset.data(),
(Lng32)defaultClass,
(quotedDefVal.isNull() ? "" : quotedDefVal.data()),
(quotedHeading.isNull() ? "" : quotedHeading.data()),
trafColFam.data(),
maxColQual+1,
COM_UNKNOWN_PARAM_DIRECTION_LIT,
"N",
colFlags);
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
// if the column family of the added col doesn't exist in the table, add it
if (addFam)
{
NAString currColFams;
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_COL_FAMILY_TEXT,
0, currColFams))
{
deallocEHI(ehi);
processReturn();
return;
}
Lng32 cliRC = deleteFromTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
NAString allColFams = currColFams + " " + colFamily;
cliRC = updateTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0,
allColFams);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
HbaseCreateOption hbco("NAME", trafColFam.data());
NAList<HbaseCreateOption*> hbcol(STMTHEAP);
hbcol.insert(&hbco);
ElemDDLHbaseOptions edhbo(&hbcol, STMTHEAP);
NAList<NAString> nal(STMTHEAP);
nal.insert(trafColFam);
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
cliRC = alterHbaseTable(ehi,
&hbaseTable,
nal,
&(edhbo.getHbaseOptions()),
alterAddColNode->ddlXns());
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterAddColNode->ddlXns(), FALSE);
if (alterAddColNode->getAddConstraintPK())
{
// if table already has a primary key, return error.
if ((naTable->getClusteringIndex()) &&
(NOT naTable->getClusteringIndex()->hasOnlySyskey()))
{
*CmpCommon::diags()
<< DgSqlCode(-1256)
<< DgString0(extTableName);
processReturn();
return;
}
}
if ((alterAddColNode->getAddConstraintPK()) OR
(alterAddColNode->getAddConstraintCheckArray().entries() NEQ 0) OR
(alterAddColNode->getAddConstraintUniqueArray().entries() NEQ 0) OR
(alterAddColNode->getAddConstraintRIArray().entries() NEQ 0))
{
addConstraints(tableName, currCatAnsiName, currSchAnsiName,
alterAddColNode,
alterAddColNode->getAddConstraintPK(),
alterAddColNode->getAddConstraintUniqueArray(),
alterAddColNode->getAddConstraintRIArray(),
alterAddColNode->getAddConstraintCheckArray());
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID))
{
processReturn();
return;
}
label_return:
processReturn();
return;
}
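// updateMDforDropCol: removes the metadata for a dropped column. Deletes
// its COLUMNS row, shifts the column numbers of the following columns in
// COLUMNS and KEYS, deletes its comment text, renumbers computed-column
// and comment TEXT entries, and adjusts the key column numbers of the
// primary key constraint and of any secondary indexes.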
short CmpSeabaseDDL::updateMDforDropCol(ExeCliInterface &cliInterface,
const NATable * naTable,
Lng32 dropColNum)
{
Lng32 cliRC = 0;
Int64 objUID = naTable->objectUid().castToInt64();
char buf[4000];
str_sprintf(buf, "delete from %s.\"%s\".%s where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
dropColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where object_uid = %ld and column_number >= %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
dropColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where object_uid = %ld and column_number >= %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
objUID,
dropColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// delete the comment in the TEXT table for this column
str_sprintf(buf, "delete from %s.\"%s\".%s where text_uid = %ld and text_type = %d and sub_id = %d ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TEXT,
objUID,
COM_COLUMN_COMMENT_TEXT,
dropColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
str_sprintf(buf, "update %s.\"%s\".%s set sub_id = sub_id - 1 where text_uid = %ld and ( text_type = %d or text_type = %d ) and sub_id > %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TEXT,
objUID,
COM_COMPUTED_COL_TEXT,
COM_COLUMN_COMMENT_TEXT,
dropColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// keys for the pkey constraint refer to base table column numbers.
// modify them so they now refer to the new column numbers.
str_sprintf(buf, "update %s.\"%s\".%s K set column_number = column_number - 1 where K.column_number >= %d and K.object_uid = (select C.constraint_uid from %s.\"%s\".%s C where C.table_uid = %ld and C.constraint_type = 'P')",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
dropColNum,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
objUID);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// keys for indexes refer to base table column numbers.
// modify them so they now refer to the new column numbers.
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
const NAFileSet * naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const QualifiedName &indexName = naFS->getFileSetName();
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where column_number >= %d and object_uid = (select object_uid from %s.\"%s\".%s where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = 'IX') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
dropColNum,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
indexName.getCatalogName().data(),
indexName.getSchemaName().data(),
indexName.getObjectName().data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
} // for
} // secondary indexes present
return 0;
}
///////////////////////////////////////////////////////////////////////
//
// An aligned table contains all columns in one hbase cell.
// To drop a column, we need to read each row, create a
// new row without the dropped column, and insert it into the original table.
//
// Steps to drop a column from an aligned table:
//
// -- make a copy of the source aligned table using hbase copy
// -- truncate the source table
// -- Update metadata and remove the dropped column.
// -- bulk load data from copied table into the source table
// -- drop the copied temp table
//
// If an error happens after the source table has been truncated, then
// it will be restored from the copied table.
//
///////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::alignedFormatTableDropColumn
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
const NATable * naTable,
const NAString &altColName,
ElemDDLColDef *pColDef,
NABoolean ddlXns,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
const NAFileSet * naf = naTable->getClusteringIndex();
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ComUID comUID;
comUID.make_UID();
Int64 objUID = comUID.get_value();
char objUIDbuf[100];
NAString tempTable(naTable->getTableName().getQualifiedNameAsAnsiString());
tempTable += "_";
tempTable += str_ltoa(objUID, objUIDbuf);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
return -1;
ExeCliInterface cliInterface
(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int64 tableUID = naTable->objectUid().castToInt64();
const NAColumnArray &naColArr = naTable->getNAColumnArray();
const NAColumn * altNaCol = naColArr.getColumn(altColName);
Lng32 altColNum = altNaCol->getPosition();
NAString tgtCols;
NAString srcCols;
NABoolean xnWasStartedHere = FALSE;
NABoolean identityGenAlways = FALSE;
char buf[4000];
// identity 'generated always' columns do not permit inserting user-specified
// values. Override that behavior since we want to move the original values to the target.
for (Int32 c = 0; c < naColArr.entries(); c++)
{
const NAColumn * nac = naColArr[c];
if (nac->isIdentityColumnAlways())
{
identityGenAlways = TRUE;
break;
}
} // for
if (identityGenAlways)
{
cliRC = cliInterface.holdAndSetCQD("override_generated_identity_values", "ON");
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
}
if (cloneSeabaseTable(naTable->getTableName().getQualifiedNameAsAnsiString(), //cn,
naTable->objectUid().castToInt64(),
tempTable,
naTable,
ehi, &cliInterface, TRUE))
{
cliRC = -1;
goto label_drop;
}
if (truncateHbaseTable(catalogNamePart, schemaNamePart, objectNamePart,
(NATable*)naTable, ehi))
{
cliRC = -1;
goto label_restore;
}
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
{
cliRC = -1;
goto label_restore;
}
if (updateMDforDropCol(cliInterface, naTable, altColNum))
{
cliRC = -1;
goto label_restore;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT, ddlXns, FALSE);
for (Int32 c = 0; c < naColArr.entries(); c++)
{
const NAColumn * nac = naColArr[c];
if (nac->getColName() == altColName)
continue;
if (nac->isComputedColumn())
continue;
if (nac->isSystemColumn())
continue;
tgtCols += "\"" + nac->getColName() + "\"";
tgtCols += ",";
} // for
tgtCols = tgtCols.strip(NAString::trailing, ',');
if (tgtCols.isNull())
{
*CmpCommon::diags() << DgSqlCode(-1424)
<< DgColumnName(altColName);
goto label_restore;
}
if (naTable->hasSecondaryIndexes()) // user indexes
{
cliRC = cliInterface.holdAndSetCQD("hide_indexes", "ALL");
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
}
str_sprintf(buf, "upsert using load into %s(%s) select %s from %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
tgtCols.data(),
tgtCols.data(),
tempTable.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
if (identityGenAlways)
cliInterface.restoreCQD("override_generated_identity_values");
if (naTable->hasSecondaryIndexes()) // user indexes
cliInterface.restoreCQD("hide_indexes");
if ((cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
ddlXns)) < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(altColName)
<< DgString0(reason);
goto label_restore;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
str_sprintf(buf, "drop table %s", tempTable.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
deallocEHI(ehi);
return 0;
label_restore:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
if (identityGenAlways)
cliInterface.restoreCQD("override_generated_identity_values");
if (naTable->hasSecondaryIndexes()) // user indexes
cliInterface.restoreCQD("hide_indexes");
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT, FALSE, FALSE);
if (cloneSeabaseTable(tempTable, -1,
naTable->getTableName().getQualifiedNameAsAnsiString(),
naTable,
ehi, &cliInterface, FALSE))
{
cliRC = -1;
goto label_drop;
}
label_drop:
str_sprintf(buf, "drop table %s", tempTable.data());
Lng32 cliRC2 = cliInterface.executeImmediate(buf);
deallocEHI(ehi);
return (cliRC < 0 ? -1 : 0);
}
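// hbaseFormatTableDropColumn: drops a column from a non-aligned (HBase
// format) table. Updates the metadata, deletes the column's cells from
// all rows of the HBase table and recreates any dependent views, all
// within a transaction started here if one was not already in progress.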
short CmpSeabaseDDL::hbaseFormatTableDropColumn(
ExpHbaseInterface *ehi,
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
const NATable * naTable,
const NAString &dropColName,
const NAColumn * nacol,
NABoolean ddlXns,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
const NAString extNameForHbase =
(naTable->isHbaseMapTable() ? objectNamePart :
(catalogNamePart + "." + schemaNamePart + "." + objectNamePart));
ExeCliInterface cliInterface(
STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Lng32 colNumber = nacol->getPosition();
NABoolean xnWasStartedHere = FALSE;
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
{
cliRC = -1;
goto label_error;
}
if (updateMDforDropCol(cliInterface, naTable, colNumber))
{
cliRC = -1;
goto label_error;
}
// remove column from all rows of the base table
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
{
NAString column(nacol->getHbaseColFam(), heap_);
column.append(":");
if (naTable->isHbaseMapTable())
{
column.append(dropColName);
}
else
{
char * colQualPtr = (char*)nacol->getHbaseColQual().data();
Lng32 colQualLen = nacol->getHbaseColQual().length();
Int64 colQval = str_atoi(colQualPtr, colQualLen);
if (colQval <= UCHAR_MAX)
{
unsigned char c = (unsigned char)colQval;
column.append((char*)&c, 1);
}
else if (colQval <= USHRT_MAX)
{
unsigned short s = (unsigned short)colQval;
column.append((char*)&s, 2);
}
else if (colQval <= UINT_MAX)
{
Lng32 l = (Lng32)colQval;
column.append((char*)&l, 4);
}
else
column.append((char*)&colQval, 8);
}
HbaseStr colNameStr;
char * col = (char *) heap_->allocateMemory(column.length() + 1, FALSE);
if (col)
{
memcpy(col, column.data(), column.length());
col[column.length()] = 0;
colNameStr.val = col;
colNameStr.len = column.length();
}
else
{
cliRC = -EXE_NO_MEM_TO_EXEC;
*CmpCommon::diags() << DgSqlCode(-EXE_NO_MEM_TO_EXEC); // error -8571
goto label_error;
}
cliRC = ehi->deleteColumns(hbaseTable, colNameStr);
if (cliRC < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::deleteColumns()")
<< DgString1(getHbaseErrStr(-cliRC))
<< DgInt0(-cliRC)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr());
goto label_error;
}
}
if ((cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
ddlXns)) < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(dropColName)
<< DgString0(reason);
goto label_error;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return 0;
label_error:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
return -1;
}
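// alterSeabaseTableDropColumn: implements ALTER TABLE ... DROP COLUMN.
// Verifies that the column exists and is not a LOB, clustering key or
// secondary index column, saves and drops dependent views, then calls the
// aligned-format or HBase-format drop routine and invalidates the cached
// NATable entry. On error, the saved views are recreated.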
void CmpSeabaseDDL::alterSeabaseTableDropColumn(
StmtDDLAlterTableDropColumn * alterDropColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterDropColNode->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterDropColNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, TRUE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterDropColNode->getColName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
// column doesn't exist. Error or return, depending on 'if exists' option.
if (NOT alterDropColNode->dropIfExists())
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
}
processReturn();
return;
}
// If column is a LOB column, error
Int32 datatype = nacol->getType()->getFSDatatype();
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
const NAFileSet * naFS = naTable->getClusteringIndex();
const NAColumnArray &naKeyColArr = naFS->getIndexKeyColumns();
if (naKeyColArr.getColumn(colName))
{
// key column cannot be dropped
*CmpCommon::diags() << DgSqlCode(-1420)
<< DgColumnName(colName);
processReturn();
return;
}
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const NAColumnArray &naIndexColArr = naFS->getAllColumns();
if (naIndexColArr.getColumn(colName))
{
// secondary index column cannot be dropped
*CmpCommon::diags() << DgSqlCode(-1421)
<< DgColumnName(colName)
<< DgTableName(naFS->getExtFileSetName());
processReturn();
return;
}
} // for
} // secondary indexes present
if ((naTable->getClusteringIndex()->hasSyskey()) &&
(nacolArr.entries() == 2))
{
// this table has one SYSKEY column and one other column.
// Dropping that column will leave the table with no user column.
// Return an error.
*CmpCommon::diags() << DgSqlCode(-1424)
<< DgColumnName(colName);
processReturn();
return;
}
// this operation cannot be done if a transaction is already in progress.
if (xnInProgress(&cliInterface))
{
*CmpCommon::diags() << DgSqlCode(-20125)
<< DgString0("This ALTER");
processReturn();
return;
}
ExpHbaseInterface * ehi = NULL;
Int64 objUID = naTable->objectUid().castToInt64();
NAList<NAString> viewNameList(STMTHEAP);
NAList<NAString> viewDefnList(STMTHEAP);
if (saveAndDropUsingViews(objUID, &cliInterface, viewNameList, viewDefnList))
{
NAString reason = "Error occurred while saving views.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
NABoolean xnWasStartedHere = FALSE;
Lng32 colNumber = nacol->getPosition();
char *col = NULL;
if (naTable->isSQLMXAlignedTable())
{
if (alignedFormatTableDropColumn
(
catalogNamePart, schemaNamePart, objectNamePart,
naTable,
alterDropColNode->getColName(),
NULL, alterDropColNode->ddlXns(),
viewNameList, viewDefnList))
{
cliRC = -1;
goto label_error;
}
}
else
{
ehi = allocEHI();
if (hbaseFormatTableDropColumn
(
ehi,
catalogNamePart, schemaNamePart, objectNamePart,
naTable,
alterDropColNode->getColName(),
nacol, alterDropColNode->ddlXns(),
viewNameList, viewDefnList))
{
cliRC = -1;
goto label_error;
}
} // hbase format table
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID);
if (cliRC < 0)
{
goto label_error;
}
label_return:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterDropColNode->ddlXns(), FALSE);
return;
label_error:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
if ((cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
alterDropColNode->ddlXns())) < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
}
deallocEHI(ehi);
heap_->deallocateMemory(col);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterDropColNode->ddlXns(), FALSE);
processReturn();
return;
}
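// alterSeabaseTableAlterIdentityColumn: alters the sequence generator
// options of an identity column. Builds an "alter internal sequence"
// statement from the specified options and executes it against the
// sequence backing the identity column, then invalidates the cached
// NATable entry.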
void CmpSeabaseDDL::alterSeabaseTableAlterIdentityColumn(
StmtDDLAlterTableAlterColumnSetSGOption * alterIdentityColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterIdentityColNode->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterIdentityColNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterIdentityColNode->getColumnName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
processReturn();
return;
}
if (! nacol->isIdentityColumn())
{
*CmpCommon::diags() << DgSqlCode(-1590)
<< DgColumnName(colName);
processReturn();
return;
}
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart,
alterIdentityColNode->getColumnName(),
seqName);
ElemDDLSGOptions * sgo = alterIdentityColNode->getSGOptions();
NAString options;
if (sgo)
{
char tmpBuf[1000];
if (sgo->isIncrementSpecified())
{
str_sprintf(tmpBuf, " increment by %ld", sgo->getIncrement());
options += tmpBuf;
}
if (sgo->isMaxValueSpecified())
{
if (sgo->isNoMaxValue())
strcpy(tmpBuf, " no maxvalue ");
else
str_sprintf(tmpBuf, " maxvalue %ld", sgo->getMaxValue());
options += tmpBuf;
}
if (sgo->isMinValueSpecified())
{
if (sgo->isNoMinValue())
strcpy(tmpBuf, " no minvalue ");
else
str_sprintf(tmpBuf, " minvalue %ld", sgo->getMinValue());
options += tmpBuf;
}
if (sgo->isStartValueSpecified())
{
str_sprintf(tmpBuf, " start with %ld", sgo->getStartValue());
options += tmpBuf;
}
if (sgo->isCacheSpecified())
{
if (sgo->isNoCache())
str_sprintf(tmpBuf, " no cache ");
else
str_sprintf(tmpBuf, " cache %ld ", sgo->getCache());
options += tmpBuf;
}
if (sgo->isCycleSpecified())
{
if (sgo->isNoCycle())
str_sprintf(tmpBuf, " no cycle ");
else
str_sprintf(tmpBuf, " cycle ");
options += tmpBuf;
}
if (sgo->isResetSpecified())
{
str_sprintf(tmpBuf, " reset ");
options += tmpBuf;
}
char buf[4000];
str_sprintf(buf, "alter internal sequence %s.\"%s\".\"%s\" %s",
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
options.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
}
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, naTable->objectUid().castToInt64());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterIdentityColNode->ddlXns(), FALSE);
return;
}
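// saveAndDropUsingViews: saves the names and definitions of all views
// that reference the given object (in ascending create-time order) and
// drops them, newest first. The caller recreates them later with
// recreateUsingViews.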
short CmpSeabaseDDL::saveAndDropUsingViews(Int64 objUID,
ExeCliInterface *cliInterface,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
NAString catName, schName, objName;
cliRC = getObjectName(cliInterface, objUID,
catName, schName, objName);
if (cliRC < 0)
{
processReturn();
return -1;
}
Queue * usingViewsQueue = NULL;
cliRC = getAllUsingViews(cliInterface,
catName, schName, objName,
usingViewsQueue);
if (cliRC < 0)
{
processReturn();
return -1;
}
if (usingViewsQueue->numEntries() == 0)
return 0;
NABoolean xnWasStartedHere = FALSE;
if (beginXnIfNotInProgress(cliInterface, xnWasStartedHere))
return -1;
// find any views on this table,
// save their definitions and drop them.
// they will be recreated by the caller before the DDL operation completes.
usingViewsQueue->position();
for (int idx = 0; idx < usingViewsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)usingViewsQueue->getNext();
char * viewName = vi->get(0);
viewNameList.insert(viewName);
ComObjectName viewCO(viewName, COM_TABLE_NAME);
const NAString catName = viewCO.getCatalogNamePartAsAnsiString();
const NAString schName = viewCO.getSchemaNamePartAsAnsiString(TRUE);
const NAString objName = viewCO.getObjectNamePartAsAnsiString(TRUE);
Int64 viewUID = getObjectUID(cliInterface,
catName.data(), schName.data(), objName.data(),
COM_VIEW_OBJECT_LIT);
if (viewUID < 0 )
{
endXnIfStartedHere(cliInterface, xnWasStartedHere, -1);
return -1;
}
NAString viewText;
if (getTextFromMD(cliInterface, viewUID, COM_VIEW_TEXT, 0, viewText))
{
endXnIfStartedHere(cliInterface, xnWasStartedHere, -1);
return -1;
}
viewDefnList.insert(viewText);
}
// drop the views.
// usingViewsQueue contains them in ascending order of their create
// time. Drop them from last to first.
for (int idx = usingViewsQueue->numEntries()-1; idx >= 0; idx--)
{
OutputInfo * vi = (OutputInfo*)usingViewsQueue->get(idx);
char * viewName = vi->get(0);
if (dropOneTableorView(*cliInterface,viewName,COM_VIEW_OBJECT,false))
{
endXnIfStartedHere(cliInterface, xnWasStartedHere, -1);
processReturn();
return -1;
}
}
endXnIfStartedHere(cliInterface, xnWasStartedHere, 0);
return 0;
}
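// recreateUsingViews: recreates the views previously saved by
// saveAndDropUsingViews by re-executing their saved view text, then
// removes the corresponding view entries from the NATable cache.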
short CmpSeabaseDDL::recreateUsingViews(ExeCliInterface *cliInterface,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList,
NABoolean ddlXns)
{
Lng32 cliRC = 0;
if (viewDefnList.entries() == 0)
return 0;
NABoolean xnWasStartedHere = FALSE;
if (beginXnIfNotInProgress(cliInterface, xnWasStartedHere))
return -1;
for (Lng32 i = 0; i < viewDefnList.entries(); i++)
{
cliRC = cliInterface->executeImmediate(viewDefnList[i]);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
cliRC = -1;
goto label_return;
}
}
cliRC = 0;
label_return:
for (Lng32 i = 0; i < viewDefnList.entries(); i++)
{
ComObjectName tableName(viewNameList[i], COM_TABLE_NAME);
const NAString catalogNamePart =
tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart =
tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart =
tableName.getObjectNamePartAsAnsiString(TRUE);
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_MINE_ONLY, COM_VIEW_OBJECT,
ddlXns, FALSE);
}
endXnIfStartedHere(cliInterface, xnWasStartedHere, cliRC);
return cliRC;
}
///////////////////////////////////////////////////////////////////////
//
// An aligned table contains all columns in one hbase cell.
// To alter a column, we need to read each row, create a
// new row with the altered column and insert into the original table.
//
// Validation that altered col datatype is compatible with the
// original datatype has already been done before this method
// is called.
//
// Steps to alter a column from an aligned table:
//
// -- make a copy of the source aligned table using hbase copy
// -- truncate the source table
// -- Update metadata column definition with the new definition
// -- bulk load data from copied table into the source table
// -- recreate views, if existed, based on the new definition
// -- drop the copied temp table
//
// If an error happens after the source table has been truncated, then
// it will be restored from the copied table.
//
///////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::alignedFormatTableAlterColumnAttr
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
const NATable * naTable,
const NAString &altColName,
ElemDDLColDef *pColDef,
NABoolean ddlXns,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
const NAFileSet * naf = naTable->getClusteringIndex();
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ComUID comUID;
comUID.make_UID();
Int64 objUID = comUID.get_value();
char objUIDbuf[100];
NAString tempTable(naTable->getTableName().getQualifiedNameAsAnsiString());
tempTable += "_";
tempTable += str_ltoa(objUID, objUIDbuf);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
return -1;
ExeCliInterface cliInterface
(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int64 tableUID = naTable->objectUid().castToInt64();
const NAColumnArray &naColArr = naTable->getNAColumnArray();
const NAColumn * altNaCol = naColArr.getColumn(altColName);
Lng32 altColNum = altNaCol->getPosition();
char buf[4000];
NAString colFamily;
NAString colName;
Lng32 datatype, length, precision, scale, dt_start, dt_end,
nullable, upshifted;
ComColumnClass colClass;
ComColumnDefaultClass defaultClass;
NAString charset, defVal;
NAString heading;
ULng32 hbaseColFlags;
Int64 colFlags;
LobsStorage lobStorage;
NAString quotedDefVal;
NABoolean xnWasStartedHere = FALSE;
if (cloneSeabaseTable(naTable->getTableName().getQualifiedNameAsAnsiString(),
naTable->objectUid().castToInt64(),
tempTable,
naTable,
ehi, &cliInterface, TRUE))
{
cliRC = -1;
goto label_drop;
}
if (truncateHbaseTable(catalogNamePart, schemaNamePart, objectNamePart,
(NATable*)naTable, ehi))
{
cliRC = -1;
goto label_restore;
}
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
goto label_restore;
if (getColInfo(pColDef,
FALSE, // not a metadata, histogram or repository column
colFamily,
colName,
naTable->isSQLMXAlignedTable(),
datatype, length, precision, scale, dt_start, dt_end,
upshifted, nullable,
charset, colClass, defaultClass, defVal, heading, lobStorage,
hbaseColFlags, colFlags))
{
cliRC = -1;
processReturn();
goto label_restore;
}
if (NOT defVal.isNull())
{
ToQuotedString(quotedDefVal, defVal, FALSE);
}
str_sprintf(buf, "update %s.\"%s\".%s set (column_class, fs_data_type, sql_data_type, column_size, column_precision, column_scale, datetime_start_field, datetime_end_field, is_upshifted, nullable, character_set, default_class, default_value) = ('%s', %d, '%s', %d, %d, %d, %d, %d, '%s', %d, '%s', %d, '%s') where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
COM_ALTERED_USER_COLUMN_LIT,
datatype,
getAnsiTypeStrFromFSType(datatype),
length,
precision,
scale,
dt_start,
dt_end,
(upshifted ? "Y" : "N"),
nullable,
(char*)charset.data(),
(Lng32)defaultClass,
(quotedDefVal.isNull() ? "" : quotedDefVal.data()),
tableUID,
altColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT, ddlXns, FALSE);
str_sprintf(buf, "upsert using load into %s select * from %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
tempTable.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
NAString reason;
reason = "Old data could not be updated using the altered column definition.";
// column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(altColName)
<< DgString0(reason);
goto label_restore;
}
if ((cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
ddlXns)) < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(altColName)
<< DgString0(reason);
goto label_restore;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
str_sprintf(buf, "drop table %s", tempTable.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_restore;
}
deallocEHI(ehi);
return 0;
label_restore:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT, FALSE, FALSE);
if (cloneSeabaseTable(tempTable, -1,
naTable->getTableName().getQualifiedNameAsAnsiString(),
naTable,
ehi, &cliInterface, FALSE))
{
cliRC = -1;
goto label_drop;
}
label_drop:
str_sprintf(buf, "drop table %s", tempTable.data());
Lng32 cliRC2 = cliInterface.executeImmediate(buf);
deallocEHI(ehi);
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
return (cliRC < 0 ? -1 : 0);
}
/////////////////////////////////////////////////////////////////////
// this method is called if the alter can be done with metadata changes
// only, without affecting table data.
/////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::mdOnlyAlterColumnAttr(
const NAString &catalogNamePart, const NAString &schemaNamePart,
const NAString &objectNamePart,
const NATable * naTable, const NAColumn * naCol, NAType * newType,
StmtDDLAlterTableAlterColumnDatatype * alterColNode,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
ExeCliInterface cliInterface
(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int64 objUID = naTable->objectUid().castToInt64();
Lng32 colNumber = naCol->getPosition();
NABoolean xnWasStartedHere = FALSE;
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return -1;
char buf[4000];
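// only the column size and column class need to change; update them
// directly in the COLUMNS metadata table.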
str_sprintf(buf, "update %s.\"%s\".%s set column_size = %d, column_class = '%s' where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
newType->getNominalSize(),
COM_ALTERED_USER_COLUMN_LIT,
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
alterColNode->ddlXns());
if (cliRC < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(naCol->getColName().data())
<< DgString0(reason);
goto label_error;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
return 0;
label_error:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
return -1;
}
///////////////////////////////////////////////////////////////////////
//
// Steps to alter a column from an hbase format table:
//
// Validation that altered col datatype is compatible with the
// original datatype has already been done before this method
// is called.
//
// -- add a temp column based on the altered datatype
// -- update temp col with data from the original col
// -- update metadata column definition with the new col definition
// -- update original col with data from temp col
// -- recreate views, if any existed, based on the new definition
// -- drop the temp col. Dependent views will be recreated during drop.
//
// If an error happens after the source table has been truncated, then
// it will be restored from the copied table.
//
///////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::hbaseFormatTableAlterColumnAttr(
const NAString &catalogNamePart, const NAString &schemaNamePart,
const NAString &objectNamePart,
const NATable * naTable, const NAColumn * naCol, NAType * newType,
StmtDDLAlterTableAlterColumnDatatype * alterColNode)
{
ExeCliInterface cliInterface
(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart,catalogNamePart);
Lng32 cliRC = 0;
Lng32 retcode = 0;
ComUID comUID;
comUID.make_UID();
Int64 objUID = comUID.get_value();
char objUIDbuf[100];
NAString tempCol(naCol->getColName());
tempCol += "_";
tempCol += str_ltoa(objUID, objUIDbuf);
char dispBuf[1000];
Lng32 ii = 0;
NABoolean identityCol;
ElemDDLColDef *pColDef = alterColNode->getColToAlter()->castToElemDDLColDef();
NAColumn *nac = NULL;
if (getNAColumnFromColDef(pColDef, nac))
return -1;
dispBuf[0] = 0;
if (cmpDisplayColumn(nac, (char*)tempCol.data(), newType, 3, NULL, dispBuf,
ii, FALSE, identityCol,
FALSE, FALSE, UINT_MAX, NULL))
return -1;
Int64 tableUID = naTable->objectUid().castToInt64();
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &altColName = naCol->getColName();
const NAColumn * altNaCol = nacolArr.getColumn(altColName);
Lng32 altColNum = altNaCol->getPosition();
char buf[4000];
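// step 1: add a temp column based on the altered datatype.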
str_sprintf(buf, "alter table %s add column %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
dispBuf);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterColNode->ddlXns(), FALSE);
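// step 2: populate the temp column with data from the original column.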
str_sprintf(buf, "update %s set %s = %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
tempCol.data(),
naCol->getColName().data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error1;
}
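// step 3: in the COLUMNS metadata table, delete the original column
// definition and re-insert the temp column's definition under the
// original column name and position.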
str_sprintf(buf, "delete from %s.\"%s\".%s where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
tableUID,
altColNum);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error1;
}
str_sprintf(buf, "insert into %s.\"%s\".%s select object_uid, '%s', %d, '%s', fs_data_type, sql_data_type, column_size, column_precision, column_scale, datetime_start_field, datetime_end_field, is_upshifted, column_flags, nullable, character_set, default_class, default_value, column_heading, '%s', '%s', direction, is_optional, flags from %s.\"%s\".%s where object_uid = %ld and column_number = (select column_number from %s.\"%s\".%s where object_uid = %ld and column_name = '%s')",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
naCol->getColName().data(),
altColNum,
COM_ALTERED_USER_COLUMN_LIT,
altNaCol->getHbaseColFam().data(),
altNaCol->getHbaseColQual().data(),
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
tableUID,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
tableUID,
tempCol.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error1;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterColNode->ddlXns(), FALSE);
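// step 4: copy the data from the temp column back into the original
// column, which now has the new definition.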
str_sprintf(buf, "update %s set %s = %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
naCol->getColName().data(),
tempCol.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
NAString reason;
reason = "Old data could not be updated into the new column definition.";
// column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(naCol->getColName())
<< DgString0(reason);
processReturn();
goto label_error1;
}
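// step 5: drop the temp column.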
str_sprintf(buf, "alter table %s drop column %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
tempCol.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
return 0;
label_error1:
str_sprintf(buf, "alter table %s drop column %s",
naTable->getTableName().getQualifiedNameAsAnsiString().data(),
tempCol.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
processReturn();
return -1;
}
return -1;
}
void CmpSeabaseDDL::alterSeabaseTableAlterColumnDatatype(
StmtDDLAlterTableAlterColumnDatatype * alterColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterColNode->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterColNode->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
ElemDDLColDef *pColDef = alterColNode->getColToAlter()->castToElemDDLColDef();
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = pColDef->getColumnName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
// column doesn't exist. Error.
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
processReturn();
return;
}
const NAType * currType = nacol->getType();
NAType * newType = pColDef->getColumnDataType();
// If column is a LOB column , error
if ((currType->getFSDatatype() == REC_BLOB) || (currType->getFSDatatype() == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
const NAFileSet * naFS = naTable->getClusteringIndex();
const NAColumnArray &naKeyColArr = naFS->getIndexKeyColumns();
if (naKeyColArr.getColumn(colName))
{
// key column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1420)
<< DgColumnName(colName);
processReturn();
return;
}
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const NAColumnArray &naIndexColArr = naFS->getAllColumns();
if (naIndexColArr.getColumn(colName))
{
// secondary index column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1421)
<< DgColumnName(colName)
<< DgTableName(naFS->getExtFileSetName());
processReturn();
return;
}
} // for
} // secondary indexes present
if ((NOT currType->isCompatible(*newType)) &&
(NOT ((currType->getTypeQualifier() == NA_CHARACTER_TYPE) &&
(newType->getTypeQualifier() == NA_CHARACTER_TYPE))))
{
NAString reason = "Old and New datatypes must be compatible.";
// column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
// Column that can be altered by updating metadata only
// must meet these conditions:
// -- old and new column datatype must be VARCHAR
// -- old and new datatype must have the same nullable attr
// -- new col length must be greater than or equal to old length
// -- old and new character sets must be the same
NABoolean mdAlterOnly = FALSE;
if ((DFS2REC::isSQLVarChar(currType->getFSDatatype())) &&
(DFS2REC::isSQLVarChar(newType->getFSDatatype())) &&
(currType->getFSDatatype() == newType->getFSDatatype()) &&
(currType->supportsSQLnull() == newType->supportsSQLnull()) &&
(currType->getNominalSize() <= newType->getNominalSize()) &&
(((CharType*)currType)->getCharSet() == ((CharType*)newType)->getCharSet()))
mdAlterOnly = TRUE;
if ((NOT mdAlterOnly) &&
(CmpCommon::getDefault(TRAF_ALTER_COL_ATTRS) == DF_OFF))
{
NAString reason;
if (NOT ((DFS2REC::isSQLVarChar(currType->getFSDatatype())) &&
(DFS2REC::isSQLVarChar(newType->getFSDatatype()))))
reason = "Old and New datatypes must be VARCHAR.";
else if (currType->getFSDatatype() != newType->getFSDatatype())
reason = "Old and New datatypes must be the same.";
else if (((CharType*)currType)->getCharSet() != ((CharType*)newType)->getCharSet())
reason = "Old and New character sets must be the same.";
else if (currType->getNominalSize() > newType->getNominalSize())
reason = "New length must be greater than or equal to old length.";
else if (currType->supportsSQLnull() != newType->supportsSQLnull())
reason = "Old and New nullability must be the same.";
// column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
// this operation cannot be done if a transaction is already in progress.
if ((NOT mdAlterOnly) && (xnInProgress(&cliInterface)))
{
*CmpCommon::diags() << DgSqlCode(-20125)
<< DgString0("This ALTER");
processReturn();
return;
}
Int64 objUID = naTable->objectUid().castToInt64();
// if there are views on the table, save the definition and drop
// the views.
// At the end of alter, views will be recreated. If an error happens
// during view recreation, alter will fail.
NAList<NAString> viewNameList(STMTHEAP);
NAList<NAString> viewDefnList(STMTHEAP);
if (saveAndDropUsingViews(objUID, &cliInterface, viewNameList, viewDefnList))
{
NAString reason = "Error occurred while saving views.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
if (mdAlterOnly)
{
if (mdOnlyAlterColumnAttr
(catalogNamePart, schemaNamePart, objectNamePart,
naTable, nacol, newType, alterColNode,
viewNameList, viewDefnList))
{
cliRC = -1;
goto label_error;
}
}
else if (naTable->isSQLMXAlignedTable())
{
ElemDDLColDef *pColDef =
alterColNode->getColToAlter()->castToElemDDLColDef();
if (alignedFormatTableAlterColumnAttr
(catalogNamePart, schemaNamePart, objectNamePart,
naTable,
colName,
pColDef,
alterColNode->ddlXns(),
viewNameList, viewDefnList))
{
cliRC = -1;
goto label_error;
}
}
else if (hbaseFormatTableAlterColumnAttr
(catalogNamePart, schemaNamePart, objectNamePart,
naTable, nacol, newType, alterColNode))
{
cliRC = -1;
goto label_error;
}
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID);
if (cliRC < 0)
{
goto label_error;
}
label_return:
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterColNode->ddlXns(), FALSE);
processReturn();
return;
label_error:
if ((cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
alterColNode->ddlXns())) < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterColNode->ddlXns(), FALSE);
processReturn();
return;
}
/////////////////////////////////////////////////////////////////////
// this method renames an existing column to the specified new name.
//
// A column cannot be renamed if it is a system, salt, division by,
// computed, or LOB column.
//
// If any index exists on the renamed column, then the index column
// is also renamed.
//
// If views exist on the table, they are dropped and recreated after
// rename. If recreation runs into an error, then alter fails.
//
///////////////////////////////////////////////////////////////////
void CmpSeabaseDDL::alterSeabaseTableAlterColumnRename(
StmtDDLAlterTableAlterColumnRename * alterColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterColNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
if (retcode == 0) // does not exist
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_WRONG_TYPE)
<< DgString0(extTableName);
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
// Make sure user has the privilege to perform the alter column
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
// return an error if trying to alter a column from a volatile table
if (naTable->isVolatileTable())
{
*CmpCommon::diags() << DgSqlCode(-CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT);
processReturn ();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterColNode->getColumnName();
const NAString &renamedColName = alterColNode->getRenamedColumnName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
// column doesn't exist. Error.
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
processReturn();
return;
}
if ((CmpCommon::getDefault(TRAF_ALLOW_RESERVED_COLNAMES) == DF_OFF) &&
(ComTrafReservedColName(renamedColName)))
{
NAString reason = "Renamed column " + renamedColName + " is reserved for internal system usage.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
if (nacol->isComputedColumn() || nacol->isSystemColumn())
{
NAString reason = "Cannot rename system or computed column.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
const NAColumn * renNacol = nacolArr.getColumn(renamedColName);
if (renNacol)
{
// column already exists. Error.
NAString reason = "Renamed column " + renamedColName + " already exists in the table.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
const NAType * currType = nacol->getType();
// If column is a LOB column , error
if ((currType->getFSDatatype() == REC_BLOB) || (currType->getFSDatatype() == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
const NAFileSet * naFS = naTable->getClusteringIndex();
const NAColumnArray &naKeyColArr = naFS->getIndexKeyColumns();
NABoolean isPkeyCol = FALSE;
if (naKeyColArr.getColumn(colName))
{
isPkeyCol = TRUE;
}
Int64 objUID = naTable->objectUid().castToInt64();
NAList<NAString> viewNameList(STMTHEAP);
NAList<NAString> viewDefnList(STMTHEAP);
if (saveAndDropUsingViews(objUID, &cliInterface, viewNameList, viewDefnList))
{
NAString reason = "Error occurred while saving dependent views.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
Lng32 colNumber = nacol->getPosition();
char buf[4000];
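// rename the column in the COLUMNS metadata table.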
str_sprintf(buf, "update %s.\"%s\".%s set column_name = '%s', column_class = '%s' where object_uid = %ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
renamedColName.data(),
COM_ALTERED_USER_COLUMN_LIT,
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
if (isPkeyCol)
{
Lng32 divColPos = -1;
if ((naTable->hasDivisioningColumn(&divColPos)))
{
NAString reason = "Not supported with DIVISION BY clause.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
Lng32 saltColPos = -1;
if ((naTable->hasSaltedColumn(&saltColPos)))
{
NAString saltText;
cliRC = getTextFromMD(&cliInterface, objUID, COM_COMPUTED_COL_TEXT,
saltColPos, saltText);
if (cliRC < 0)
{
processReturn();
return;
}
// replace col reference with renamed col
NAString quotedColName = "\"" + colName + "\"";
NAString renamedQuotedColName = "\"" + renamedColName + "\"";
saltText = replaceAll(saltText, quotedColName, renamedQuotedColName);
cliRC = updateTextTable(&cliInterface, objUID, COM_COMPUTED_COL_TEXT,
saltColPos, saltText, NULL, -1, TRUE);
if (cliRC < 0)
{
processReturn();
return;
}
} // saltCol
} // pkeyCol being renamed
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
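// rename both the key column entry (stored with an '@' suffix) and the
// regular column entry for this index in the COLUMNS metadata table.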
str_sprintf(buf, "update %s.\"%s\".%s set column_name = '%s' || '@' where object_uid = %ld and column_name = '%s' || '@'",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
renamedColName.data(),
naFS->getIndexUID(),
colName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
str_sprintf(buf, "update %s.\"%s\".%s set column_name = '%s' where object_uid = %ld and column_name = '%s'",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
renamedColName.data(),
naFS->getIndexUID(),
colName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
} // for
} // secondary indexes present
cliRC = recreateUsingViews(&cliInterface, viewNameList, viewDefnList,
alterColNode->ddlXns());
if (cliRC < 0)
{
NAString reason = "Error occurred while recreating views due to dependency on older column definition. Drop dependent views before doing the alter.";
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, objUID);
if (cliRC < 0)
{
return;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterColNode->ddlXns(), FALSE);
processReturn();
return;
}
void CmpSeabaseDDL::alterSeabaseTableAddPKeyConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
NAString tabName = alterAddConstraint->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterAddConstraint->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
ElemDDLColRefArray &keyColumnArray = alterAddConstraint->getConstraint()->castToElemDDLConstraintPK()->getKeyColumnArray();
NAList<NAString> keyColList(HEAP, keyColumnArray.entries());
NAString pkeyStr("(");
for (Int32 j = 0; j < keyColumnArray.entries(); j++)
{
const NAString &colName = keyColumnArray[j]->getColumnName();
keyColList.insert(colName);
pkeyStr += colName;
if (j < (keyColumnArray.entries() - 1))
pkeyStr += ", ";
}
pkeyStr += ")";
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintUnique(),
naTable,
COM_UNIQUE_CONSTRAINT, //TRUE,
keyColList))
{
return;
}
// if table already has a primary key, return error.
if ((naTable->getClusteringIndex()) &&
(NOT naTable->getClusteringIndex()->hasOnlySyskey()))
{
*CmpCommon::diags()
<< DgSqlCode(-1256)
<< DgString0(extTableName);
processReturn();
return;
}
// update unique key constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
// if the table doesn't have a user-defined primary key, is empty, and doesn't have any
// dependent objects (index, views, triggers, RI, etc), then drop it and recreate it with
// this new primary key.
// Do this optimization in mode_special_4 only.
Lng32 len = 0;
Lng32 rowCount = 0;
NABoolean ms4 = FALSE;
if (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)
{
ms4 = TRUE;
char query[2000];
str_sprintf(query, "select [any 1] cast(1 as int not null) from \"%s\".\"%s\".\"%s\" for read committed access",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data());
cliRC = cliInterface.executeImmediate(query, (char*)&rowCount, &len, FALSE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
}
// if the table is not empty, or there are dependent objects/constraints,
// or the table already has a pkey/store by, then create a unique constraint.
NABoolean isStoreBy = FALSE;
Int32 nonSystemKeyCols = 0;
if (naTable->getClusteringIndex())
{
NAFileSet * naf = naTable->getClusteringIndex();
for (Lng32 i = 0; i < naf->getIndexKeyColumns().entries(); i++)
{
NAColumn * nac = naf->getIndexKeyColumns()[i];
if (NOT nac->isSystemColumn())
nonSystemKeyCols++;
else if (nac->isSyskeyColumn())
isStoreBy = TRUE;
} // for
if (nonSystemKeyCols == 0)
isStoreBy = FALSE;
} // if
if ((rowCount > 0) || // not empty
(NOT ms4) || // not mode_special_4
(naTable->hasSecondaryIndexes()) || // user indexes
(NOT naTable->getClusteringIndex()->hasSyskey()) || // user defined pkey
(isStoreBy) || // user defined store by
(naTable->getUniqueConstraints().entries() > 0) || // unique constraints
(naTable->getRefConstraints().entries() > 0) || // ref constraints
(naTable->getCheckConstraints().entries() > 0))
{
// cannot create clustered primary key constraint.
// create a unique constraint instead.
NAString cliQuery;
cliQuery = "alter table " + extTableName + " add constraint " + uniqueStr
+ " unique " + pkeyStr + ";";
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
}
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
// empty table. Drop and recreate it with the new primary key.
char * buf = NULL;
ULng32 buflen = 0;
retcode = CmpDescribeSeabaseTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
pkeyStr.data(), TRUE);
if (retcode)
return;
NAString cliQuery;
// drop this table.
cliQuery = "drop table ";
cliQuery += extTableName;
cliQuery += " no check;";
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
char cqdbuf[200];
str_sprintf(cqdbuf, "cqd traf_create_table_with_uid '%ld';",
tableUID);
cliRC = cliInterface.executeImmediate(cqdbuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// and recreate it with the new primary key.
cliQuery = "create table ";
cliQuery += extTableName;
cliQuery += " ";
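// the 'create like' text returned above is a sequence of length-prefixed
// fragments padded to 8-byte boundaries; reassemble them into the
// create statement.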
NABoolean done = FALSE;
Lng32 curPos = 0;
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
cliQuery += frag;
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
}
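// reset the cqd; if the create above failed, return after the reset.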
str_sprintf(cqdbuf, "cqd traf_create_table_with_uid '' ;");
cliInterface.executeImmediate(cqdbuf);
if (cliRC < 0)
{
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, tableUID))
{
processReturn();
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
}
return;
}
void CmpSeabaseDDL::alterSeabaseTableAddUniqueConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
NAString tabName = alterAddConstraint->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterAddConstraint->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
ElemDDLColRefArray &keyColumnArray = alterAddConstraint->getConstraint()->castToElemDDLConstraintUnique()->getKeyColumnArray();
NAList<NAString> keyColList(HEAP, keyColumnArray.entries());
NAList<NAString> keyColOrderList(HEAP, keyColumnArray.entries());
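// build the key column and ordering lists from the constraint definition.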
for (Int32 j = 0; j < keyColumnArray.entries(); j++)
{
const NAString &colName = keyColumnArray[j]->getColumnName();
keyColList.insert(colName);
if (keyColumnArray[j]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColOrderList.insert("DESC");
else
keyColOrderList.insert("ASC");
}
if (constraintErrorChecks(
&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintUnique(),
naTable,
COM_UNIQUE_CONSTRAINT,
keyColList))
{
return;
}
// update unique key constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 uniqueUID = comUID.get_value();
if (updateConstraintMD(keyColList, keyColOrderList, uniqueStr, tableUID, uniqueUID,
naTable, COM_UNIQUE_CONSTRAINT, TRUE, &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1043)
<< DgTableName(uniqueStr);
return;
}
NAList<NAString> emptyKeyColList(STMTHEAP);
if (updateIndexInfo(keyColList,
keyColOrderList,
emptyKeyColList,
uniqueStr,
uniqueUID,
catalogNamePart, schemaNamePart, objectNamePart,
naTable,
TRUE,
(CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_ON),
TRUE,
FALSE,
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, tableUID))
{
processReturn();
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
}
return;
}
// Returns 1 if the referenced table refdTable has a dependency on the
// original referencing table origRingTable.
// Returns 0 if it does not.
// Returns -1 on error.
short CmpSeabaseDDL::isCircularDependent(CorrName &ringTable,
CorrName &refdTable,
CorrName &origRingTable,
BindWA *bindWA)
{
// get natable for the referenced table.
NATable *naTable = bindWA->getNATable(refdTable);
if (naTable == NULL || bindWA->errStatus())
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(naTable->getTableName().getQualifiedNameAsString());
processReturn();
return -1;
}
// find all the tables the refdTable depends on.
const AbstractRIConstraintList &refList = naTable->getRefConstraints();
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
if (refConstr->selfRef())
continue;
CorrName cn(refConstr->getUniqueConstraintReferencedByMe().getTableName());
if (cn == origRingTable)
{
return 1; // dependency exists
}
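// recurse to check whether this referenced table in turn depends on
// the original referencing table.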
short rc = isCircularDependent(cn, cn,
origRingTable, bindWA);
if (rc)
return rc;
} // for
return 0;
}
void CmpSeabaseDDL::alterSeabaseTableAddRIConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddConstraint->getTableName();
ComObjectName referencingTableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
referencingTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = referencingTableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = referencingTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = referencingTableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = referencingTableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(referencingTableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(referencingTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
referencingTableName.getSchemaNamePart().getInternalName(),
referencingTableName.getCatalogNamePart().getInternalName());
NATable *ringNaTable = bindWA.getNATable(cn);
if (ringNaTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the add RI constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
ringNaTable->getOwner(),ringNaTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
if (ringNaTable->isHbaseMapTable())
{
// not supported
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("This alter option cannot be used for an HBase mapped table.");
deallocEHI(ehi);
processReturn();
return;
}
const ElemDDLConstraintRI *constraintNode =
alterAddConstraint->getConstraint()->castToElemDDLConstraintRI();
ComObjectName referencedTableName( constraintNode->getReferencedTableName()
, COM_TABLE_NAME);
referencedTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
if ((isSeabaseReservedSchema(referencedTableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
referencedTableName.getCatalogNamePart().getInternalName(),
referencedTableName.getSchemaNamePart().getInternalName(),
referencedTableName.getObjectNamePart().getInternalName(),
COM_BASE_TABLE_OBJECT,
TRUE);
if (retcode < 0)
{
processReturn();
return;
}
CorrName cn2(referencedTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
referencedTableName.getSchemaNamePart().getInternalName(),
referencedTableName.getCatalogNamePart().getInternalName());
NATable *refdNaTable = bindWA.getNATable(cn2);
if (refdNaTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn2.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
if (refdNaTable->getViewText())
{
*CmpCommon::diags()
<< DgSqlCode(-1127)
<< DgTableName(cn2.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
if (refdNaTable->isHbaseMapTable())
{
// not supported
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("This alter option cannot be used for an HBase mapped table.");
deallocEHI(ehi);
processReturn();
return;
}
// If the referenced and referencing tables are the same,
// reject the request. By default, self-referencing constraints
// are not allowed (controlled by CQD TRAF_ALLOW_SELF_REF_CONSTR).
if ((CmpCommon::getDefault(TRAF_ALLOW_SELF_REF_CONSTR) == DF_OFF) &&
(referencingTableName == referencedTableName))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SELF_REFERENCING_CONSTRAINT);
processReturn();
return;
}
// User must have REFERENCES privilege on the referenced table
// First check for REFERENCES at the object level (column checks happen
// later)
NABoolean noObjPriv = FALSE;
if (isAuthorizationEnabled())
{
PrivMgrUserPrivs* privs = refdNaTable->getPrivInfo();
if (privs == NULL)
{
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_RETRIEVE_PRIVS);
deallocEHI(ehi);
processReturn();
return;
}
if (!ComUser::isRootUserID() && !privs->hasReferencePriv())
noObjPriv = TRUE;
}
ElemDDLColNameArray &ringCols = alterAddConstraint->getConstraint()->castToElemDDLConstraintRI()->getReferencingColumns();
NAList<NAString> ringKeyColList(HEAP, ringCols.entries());
NAList<NAString> ringKeyColOrderList(HEAP, ringCols.entries());
NAString ringColListForValidation;
NAString ringNullList;
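// build the referencing (foreign key) column list, along with the column
// list and null predicates used below to validate existing data.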
for (Int32 j = 0; j < ringCols.entries(); j++)
{
const NAString &colName = ringCols[j]->getColumnName();
ringKeyColList.insert(colName);
ringKeyColOrderList.insert("ASC");
ringColListForValidation += "\"";
ringColListForValidation += colName;
ringColListForValidation += "\"";
if (j < (ringCols.entries() - 1))
ringColListForValidation += ", ";
ringNullList += "and ";
ringNullList += "\"";
ringNullList += colName;
ringNullList += "\"";
ringNullList += " is not null ";
}
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintRI(),
ringNaTable,
COM_FOREIGN_KEY_CONSTRAINT, //FALSE, // referencing constr
ringKeyColList))
{
return;
}
const NAString &addConstrName = alterAddConstraint->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
// Compare the referenced column list against the primary and unique
// constraints defined for the referenced table. The referenced
// column list must match one of these constraints. Note that if there
// was no referenced column list specified, the primary key is used
// and a match has automatically been found.
const ElemDDLColNameArray &referencedColNode =
constraintNode->getReferencedColumns();
NAList<NAString> refdKeyColList(HEAP, referencedColNode.entries());
NAString refdColListForValidation;
for (Int32 j = 0; j < referencedColNode.entries(); j++)
{
const NAString &colName = referencedColNode[j]->getColumnName();
refdKeyColList.insert(colName);
refdColListForValidation += "\"";
refdColListForValidation += colName;
refdColListForValidation += "\"";
if (j < (referencedColNode.entries() - 1))
refdColListForValidation += ", ";
}
if (referencedColNode.entries() == 0)
{
NAFileSet * naf = refdNaTable->getClusteringIndex();
for (Lng32 i = 0; i < naf->getIndexKeyColumns().entries(); i++)
{
NAColumn * nac = naf->getIndexKeyColumns()[i];
if (nac->isComputedColumnAlways() &&
nac->isSystemColumn())
// always computed system columns in the key are redundant,
// don't include them (also don't include them in the DDL)
continue;
const NAString &colName = nac->getColName();
refdKeyColList.insert(colName);
refdColListForValidation += "\"";
refdColListForValidation += nac->getColName();
refdColListForValidation += "\"";
if (i < (naf->getIndexKeyColumns().entries() - 1))
refdColListForValidation += ", ";
}
}
if (ringKeyColList.entries() != refdKeyColList.entries())
{
*CmpCommon::diags()
<< DgSqlCode(-1046)
<< DgConstraintName(addConstrName);
processReturn();
return;
}
const NAColumnArray &ringNACarr = ringNaTable->getNAColumnArray();
const NAColumnArray &refdNACarr = refdNaTable->getNAColumnArray();
for (Int32 i = 0; i < ringKeyColList.entries(); i++)
{
const NAString &ringColName = ringKeyColList[i];
const NAString &refdColName = refdKeyColList[i];
const NAColumn * ringNAC = ringNACarr.getColumn(ringColName);
const NAColumn * refdNAC = refdNACarr.getColumn(refdColName);
if (! refdNAC)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(refdColName);
processReturn();
return;
}
if (NOT (ringNAC->getType()->equalIgnoreNull(*refdNAC->getType())))
{
*CmpCommon::diags()
<< DgSqlCode(-1046)
<< DgConstraintName(addConstrName);
processReturn();
return;
}
// If the user/role does not have REFERENCES privilege at the object
// level, check to see if the user/role has been granted the privilege
// on all affected columns
if (noObjPriv)
{
PrivMgrUserPrivs* privs = refdNaTable->getPrivInfo();
if (!privs->hasColReferencePriv(refdNAC->getPosition()))
{
*CmpCommon::diags() << DgSqlCode(-4481)
<< DgString0("REFERENCES")
<< DgString1(referencedTableName.getObjectNamePart().getExternalName().data());
processReturn();
return;
}
}
}
// method getCorrespondingConstraint expects an empty input list if there are no
// user specified columns. Clear the refdKeyColList before calling it.
if (referencedColNode.entries() == 0)
{
refdKeyColList.clear();
}
NAString constrName;
NABoolean isPkey = FALSE;
NAList<int> reorderList(HEAP);
// Find a uniqueness constraint on the referenced table that matches
// the referenced column list (not necessarily in the original order
// of columns). Also find out how to reorder the column lists to
// match the found uniqueness constraint. This is the order in
// which we'll add the columns to the metadata (KEYS table). Note
// that SHOWDDL may therefore show the foreign key columns in a
// different order. This is a limitation of the current way we
// store RI constraints in the metadata.
if (NOT refdNaTable->getCorrespondingConstraint(refdKeyColList,
TRUE, // unique constraint
&constrName,
&isPkey,
&reorderList))
{
*CmpCommon::diags() << DgSqlCode(-CAT_REFERENCED_CONSTRAINT_DOES_NOT_EXIST)
<< DgConstraintName(addConstrName);
return;
}
if (reorderList.entries() > 0)
{
CollIndex numEntries = ringKeyColList.entries();
CMPASSERT(ringKeyColOrderList.entries() == numEntries &&
refdKeyColList.entries() == numEntries &&
reorderList.entries() == numEntries);
// re-order referencing and referenced key column lists to match
// the order of the uniqueness constraint in the referenced table
NAArray<NAString> ringTempKeyColArray(HEAP, numEntries);
NAArray<NAString> ringTempKeyColOrderArray(HEAP, numEntries);
NAArray<NAString> refdTempKeyColArray(HEAP, numEntries);
// copy the lists into temp arrays in the correct order
for (CollIndex i=0; i<numEntries; i++)
{
CollIndex newEntry = static_cast<CollIndex>(reorderList[i]);
ringTempKeyColArray.insertAt(newEntry, ringKeyColList[i]);
ringTempKeyColOrderArray.insertAt(newEntry, ringKeyColOrderList[i]);
refdTempKeyColArray.insertAt(newEntry, refdKeyColList[i]);
}
// copy back into the lists (this will assert if we have any holes in the array)
for (CollIndex j=0; j<numEntries; j++)
{
ringKeyColList[j] = ringTempKeyColArray[j];
ringKeyColOrderList[j] = ringTempKeyColOrderArray[j];
refdKeyColList[j] = refdTempKeyColArray[j];
}
} // reorder the lists if needed
// check for circular RI dependencies.
// check if referenced table cn2 refers back to the referencing table cn.
retcode = isCircularDependent(cn, cn2, cn, &bindWA);
if (retcode == 1) // dependency exists
{
*CmpCommon::diags() << DgSqlCode(-CAT_RI_CIRCULAR_DEPENDENCY)
<< DgConstraintName(addConstrName)
<< DgTableName(cn.getExposedNameAsAnsiString());
return;
}
else if (retcode < 0)
{
// error. Diags area has been populated
return;
}
if ((CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_OFF) &&
(constraintNode->isEnforced()))
{
// validate data for RI constraint.
// generate a "select" statement to validate the constraint. For example:
// SELECT count(*) FROM T1
// WHERE NOT ((T1C1,T1C2) IN (SELECT T2C1,T2C2 FROM T2))
// OR T1C1 IS NULL OR T1C2 IS NULL;
// This statement returns > 0 if any rows violate the constraint.
char * validQry =
new(STMTHEAP) char[ringColListForValidation.length() +
refdColListForValidation.length() +
ringNullList.length() +
2000];
str_sprintf(validQry, "select count(*) from \"%s\".\"%s\".\"%s\" where not ((%s) in (select %s from \"%s\".\"%s\".\"%s\")) %s;",
referencingTableName.getCatalogNamePart().getInternalName().data(),
referencingTableName.getSchemaNamePart().getInternalName().data(),
referencingTableName.getObjectNamePart().getInternalName().data(),
ringColListForValidation.data(),
refdColListForValidation.data(),
referencedTableName.getCatalogNamePart().getInternalName().data(),
referencedTableName.getSchemaNamePart().getInternalName().data(),
referencedTableName.getObjectNamePart().getInternalName().data(),
ringNullList.data());
Lng32 len = 0;
Int64 rowCount = 0;
cliRC = cliInterface.executeImmediate(validQry, (char*)&rowCount, &len, FALSE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
if (rowCount > 0)
{
*CmpCommon::diags() << DgSqlCode(-1143)
<< DgConstraintName(addConstrName)
<< DgTableName(referencingTableName.getObjectNamePart().getInternalName().data())
<< DgString0(referencedTableName.getObjectNamePart().getInternalName().data())
<< DgString1(validQry);
return;
}
}
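// look up the UID of the matching uniqueness (or primary key) constraint
// on the referenced table.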
ComObjectName refdConstrName(constrName);
refdConstrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString refdCatName = refdConstrName.getCatalogNamePartAsAnsiString();
const NAString refdSchName = refdConstrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString refdObjName = refdConstrName.getObjectNamePartAsAnsiString(TRUE);
Int64 refdConstrUID =
getObjectUID(&cliInterface,
refdCatName.data(), refdSchName.data(), refdObjName.data(),
(isPkey ? COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT :
COM_UNIQUE_CONSTRAINT_OBJECT_LIT));
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 ringConstrUID = comUID.get_value();
if (updateConstraintMD(ringKeyColList, ringKeyColOrderList, uniqueStr, tableUID, ringConstrUID,
ringNaTable, COM_FOREIGN_KEY_CONSTRAINT,
constraintNode->isEnforced(), &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateRIConstraintMD(ringConstrUID, refdConstrUID,
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateIndexInfo(ringKeyColList,
ringKeyColOrderList,
refdKeyColList,
uniqueStr,
ringConstrUID,
catalogNamePart, schemaNamePart, objectNamePart,
ringNaTable,
FALSE,
(CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_ON),
constraintNode->isEnforced(),
TRUE, // because of the way the data is recorded in the
// metadata, the indexes of referencing and referenced
// tables need to have their columns in the same
// sequence (differences in ASC/DESC are ok)
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (NOT constraintNode->isEnforced())
{
*CmpCommon::diags()
<< DgSqlCode(1313)
<< DgString0(addConstrName);
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, tableUID))
{
processReturn();
deallocEHI(ehi);
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
}
// remove natable for the table being referenced
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn2,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
// regenerate and store packed descriptor in metadata for referenced table.
if (updateObjectRedefTime
(&cliInterface,
referencedTableName.getCatalogNamePart().getInternalName(),
referencedTableName.getSchemaNamePart().getInternalName(),
referencedTableName.getObjectNamePart().getInternalName(),
COM_BASE_TABLE_OBJECT_LIT, -1,
refdNaTable->objectUid().castToInt64()))
{
processReturn();
deallocEHI(ehi);
return;
}
return;
}
short CmpSeabaseDDL::getCheckConstraintText(StmtDDLAddConstraintCheck *addCheckNode,
NAString &qualifiedText)
{
// ComString qualifiedText;
const ParNameLocList &nameLocList = addCheckNode->getNameLocList();
const ParNameLoc *pNameLoc = NULL;
const char *pInputStr = nameLocList.getInputStringPtr();
StringPos inputStrPos = addCheckNode->getStartPosition();
// CharInfo::CharSet mapCS = (CharInfo::CharSet) SqlParser_ISO_MAPPING;
for (size_t x = 0; x < nameLocList.entries(); x++)
{
pNameLoc = &nameLocList[x];
const NAString &nameExpanded = pNameLoc->getExpandedName(FALSE/*no assert*/);
size_t nameAsIs = 0;
size_t nameLenInBytes = 0;
//
// When the character set of the input string is a variable-length/width
// multi-byte character set, the value returned by getNameLength()
// may not be numerically equal to the number of bytes in the original
// input string that we need to skip. So, we get the character
// conversion routines to tell us how many bytes we need to skip.
//
enum cnv_charset eCnvCS = convertCharsetEnum(nameLocList.getInputStringCharSet());
const char *str_to_test = (const char *) &pInputStr[pNameLoc->getNamePosition()];
const int max_bytes2cnv = addCheckNode->getEndPosition()
- pNameLoc->getNamePosition() + 1;
const char *tmp_out_bufr = new (STMTHEAP) char[max_bytes2cnv * 4 + 10 /* Ensure big enough! */ ];
char * p1stUnstranslatedChar = NULL;
int cnvErrStatus = LocaleToUTF16(
cnv_version1 // in - const enum cnv_version version
, str_to_test // in - const char *in_bufr
, max_bytes2cnv // in - const int in_len
, tmp_out_bufr // out - const char *out_bufr
, max_bytes2cnv * 4 // in - const int out_len
, eCnvCS // in - enum cnv_charset charset
, p1stUnstranslatedChar // out - char * & first_untranslated_char
, NULL // out - unsigned int *output_data_len_p
, 0 // in - const int cnv_flags
, (int)FALSE // in - const int addNullAtEnd_flag
, NULL // out - unsigned int * translated_char_cnt_p
, pNameLoc->getNameLength() // in - unsigned int max_chars_to_convert
);
// NOTE: No errors should be possible -- string has been converted before.
NADELETEBASIC (tmp_out_bufr, STMTHEAP);
nameLenInBytes = p1stUnstranslatedChar - str_to_test;
// If name not expanded, then use the original name as is
if (nameExpanded.isNull())
nameAsIs = nameLenInBytes;
// Copy from (last position in) input string up to current name
qualifiedText += ComString(&pInputStr[inputStrPos],
pNameLoc->getNamePosition() - inputStrPos +
nameAsIs);
if (NOT nameAsIs) // original name to be replaced with expanded
{
size_t namePos = pNameLoc->getNamePosition();
size_t nameLen = pNameLoc->getNameLength();
// Solution 10-080506-3000
// For description and explanation of the fix, please read the
// comments in method CatExecCreateView::buildViewText() in
// module CatExecCreateView.cpp
// Example: CREATE TABLE T ("c1" INT NOT NULL PRIMARY KEY,
// C2 INT CHECK (C2 BETWEEN 0 AND"c1")) NO PARTITION;
if ( pInputStr[namePos] EQU '"'
AND nameExpanded.data()[0] NEQ '"'
AND namePos > 1
AND ( pInputStr[namePos - 1] EQU '_' OR
isAlNumIsoMapCS((unsigned char)pInputStr[namePos - 1]) )
)
{
// insert a blank separator to avoid syntax error
// WITHOUT FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_788388997_8627
// CHECK (CAT.SCH.T.C2 BETWEEN 0 ANDCAT.SCH.T."c1") DROPPABLE ;
// ... ^^^^^^
qualifiedText += " "; // the FIX
// WITH FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_788388997_8627
// CHECK (CAT.SCH.T.C2 BETWEEN 0 AND CAT.SCH.T."c1") DROPPABLE ;
// ... ^^^
}
qualifiedText += nameExpanded;
// Problem reported in solution 10-080506-3000
// Example: CREATE TABLE T (C1 INT NOT NULL PRIMARY KEY,
// C2 INT CHECK ("C2"IN(1,2,3))) NO PARTITION;
if ( pInputStr[namePos + nameLen - 1] EQU '"'
AND nameExpanded.data()[nameExpanded.length() - 1] NEQ '"'
AND pInputStr[namePos + nameLen] NEQ '\0'
AND ( pInputStr[namePos + nameLen] EQU '_' OR
isAlNumIsoMapCS((unsigned char)pInputStr[namePos + nameLen]) )
)
{
// insert a blank separator to avoid syntax error
// WITHOUT FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_654532688_9627
// CHECK (CAT.SCH.T.C2IN (1, 2, 3)) DROPPABLE ;
// ... ^^^^
qualifiedText += " "; // the FIX
// WITH FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_654532688_9627
// CHECK (CAT.SCH.T.C2 IN (1, 2, 3)) DROPPABLE ;
// ... ^^^
}
} // if (NOT nameAsIs)
inputStrPos = pNameLoc->getNamePosition() + nameLenInBytes;
} // for
// CAT_ASSERT(addCheckNode->getEndPosition() >= inputStrPos);
qualifiedText += ComString(&pInputStr[inputStrPos],
addCheckNode->getEndPosition() - inputStrPos + 1);
PrettifySqlText(qualifiedText, NULL);
return 0;
}
// non-static method; calls the static overload below and processReturn() on error
short CmpSeabaseDDL::getTextFromMD(
ExeCliInterface * cliInterface,
Int64 textUID,
ComTextType textType,
Lng32 textSubID,
NAString &outText,
NABoolean binaryData)
{
short retcode = getTextFromMD(getSystemCatalog(),
cliInterface,
textUID,
textType,
textSubID,
outText,
binaryData);
if (retcode)
processReturn();
return retcode;
}
// static version of this method
short CmpSeabaseDDL::getTextFromMD(const char * catalogName,
ExeCliInterface * cliInterface,
Int64 textUID,
ComTextType textType,
Lng32 textSubID,
NAString &outText,
NABoolean binaryData)
{
Lng32 cliRC;
char query[1000];
str_sprintf(query, "select octet_length(text), text from %s.\"%s\".%s where text_uid = %ld and text_type = %d and sub_id = %d for read committed access order by seq_num",
catalogName, SEABASE_MD_SCHEMA, SEABASE_TEXT,
textUID, static_cast<int>(textType), textSubID);
Queue * textQueue = NULL;
cliRC = cliInterface->fetchAllRows(textQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// glue text together
NAString binaryText;
for (Lng32 idx = 0; idx < textQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)textQueue->getNext();
Lng32 len = *(Lng32*)vi->get(0);
char * text = (char*)vi->get(1);
if (binaryData)
binaryText.append(text, len);
else
outText.append(text, len);
}
// if binary data, decode it and then return
if (binaryData)
{
Lng32 decodedMaxLen = str_decoded_len(binaryText.length());
char * decodedData = new(STMTHEAP) char[decodedMaxLen];
Lng32 decodedLen =
str_decode(decodedData, decodedMaxLen,
binaryText.data(), binaryText.length());
if (decodedLen < 0)
return -1;
outText.append(decodedData, decodedLen);
}
return 0;
}
void CmpSeabaseDDL::alterSeabaseTableAddCheckConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
StmtDDLAddConstraintCheck *alterAddCheckNode = alterAddConstraint
->castToStmtDDLAddConstraintCheck();
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterAddConstraint->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterAddConstraint->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
const ParCheckConstraintColUsageList &colList =
alterAddCheckNode->getColumnUsageList();
for (CollIndex cols = 0; cols < colList.entries(); cols++)
{
const ParCheckConstraintColUsage &ckColUsg = colList[cols];
const ComString &colName = ckColUsg.getColumnName();
if ((colName EQU "SYSKEY") &&
(naTable->getClusteringIndex()->hasSyskey()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SYSKEY_COL_NOT_ALLOWED_IN_CK_CNSTRNT)
<< DgColumnName( "SYSKEY")
<< DgTableName(extTableName);
processReturn();
return;
}
}
NAList<NAString> keyColList(STMTHEAP);
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintCheck(),
naTable,
COM_CHECK_CONSTRAINT,
keyColList))
{
return;
}
// update check constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
// get check text
NAString checkConstrText;
if (getCheckConstraintText(alterAddCheckNode, checkConstrText))
{
return;
}
if (CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_OFF)
{
// validate data for check constraint.
// generate a "select" statement to validate the constraint. For example:
// SELECT count(*) FROM T1 where not checkConstrText;
// This statement returns > 0 if any rows violate the constraint.
char * validQry = new(STMTHEAP) char[checkConstrText.length() + 2000];
str_sprintf(validQry, "select count(*) from \"%s\".\"%s\".\"%s\" where not %s",
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
checkConstrText.data());
Lng32 len = 0;
Int64 rowCount = 0;
cliRC = cliInterface.executeImmediate(validQry, (char*)&rowCount, &len, FALSE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
if (rowCount > 0)
{
*CmpCommon::diags() << DgSqlCode(-1083)
<< DgConstraintName(uniqueStr);
return;
}
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 checkUID = comUID.get_value();
NAList<NAString> emptyList(STMTHEAP);
if (updateConstraintMD(keyColList, emptyList, uniqueStr, tableUID, checkUID,
naTable, COM_CHECK_CONSTRAINT, TRUE, &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateTextTable(&cliInterface, checkUID, COM_CHECK_CONSTR_TEXT, 0,
checkConstrText))
{
processReturn();
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, tableUID))
{
processReturn();
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
alterAddConstraint->ddlXns(), FALSE);
}
return;
}
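// Drops a unique, primary key, referential or check constraint: looks up the
// constraint UID and type, verifies it exists on the table and may be dropped,
// deletes its metadata, drops the implicit index backing it (unless 'no check'
// is specified), updates redefinition times and invalidates cached NATables.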
void CmpSeabaseDDL::alterSeabaseTableDropConstraint(
StmtDDLDropConstraint * alterDropConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString tabName = alterDropConstraint->getTableName();
NAString catalogNamePart, schemaNamePart, objectNamePart;
NAString extTableName, extNameForHbase;
NATable * naTable = NULL;
CorrName cn;
retcode =
setupAndErrorChecks(tabName,
alterDropConstraint->getOrigTableNameAsQualifiedName(),
currCatName, currSchName,
catalogNamePart, schemaNamePart, objectNamePart,
extTableName, extNameForHbase, cn,
&naTable,
FALSE, FALSE,
&cliInterface);
if (retcode < 0)
{
processReturn();
return;
}
const NAString &dropConstrName = alterDropConstraint->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
const NAString &constrCatName = alterDropConstraint->
getConstraintNameAsQualifiedName().getCatalogName();
const NAString &constrSchName = alterDropConstraint->
getConstraintNameAsQualifiedName().getSchemaName();
const NAString &constrObjName = alterDropConstraint->
getConstraintNameAsQualifiedName().getObjectName();
char outObjType[10];
Int64 constrUID = getObjectUID(&cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
NULL,
"object_type = '" COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT"' or object_type = '" COM_UNIQUE_CONSTRAINT_OBJECT_LIT"' or object_type = '" COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT"' or object_type = '" COM_CHECK_CONSTRAINT_OBJECT_LIT"' ",
outObjType);
if (constrUID < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-1005)
<< DgConstraintName(dropConstrName);
processReturn();
return;
}
NABoolean isUniqConstr =
((strcmp(outObjType, COM_UNIQUE_CONSTRAINT_OBJECT_LIT) == 0) ||
(strcmp(outObjType, COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT) == 0));
NABoolean isRefConstr =
(strcmp(outObjType, COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean isPkeyConstr =
(strcmp(outObjType, COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean isCheckConstr =
(strcmp(outObjType, COM_CHECK_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean constrFound = FALSE;
if (isUniqConstr)
{
constrFound = FALSE;
const AbstractRIConstraintList &ariList = naTable->getUniqueConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = ariList[i];
UniqueConstraint * uniqueConstr = (UniqueConstraint*)ariList[i];
const NAString &tableConstrName =
uniqueConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (dropConstrName == tableConstrName)
{
constrFound = TRUE;
if (uniqueConstr->hasRefConstraintsReferencingMe())
{
*CmpCommon::diags()
<< DgSqlCode(-1050);
processReturn();
return;
}
}
} // for
if (NOT constrFound)
{
*CmpCommon::diags() << DgSqlCode(-1052);
processReturn();
return;
}
if (isPkeyConstr)
{
*CmpCommon::diags() << DgSqlCode(-1255)
<< DgString0(dropConstrName)
<< DgString1(extTableName);
processReturn();
return;
}
}
NATable *otherNaTable = NULL;
Int64 otherConstrUID = 0;
if (isRefConstr)
{
constrFound = FALSE;
RefConstraint * refConstr = NULL;
const AbstractRIConstraintList &ariList = naTable->getRefConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (dropConstrName == tableConstrName)
{
constrFound = TRUE;
refConstr = (RefConstraint*)ariConstr;
}
} // for
if (NOT constrFound)
{
*CmpCommon::diags() << DgSqlCode(-1052);
processReturn();
return;
}
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
otherNaTable = bindWA.getNATable(otherCN);
if (otherNaTable == NULL || bindWA.errStatus())
{
processReturn();
return;
}
AbstractRIConstraint * otherConstr =
refConstr->findConstraint(&bindWA, refConstr->getUniqueConstraintReferencedByMe());
const NAString& otherCatName =
otherConstr->getConstraintName().getCatalogName();
const NAString& otherSchName =
otherConstr->getConstraintName().getSchemaName();
const NAString& otherConstrName =
otherConstr->getConstraintName().getObjectName();
otherConstrUID = getObjectUID(&cliInterface,
otherCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_UNIQUE_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
CmpCommon::diags()->clear();
otherConstrUID = getObjectUID(&cliInterface,
otherCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
processReturn();
return;
}
}
}
NABoolean indexFound = FALSE;
Lng32 isExplicit = 0;
Lng32 keytag = 0;
if ((isUniqConstr || isRefConstr) && (NOT isPkeyConstr))
{
// find the index that corresponds to this constraint
char query[1000];
// the cardinality hint should force a nested join with
// TABLE_CONSTRAINTS as the outer and INDEXES as the inner
str_sprintf(query, "select I.keytag, I.is_explicit from %s.\"%s\".%s T, %s.\"%s\".%s I /*+ cardinality 1e9 */ where T.table_uid = %ld and T.constraint_uid = %ld and T.table_uid = I.base_table_uid and T.index_uid = I.index_uid ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
naTable->objectUid().castToInt64(),
constrUID);
Queue * indexQueue = NULL;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
cliRC = cliInterface.fetchAllRows(indexQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
if (indexQueue->numEntries() > 1)
{
*CmpCommon::diags()
<< DgSqlCode(-1005)
<< DgConstraintName(dropConstrName);
processReturn();
return;
}
if (indexQueue->numEntries() ==1)
{
indexFound = TRUE;
indexQueue->position();
OutputInfo * oi = (OutputInfo*)indexQueue->getCurr();
keytag = *(Lng32*)oi->get(0);
isExplicit = *(Lng32*)oi->get(1);
}
}
if (deleteConstraintInfoFromSeabaseMDTables(&cliInterface,
naTable->objectUid().castToInt64(),
(otherNaTable ? otherNaTable->objectUid().castToInt64() : 0),
constrUID,
otherConstrUID,
constrCatName,
constrSchName,
constrObjName,
(isPkeyConstr ? COM_PRIMARY_KEY_CONSTRAINT_OBJECT :
(isUniqConstr ? COM_UNIQUE_CONSTRAINT_OBJECT :
(isRefConstr ? COM_REFERENTIAL_CONSTRAINT_OBJECT :
COM_CHECK_CONSTRAINT_OBJECT)))))
{
processReturn();
return;
}
// if the index corresponding to this constraint is an implicit index and 'no check'
// option is not specified, drop it.
if (((indexFound) && (NOT isExplicit) && (keytag != 0)) &&
(alterDropConstraint->getDropBehavior() != COM_NO_CHECK_DROP_BEHAVIOR))
{
char buf[4000];
str_sprintf(buf, "drop index \"%s\".\"%s\".\"%s\" no check",
constrCatName.data(), constrSchName.data(), constrObjName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
}
Int64 tableUID = naTable->objectUid().castToInt64();
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, -1, tableUID))
{
processReturn();
return;
}
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterDropConstraint->ddlXns(), FALSE);
if (isRefConstr && otherNaTable)
{
CorrName otherCn(
otherNaTable->getExtendedQualName().getQualifiedNameObj(), STMTHEAP);
if (updateObjectRedefTime
(&cliInterface,
otherCn.getQualifiedNameObj().getCatalogName(),
otherCn.getQualifiedNameObj().getSchemaName(),
otherCn.getQualifiedNameObj().getObjectName(),
COM_BASE_TABLE_OBJECT_LIT, -1,
otherNaTable->objectUid().castToInt64()))
{
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->removeNATable
(otherCn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
alterDropConstraint->ddlXns(), FALSE);
}
return;
}
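// Common driver for GRANT and REVOKE on tables, views, libraries, routines and
// sequences. Resolves the object and its UID/owner (registering native
// Hive/HBase objects on demand), determines the effective grantor, validates
// the grantee and WITH GRANT OPTION rules, and calls the privilege manager to
// grant or revoke the requested object and column privileges.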
void CmpSeabaseDDL::seabaseGrantRevoke(
StmtDDLNode * stmtDDLNode,
NABoolean isGrant,
NAString &currCatName,
NAString &currSchName,
NABoolean internalCall)
{
Lng32 retcode = 0;
if (!isAuthorizationEnabled())
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHORIZATION_NOT_ENABLED);
return;
}
StmtDDLGrant * grantNode = NULL;
StmtDDLRevoke * revokeNode = NULL;
NAString tabName;
NAString origTabName;
ComAnsiNameSpace nameSpace;
NAString grantedByName;
NABoolean isGrantedBySpecified = FALSE;
if (isGrant)
{
grantNode = stmtDDLNode->castToStmtDDLGrant();
tabName = grantNode->getTableName();
origTabName = grantNode->getOrigObjectName();
nameSpace = grantNode->getGrantNameAsQualifiedName().getObjectNameSpace();
isGrantedBySpecified = grantNode->isByGrantorOptionSpecified();
grantedByName =
isGrantedBySpecified ? grantNode->getByGrantor()->getAuthorizationIdentifier(): "";
}
else
{
revokeNode = stmtDDLNode->castToStmtDDLRevoke();
tabName = revokeNode->getTableName();
origTabName = revokeNode->getOrigObjectName();
nameSpace = revokeNode->getRevokeNameAsQualifiedName().getObjectNameSpace();
isGrantedBySpecified = revokeNode->isByGrantorOptionSpecified();
grantedByName =
isGrantedBySpecified ? revokeNode->getByGrantor()->getAuthorizationIdentifier(): "";
}
ComObjectName origTableName(origTabName, COM_TABLE_NAME);
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(origTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
origTableName.getSchemaNamePart().getInternalName(),
origTableName.getCatalogNamePart().getInternalName());
// set up common information for all grantees
ComObjectType objectType = COM_BASE_TABLE_OBJECT;
switch (nameSpace)
{
case COM_LIBRARY_NAME:
objectType = COM_LIBRARY_OBJECT;
break;
case COM_UDF_NAME:
case COM_UDR_NAME:
objectType = COM_USER_DEFINED_ROUTINE_OBJECT;
break;
case COM_SEQUENCE_GENERATOR_NAME:
objectType = COM_SEQUENCE_GENERATOR_OBJECT;
break;
default:
objectType = COM_BASE_TABLE_OBJECT;
}
// get the objectUID and objectOwner
Int64 objectUID = 0;
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
Int64 objectFlags = 0 ;
NATable *naTable = NULL;
ComObjectName tableName(tabName, COM_TABLE_NAME);
if (objectType == COM_BASE_TABLE_OBJECT)
{
naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
objectUID = (int64_t)naTable->objectUid().get_value();
objectOwnerID = (int32_t)naTable->getOwner();
schemaOwnerID = naTable->getSchemaOwner();
objectType = naTable->getObjectType();
if (naTable->isView())
objectType = COM_VIEW_OBJECT;
NAString tns = naTable->getTableName().getQualifiedNameAsAnsiString();
tableName = ComObjectName(tns);
}
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
ElemDDLGranteeArray & pGranteeArray =
(isGrant ? grantNode->getGranteeArray() : revokeNode->getGranteeArray());
ElemDDLPrivActArray & privActsArray =
(isGrant ? grantNode->getPrivilegeActionArray() :
revokeNode->getPrivilegeActionArray());
NABoolean allPrivs =
(isGrant ? grantNode->isAllPrivilegesSpecified() :
revokeNode->isAllPrivilegesSpecified());
NABoolean isWGOSpecified =
(isGrant ? grantNode->isWithGrantOptionSpecified() :
revokeNode->isGrantOptionForSpecified());
std::vector<PrivType> objectPrivs;
std::vector<ColPrivSpec> colPrivs;
if (allPrivs)
objectPrivs.push_back(ALL_PRIVS);
else
if (!checkSpecifiedPrivs(privActsArray,extTableName.data(),objectType,
naTable,objectPrivs,colPrivs))
{
processReturn();
return;
}
// If column privs specified for non SELECT ops for Hive/HBase native tables,
// return an error
if (naTable &&
(naTable->getTableName().isHive() || naTable->getTableName().isHbase()) &&
(colPrivs.size() > 0))
{
if (hasValue(colPrivs, INSERT_PRIV) ||
hasValue(colPrivs, UPDATE_PRIV) ||
hasValue(colPrivs, REFERENCES_PRIV))
{
NAString text1("INSERT, UPDATE, REFERENCES");
NAString text2("Hive columns on");
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_PRIV_FOR_OBJECT)
<< DgString0(text1.data())
<< DgString1(text2.data())
<< DgTableName(extTableName);
processReturn();
return;
}
}
// Prepare to call privilege manager
NAString MDLoc;
CONCAT_CATSCH(MDLoc, getSystemCatalog(), SEABASE_MD_SCHEMA);
NAString privMgrMDLoc;
CONCAT_CATSCH(privMgrMDLoc, getSystemCatalog(), SEABASE_PRIVMGR_SCHEMA);
PrivMgrCommands command(std::string(MDLoc.data()),
std::string(privMgrMDLoc.data()),
CmpCommon::diags());
// If the object is a metadata table or a privilege manager table, don't
// allow the privilege to be grantable.
NABoolean isMDTable = (isSeabaseMD(tableName) ||
isSeabasePrivMgrMD(tableName));
if (isMDTable && isWGOSpecified)
{
*CmpCommon::diags() << DgSqlCode(-CAT_WGO_NOT_ALLOWED);
processReturn();
return;
}
// Grants/revokes of the select privilege on metadata tables are allowed
// Grants/revokes of other relevant privileges are allowed if parser flag
// INTERNAL_QUERY_FROM_EXEUTIL is set
// Revoke: allow ALL and ALL_DML to be specified
if (isMDTable && !Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) &&
!isMDGrantRevokeOK(objectPrivs,colPrivs,isGrant))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SMD_PRIVS_CANNOT_BE_CHANGED);
processReturn();
return;
}
// Hive tables must be registered in traf metadata
if (objectUID == 0 &&
naTable && naTable->isHiveTable())
{
// Register this hive table in traf metadata
// Privilege checks performed by register code
char query[(ComMAX_ANSI_IDENTIFIER_EXTERNAL_LEN*4) + 100];
snprintf(query, sizeof(query),
"register internal hive %s if not exists %s.\"%s\".\"%s\"",
(naTable->isView() ? "view" : "table"),
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data());
Lng32 retcode = cliInterface.executeImmediate(query);
if (retcode < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// reload NATable to get registered objectUID
naTable = bindWA.getNATable(cn);
if (naTable == NULL)
{
SEABASEDDL_INTERNAL_ERROR("Bad NATable pointer in seabaseGrantRevoke");
return;
}
objectUID = (int64_t)naTable->objectUid().get_value();
objectOwnerID = (int32_t)naTable->getOwner();
schemaOwnerID = naTable->getSchemaOwner();
objectType = naTable->getObjectType();
if (naTable->isView())
objectType = COM_VIEW_OBJECT;
}
// HBase tables must be registered in traf metadata
if (objectUID == 0 &&
naTable &&
((naTable->isHbaseCellTable()) || (naTable->isHbaseRowTable())))
{
// For native hbase tables, the grantor must be DB__ROOT or belong
// to one of the admin roles: DB__ROOTROLE or DB__HBASEROLE.
// In HBase you must be an admin; DB__ROOTROLE and DB__HBASEROLE
// are the equivalent of an admin.
if (!ComUser::isRootUserID() &&
!ComUser::currentUserHasRole(ROOT_ROLE_ID) &&
!ComUser::currentUserHasRole(HBASE_ROLE_ID))
{
*CmpCommon::diags() << DgSqlCode (-CAT_NOT_AUTHORIZED);
processReturn();
return;
}
// register this hive table in traf metadata
char query[(ComMAX_ANSI_IDENTIFIER_EXTERNAL_LEN*4) + 100];
snprintf(query, sizeof(query),
"register internal hbase table if not exists %s.\"%s\".\"%s\"",
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data());
Lng32 retcode = cliInterface.executeImmediate(query);
if (retcode < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// reload NATable to get registered objectUID
naTable = bindWA.getNATable(cn);
if (naTable == NULL)
{
SEABASEDDL_INTERNAL_ERROR("Bad NATable pointer in seabaseGrantRevoke");
return;
}
objectUID = (int64_t)naTable->objectUid().get_value();
objectOwnerID = (int32_t)naTable->getOwner();
schemaOwnerID = naTable->getSchemaOwner();
objectType = naTable->getObjectType();
}
// for metadata tables, the objectUID is not initialized in the NATable
// structure
if (objectUID == 0)
{
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
objectUID = getObjectInfo(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(), objectType,
objectOwnerID,schemaOwnerID,objectFlags);
if (objectUID == -1 || objectOwnerID == 0)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and object owner for grant/revoke request");
processReturn();
return;
}
}
// Determine effective grantor ID and grantor name based on GRANTED BY clause
// current user, and object owner
//
// NOTE: If the user can grant privilege based on a role, we may want the
// effective grantor to be the role instead of the current user.
Int32 effectiveGrantorID;
std::string effectiveGrantorName;
PrivStatus result = command.getGrantorDetailsForObject(
isGrantedBySpecified,
std::string(grantedByName.data()),
objectOwnerID,
effectiveGrantorID,
effectiveGrantorName);
if (result != STATUS_GOOD)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting grantor ID and grantor name");
processReturn();
return;
}
std::string objectName (extTableName.data());
// Map hbase map table to external name
if (ComIsHBaseMappedIntFormat(catalogNamePart, schemaNamePart))
{
NAString newCatName;
NAString newSchName;
ComConvertHBaseMappedIntToExt(catalogNamePart, schemaNamePart,
newCatName, newSchName);
objectName = newCatName.data() + std::string(".\"");
objectName += newSchName.data() + std::string("\".");
objectName += tableName.getObjectNamePart().getExternalName();
}
else
objectName = extTableName.data();
// For now, only support one grantee per request
// TBD: support multiple grantees - a testing effort?
if (pGranteeArray.entries() > 1)
{
*CmpCommon::diags() << DgSqlCode (-CAT_ONLY_ONE_GRANTEE_ALLOWED);
processReturn();
return;
}
for (CollIndex j = 0; j < pGranteeArray.entries(); j++)
{
NAString authName(pGranteeArray[j]->getAuthorizationIdentifier());
Int32 grantee;
if (pGranteeArray[j]->isPublic())
{
// don't allow WGO for public auth ID
if (isWGOSpecified)
{
*CmpCommon::diags() << DgSqlCode(-CAT_WGO_NOT_ALLOWED);
processReturn();
return;
}
grantee = PUBLIC_USER;
authName = PUBLIC_AUTH_NAME;
}
else
{
Int16 retcode = ComUser::getAuthIDFromAuthName(authName.data(), grantee);
if (retcode == FENOTFOUND)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(authName.data());
processReturn();
return;
}
else if (retcode != FEOK)
{
*CmpCommon::diags() << DgSqlCode (-CAT_INTERNAL_EXCEPTION_ERROR)
<< DgString0(__FILE__)
<< DgInt0(__LINE__)
<< DgString1("verifying grantee");
processReturn();
return;
}
// Don't allow WGO for roles
if (CmpSeabaseDDLauth::isRoleID(grantee) && isWGOSpecified &&
CmpCommon::getDefault(ALLOW_WGO_FOR_ROLES) == DF_OFF)
{
// If grantee is system role, allow grant
Int32 numberRoles = sizeof(systemRoles)/sizeof(SystemAuthsStruct);
NABoolean isSystemRole = FALSE;
for (Int32 i = 0; i < numberRoles; i++)
{
const SystemAuthsStruct &roleDefinition = systemRoles[i];
NAString systemRole = roleDefinition.authName;
if (systemRole == authName)
{
isSystemRole = TRUE;
break;
}
}
if (!isSystemRole)
{
*CmpCommon::diags() << DgSqlCode(-CAT_WGO_NOT_ALLOWED);
processReturn();
return;
}
}
}
std::string granteeName (authName.data());
if (isGrant)
{
result = command.grantObjectPrivilege(objectUID,
objectName,
objectType,
grantee,
granteeName,
effectiveGrantorID,
effectiveGrantorName,
objectPrivs,
colPrivs,
allPrivs,
isWGOSpecified);
}
else
{
result = command.revokeObjectPrivilege(objectUID,
objectName,
objectType,
grantee,
granteeName,
effectiveGrantorID,
effectiveGrantorName,
objectPrivs,
colPrivs,
allPrivs,
isWGOSpecified);
}
}
if (result == STATUS_ERROR)
return;
if (isHbase(tableName))
hbaseGrantRevoke(stmtDDLNode, isGrant, currCatName, currSchName);
// Adjust the stored descriptor
char objectTypeLit[3] = {0};
strncpy(objectTypeLit,PrivMgr::ObjectEnumToLit(objectType),2);
updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
objectTypeLit, -1, objectUID);
return;
}
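// Maps SQL privileges (SELECT/INSERT/DELETE/UPDATE/CREATE) to HBase
// READ/WRITE/CREATE permissions and grants or revokes them on the underlying
// HBase table for each grantee through ExpHbaseInterface.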
void CmpSeabaseDDL::hbaseGrantRevoke(
StmtDDLNode * stmtDDLNode,
NABoolean isGrant,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
StmtDDLGrant * grantNode = NULL;
StmtDDLRevoke * revokeNode = NULL;
NAString tabName;
if (isGrant)
{
grantNode = stmtDDLNode->castToStmtDDLGrant();
tabName = grantNode->getTableName();
}
else
{
revokeNode = stmtDDLNode->castToStmtDDLRevoke();
tabName = revokeNode->getTableName();
}
ComObjectName tableName(tabName);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (isSeabaseReservedSchema(tableName))
{
*CmpCommon::diags() << DgSqlCode(-1118)
<< DgTableName(extTableName);
processReturn();
return;
}
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
ElemDDLGranteeArray & pGranteeArray =
(isGrant ? grantNode->getGranteeArray() : revokeNode->getGranteeArray());
ElemDDLPrivActArray & pPrivActsArray =
(isGrant ? grantNode->getPrivilegeActionArray() :
revokeNode->getPrivilegeActionArray());
NABoolean allPrivs =
(isGrant ? grantNode->isAllPrivilegesSpecified() :
revokeNode->isAllPrivilegesSpecified());
TextVec userPermissions;
if (allPrivs)
{
userPermissions.push_back("READ");
userPermissions.push_back("WRITE");
userPermissions.push_back("CREATE");
}
else
{
for (Lng32 i = 0; i < pPrivActsArray.entries(); i++)
{
switch (pPrivActsArray[i]->getOperatorType() )
{
case ELM_PRIV_ACT_SELECT_ELEM:
{
userPermissions.push_back("READ");
break;
}
case ELM_PRIV_ACT_INSERT_ELEM:
case ELM_PRIV_ACT_DELETE_ELEM:
case ELM_PRIV_ACT_UPDATE_ELEM:
{
userPermissions.push_back("WRITE");
break;
}
case ELM_PRIV_ACT_CREATE_ELEM:
{
userPermissions.push_back("CREATE");
break;
}
default:
{
NAString privType = "UNKNOWN";
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_PRIV_FOR_OBJECT)
<< DgString0(privType)
<< DgString1(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
} // end switch
} // for
}
for (CollIndex j = 0; j < pGranteeArray.entries(); j++)
{
NAString authName(pGranteeArray[j]->getAuthorizationIdentifier());
if (isGrant)
retcode = ehi->grant(authName.data(), extNameForHbase.data(), userPermissions);
else
retcode = ehi->revoke(authName.data(), extNameForHbase.data(), userPermissions);
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< (isGrant ? DgString0((char*)"ExpHbaseInterface::grant()") :
DgString0((char*)"ExpHbaseInterface::revoke()"))
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr());
deallocEHI(ehi);
processReturn();
return;
}
}
retcode = ehi->close();
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::close()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr());
deallocEHI(ehi);
processReturn();
return;
}
deallocEHI(ehi);
processReturn();
return;
}
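// Creates a native HBase table (CREATE HBASE TABLE): checks authorization,
// verifies the table does not already exist in HBase, creates it with the
// specified column families and HBase options, then registers it in
// Trafodion metadata.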
void CmpSeabaseDDL::createNativeHbaseTable(
ExeCliInterface *cliInterface,
StmtDDLCreateHbaseTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
// Verify that the user has privilege to create HBase tables - must be DB__ROOT
// or granted the DB__ROOTROLE or DB__HBASEROLE role
if (isAuthorizationEnabled() &&
!ComUser::isRootUserID() &&
!ComUser::currentUserHasRole(ROOT_ROLE_ID) &&
!ComUser::currentUserHasRole(HBASE_ROLE_ID))
{
*CmpCommon::diags() << DgSqlCode (-CAT_NOT_AUTHORIZED);
processReturn();
return;
}
ComObjectName tableName(createTableNode->getTableName());
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
// If table already exists, return
retcode = existsInHbase(objectNamePart, ehi);
if (retcode)
{
*CmpCommon::diags() << DgSqlCode(CAT_TABLE_ALREADY_EXISTS)
<< DgTableName(objectNamePart.data());
deallocEHI(ehi);
processReturn();
return;
}
std::vector<NAString> colFamVec;
for (Lng32 i = 0; i < createTableNode->csl()->entries(); i++)
{
const NAString * nas = (NAString*)(*createTableNode->csl())[i];
colFamVec.push_back(nas->data());
}
NAList<HbaseCreateOption*> hbaseCreateOptions(STMTHEAP);
NAString hco;
retcode = setupHbaseOptions(createTableNode->getHbaseOptionsClause(),
0, objectNamePart,
hbaseCreateOptions, hco);
if (retcode)
{
deallocEHI(ehi);
processReturn();
return;
}
HbaseStr hbaseTable;
hbaseTable.val = (char*)objectNamePart.data();
hbaseTable.len = objectNamePart.length();
if (createHbaseTable(ehi, &hbaseTable, colFamVec,
&hbaseCreateOptions) == -1)
{
deallocEHI(ehi);
processReturn();
return;
}
// Register the table
char query[(ComMAX_ANSI_IDENTIFIER_EXTERNAL_LEN) + 100];
snprintf(query, sizeof(query),
"register internal hbase table if not exists \"%s\"",
objectNamePart.data());
cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
}
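// Drops a native HBase table: checks authorization, unregisters the table and
// drops any external mapping table, removes the cached CELL and ROW format
// NATables, and finally drops the table from HBase.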
void CmpSeabaseDDL::dropNativeHbaseTable(
ExeCliInterface *cliInterface,
StmtDDLDropHbaseTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
// Verify that the user has privilege to drop HBase tables - must be DB__ROOT
// or granted the DB__ROOTROLE or DB__HBASEROLE role
if (isAuthorizationEnabled() &&
!ComUser::isRootUserID() &&
!ComUser::currentUserHasRole(ROOT_ROLE_ID) &&
!ComUser::currentUserHasRole(HBASE_ROLE_ID))
{
*CmpCommon::diags() << DgSqlCode (-CAT_NOT_AUTHORIZED);
processReturn();
return;
}
ComObjectName tableName(dropTableNode->getTableName());
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
// If table does not exist, return
retcode = existsInHbase(objectNamePart, ehi);
if (retcode == 0)
{
*CmpCommon::diags() << DgSqlCode(CAT_TABLE_DOES_NOT_EXIST_ERROR)
<< DgTableName(objectNamePart.data());
deallocEHI(ehi);
processReturn();
return;
}
// Load definitions into cache
BindWA bindWA(ActiveSchemaDB(),CmpCommon::context(),FALSE/*inDDL*/);
CorrName cnCell(objectNamePart,STMTHEAP, HBASE_CELL_SCHEMA, HBASE_SYSTEM_CATALOG);
NATable *naCellTable = bindWA.getNATableInternal(cnCell);
CorrName cnRow(objectNamePart,STMTHEAP, HBASE_ROW_SCHEMA, HBASE_SYSTEM_CATALOG);
NATable *naRowTable = bindWA.getNATableInternal(cnRow);
// unregister tables
char query[(ComMAX_ANSI_IDENTIFIER_EXTERNAL_LEN*4) + 100];
snprintf(query, sizeof(query),
"unregister hbase table %s", tableName.getObjectNamePart().getExternalName().data());
cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0 && cliRC != -CAT_REG_UNREG_OBJECTS && cliRC != -3251)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return;
}
// Drop external mapping table
//ComObjectName externalName(objectNamePart);
snprintf(query, sizeof(query),
"drop external table if exists %s ",
tableName.getObjectNamePart().getExternalName().data());
cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return;
}
// Remove cell and row tables from cache.
if (naCellTable)
{
ActiveSchemaDB()->getNATableDB()->removeNATable
(cnCell,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
}
if (naRowTable)
{
ActiveSchemaDB()->getNATableDB()->removeNATable
(cnRow,
ComQiScope::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT,
dropTableNode->ddlXns(), FALSE);
}
// Remove table from HBase
HbaseStr hbaseTable;
hbaseTable.val = (char*)objectNamePart.data();
hbaseTable.len = objectNamePart.length();
retcode = dropHbaseTable(ehi, &hbaseTable, FALSE, FALSE);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
}
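// Registers a native Hive/HBase table in Trafodion metadata by inserting an
// OBJECTS row (optionally flagged as an internal registration) and granting
// owner privileges through the privilege manager.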
short CmpSeabaseDDL::registerNativeTable
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
Int32 objOwnerId,
Int32 schemaOwnerId,
ExeCliInterface &cliInterface,
NABoolean isRegister,
NABoolean isInternal
)
{
Lng32 retcode = 0;
ComObjectType objType = COM_BASE_TABLE_OBJECT;
Int64 flags = 0;
if (isRegister && isInternal)
flags = MD_OBJECTS_INTERNAL_REGISTER;
Int64 objUID = -1;
retcode =
updateSeabaseMDObjectsTable
(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
objType,
NULL,
objOwnerId, schemaOwnerId,
flags, objUID);
if (retcode < 0)
return -1;
// Grant owner privileges
if (isAuthorizationEnabled())
{
NAString fullName (catalogNamePart);
fullName += ".";
fullName += schemaNamePart;
fullName += ".";
fullName += objectNamePart;
if (!insertPrivMgrInfo(objUID,
fullName,
objType,
objOwnerId,
schemaOwnerId,
ComUser::getCurrentUser()))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_GRANT_PRIVILEGES)
<< DgTableName(objectNamePart);
return -1;
}
}
return 0;
}
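// Unregisters a native table: revokes owner privileges, drops any histogram
// statistics stored for the object, and deletes its row from the OBJECTS
// metadata table.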
short CmpSeabaseDDL::unregisterNativeTable
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
ExeCliInterface &cliInterface,
ComObjectType objType
)
{
short retcode = 0;
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
comObjectTypeLit(objType));
// Revoke owner privileges
if (isAuthorizationEnabled())
{
NAString fullName (catalogNamePart);
fullName += ".";
fullName += schemaNamePart;
fullName += ".";
fullName += objectNamePart;
if (!deletePrivMgrInfo(fullName,
objUID,
objType))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_PRIVILEGE_NOT_REVOKED)
<< DgTableName(objectNamePart);
return -1;
}
}
// delete hist stats, if HIST tables exist
retcode = existsInSeabaseMDTable
(&cliInterface,
HIVE_STATS_CATALOG, HIVE_STATS_SCHEMA_NO_QUOTES, HBASE_HIST_NAME,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
return -1;
if (retcode == 1) // exists
{
if (dropSeabaseStats(&cliInterface,
HIVE_STATS_CATALOG,
HIVE_STATS_SCHEMA_NO_QUOTES,
objUID))
{
return -1;
}
}
// drop from metadata
retcode =
deleteFromSeabaseMDObjectsTable
(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
objType
);
if (retcode < 0)
return -1;
return 0;
}
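// Registers a Hive view in Trafodion metadata and grants owner privileges.
// If the cascade option is specified, the view text is parsed and bound so
// that all referenced objects are recorded in the VIEWS_USAGE metadata table.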
short CmpSeabaseDDL::registerHiveView
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
Int32 objOwnerId,
Int32 schemaOwnerId,
NATable *naTable,
ExeCliInterface &cliInterface,
NABoolean isInternal,
NABoolean cascade
)
{
Lng32 retcode = 0;
Int64 flags = 0;
if (isInternal)
flags = MD_OBJECTS_INTERNAL_REGISTER;
ComObjectType objType = COM_VIEW_OBJECT;
Int64 objUID = -1;
retcode =
updateSeabaseMDObjectsTable
(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
objType,
NULL,
objOwnerId, schemaOwnerId,
flags, objUID);
if (retcode < 0)
return -1;
// Grant owner privileges
if (isAuthorizationEnabled())
{
NAString fullName (catalogNamePart);
fullName += ".";
fullName += schemaNamePart;
fullName += ".";
fullName += objectNamePart;
if (!insertPrivMgrInfo(objUID,
fullName,
objType,
objOwnerId,
schemaOwnerId,
ComUser::getCurrentUser()))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_GRANT_PRIVILEGES)
<< DgTableName(objectNamePart);
return -1;
}
}
// if cascade option is specified, find out all objects that are part
// of this view. Register them in traf metadata and update view usage
// metadata table.
if (cascade)
{
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
// temporarily change the default schema to
// that of viewName.
// This will make sure that all unqualified objects in view
// text are expanded in this schema.
SchemaName s(schemaNamePart, catalogNamePart);
bindWA.setDefaultSchema(s);
Parser parser(bindWA.currentCmpContext());
ExprNode *viewTree = parser.parseDML(naTable->getViewText(),
naTable->getViewLen(),
naTable->getViewTextCharSet());
if (! viewTree)
{
return -1;
}
RelExpr *queryTree =
viewTree->castToStatementExpr()->getQueryExpression();
StmtDDLCreateView *createViewNode =
((DDLExpr *)(queryTree->getChild(0)))->
getDDLNode()->castToStmtDDLNode()->castToStmtDDLCreateView();
ExprNode * boundTree = createViewNode->bindNode(&bindWA);
if ((! boundTree) || (bindWA.errStatus()) ||
(CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)))
{
return -1;
}
if (updateViewUsage(createViewNode, objUID, &cliInterface))
{
return -1;
}
}
return 0;
}
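// Unregisters a Hive view: revokes owner privileges, drops any histogram
// statistics, deletes the view's metadata, and if the cascade option is
// specified, unregisters the objects referenced by the view and removes the
// corresponding VIEWS_USAGE rows.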
short CmpSeabaseDDL::unregisterHiveView
(
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
NATable *naTable,
ExeCliInterface &cliInterface,
NABoolean cascade
)
{
Lng32 retcode = 0;
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
comObjectTypeLit(COM_VIEW_OBJECT));
// Revoke owner privileges
if (isAuthorizationEnabled())
{
NAString fullName (catalogNamePart);
fullName += ".";
fullName += schemaNamePart;
fullName += ".";
fullName += objectNamePart;
if (!deletePrivMgrInfo(fullName,
objUID,
COM_VIEW_OBJECT))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_PRIVILEGE_NOT_REVOKED)
<< DgTableName(objectNamePart);
return -1;
}
}
// delete hist stats, if HIST tables exist
retcode = existsInSeabaseMDTable
(&cliInterface,
HIVE_STATS_CATALOG, HIVE_STATS_SCHEMA_NO_QUOTES, HBASE_HIST_NAME,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
return -1;
if (retcode == 1) // exists
{
if (dropSeabaseStats(&cliInterface,
HIVE_STATS_CATALOG,
HIVE_STATS_SCHEMA_NO_QUOTES,
objUID))
{
return -1;
}
}
// drop from metadata
retcode =
// deleteFromSeabaseMDObjectsTable
deleteFromSeabaseMDTable
(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
COM_VIEW_OBJECT
);
if (retcode < 0)
return -1;
// if cascade option is specified, find out all objects that are part
// of this view. Unregister them from traf metadata and update view usage
// metadata table.
if ((cascade) && (naTable))
{
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
// temporarily change the default schema to
// that of viewName.
// This will make sure that all unqualified objects in view
// text are expanded in this schema.
SchemaName s(schemaNamePart, catalogNamePart);
bindWA.setDefaultSchema(s);
Parser parser(bindWA.currentCmpContext());
ExprNode *viewTree = parser.parseDML(naTable->getViewText(),
naTable->getViewLen(),
naTable->getViewTextCharSet());
if (! viewTree)
{
return -1;
}
RelExpr *queryTree =
viewTree->castToStatementExpr()->getQueryExpression();
StmtDDLCreateView *createViewNode =
((DDLExpr *)(queryTree->getChild(0)))->
getDDLNode()->castToStmtDDLNode()->castToStmtDDLCreateView();
ExprNode * boundTree = createViewNode->bindNode(&bindWA);
if ((! boundTree) || (bindWA.errStatus()) ||
(CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)))
{
return -1;
}
if (unregisterHiveViewUsage(createViewNode, objUID, &cliInterface))
{
return -1;
}
}
return 0;
}
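// For each object used by the given view, unregisters the used Hive object
// (if still registered) and deletes the corresponding row from the
// VIEWS_USAGE metadata table.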
short CmpSeabaseDDL::unregisterHiveViewUsage(StmtDDLCreateView * createViewParseNode,
Int64 viewUID,
ExeCliInterface * cliInterface)
{
const ParViewUsages &vu = createViewParseNode->getViewUsages();
const ParTableUsageList &vtul = vu.getViewTableUsageList();
char query[1000];
for (CollIndex i = 0; i < vtul.entries(); i++)
{
ComObjectName usedObjName(vtul[i].getQualifiedNameObj()
.getQualifiedNameAsAnsiString(),
vtul[i].getAnsiNameSpace());
const NAString catalogNamePart = usedObjName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = usedObjName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = usedObjName.getObjectNamePartAsAnsiString(TRUE);
const NAString extUsedObjName = usedObjName.getExternalName(TRUE);
Int64 usedObjUID = -1;
CorrName cn(objectNamePart,STMTHEAP, schemaNamePart,catalogNamePart);
BindWA bindWA(ActiveSchemaDB(),CmpCommon::context(),FALSE/*inDDL*/);
NATable *naTable = bindWA.getNATableInternal(cn);
if ((naTable == NULL) ||
(NOT naTable->isHiveTable()) ||
(NOT naTable->isRegistered()))
{
SEABASEDDL_INTERNAL_ERROR("NATable pointer in unregisterHiveViewUsage");
return -1;
}
// unregister this hive object from traf metadata, if not already
str_sprintf(query, "unregister hive %s if exists %s.\"%s\".\"%s\" ",
(naTable->isView() ? "view" : "table"),
catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data());
Lng32 cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// save the current parserflags setting
ULng32 savedCliParserFlags = 0;
SQL_EXEC_GetParserFlagsForExSqlComp_Internal(savedCliParserFlags);
SQL_EXEC_SetParserFlagsForExSqlComp_Internal(INTERNAL_QUERY_FROM_EXEUTIL);
usedObjUID = naTable->objectUid().get_value();
str_sprintf(query, "delete from %s.\"%s\".%s where using_view_uid = %ld and used_object_uid = %ld",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_VIEWS_USAGE,
viewUID,
usedObjUID);
cliRC = cliInterface->executeImmediate(query);
SQL_EXEC_AssignParserFlagsForExSqlComp_Internal(savedCliParserFlags);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
} // for
return 0;
}
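// Handles REGISTER/UNREGISTER of native Hive and HBase objects (tables, views
// and shared schemas): validates the object and the user's authorization,
// dispatches to the register/unregister helper methods, and invalidates the
// cached NATable entries for the object.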
void CmpSeabaseDDL::regOrUnregNativeObject(
StmtDDLRegOrUnregObject * regOrUnregObject,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
char errReason[400];
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NAString catalogNamePart = regOrUnregObject->getObjNameAsQualifiedName().
getCatalogName();
NAString schemaNamePart = regOrUnregObject->getObjNameAsQualifiedName().
getSchemaName();
NAString objectNamePart = regOrUnregObject->getObjNameAsQualifiedName().
getObjectName();
ComObjectName tableName;
NAString tabName;
NAString extTableName;
NABoolean isHive = (catalogNamePart == HIVE_SYSTEM_CATALOG);
NABoolean isHBase = (catalogNamePart == HBASE_SYSTEM_CATALOG);
if (NOT (isHive || isHBase))
{
*CmpCommon::diags() << DgSqlCode(-3242) <<
DgString0("Register/Unregister statement must specify a hive or hbase object.");
processReturn();
return;
}
// make sure that underlying hive/hbase object exists
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(objectNamePart, STMTHEAP,
((isHBase && (schemaNamePart == HBASE_SYSTEM_SCHEMA))
? HBASE_CELL_SCHEMA : schemaNamePart),
catalogNamePart);
if (isHive && regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT)
cn.setSpecialType(ExtendedQualName::SCHEMA_TABLE);
NATable * naTable = bindWA.getNATable(cn);
if (((naTable == NULL) || (bindWA.errStatus())) &&
((regOrUnregObject->isRegister()) || // register
(NOT regOrUnregObject->cleanup()))) // unreg and not cleanup
{
CmpCommon::diags()->clear();
*CmpCommon::diags() << DgSqlCode(-3251)
<< (regOrUnregObject->isRegister() ? DgString0("REGISTER") :
DgString0("UNREGISTER"))
<< DgString1(NAString(" Reason: Specified object ") +
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT ?
schemaNamePart : objectNamePart) +
NAString(" does not exist."));
processReturn();
return;
}
// ignore errors for 'unregister cleanup'
CmpCommon::diags()->clear();
if (naTable)
{
if (regOrUnregObject->isRegister() &&
(naTable->isRegistered())) // already registered
{
if (NOT regOrUnregObject->existsOption())
{
str_sprintf(errReason, " Reason: %s has already been registered.",
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT ?
schemaNamePart.data() :
regOrUnregObject->getObjNameAsQualifiedName().
getQualifiedNameAsString().data()));
*CmpCommon::diags() << DgSqlCode(-3251)
<< DgString0("REGISTER")
<< DgString1(errReason);
}
processReturn();
return;
}
else if ((NOT regOrUnregObject->isRegister()) && // unregister
(NOT naTable->isRegistered())) // not registered
{
if (NOT regOrUnregObject->existsOption())
{
str_sprintf(errReason, " Reason: %s has not been registered.",
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT ?
schemaNamePart.data() :
regOrUnregObject->getObjNameAsQualifiedName().
getQualifiedNameAsString().data()));
*CmpCommon::diags() << DgSqlCode(-3251)
<< DgString0("UNREGISTER")
<< DgString1(errReason);
}
processReturn();
return;
}
}
// For native hive/hbase objects, the current user must be DB__ROOT or belong
// to one of the admin roles: DB__ROOTROLE, or DB__HIVEROLE/DB__HBASEROLE.
// In hive/hbase, you must be an admin; DB__ROOTROLE and DB__HIVEROLE/DB__HBASEROLE
// are the equivalent of an admin.
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) &&
!ComUser::isRootUserID() &&
!ComUser::currentUserHasRole(ROOT_ROLE_ID) &&
((isHive && !ComUser::currentUserHasRole(HIVE_ROLE_ID)) ||
(isHBase && !ComUser::currentUserHasRole(HBASE_ROLE_ID))))
{
*CmpCommon::diags() << DgSqlCode (-CAT_NOT_AUTHORIZED);
processReturn();
return;
}
Int32 objOwnerId = (isHive ? HIVE_ROLE_ID : HBASE_ROLE_ID);
Int32 schemaOwnerId = (isHive ? HIVE_ROLE_ID : HBASE_ROLE_ID);
Int64 objUID = -1;
Int64 flags = 0;
if ((regOrUnregObject->isRegister()) &&
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT))
{
retcode =
updateSeabaseMDObjectsTable
(&cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
"__SCHEMA__",
regOrUnregObject->objType(),
NULL,
objOwnerId, schemaOwnerId,
flags, objUID);
}
else if (regOrUnregObject->isRegister())
{
if (((regOrUnregObject->objType() == COM_BASE_TABLE_OBJECT) &&
(naTable && naTable->isView())) ||
((regOrUnregObject->objType() == COM_VIEW_OBJECT) &&
(naTable && (! naTable->isView()))))
{
// underlying object is a view but registered object type specified
// in the register statement is a table, or
// underlying object is a table but registered object type specified
// in the register statement is a view
str_sprintf(errReason, " Reason: Mismatch between specified(%s) and underlying(%s) type for %s.",
(regOrUnregObject->objType() == COM_BASE_TABLE_OBJECT ? "TABLE" : "VIEW"),
(naTable->isView() ? "VIEW" : "TABLE"),
regOrUnregObject->getObjNameAsQualifiedName().
getQualifiedNameAsString().data());
*CmpCommon::diags() << DgSqlCode(-3251)
<< DgString0("REGISTER")
<< DgString1(errReason);
processReturn();
return;
}
if (regOrUnregObject->objType() == COM_BASE_TABLE_OBJECT)
{
if (schemaNamePart == HBASE_SYSTEM_SCHEMA)
{
// register CELL and ROW formats of HBase table
retcode = registerNativeTable(
catalogNamePart, HBASE_CELL_SCHEMA, objectNamePart,
objOwnerId, schemaOwnerId,
cliInterface,
regOrUnregObject->isRegister(),
regOrUnregObject->isInternal());
retcode = registerNativeTable(
catalogNamePart, HBASE_ROW_SCHEMA, objectNamePart,
objOwnerId, schemaOwnerId,
cliInterface,
regOrUnregObject->isRegister(),
regOrUnregObject->isInternal());
}
else
{
retcode = registerNativeTable(
catalogNamePart, schemaNamePart, objectNamePart,
objOwnerId, schemaOwnerId,
cliInterface,
regOrUnregObject->isRegister(),
regOrUnregObject->isInternal());
}
}
else // COM_VIEW_OBJECT
{
retcode = registerHiveView(
catalogNamePart, schemaNamePart, objectNamePart,
objOwnerId, schemaOwnerId,
naTable,
cliInterface,
regOrUnregObject->isInternal(),
regOrUnregObject->cascade());
}
if (retcode < 0)
return;
}
else // unregister
{
if ((regOrUnregObject->objType() == COM_BASE_TABLE_OBJECT) ||
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT))
{
if (schemaNamePart == HBASE_SYSTEM_SCHEMA)
{
// unregister CELL and ROW formats of HBase table
retcode = unregisterNativeTable(
catalogNamePart, HBASE_CELL_SCHEMA, objectNamePart,
cliInterface);
retcode = unregisterNativeTable(
catalogNamePart, HBASE_ROW_SCHEMA, objectNamePart,
cliInterface);
}
else
{
retcode = unregisterNativeTable(
catalogNamePart, schemaNamePart, objectNamePart,
cliInterface,
(regOrUnregObject->objType() == COM_SHARED_SCHEMA_OBJECT ?
COM_SHARED_SCHEMA_OBJECT : COM_BASE_TABLE_OBJECT));
}
}
else // view
{
retcode = unregisterHiveView(
catalogNamePart, schemaNamePart, objectNamePart,
naTable,
cliInterface,
regOrUnregObject->cascade());
}
} // unregister
if (retcode < 0)
return;
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, regOrUnregObject->objType(),
FALSE, FALSE);
if (isHBase)
{
CorrName cn(objectNamePart, STMTHEAP,
HBASE_ROW_SCHEMA,
catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, regOrUnregObject->objType(),
FALSE, FALSE);
}
return;
}
/////////////////////////////////////////////////////////////////////////
// This method generates and returns tableInfo struct for internal special
// tables (like metadata, histograms). These tables have hardcoded definitions
// but need objectUID to be returned. ObjectUID is stored in metadata
// and is read from there.
// This is done only if we are not in bootstrap mode, for example, when initializing
// metadata. At that time, there is no metadata available so it cannot be read
// to return objectUID.
// A NULL tableInfo is returned if in bootstrap mode.
//
// RETURN: -1, if error. 0, if all ok.
//////////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::getSpecialTableInfo
(
NAMemory * heap,
const NAString &catName,
const NAString &schName,
const NAString &objName,
const NAString &extTableName,
const ComObjectType &objType,
ComTdbVirtTableTableInfo* &tableInfo)
{
Lng32 cliRC = 0;
tableInfo = NULL;
NABoolean switched = FALSE;
Int32 objectOwner = NA_UserIdDefault;
Int32 schemaOwner = NA_UserIdDefault;
Int64 objUID = 1; // dummy value
Int64 objectFlags = 0 ;
NABoolean createTableInfo = FALSE;
NABoolean isUninit = FALSE;
if (CmpCommon::context()->isUninitializedSeabase())
{
isUninit = TRUE;
createTableInfo = TRUE;
}
NABoolean getUID = TRUE;
if (isUninit)
getUID = FALSE;
else if (CmpCommon::context()->isMxcmp())
getUID = FALSE;
else if (CmpCommon::getDefault(TRAF_BOOTSTRAP_MD_MODE) == DF_ON)
getUID = FALSE;
if (getUID)
{
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return -1;
cliRC = cliInterface.holdAndSetCQD("traf_bootstrap_md_mode", "ON");
if (cliRC < 0)
{
goto label_error_return;
}
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(),
objType, objectOwner, schemaOwner,objectFlags);
cliRC = cliInterface.restoreCQD("traf_bootstrap_md_mode");
if (objUID <= 0)
goto label_error_return;
switchBackCompiler();
createTableInfo = TRUE;
}
if (createTableInfo)
{
tableInfo = new(heap) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = new(heap) char[extTableName.length() + 1];
strcpy((char*)tableInfo->tableName, (char*)extTableName.data());
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = objUID;
tableInfo->isAudited = 1;
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = objectFlags;
tableInfo->tablesFlags = 0;
tableInfo->rowFormat = COM_UNKNOWN_FORMAT_TYPE;
}
return 0;
label_error_return:
switchBackCompiler();
return -1;
}
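// Returns a TrafDesc for a metadata table built from its hardcoded
// definition, with the primary key represented as a unique constraint.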
TrafDesc * CmpSeabaseDDL::getSeabaseMDTableDesc(
const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType)
{
Lng32 cliRC = 0;
TrafDesc * tableDesc = NULL;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\"";
ComObjectName coName(catName, schNameL, objName);
NAString extTableName = coName.getExternalName(TRUE);
ComTdbVirtTableTableInfo * tableInfo = NULL;
Lng32 colInfoSize = 0;
const ComTdbVirtTableColumnInfo * colInfo = NULL;
Lng32 keyInfoSize = 0;
const ComTdbVirtTableKeyInfo * keyInfo = NULL;
Lng32 uniqueInfoSize = 0;
ComTdbVirtTableConstraintInfo * constrInfo = NULL;
Lng32 indexInfoSize = 0;
const ComTdbVirtTableIndexInfo * indexInfo = NULL;
if (NOT CmpSeabaseMDupgrade::getMDtableInfo(coName,
tableInfo,
colInfoSize, colInfo,
keyInfoSize, keyInfo,
indexInfoSize, indexInfo,
objType))
return NULL;
// Setup the primary key information as a unique constraint
uniqueInfoSize = 1;
constrInfo = new(STMTHEAP) ComTdbVirtTableConstraintInfo[uniqueInfoSize];
constrInfo->baseTableName = (char*)extTableName.data();
// The primary key constraint name is the name of the object appended
// with "_PK";
NAString constrName = extTableName;
constrName += "_PK";
constrInfo->constrName = (char*)constrName.data();
constrInfo->constrType = 3; // pkey_constr
constrInfo->colCount = keyInfoSize;
constrInfo->keyInfoArray = (ComTdbVirtTableKeyInfo *)keyInfo;
constrInfo->numRingConstr = 0;
constrInfo->ringConstrArray = NULL;
constrInfo->numRefdConstr = 0;
constrInfo->refdConstrArray = NULL;
constrInfo->checkConstrLen = 0;
constrInfo->checkConstrText = NULL;
tableDesc =
Generator::createVirtualTableDesc
((char*)extTableName.data(),
NULL, // let it decide what heap to use
colInfoSize,
(ComTdbVirtTableColumnInfo*)colInfo,
keyInfoSize,
(ComTdbVirtTableKeyInfo*)keyInfo,
uniqueInfoSize, constrInfo,
indexInfoSize,
(ComTdbVirtTableIndexInfo *)indexInfo,
0, NULL,
tableInfo);
return tableDesc;
}
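// Returns a TrafDesc for the histogram and histogram-intervals tables,
// built from their hardcoded DDL along with the primary key constraint
// and special table info.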
TrafDesc * CmpSeabaseDDL::getSeabaseHistTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
Lng32 cliRC = 0;
TrafDesc * tableDesc = NULL;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\""; // transforms internal format schName to external format
ComObjectName coName(catName, schNameL, objName);
NAString extTableName = coName.getExternalName(TRUE);
Lng32 numCols = 0;
ComTdbVirtTableColumnInfo * colInfo = NULL;
Lng32 numKeys;
ComTdbVirtTableKeyInfo * keyInfo;
ComTdbVirtTableIndexInfo * indexInfo;
Parser parser(CmpCommon::context());
ComTdbVirtTableConstraintInfo * constrInfo =
new(STMTHEAP) ComTdbVirtTableConstraintInfo[1];
NAString constrName;
if (objName == HBASE_HIST_NAME)
{
if (processDDLandCreateDescs(parser,
seabaseHistogramsDDL, sizeof(seabaseHistogramsDDL),
FALSE,
0, NULL, 0, NULL,
numCols, colInfo,
numKeys, keyInfo,
indexInfo))
return NULL;
constrName = HBASE_HIST_PK;
}
else if (objName == HBASE_HISTINT_NAME)
{
if (processDDLandCreateDescs(parser,
seabaseHistogramIntervalsDDL, sizeof(seabaseHistogramIntervalsDDL),
FALSE,
0, NULL, 0, NULL,
numCols, colInfo,
numKeys, keyInfo,
indexInfo))
return NULL;
constrName = HBASE_HISTINT_PK;
}
else
return NULL;
ComObjectName coConstrName(catName, schNameL, constrName);
NAString * extConstrName =
new(STMTHEAP) NAString(coConstrName.getExternalName(TRUE));
constrInfo->baseTableName = (char*)extTableName.data();
constrInfo->constrName = (char*)extConstrName->data();
constrInfo->constrType = 3; // pkey_constr
constrInfo->colCount = numKeys;
constrInfo->keyInfoArray = keyInfo;
constrInfo->numRingConstr = 0;
constrInfo->ringConstrArray = NULL;
constrInfo->numRefdConstr = 0;
constrInfo->refdConstrArray = NULL;
constrInfo->checkConstrLen = 0;
constrInfo->checkConstrText = NULL;
ComTdbVirtTableTableInfo * tableInfo = NULL;
if (getSpecialTableInfo(STMTHEAP, catName, schName, objName,
extTableName, COM_BASE_TABLE_OBJECT, tableInfo))
return NULL;
tableDesc =
Generator::createVirtualTableDesc
((char*)extTableName.data(),
NULL, // let it decide what heap to use
numCols,
colInfo,
numKeys,
keyInfo,
1, constrInfo,
0, NULL,
0, NULL,
tableInfo);
return tableDesc;
}
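// Reads column metadata for the given object UID from the COLUMNS metadata
// table and fills an array of ComTdbVirtTableColumnInfo. Also reports whether
// the table is salted and the position of an identity column, if any.
// Returns the number of columns, or -1 on error.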
Lng32 CmpSeabaseDDL::getSeabaseColumnInfo(ExeCliInterface *cliInterface,
Int64 objUID,
const NAString &catName,
const NAString &schName,
const NAString &objName,
char *direction,
NABoolean *isTableSalted,
Lng32 *identityColPos,
Lng32 *numCols,
ComTdbVirtTableColumnInfo **outColInfoArray)
{
char query[3000];
Lng32 cliRC;
if (identityColPos)
*identityColPos = -1;
Queue * tableColInfo = NULL;
str_sprintf(query, "select column_name, column_number, column_class, "
"fs_data_type, column_size, column_precision, column_scale, "
"datetime_start_field, datetime_end_field, trim(is_upshifted), column_flags, "
"nullable, trim(character_set), default_class, default_value, "
"trim(column_heading), hbase_col_family, hbase_col_qualifier, direction, "
"is_optional, flags from %s.\"%s\".%s "
"where object_uid = %ld and direction in (%s)"
"order by 2 for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
direction);
cliRC = cliInterface->fetchAllRows(tableColInfo, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
*numCols = tableColInfo->numEntries();
ComTdbVirtTableColumnInfo *colInfoArray =
new(STMTHEAP) ComTdbVirtTableColumnInfo[*numCols];
NABoolean tableIsSalted = FALSE;
tableColInfo->position();
for (Lng32 idx = 0; idx < *numCols; idx++)
{
OutputInfo * oi = (OutputInfo*)tableColInfo->getNext();
ComTdbVirtTableColumnInfo &colInfo = colInfoArray[idx];
char * data = NULL;
Lng32 len = 0;
// get the column name
oi->get(0, data, len);
colInfo.colName = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.colName, data);
colInfo.colNumber = *(Lng32*)oi->get(1);
char *colClass = (char*)oi->get(2);
if (strcmp(colClass,COM_USER_COLUMN_LIT) == 0)
colInfo.columnClass = COM_USER_COLUMN;
else if (strcmp(colClass,COM_SYSTEM_COLUMN_LIT) == 0)
colInfo.columnClass = COM_SYSTEM_COLUMN;
else if (strcmp(colClass,COM_ADDED_USER_COLUMN_LIT) == 0)
colInfo.columnClass = COM_ADDED_USER_COLUMN;
else if (strcmp(colClass,COM_ALTERED_USER_COLUMN_LIT) == 0)
colInfo.columnClass = COM_ALTERED_USER_COLUMN;
else if (strcmp(colClass,COM_MV_SYSTEM_ADDED_COLUMN_LIT) == 0)
colInfo.columnClass = COM_MV_SYSTEM_ADDED_COLUMN;
else
CMPASSERT(0);
colInfo.datatype = *(Lng32*)oi->get(3);
colInfo.length = *(Lng32*)oi->get(4);
colInfo.precision = *(Lng32*)oi->get(5);
colInfo.scale = *(Lng32*)oi->get(6);
colInfo.dtStart = *(Lng32 *)oi->get(7);
colInfo.dtEnd = *(Lng32 *)oi->get(8);
if (strcmp((char*)oi->get(9), "Y") == 0)
colInfo.upshifted = -1;
else
colInfo.upshifted = 0;
colInfo.hbaseColFlags = *(ULng32 *)oi->get(10);
colInfo.nullable = *(Lng32 *)oi->get(11);
colInfo.charset =
(SQLCHARSET_CODE)CharInfo::getCharSetEnum((char*)oi->get(12));
colInfo.defaultClass = (ComColumnDefaultClass)*(Lng32 *)oi->get(13);
NAString tempDefVal;
data = NULL;
if (colInfo.defaultClass == COM_USER_DEFINED_DEFAULT ||
colInfo.defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT ||
colInfo.defaultClass == COM_ALWAYS_DEFAULT_COMPUTED_COLUMN_DEFAULT)
{
oi->get(14, data, len);
if (colInfo.defaultClass != COM_USER_DEFINED_DEFAULT)
{
// get computed column definition from text table, but note
// that for older tables the definition may be stored in
// COLUMNS.DEFAULT_VALUE instead (that's returned in "data")
cliRC = getTextFromMD(cliInterface,
objUID,
COM_COMPUTED_COL_TEXT,
colInfo.colNumber,
tempDefVal);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
if (strcmp(colInfo.colName,
ElemDDLSaltOptionsClause::getSaltSysColName()) == 0)
tableIsSalted = TRUE;
}
}
else if (colInfo.defaultClass == COM_FUNCTION_DEFINED_DEFAULT)
{
oi->get(14, data, len);
tempDefVal = data ;
}
else if (colInfo.defaultClass == COM_NULL_DEFAULT)
{
tempDefVal = "NULL";
}
else if (colInfo.defaultClass == COM_USER_FUNCTION_DEFAULT)
{
tempDefVal = "USER";
}
else if (colInfo.defaultClass == COM_CURRENT_DEFAULT)
{
tempDefVal = "CURRENT_TIMESTAMP";
}
else if (colInfo.defaultClass == COM_CURRENT_UT_DEFAULT)
{
tempDefVal = "UNIX_TIMESTAMP()";
}
else if (colInfo.defaultClass == COM_UUID_DEFAULT)
{
tempDefVal = "UUID()";
}
else if ((colInfo.defaultClass == COM_IDENTITY_GENERATED_BY_DEFAULT) ||
(colInfo.defaultClass == COM_IDENTITY_GENERATED_ALWAYS))
{
NAString userFunc("SEQNUM(");
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catName, schName, objName, colInfo.colName,
seqName);
NAString fullyQSeq = catName + "." + schName + "." + "\"" + seqName + "\"";
tempDefVal = userFunc + fullyQSeq + ")";
if (identityColPos)
*identityColPos = idx;
}
if (! tempDefVal.isNull())
{
data = (char*)tempDefVal.data();
len = tempDefVal.length();
}
if (colInfo.defaultClass != COM_NO_DEFAULT)
{
colInfo.defVal = new(STMTHEAP) char[len + 2];
str_cpy_all((char*)colInfo.defVal, data, len);
char * c = (char*)colInfo.defVal;
c[len] = 0;
c[len+1] = 0;
}
else
colInfo.defVal = NULL;
oi->get(15, data, len);
if (len > 0)
{
colInfo.colHeading = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.colHeading, data);
}
else
colInfo.colHeading = NULL;
oi->get(16, data, len);
colInfo.hbaseColFam = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.hbaseColFam, data);
oi->get(17, data, len);
colInfo.hbaseColQual = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.hbaseColQual, data);
strcpy(colInfo.paramDirection, (char*)oi->get(18));
if (*((char*)oi->get(19)) == 'Y')
colInfo.isOptional = 1;
else
colInfo.isOptional = 0;
colInfo.colFlags = *(Int64 *)oi->get(20);
// temporary code, until we have updated flags to have the salt
// flag set for all tables, even those created before end of November
// 2014, when the flag was added during Trafodion R1.0 development
if (colInfo.defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT &&
strcmp(colInfo.colName,
ElemDDLSaltOptionsClause::getSaltSysColName()) == 0)
colInfo.colFlags |= SEABASE_COLUMN_IS_SALT;
}
if (isTableSalted != NULL)
*isTableSalted = tableIsSalted;
*outColInfoArray = colInfoArray;
return *numCols;
}
ComTdbVirtTableSequenceInfo * CmpSeabaseDDL::getSeabaseSequenceInfo(
const NAString &catName,
const NAString &schName,
const NAString &seqName,
NAString &extSeqName,
Int32 & objectOwner,
Int32 & schemaOwner,
Int64 & seqUID)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\"";
NAString seqNameL = "\"";
seqNameL += seqName;
seqNameL += "\"";
ComObjectName coName(catName, schNameL, seqNameL);
extSeqName = coName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
objectOwner = NA_UserIdDefault;
seqUID = -1;
schemaOwner = NA_UserIdDefault;
Int64 objectFlags = 0 ;
seqUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), seqName.data(),
COM_SEQUENCE_GENERATOR_OBJECT,
objectOwner,schemaOwner,objectFlags,TRUE/*report error*/);
if (seqUID == -1 || objectOwner == 0)
{
      // There may not be an error in the diags area; if not, add one.
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and owners for get sequence command");
return NULL;
}
char buf[4000];
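  // Read the sequence generator attributes from the SEQ_GEN metadata table.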
str_sprintf(buf, "select fs_data_type, start_value, increment, max_value, min_value, cycle_option, cache_size, next_value, seq_type, redef_ts from %s.\"%s\".%s where seq_uid = %ld",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_SEQ_GEN,
seqUID);
Queue * seqQueue = NULL;
cliRC = cliInterface.fetchAllRows(seqQueue, buf, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
if ((seqQueue->numEntries() == 0) ||
(seqQueue->numEntries() > 1))
{
*CmpCommon::diags() << DgSqlCode(-4082)
<< DgTableName(extSeqName);
return NULL;
}
ComTdbVirtTableSequenceInfo *seqInfo =
new (STMTHEAP) ComTdbVirtTableSequenceInfo();
seqQueue->position();
OutputInfo * vi = (OutputInfo*)seqQueue->getNext();
seqInfo->datatype = *(Lng32*)vi->get(0);
seqInfo->startValue = *(Int64*)vi->get(1);
seqInfo->increment = *(Int64*)vi->get(2);
seqInfo->maxValue = *(Int64*)vi->get(3);
seqInfo->minValue = *(Int64*)vi->get(4);
seqInfo->cycleOption = (memcmp(vi->get(5), COM_YES_LIT, 1) == 0 ? 1 : 0);
seqInfo->cache = *(Int64*)vi->get(6);
seqInfo->nextValue = *(Int64*)vi->get(7);
seqInfo->seqType = (memcmp(vi->get(8), "E", 1) == 0 ? COM_EXTERNAL_SG : COM_INTERNAL_SG);
seqInfo->seqUID = seqUID;
seqInfo->redefTime = *(Int64*)vi->get(9);
return seqInfo;
}
// ****************************************************************************
// Method: getSeabasePrivInfo
//
// This method retrieves the list of privilege descriptors for each user that
// has been granted an object or column level privilege on the object.
// ****************************************************************************
ComTdbVirtTablePrivInfo * CmpSeabaseDDL::getSeabasePrivInfo(
const Int64 objUID,
const ComObjectType objType)
{
if (!isAuthorizationEnabled())
return NULL;
// Prepare to call privilege manager
NAString MDLoc;
CONCAT_CATSCH(MDLoc, getSystemCatalog(), SEABASE_MD_SCHEMA);
NAString privMgrMDLoc;
CONCAT_CATSCH(privMgrMDLoc, getSystemCatalog(), SEABASE_PRIVMGR_SCHEMA);
// Summarize privileges for object
PrivStatus privStatus = STATUS_GOOD;
std::vector<PrivMgrDesc> privDescs;
PrivMgrCommands command(std::string(MDLoc.data()),
std::string(privMgrMDLoc.data()),
CmpCommon::diags());
if (command.getPrivileges(objUID, objType, privDescs) != STATUS_GOOD)
{
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_RETRIEVE_PRIVS);
return NULL;
}
ComTdbVirtTablePrivInfo *privInfo = new (STMTHEAP) ComTdbVirtTablePrivInfo();
// PrivMgrDesc operator= is a deep copy
privInfo->privmgr_desc_list = new (STMTHEAP) NAList<PrivMgrDesc>(STMTHEAP);
for (size_t i = 0; i < privDescs.size(); i++)
privInfo->privmgr_desc_list->insert(privDescs[i]);
return privInfo;
}
TrafDesc * CmpSeabaseDDL::getSeabaseLibraryDesc(
const NAString &catName,
const NAString &schName,
const NAString &libraryName)
{
TrafDesc * tableDesc = NULL;
NAString extLibName;
Int32 objectOwner = 0;
Int32 schemaOwner = 0;
Int64 objectFlags = 0 ;
char query[4000];
char buf[4000];
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
Int64 libUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(),
libraryName.data(),
COM_LIBRARY_OBJECT,
objectOwner, schemaOwner,objectFlags);
if (libUID == -1)
{
switchBackCompiler();
return NULL;
}
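  // Read the library file name and version from the LIBRARIES metadata table.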
str_sprintf(buf, "SELECT library_filename, version "
"FROM %s.\"%s\".%s "
"WHERE library_uid = %ld "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_LIBRARIES,libUID);
Int32 cliRC = cliInterface.fetchRowsPrologue(buf, TRUE/*no exec*/);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
switchBackCompiler();
return NULL;
}
cliRC = cliInterface.clearExecFetchClose(NULL, 0);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
switchBackCompiler();
return NULL;
}
if (cliRC == 100) // did not find the row
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(libraryName);
switchBackCompiler();
return NULL;
}
switchBackCompiler();
char * ptr = NULL;
Lng32 len = 0;
ComTdbVirtTableLibraryInfo *libraryInfo = new (STMTHEAP) ComTdbVirtTableLibraryInfo();
if (libraryInfo == NULL)
return NULL;
libraryInfo->library_name = libraryName.data();
cliInterface.getPtrAndLen(1, ptr, len);
libraryInfo->library_filename = new (STMTHEAP) char[len + 1];
str_cpy_and_null((char *)libraryInfo->library_filename, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(2, ptr, len);
libraryInfo->library_version = *(Int32 *)ptr;
libraryInfo->object_owner_id = objectOwner;
libraryInfo->schema_owner_id = schemaOwner;
libraryInfo->library_UID = libUID;
TrafDesc *library_desc = Generator::createVirtualLibraryDesc(
libraryName.data(),
libraryInfo, NULL);
processReturn();
return library_desc;
}
TrafDesc * CmpSeabaseDDL::getSeabaseSequenceDesc(const NAString &catName,
const NAString &schName,
const NAString &seqName)
{
TrafDesc * tableDesc = NULL;
NAString extSeqName;
Int32 objectOwner = 0;
Int32 schemaOwner = 0;
Int64 seqUID = -1;
ComTdbVirtTableSequenceInfo * seqInfo =
getSeabaseSequenceInfo(catName, schName, seqName, extSeqName,
objectOwner, schemaOwner, seqUID);
if (! seqInfo)
{
return NULL;
}
ComTdbVirtTablePrivInfo * privInfo =
getSeabasePrivInfo(seqUID, COM_SEQUENCE_GENERATOR_OBJECT);
ComTdbVirtTableTableInfo * tableInfo =
new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = extSeqName.data();
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = seqUID;
tableInfo->isAudited = 0;
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = 0;
tableInfo->tablesFlags = 0;
tableDesc =
Generator::createVirtualTableDesc
((char*)extSeqName.data(),
NULL, // let it decide what heap to use
0, NULL, // colInfo
0, NULL, // keyInfo
0, NULL,
0, NULL, //indexInfo
0, NULL, // viewInfo
tableInfo,
seqInfo,
NULL, NULL, // endKeyArray, snapshotName
FALSE, NULL, FALSE, // genPackedDesc, packedDescLen, isUserTable
privInfo);
return tableDesc;
}
short CmpSeabaseDDL::genHbaseRegionDescs(TrafDesc * desc,
const NAString &catName,
const NAString &schName,
const NAString &objName)
{
if (! desc)
return -1;
ExpHbaseInterface* ehi = allocEHI();
if (ehi == NULL)
return -1;
NAString extNameForHbase;
if ((catName != HBASE_SYSTEM_CATALOG) ||
((schName != HBASE_ROW_SCHEMA) && (schName != HBASE_CELL_SCHEMA)))
extNameForHbase = genHBaseObjName(catName, schName, objName);
else
// for HBASE._ROW_.objName or HBASE._CELL_.objName, just pass objName
extNameForHbase = objName;
NAArray<HbaseStr>* endKeyArray =
ehi->getRegionEndKeys(extNameForHbase);
TrafDesc * regionKeyDesc =
Generator::assembleDescs(endKeyArray, heap_);
deallocEHI(ehi);
TrafTableDesc* tDesc = desc->tableDesc();
if (tDesc)
tDesc->hbase_regionkey_desc = regionKeyDesc;
else {
TrafIndexesDesc* iDesc = desc->indexesDesc();
if (iDesc)
iDesc->hbase_regionkey_desc = regionKeyDesc;
else
return -1;
}
return 0;
}
TrafDesc * CmpSeabaseDDL::getSeabaseUserTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType,
NABoolean includeInvalidDefs,
Int32 ctlFlags,
Int32 &packedDescLen)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char query[4000];
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
TrafDesc * tableDesc = NULL;
Int32 objectOwner = 0 ;
Int32 schemaOwner = 0 ;
Int64 objUID = -1 ;
Int64 objectFlags = 0 ;
//
// For performance reasons, whenever possible, we want to issue only one
// "select" to the OBJECTS metadata table to determine both the existence
// of the specified table and the objUID for the table. Since it is more
// likely that a user query refers to tables (directly or indirectly) that
// are already in existence, this optimization can save the cost of the
// existence check for all such user objects. In the less likely case that
// an object does not exist we must drop back and re-issue the metadata
// query for the existence check in order to ensure we get the proper error
// reported.
//
NABoolean checkForValidDef = TRUE;
if ((includeInvalidDefs) ||
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) ||
(objType == COM_INDEX_OBJECT))
checkForValidDef = FALSE;
if ( objType ) // Must have objType
{
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(),
objType, objectOwner, schemaOwner,objectFlags, FALSE /*no error now */,
checkForValidDef);
}
// If we didn't call getObjectInfo() above OR if it gave an error, then:
if ( objUID < 0 )
{
cliRC = existsInSeabaseMDTable(&cliInterface,
catName.data(), schName.data(), objName.data(),
COM_UNKNOWN_OBJECT,
checkForValidDef,
TRUE, TRUE);
if (cliRC < 0)
{
processReturn();
return NULL;
}
if (cliRC == 0) // doesn't exist
{
processReturn();
return NULL;
}
}
if (objUID < 0)
{
if (objType != COM_BASE_TABLE_OBJECT)
{
processReturn();
return NULL;
}
else
{
          // Object type passed in was for a table. Could not find it, but
          // this could be a view. Look for that.
CmpCommon::diags()->clear();
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(), COM_VIEW_OBJECT,
objectOwner,schemaOwner,objectFlags);
if (objUID < 0)
{
processReturn();
return NULL;
}
}
}
if ((ctlFlags & READ_OBJECT_DESC) && // read stored descriptor
((objectFlags & MD_OBJECTS_STORED_DESC) != 0) && // stored desc available
((objectFlags & MD_OBJECTS_DISABLE_STORED_DESC) == 0)) // not disabled
{
TrafDesc * desc = NULL;
// if good stored desc was retrieved, return it.
// Otherwise, continue and generate descriptor the old fashioned way.
if (! checkAndGetStoredObjectDesc(&cliInterface, objUID, &desc))
{
CmpCommon::diags()->clear();
return desc;
}
// clear diags and continue
CmpCommon::diags()->clear();
}
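  // Read table-level attributes (audit flag, salt partitions, row format,
  // flags) from the TABLES metadata table.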
str_sprintf(query, "select is_audited, num_salt_partns, row_format, flags from %s.\"%s\".%s where table_uid = %ld for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLES,
objUID);
Queue * tableAttrQueue = NULL;
cliRC = cliInterface.fetchAllRows(tableAttrQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
Int64 tablesFlags = 0;
NABoolean isAudited = TRUE;
Lng32 numSaltPartns = 0;
NABoolean alignedFormat = FALSE;
NABoolean hbaseStrDataFormat = FALSE;
NAString * hbaseCreateOptions = new(STMTHEAP) NAString();
NAString colFamStr;
if (cliRC == 0) // read some rows
{
if (tableAttrQueue->entries() != 1) // only one row should be returned
{
processReturn();
return NULL;
}
tableAttrQueue->position();
OutputInfo * vi = (OutputInfo*)tableAttrQueue->getNext();
char * audit = vi->get(0);
isAudited = (memcmp(audit, COM_YES_LIT, 1) == 0);
numSaltPartns = *(Lng32*)vi->get(1);
char * format = vi->get(2);
alignedFormat = (memcmp(format, COM_ALIGNED_FORMAT_LIT, 2) == 0);
hbaseStrDataFormat = (memcmp(format, COM_HBASE_STR_FORMAT_LIT, 2) == 0);
tablesFlags = *(Int64*)vi->get(3);
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_OPTIONS_TEXT, 0,
*hbaseCreateOptions))
{
processReturn();
return NULL;
}
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_COL_FAMILY_TEXT, 0,
colFamStr))
{
processReturn();
return NULL;
}
}
Lng32 numCols;
ComTdbVirtTableColumnInfo * colInfoArray;
NABoolean tableIsSalted = FALSE;
char direction[20];
str_sprintf(direction, "'%s'", COM_UNKNOWN_PARAM_DIRECTION_LIT);
Lng32 identityColPos = -1;
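  // Retrieve the column definitions for this object.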
if (getSeabaseColumnInfo(&cliInterface,
objUID,
catName, schName, objName,
(char *)direction,
&tableIsSalted,
&identityColPos,
&numCols,
&colInfoArray) <= 0)
{
processReturn();
return NULL;
}
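  // Read the key columns for this object (or index) from the KEYS
  // metadata table.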
if (objType == COM_INDEX_OBJECT)
{
str_sprintf(query, "select k.column_name, c.column_number, k.keyseq_number, ordering, cast(0 as int not null) from %s.\"%s\".%s k, %s.\"%s\".%s c where k.column_name = c.column_name and k.object_uid = c.object_uid and k.object_uid = %ld and k.nonkeycol = 0 for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID);
}
else
{
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering, cast(0 as int not null) from %s.\"%s\".%s where object_uid = %ld and nonkeycol = 0 for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
objUID);
}
Queue * tableKeyInfo = NULL;
cliRC = cliInterface.fetchAllRows(tableKeyInfo, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
if (tableKeyInfo->numEntries() > 0)
{
keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[tableKeyInfo->numEntries()];
}
tableKeyInfo->position();
for (int idx = 0; idx < tableKeyInfo->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)tableKeyInfo->getNext();
populateKeyInfo(keyInfoArray[idx], vi);
}
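  // Read metadata for all indexes defined on this table, joined with their
  // OBJECTS and TABLES rows.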
str_sprintf(query, "select O.catalog_name, O.schema_name, O.object_name, I.keytag, I.is_unique, I.is_explicit, I.key_colcount, I.nonkey_colcount, T.num_salt_partns, T.row_format, I.index_uid from %s.\"%s\".%s I, %s.\"%s\".%s O , %s.\"%s\".%s T where I.base_table_uid = %ld and I.index_uid = O.object_uid %s and I.index_uid = T.table_uid for read committed access order by 1,2,3",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLES,
objUID,
(includeInvalidDefs ? " " : " and O.valid_def = 'Y' "));
  // Turn off CQDs MERGE_JOINS and HASH_JOINS to avoid a full table scan of
  // the SEABASE_OBJECTS table. A full table scan of SEABASE_OBJECTS causes
  // simultaneous DDL operations to run into conflict.
  // Make sure to restore the CQDs after this query, including on error paths.
cliInterface.holdAndSetCQD("MERGE_JOINS", "OFF");
cliInterface.holdAndSetCQD("HASH_JOINS", "OFF");
Queue * indexInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(indexInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
}
//restore CQDs.
cliInterface.restoreCQD("MERGE_JOINS");
cliInterface.restoreCQD("HASH_JOINS");
if (cliRC < 0)
return NULL;
ComTdbVirtTableIndexInfo * indexInfoArray = NULL;
if (indexInfoQueue->numEntries() > 0)
{
indexInfoArray =
new(STMTHEAP) ComTdbVirtTableIndexInfo[indexInfoQueue->numEntries()];
}
NAString qCatName = "\"";
qCatName += catName;
qCatName += "\"";
NAString qSchName = "\"";
qSchName += schName;
qSchName += "\"";
NAString qObjName = "\"";
qObjName += objName;
qObjName += "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString * extTableName =
new(STMTHEAP) NAString(coName.getExternalName(TRUE));
const NAString extNameForHbase = catName + "." + schName + "." + objName;
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
char * idxCatName = (char*)vi->get(0);
char * idxSchName = (char*)vi->get(1);
char * idxObjName = (char*)vi->get(2);
Lng32 keyTag = *(Lng32*)vi->get(3);
Lng32 isUnique = *(Lng32*)vi->get(4);
Lng32 isExplicit = *(Lng32*)vi->get(5);
Lng32 keyColCount = *(Lng32*)vi->get(6);
Lng32 nonKeyColCount = *(Lng32*)vi->get(7);
Lng32 idxNumSaltPartns = *(Lng32*)vi->get(8);
char * format = vi->get(9);
Int64 indexUID = *(Int64*)vi->get(10);
ComRowFormat idxRowFormat;
if (memcmp(format, COM_ALIGNED_FORMAT_LIT, 2) == 0)
idxRowFormat = COM_ALIGNED_FORMAT_TYPE;
else
if (memcmp(format, COM_HBASE_FORMAT_LIT, 2) == 0)
idxRowFormat = COM_HBASE_FORMAT_TYPE;
else
idxRowFormat = COM_UNKNOWN_FORMAT_TYPE;
Int64 idxUID = getObjectUID(&cliInterface,
idxCatName, idxSchName, idxObjName,
COM_INDEX_OBJECT_LIT);
if (idxUID < 0)
{
processReturn();
return NULL;
}
NAString * idxHbaseCreateOptions = new(STMTHEAP) NAString();
if (getTextFromMD(&cliInterface, idxUID, COM_HBASE_OPTIONS_TEXT, 0,
*idxHbaseCreateOptions))
{
processReturn();
return NULL;
}
indexInfoArray[idx].baseTableName = (char*)extTableName->data();
NAString qIdxCatName = "\"";
qIdxCatName += idxCatName;
qIdxCatName += "\"";
NAString qIdxSchName = "\"";
qIdxSchName += idxSchName;
qIdxSchName += "\"";
NAString qIdxObjName = "\"";
qIdxObjName += idxObjName;
qIdxObjName += "\"";
ComObjectName coIdxName(qIdxCatName, qIdxSchName, qIdxObjName);
NAString * extIndexName =
new(STMTHEAP) NAString(coIdxName.getExternalName(TRUE));
indexInfoArray[idx].indexName = (char*)extIndexName->data();
indexInfoArray[idx].indexUID = indexUID;
indexInfoArray[idx].keytag = keyTag;
indexInfoArray[idx].isUnique = isUnique;
indexInfoArray[idx].isExplicit = isExplicit;
indexInfoArray[idx].keyColCount = keyColCount;
indexInfoArray[idx].nonKeyColCount = nonKeyColCount;
indexInfoArray[idx].hbaseCreateOptions =
(idxHbaseCreateOptions->isNull() ? NULL : idxHbaseCreateOptions->data());
indexInfoArray[idx].numSaltPartns = idxNumSaltPartns;
indexInfoArray[idx].rowFormat = idxRowFormat;
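      // Read the key and non-key (included) columns of this index from the
      // KEYS metadata table.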
Queue * keyInfoQueue = NULL;
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering, nonkeycol from %s.\"%s\".%s where object_uid = %ld for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
idxUID);
cliRC = cliInterface.initializeInfoList(keyInfoQueue, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
cliRC = cliInterface.fetchAllRows(keyInfoQueue, query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
if (keyInfoQueue->numEntries() == 0)
{
*CmpCommon::diags() << DgSqlCode(-4400);
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[keyColCount];
ComTdbVirtTableKeyInfo * nonKeyInfoArray = NULL;
if (nonKeyColCount > 0)
{
nonKeyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[nonKeyColCount];
}
keyInfoQueue->position();
Lng32 jk = 0;
Lng32 jnk = 0;
for (Lng32 j = 0; j < keyInfoQueue->numEntries(); j++)
{
OutputInfo * vi = (OutputInfo*)keyInfoQueue->getNext();
Lng32 nonKeyCol = *(Lng32*)vi->get(4);
if (nonKeyCol == 0)
{
populateKeyInfo(keyInfoArray[jk], vi, TRUE);
jk++;
}
else
{
if (nonKeyInfoArray)
{
populateKeyInfo(nonKeyInfoArray[jnk], vi, TRUE);
jnk++;
}
}
}
indexInfoArray[idx].keyInfoArray = keyInfoArray;
indexInfoArray[idx].nonKeyInfoArray = nonKeyInfoArray;
} // for
// get constraint info
str_sprintf(query, "select O.object_name, C.constraint_type, C.col_count, C.constraint_uid, C.enforced, C.flags from %s.\"%s\".%s O, %s.\"%s\".%s C where O.catalog_name = '%s' and O.schema_name = '%s' and C.table_uid = %ld and O.object_uid = C.constraint_uid order by 1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
catName.data(), schName.data(),
objUID);
Queue * constrInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(constrInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableConstraintInfo * constrInfoArray = NULL;
if (constrInfoQueue->numEntries() > 0)
{
constrInfoArray =
new(STMTHEAP) ComTdbVirtTableConstraintInfo[constrInfoQueue->numEntries()];
}
NAString tableCatName = "\"";
tableCatName += catName;
tableCatName += "\"";
NAString tableSchName = "\"";
tableSchName += schName;
tableSchName += "\"";
NAString tableObjName = "\"";
tableObjName += objName;
tableObjName += "\"";
ComObjectName coTableName(tableCatName, tableSchName, tableObjName);
extTableName =
new(STMTHEAP) NAString(coTableName.getExternalName(TRUE));
NABoolean pkeyNotSerialized = FALSE;
constrInfoQueue->position();
for (int idx = 0; idx < constrInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)constrInfoQueue->getNext();
char * constrName = (char*)vi->get(0);
char * constrType = (char*)vi->get(1);
Lng32 colCount = *(Lng32*)vi->get(2);
Int64 constrUID = *(Int64*)vi->get(3);
char * enforced = (char*)vi->get(4);
Int64 flags = *(Int64*)vi->get(5);
constrInfoArray[idx].baseTableName = (char*)extTableName->data();
NAString cnNas = "\"";
cnNas += constrName;
cnNas += "\"";
ComObjectName coConstrName(tableCatName, tableSchName, cnNas);
NAString * extConstrName =
new(STMTHEAP) NAString(coConstrName.getExternalName(TRUE));
constrInfoArray[idx].constrName = (char*)extConstrName->data();
constrInfoArray[idx].colCount = colCount;
if (strcmp(constrType, COM_UNIQUE_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 0; // unique_constr
else if (strcmp(constrType, COM_FOREIGN_KEY_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 1; // ref_constr
else if (strcmp(constrType, COM_CHECK_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 2; // check_constr
else if (strcmp(constrType, COM_PRIMARY_KEY_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 3; // pkey_constr
if ((constrInfoArray[idx].constrType == 3) && // pkey. TBD: Add Enum
(CmpSeabaseDDL::isMDflagsSet(flags, CmpSeabaseDDL::MD_TABLE_CONSTRAINTS_PKEY_NOT_SERIALIZED_FLG)))
constrInfoArray[idx].notSerialized = 1;
else
constrInfoArray[idx].notSerialized = 0;
if (strcmp(enforced, COM_YES_LIT) == 0)
constrInfoArray[idx].isEnforced = 1;
else
constrInfoArray[idx].isEnforced = 0;
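      // Read the columns that participate in this constraint.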
Queue * keyInfoQueue = NULL;
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering , cast(0 as int not null) from %s.\"%s\".%s where object_uid = %ld for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
constrUID);
cliRC = cliInterface.initializeInfoList(keyInfoQueue, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
cliRC = cliInterface.fetchAllRows(keyInfoQueue, query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
if (colCount > 0)
{
keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[colCount];
keyInfoQueue->position();
Lng32 jk = 0;
for (Lng32 j = 0; j < keyInfoQueue->numEntries(); j++)
{
OutputInfo * vi = (OutputInfo*)keyInfoQueue->getNext();
populateKeyInfo(keyInfoArray[jk], vi, TRUE);
jk++;
}
}
constrInfoArray[idx].keyInfoArray = keyInfoArray;
constrInfoArray[idx].numRingConstr = 0;
constrInfoArray[idx].ringConstrArray = NULL;
constrInfoArray[idx].numRefdConstr = 0;
constrInfoArray[idx].refdConstrArray = NULL;
constrInfoArray[idx].checkConstrLen = 0;
constrInfoArray[idx].checkConstrText = NULL;
// attach all the referencing constraints
if ((strcmp(constrType, COM_UNIQUE_CONSTRAINT_LIT) == 0) ||
(strcmp(constrType, COM_PRIMARY_KEY_CONSTRAINT_LIT) == 0))
{
// force the query plan; without this we tend to do full scans of
// TABLE_CONSTRAINTS which reduces DDL concurrency
str_sprintf(query,"control query shape sort(nested_join(nested_join(nested_join(scan('U'),scan('O')), scan('T','TRAFODION.\"_MD_\".TABLE_CONSTRAINTS_IDX')),cut))");
cliRC = cliInterface.setCQS(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
str_sprintf(query, "select trim(O.catalog_name || '.' || '\"' || O.schema_name || '\"' || '.' || '\"' || O.object_name || '\"' ) constr_name, trim(O2.catalog_name || '.' || '\"' || O2.schema_name || '\"' || '.' || '\"' || O2.object_name || '\"' ) table_name from %s.\"%s\".%s U, %s.\"%s\".%s O, %s.\"%s\".%s O2, %s.\"%s\".%s T where O.object_uid = U.foreign_constraint_uid and O2.object_uid = T.table_uid and T.constraint_uid = U.foreign_constraint_uid and U.unique_constraint_uid = %ld order by 2, 1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_UNIQUE_REF_CONSTR_USAGE,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
constrUID
);
Queue * ringInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(ringInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
cliInterface.resetCQS();
return NULL;
}
cliInterface.resetCQS();
ComTdbVirtTableRefConstraints * ringInfoArray = NULL;
if (ringInfoQueue->numEntries() > 0)
{
ringInfoArray =
new(STMTHEAP) ComTdbVirtTableRefConstraints[ringInfoQueue->numEntries()];
}
ringInfoQueue->position();
for (Lng32 i = 0; i < ringInfoQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)ringInfoQueue->getNext();
ringInfoArray[i].constrName = (char*)vi->get(0);
ringInfoArray[i].baseTableName = (char*)vi->get(1);
}
constrInfoArray[idx].numRingConstr = ringInfoQueue->numEntries();
constrInfoArray[idx].ringConstrArray = ringInfoArray;
}
      // attach all the referenced constraints
if (strcmp(constrType, COM_FOREIGN_KEY_CONSTRAINT_LIT) == 0)
{
str_sprintf(query, "select trim(O.catalog_name || '.' || '\"' || O.schema_name || '\"' || '.' || '\"' || O.object_name || '\"' ) constr_name, trim(O2.catalog_name || '.' || '\"' || O2.schema_name || '\"' || '.' || '\"' || O2.object_name || '\"' ) table_name from %s.\"%s\".%s R, %s.\"%s\".%s O, %s.\"%s\".%s O2, %s.\"%s\".%s T where O.object_uid = R.unique_constraint_uid and O2.object_uid = T.table_uid and T.constraint_uid = R.unique_constraint_uid and R.ref_constraint_uid = %ld order by 2,1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_REF_CONSTRAINTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
constrUID
);
Queue * refdInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(refdInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableRefConstraints * refdInfoArray = NULL;
if (refdInfoQueue->numEntries() > 0)
{
refdInfoArray =
new(STMTHEAP) ComTdbVirtTableRefConstraints[refdInfoQueue->numEntries()];
}
refdInfoQueue->position();
for (Lng32 i = 0; i < refdInfoQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)refdInfoQueue->getNext();
refdInfoArray[i].constrName = (char*)vi->get(0);
refdInfoArray[i].baseTableName = (char*)vi->get(1);
}
constrInfoArray[idx].numRefdConstr = refdInfoQueue->numEntries();
constrInfoArray[idx].refdConstrArray = refdInfoArray;
}
if (strcmp(constrType, COM_CHECK_CONSTRAINT_LIT) == 0)
{
NAString constrText;
if (getTextFromMD(&cliInterface, constrUID, COM_CHECK_CONSTR_TEXT, 0,
constrText))
{
processReturn();
return NULL;
}
char * ct = new(STMTHEAP) char[constrText.length()+1];
memcpy(ct, constrText.data(), constrText.length());
ct[constrText.length()] = 0;
constrInfoArray[idx].checkConstrLen = constrText.length();
constrInfoArray[idx].checkConstrText = ct;
}
} // for
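  // If this object is a view, read its attributes (check option,
  // updatability) from the VIEWS metadata table.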
str_sprintf(query, "select check_option, is_updatable, is_insertable from %s.\"%s\".%s where view_uid = %ld for read committed access ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_VIEWS,
objUID);
Queue * viewInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(viewInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableViewInfo * viewInfoArray = NULL;
if (viewInfoQueue->numEntries() > 0)
{
// must have only one entry
if (viewInfoQueue->numEntries() > 1)
{
processReturn();
return NULL;
}
viewInfoArray = new(STMTHEAP) ComTdbVirtTableViewInfo[1];
viewInfoQueue->position();
OutputInfo * vi = (OutputInfo*)viewInfoQueue->getNext();
char * checkOption = (char*)vi->get(0);
Lng32 isUpdatable = *(Lng32*)vi->get(1);
Lng32 isInsertable = *(Lng32*)vi->get(2);
viewInfoArray[0].viewName = (char*)extTableName->data();
if (NAString(checkOption) != COM_NONE_CHECK_OPTION_LIT)
{
viewInfoArray[0].viewCheckText = new(STMTHEAP) char[strlen(checkOption) + 1];
strcpy(viewInfoArray[0].viewCheckText, checkOption);
}
else
viewInfoArray[0].viewCheckText = NULL;
viewInfoArray[0].isUpdatable = isUpdatable;
viewInfoArray[0].isInsertable = isInsertable;
// get view text from TEXT table
NAString viewText;
if (getTextFromMD(&cliInterface, objUID, COM_VIEW_TEXT, 0, viewText))
{
processReturn();
return NULL;
}
viewInfoArray[0].viewText = new(STMTHEAP) char[viewText.length() + 1];
strcpy(viewInfoArray[0].viewText, viewText.data());
// get view col usages from TEXT table
NAString viewColUsages;
if (getTextFromMD(&cliInterface, objUID, COM_VIEW_REF_COLS_TEXT, 0, viewColUsages))
{
processReturn();
return NULL;
}
viewInfoArray[0].viewColUsages = new(STMTHEAP) char[viewColUsages.length() + 1];
strcpy(viewInfoArray[0].viewColUsages, viewColUsages.data());
}
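  // If the table has an identity column, look up its backing sequence
  // generator.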
ComTdbVirtTableSequenceInfo * seqInfo = NULL;
if (identityColPos >= 0)
{
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catName, schName, objName, colInfoArray[identityColPos].colName,
seqName);
NAString extSeqName;
Int32 objectOwner;
Int64 seqUID;
seqInfo = getSeabaseSequenceInfo(catName, schName, seqName,
extSeqName, objectOwner, schemaOwner, seqUID);
}
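  // Gather privilege descriptors and assemble the table info structure.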
ComTdbVirtTablePrivInfo * privInfo = getSeabasePrivInfo(objUID, objType);
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = extTableName->data();
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = objUID;
tableInfo->isAudited = (isAudited ? -1 : 0);
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->numSaltPartns = numSaltPartns;
tableInfo->hbaseCreateOptions =
(hbaseCreateOptions->isNull() ? NULL : hbaseCreateOptions->data());
if (alignedFormat)
tableInfo->rowFormat = COM_ALIGNED_FORMAT_TYPE;
else if (hbaseStrDataFormat)
tableInfo->rowFormat = COM_HBASE_STR_FORMAT_TYPE;
else
tableInfo->rowFormat = COM_HBASE_FORMAT_TYPE;
if (NOT colFamStr.isNull())
{
char colFamBuf[1000];
char * colFamBufPtr = colFamBuf;
strcpy(colFamBufPtr, colFamStr.data());
strsep(&colFamBufPtr, " ");
tableInfo->defaultColFam = new(STMTHEAP) char[strlen(colFamBuf)+1];
strcpy((char*)tableInfo->defaultColFam, colFamBuf);
tableInfo->allColFams = new(STMTHEAP) char[strlen(colFamBufPtr)+1];
strcpy((char*)tableInfo->allColFams, colFamBufPtr);
}
else
{
tableInfo->defaultColFam = new(STMTHEAP) char[strlen(SEABASE_DEFAULT_COL_FAMILY)+1];
strcpy((char*)tableInfo->defaultColFam, SEABASE_DEFAULT_COL_FAMILY);
tableInfo->allColFams = NULL;
}
tableInfo->objectFlags = objectFlags;
tableInfo->tablesFlags = tablesFlags;
  // Access HBase to read region boundaries and, if requested, snapshot
  // information.
ExpHbaseInterface* ehi = CmpSeabaseDDL::allocEHI();
if (ehi == NULL)
return NULL;
NAArray<HbaseStr>* endKeyArray = ehi->getRegionEndKeys(extNameForHbase);
char * snapshotName = NULL;
if (ctlFlags & GET_SNAPSHOTS)
{
Lng32 retcode =
ehi->getLatestSnapshot(extNameForHbase.data(), snapshotName, STMTHEAP);
if (retcode < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::getLatestSnapshot()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr());
delete ehi;
}
}
tableDesc =
Generator::createVirtualTableDesc
(
extTableName->data(), //objName,
NULL, // let it decide what heap to use
numCols,
colInfoArray,
tableKeyInfo->numEntries(), //keyIndex,
keyInfoArray,
constrInfoQueue->numEntries(),
constrInfoArray,
indexInfoQueue->numEntries(),
indexInfoArray,
viewInfoQueue->numEntries(),
viewInfoArray,
tableInfo,
seqInfo,
endKeyArray,
snapshotName,
((ctlFlags & GEN_PACKED_DESC) != 0),
&packedDescLen,
TRUE /*user table*/,
privInfo);
deleteNAArray(heap_, endKeyArray);
if ( tableDesc ) {
// if this is base table or index and hbase object doesn't exist,
// then this object is corrupted.
    if (!(objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HIVE) &&
        !(objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HBASE))
{
if ((tableDesc->tableDesc()->objectType() == COM_BASE_TABLE_OBJECT) &&
(existsInHbase(extNameForHbase, ehi) == 0))
{
*CmpCommon::diags() << DgSqlCode(-4254)
<< DgString0(*extTableName);
tableDesc = NULL;
return NULL;
}
}
}
CmpSeabaseDDL::deallocEHI(ehi);
if (! tableDesc)
processReturn();
return tableDesc;
}
TrafDesc * CmpSeabaseDDL::getSeabaseTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType,
NABoolean includeInvalidDefs)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
if ((CmpCommon::context()->isUninitializedSeabase()) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
if (CmpCommon::context()->uninitializedSeabaseErrNum() == -1398)
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum())
<< DgInt0(CmpCommon::context()->hbaseErrNum())
<< DgString0(CmpCommon::context()->hbaseErrStr());
else
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum());
return NULL;
}
TrafDesc *tDesc = NULL;
NABoolean isMDTable = (isSeabaseMD(catName, schName, objName) ||
isSeabasePrivMgrMD(catName, schName));
if (isMDTable)
{
if (! CmpCommon::context()->getTrafMDDescsInfo())
{
*CmpCommon::diags() << DgSqlCode(-1428);
return NULL;
}
tDesc = getSeabaseMDTableDesc(catName, schName, objName, objType);
// Could not find this metadata object in the static predefined structs.
// It could be a metadata view or other objects created in MD schema.
// Look for it as a regular object.
}
else if ((objName == HBASE_HIST_NAME) ||
(objName == HBASE_HISTINT_NAME))
{
NAString tabName = catName;
tabName += ".";
tabName += schName;
tabName += ".";
tabName += objName;
if (existsInHbase(tabName))
{
tDesc = getSeabaseHistTableDesc(catName, schName, objName);
}
return tDesc;
}
if (! tDesc)
{
if ((CmpCommon::context()->isUninitializedSeabase()) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
if (CmpCommon::context()->uninitializedSeabaseErrNum() == -1398)
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum())
<< DgInt0(CmpCommon::context()->hbaseErrNum())
<< DgString0(CmpCommon::context()->hbaseErrStr());
else
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum());
}
else
{
Int32 ctlFlags = 0;
if (CmpCommon::getDefault(TRAF_TABLE_SNAPSHOT_SCAN) != DF_NONE)
ctlFlags = GET_SNAPSHOTS; // get snapshot
if ((CmpCommon::getDefault(TRAF_READ_OBJECT_DESC) == DF_ON) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) &&
(NOT includeInvalidDefs))
ctlFlags |= READ_OBJECT_DESC;
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
switch (objType)
{
case COM_SEQUENCE_GENERATOR_OBJECT:
tDesc = getSeabaseSequenceDesc(catName, schName, objName);
break;
case COM_LIBRARY_OBJECT:
tDesc = getSeabaseLibraryDesc(catName, schName, objName);
break;
default:
Int32 packedDescLen = 0;
tDesc = getSeabaseUserTableDesc(catName, schName, objName,
objType, includeInvalidDefs,
ctlFlags, packedDescLen);
break;
}
switchBackCompiler();
}
}
return tDesc;
}
// a wrapper method to getSeabaseRoutineDescInternal so
// CmpContext context switching can take place.
// getSeabaseRoutineDescInternal prepares and executes
// several queries on metadata tables
TrafDesc *CmpSeabaseDDL::getSeabaseRoutineDesc(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
TrafDesc *result = NULL;
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
result = getSeabaseRoutineDescInternal(catName, schName, objName);
switchBackCompiler();
return result;
}
TrafDesc *CmpSeabaseDDL::getSeabaseRoutineDescInternal(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
TrafDesc *result;
char query[4000];
char buf[4000];
ExeCliInterface cliInterface(STMTHEAP, 0, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int64 objectUID = 0;
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
Int64 objectFlags = 0 ;
ComObjectType objectType = COM_USER_DEFINED_ROUTINE_OBJECT;
objectUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(),
objName.data(), objectType,
objectOwnerID,schemaOwnerID,objectFlags);
if (objectUID == -1 || objectOwnerID == 0)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and owners for routine desc request");
processReturn();
return NULL;
}
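  // Read the routine attributes and its library information from the
  // ROUTINES, LIBRARIES and OBJECTS metadata tables.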
str_sprintf(buf, "select udr_type, language_type, deterministic_bool,"
" sql_access, call_on_null, isolate_bool, param_style,"
" transaction_attributes, max_results, state_area_size, external_name,"
" parallelism, user_version, external_security, execution_mode,"
" library_filename, version, signature, catalog_name, schema_name,"
" object_name"
" from %s.\"%s\".%s r, %s.\"%s\".%s l, %s.\"%s\".%s o "
" where r.udr_uid = %ld and r.library_uid = l.library_uid "
" and l.library_uid = o.object_uid for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_ROUTINES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_LIBRARIES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
objectUID);
cliRC = cliInterface.fetchRowsPrologue(buf, TRUE/*no exec*/);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
cliRC = cliInterface.clearExecFetchClose(NULL, 0);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
if (cliRC == 100) // did not find the row
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objName);
return NULL;
}
char * ptr = NULL;
Lng32 len = 0;
ComTdbVirtTableRoutineInfo *routineInfo = new (STMTHEAP) ComTdbVirtTableRoutineInfo();
routineInfo->object_uid = objectUID;
routineInfo->object_owner_id = objectOwnerID;
routineInfo->schema_owner_id = schemaOwnerID;
routineInfo->routine_name = objName.data();
cliInterface.getPtrAndLen(1, ptr, len);
str_cpy_all(routineInfo->UDR_type, ptr, len);
routineInfo->UDR_type[len] = '\0';
cliInterface.getPtrAndLen(2, ptr, len);
str_cpy_all(routineInfo->language_type, ptr, len);
routineInfo->language_type[len] = '\0';
cliInterface.getPtrAndLen(3, ptr, len);
if (*ptr == 'Y')
routineInfo->deterministic = 1;
else
routineInfo->deterministic = 0;
cliInterface.getPtrAndLen(4, ptr, len);
str_cpy_all(routineInfo->sql_access, ptr, len);
routineInfo->sql_access[len] = '\0';
cliInterface.getPtrAndLen(5, ptr, len);
if (*ptr == 'Y')
routineInfo->call_on_null = 1;
else
routineInfo->call_on_null = 0;
cliInterface.getPtrAndLen(6, ptr, len);
if (*ptr == 'Y')
routineInfo->isolate = 1;
else
routineInfo->isolate = 0;
cliInterface.getPtrAndLen(7, ptr, len);
str_cpy_all(routineInfo->param_style, ptr, len);
routineInfo->param_style[len] = '\0';
cliInterface.getPtrAndLen(8, ptr, len);
str_cpy_all(routineInfo->transaction_attributes, ptr, len);
routineInfo->transaction_attributes[len] = '\0';
cliInterface.getPtrAndLen(9, ptr, len);
routineInfo->max_results = *(Int32 *)ptr;
cliInterface.getPtrAndLen(10, ptr, len);
routineInfo->state_area_size = *(Int32 *)ptr;
cliInterface.getPtrAndLen(11, ptr, len);
routineInfo->external_name = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->external_name, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(12, ptr, len);
str_cpy_all(routineInfo->parallelism, ptr, len);
routineInfo->parallelism[len] = '\0';
cliInterface.getPtrAndLen(13, ptr, len);
str_cpy_all(routineInfo->user_version, ptr, len);
routineInfo->user_version[len] = '\0';
cliInterface.getPtrAndLen(14, ptr, len);
str_cpy_all(routineInfo->external_security, ptr, len);
routineInfo->external_security[len] = '\0';
cliInterface.getPtrAndLen(15, ptr, len);
str_cpy_all(routineInfo->execution_mode, ptr, len);
routineInfo->execution_mode[len] = '\0';
cliInterface.getPtrAndLen(16, ptr, len);
routineInfo->library_filename = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->library_filename, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(17, ptr, len);
routineInfo->library_version = *(Int32 *)ptr;
cliInterface.getPtrAndLen(18, ptr, len);
routineInfo->signature = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->signature, ptr, len, '\0', ' ', TRUE);
// library SQL name, in three parts
cliInterface.getPtrAndLen(19, ptr, len);
char *libCat = new (STMTHEAP) char[len+1];
str_cpy_and_null(libCat, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(20, ptr, len);
char *libSch = new (STMTHEAP) char[len+1];
str_cpy_and_null(libSch, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(21, ptr, len);
char *libObj = new (STMTHEAP) char[len+1];
str_cpy_and_null(libObj, ptr, len, '\0', ' ', TRUE);
ComObjectName libSQLName(libCat, libSch, libObj,
COM_UNKNOWN_NAME,
ComAnsiNamePart::INTERNAL_FORMAT,
STMTHEAP);
NAString libSQLExtName = libSQLName.getExternalName();
routineInfo->library_sqlname = new (STMTHEAP) char[libSQLExtName.length()+1];
str_cpy_and_null((char *)routineInfo->library_sqlname,
libSQLExtName.data(),
libSQLExtName.length(),
'\0', ' ', TRUE);
ComTdbVirtTableColumnInfo *paramsArray;
Lng32 numParams;
char direction[50];
str_sprintf(direction, "'%s', '%s', '%s'",
COM_INPUT_PARAM_LIT, COM_OUTPUT_PARAM_LIT,
COM_INOUT_PARAM_LIT);
// Params
if (getSeabaseColumnInfo(&cliInterface,
objectUID,
catName, schName, objName,
(char *)direction,
NULL,
NULL,
&numParams,
¶msArray) < 0)
{
processReturn();
return NULL;
}
ComTdbVirtTablePrivInfo * privInfo = getSeabasePrivInfo(objectUID, objectType);
TrafDesc *routine_desc = NULL;
routine_desc = Generator::createVirtualRoutineDesc(
objName.data(),
routineInfo,
numParams,
paramsArray,
privInfo,
NULL);
if (routine_desc == NULL)
processReturn();
return routine_desc;
}
// *****************************************************************************
// * *
// * Function: checkSpecifiedPrivs *
// * *
// * Processes the privilege specification and returns the lists of object *
// * and column privileges. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <privActsArray> ElemDDLPrivActArray & In *
// * is a reference to the parsed list of privileges to be granted or *
// * revoked. *
// * *
// * <externalObjectName> const char * In *
// * is the fully qualified name of the object that privileges are being *
// * granted or revoked on. *
// * *
// * <objectType> ComObjectType In *
// * is the type of the object that privileges are being granted or *
// * revoked on. *
// * *
// * <naTable> NATable * In *
// * if the object type is a table or view, the cache for the metadata *
// * related to the object, otherwise NULL. *
// * *
// * <objectPrivs> std::vector<PrivType> & Out *
// * passes back a list of the object privileges to be granted or revoked. *
// * *
// * <colPrivs> std::vector<ColPrivSpec> & Out *
// * passes back a list of the column privileges and the specific columns *
// * on which the privileges are to be granted or revoked. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Privileges processed successfully. Lists of object and column *
// * privileges were returned. *
// * false: Error processing privileges. The error is in the diags area. *
// * *
// *****************************************************************************
static bool checkSpecifiedPrivs(
ElemDDLPrivActArray & privActsArray,
const char * externalObjectName,
ComObjectType objectType,
NATable * naTable,
std::vector<PrivType> & objectPrivs,
std::vector<ColPrivSpec> & colPrivs)
{
for (Lng32 i = 0; i < privActsArray.entries(); i++)
{
// Currently only DML privileges are supported.
PrivType privType;
if (!ElmPrivToPrivType(privActsArray[i]->getOperatorType(),privType) ||
!isDMLPrivType(privType))
{
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_PRIV_FOR_OBJECT)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(externalObjectName);
return false;
}
//
// The same privilege cannot be specified twice in one grant or revoke
// statement. This includes granting or revoking the same privilege at
// the object-level and the column-level.
if (hasValue(objectPrivs,privType) || hasValue(colPrivs,privType))
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_PRIVILEGES);
return false;
}
if (!isValidPrivTypeForObject(objectType,privType) && privType != PrivType::ALL_DML)
{
*CmpCommon::diags() << DgSqlCode(-CAT_PRIVILEGE_NOT_ALLOWED_FOR_THIS_OBJECT_TYPE)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str());
return false;
}
// For some DML privileges the user may be granting either column
// or object privileges. If it is not a privilege that can be granted
// at the column level, it is an object-level privilege.
if (!isColumnPrivType(privType))
{
objectPrivs.push_back(privType);
continue;
}
ElemDDLPrivActWithColumns * privActWithColumns = dynamic_cast<ElemDDLPrivActWithColumns *>(privActsArray[i]);
ElemDDLColNameArray colNameArray = privActWithColumns->getColumnNameArray();
// If no columns were specified, this is an object-level privilege.
if (colNameArray.entries() == 0)
{
objectPrivs.push_back(privType);
continue;
}
// Column-level privileges can only be specified for tables and views.
if (objectType != COM_BASE_TABLE_OBJECT && objectType != COM_VIEW_OBJECT)
{
*CmpCommon::diags() << DgSqlCode(-CAT_INCORRECT_OBJECT_TYPE)
<< DgTableName(externalObjectName);
return false;
}
// It's a table or view, validate the column. Get the list of
// columns and verify the list contains the specified column(s).
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
for (size_t c = 0; c < colNameArray.entries(); c++)
{
const NAColumn * naCol = nacolArr.getColumn(colNameArray[c]->getColumnName());
if (naCol == NULL)
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colNameArray[c]->getColumnName());
return false;
}
// Specified column was found.
ColPrivSpec colPrivEntry;
colPrivEntry.privType = privType;
colPrivEntry.columnOrdinal = naCol->getPosition();
colPrivs.push_back(colPrivEntry);
}
}
return true;
}
//************************ End of checkSpecifiedPrivs **************************
// *****************************************************************************
// * *
// * Function: ElmPrivToPrivType *
// * *
// * This function maps a parser privilege enum (ELM_PRIV_ACT) to a Privilege *
// * Manager PrivType. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <elmPriv> OperatorTypeEnum In *
// * is a parser privilege enum. *
// * *
// * <privType> PrivType & Out *
// * passes back the CatPrivBitmap privilege enum. *
// * *
// * <forRevoke> bool [In] *
// * is true if this is part of a revoke command, otherwise false. Default *
// * to true. Currently unused, placeholder for schema and DDL privileges. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Privilege converted *
// * false: Privilege not recognized. *
// * *
// *****************************************************************************
static bool ElmPrivToPrivType(
OperatorTypeEnum elmPriv,
PrivType & privType,
bool forRevoke)
{
switch (elmPriv)
{
case ELM_PRIV_ACT_DELETE_ELEM:
privType = PrivType::DELETE_PRIV;
break;
case ELM_PRIV_ACT_EXECUTE_ELEM:
privType = PrivType::EXECUTE_PRIV;
break;
case ELM_PRIV_ACT_INSERT_ELEM:
privType = PrivType::INSERT_PRIV;
break;
case ELM_PRIV_ACT_REFERENCES_ELEM:
privType = PrivType::REFERENCES_PRIV;
break;
case ELM_PRIV_ACT_SELECT_ELEM:
privType = PrivType::SELECT_PRIV;
break;
case ELM_PRIV_ACT_UPDATE_ELEM:
privType = PrivType::UPDATE_PRIV;
break;
case ELM_PRIV_ACT_USAGE_ELEM:
privType = PrivType::USAGE_PRIV;
break;
case ELM_PRIV_ACT_ALTER_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_ALTER;
// else
privType = PrivType::ALTER_PRIV;
break;
case ELM_PRIV_ACT_CREATE_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_CREATE;
// else
privType = PrivType::CREATE_PRIV;
break;
case ELM_PRIV_ACT_DROP_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_DROP;
// else
privType = PrivType::DROP_PRIV;
break;
case ELM_PRIV_ACT_ALL_DDL_ELEM:
privType = PrivType::ALL_DDL;
break;
case ELM_PRIV_ACT_ALL_DML_ELEM:
privType = PrivType::ALL_DML;
break;
case ELM_PRIV_ACT_ALL_OTHER_ELEM:
privType = PrivType::ALL_PRIVS;
break;
default:
return false;
}
return true;
}
//************************* End of ElmPrivToPrivType ***************************
// *****************************************************************************
// * *
// * Function: hasValue *
// * *
// * This function determines if a ColPrivSpec vector contains a PrivType *
// * value. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <container> std::vector<ColPrivSpec> In *
// * is the vector of ColPrivSpec values. *
// * *
// * <value> PrivType In *
// * is the value to be compared against existing values in the vector. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Vector contains the value. *
// * false: Vector does not contain the value. *
// * *
// *****************************************************************************
static bool hasValue(
const std::vector<ColPrivSpec> & container,
PrivType value)
{
for (size_t index = 0; index < container.size(); index++)
if (container[index].privType == value)
return true;
return false;
}
//***************************** End of hasValue ********************************
// *****************************************************************************
// * *
// * Function: hasValue *
// * *
// * This function determines if a PrivType vector contains a PrivType value.*
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <container> std::vector<PrivType> In *
// * is the vector of 32-bit values. *
// * *
// * <value> PrivType In *
// * is the value to be compared against existing values in the vector. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Vector contains the value. *
// * false: Vector does not contain the value. *
// * *
// *****************************************************************************
static bool hasValue(
const std::vector<PrivType> & container,
PrivType value)
{
for (size_t index = 0; index < container.size(); index++)
if (container[index] == value)
return true;
return false;
}
//***************************** End of hasValue ********************************
// *****************************************************************************
// * *
// * Function: isMDGrantRevokeOK *
// * *
// * This function determines if a grant or revoke a privilege to/from a *
// * metadata table should be allowed. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <objectPrivs> const std::vector<PrivType> & In *
// * is a vector of object-level privileges. *
// * *
// * <colPrivs> const std::vector<ColPrivSpec> & In *
// * is a vector of column-level privileges. *
// * *
// * <isGrant> bool In *
// * is a true if this is a grant operation, false if revoke. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Grant/revoke is OK. *
// * false: Grant/revoke should be rejected. *
// * *
// *****************************************************************************
static bool isMDGrantRevokeOK(
const std::vector<PrivType> & objectPrivs,
const std::vector<ColPrivSpec> & colPrivs,
bool isGrant)
{
// Can only grant or revoke privileges on MD tables if only granting select,
// or only revoking all privileges. Only valid combination is no object
// privileges and 1 or more column privileges (all SELECT), or no column
// privilege and exactly one object privilege. In the latter case, the
// privilege must either be SELECT, or if a REVOKE operation, either
// ALL_PRIVS or ALL_DML.
// First check if no column privileges.
if (colPrivs.size() == 0)
{
// Should never get this far with both vectors being empty, but check
// just in case.
if (objectPrivs.size() == 0)
return false;
if (objectPrivs.size() > 1)
return false;
if (objectPrivs[0] == SELECT_PRIV)
return true;
if (isGrant)
return false;
if (objectPrivs[0] == ALL_PRIVS || objectPrivs[0] == ALL_DML)
return true;
return false;
}
// Have column privs
if (objectPrivs.size() > 0)
return false;
for (size_t i = 0; i < colPrivs.size(); i++)
if (colPrivs[i].privType != SELECT_PRIV)
return false;
return true;
}
//************************* End of isMDGrantRevokeOK ***************************
// *****************************************************************************
// * *
// * Function: isValidPrivTypeForObject *
// * *
// * This function determines if a priv type is valid for an object. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <objectType> ComObjectType In *
// * is the type of the object. *
// * *
// * <privType> PrivType In *
// * is the type of the privilege. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Priv type is valid for object. *
// * false: Priv type is not valid for object. *
// * *
// *****************************************************************************
static bool isValidPrivTypeForObject(
ComObjectType objectType,
PrivType privType)
{
switch (objectType)
{
case COM_LIBRARY_OBJECT:
return isLibraryPrivType(privType);
case COM_STORED_PROCEDURE_OBJECT:
case COM_USER_DEFINED_ROUTINE_OBJECT:
return isUDRPrivType(privType);
case COM_SEQUENCE_GENERATOR_OBJECT:
return isSequenceGeneratorPrivType(privType);
case COM_BASE_TABLE_OBJECT:
case COM_VIEW_OBJECT:
return isTablePrivType(privType);
default:
return false;
}
return false;
}
//********************* End of isValidPrivTypeForObject ************************
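// Illustrative sketch (assumption, not part of the original file): a hypothetical
// caller could combine the helpers above to validate a set of requested privileges
// before processing a grant or revoke.  The names validateRequestedPrivs,
// PrivStatus, STATUS_GOOD, and STATUS_ERROR are assumed, not taken from this file.
//
// static PrivStatus validateRequestedPrivs(
//    ComObjectType objectType,
//    const std::vector<PrivType> & requestedPrivs)
// {
//    // Reject the request as soon as one privilege is invalid for the object type.
//    for (size_t i = 0; i < requestedPrivs.size(); i++)
//       if (!isValidPrivTypeForObject(objectType, requestedPrivs[i]))
//          return STATUS_ERROR;
//    return STATUS_GOOD;
// }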
| 1 | 21,304 | if retcode is < 0, it should return -1 indicating an error. | apache-trafodion | cpp |
@@ -1,5 +1,5 @@
"""Emit a message for iteration through dict keys and subscripting dict with key."""
-# pylint: disable=line-too-long,missing-docstring,unsubscriptable-object,too-few-public-methods,redefined-outer-name,use-dict-literal
+# pylint: disable=locally-disabled,useless-suppression,suppressed-message,line-too-long,missing-docstring,unsubscriptable-object,too-few-public-methods,redefined-outer-name,use-dict-literal,iterating-modified-dict
def bad():
a_dict = {1: 1, 2: 2, 3: 3} | 1 | """Emit a message for iteration through dict keys and subscripting dict with key."""
# pylint: disable=line-too-long,missing-docstring,unsubscriptable-object,too-few-public-methods,redefined-outer-name,use-dict-literal
def bad():
a_dict = {1: 1, 2: 2, 3: 3}
for k in a_dict: # [consider-using-dict-items]
print(a_dict[k])
another_dict = dict()
for k in another_dict: # [consider-using-dict-items]
print(another_dict[k])
def good():
a_dict = {1: 1, 2: 2, 3: 3}
for k in a_dict:
print(k)
out_of_scope_dict = dict()
def another_bad():
for k in out_of_scope_dict: # [consider-using-dict-items]
print(out_of_scope_dict[k])
def another_good():
for k in out_of_scope_dict:
k = 1
k = 2
k = 3
print(out_of_scope_dict[k])
b_dict = {}
for k2 in b_dict: # Should not emit warning, key access necessary
b_dict[k2] = 2
for k2 in b_dict: # Should not emit warning, key access necessary (AugAssign)
b_dict[k2] += 2
# Warning should be emitted in this case
for k6 in b_dict: # [consider-using-dict-items]
val = b_dict[k6]
b_dict[k6] = 2
for k3 in b_dict: # [consider-using-dict-items]
val = b_dict[k3]
for k4 in b_dict.keys(): # [consider-iterating-dictionary,consider-using-dict-items]
val = b_dict[k4]
class Foo:
c_dict = {}
# Should emit warning when iterating over a dict attribute of a class
for k5 in Foo.c_dict: # [consider-using-dict-items]
val = Foo.c_dict[k5]
c_dict = {}
# Should NOT emit warning when key used to access a different dict
for k5 in Foo.c_dict: # This is fine
val = b_dict[k5]
for k5 in Foo.c_dict: # This is fine
val = c_dict[k5]
# Should emit warning within a list/dict comprehension
val = {k9: b_dict[k9] for k9 in b_dict} # [consider-using-dict-items]
val = [(k7, b_dict[k7]) for k7 in b_dict] # [consider-using-dict-items]
# Should emit warning even when using dict attribute of a class within comprehension
val = [(k7, Foo.c_dict[k7]) for k7 in Foo.c_dict] # [consider-using-dict-items]
val = any(True for k8 in Foo.c_dict if Foo.c_dict[k8]) # [consider-using-dict-items]
# Should emit warning when dict access done in ``if`` portion of comprehension
val = any(True for k8 in b_dict if b_dict[k8]) # [consider-using-dict-items]
# Should NOT emit warning when key used to access a different dict
val = [(k7, b_dict[k7]) for k7 in Foo.c_dict]
val = any(True for k8 in Foo.c_dict if b_dict[k8])
# Should NOT emit warning, essentially same check as above
val = [(k7, c_dict[k7]) for k7 in Foo.c_dict]
val = any(True for k8 in Foo.c_dict if c_dict[k8])
# Should emit warning, using .keys() of Foo.c_dict
val = any(True for k8 in Foo.c_dict.keys() if Foo.c_dict[k8]) # [consider-iterating-dictionary,consider-using-dict-items]
# Test false positive described in #4630
# (https://github.com/PyCQA/pylint/issues/4630)
d = {'key': 'value'}
for k in d: # this is fine, with the reassignment of d[k], d[k] is necessary
d[k] += '123'
if '1' in d[k]: # index lookup necessary here, do not emit error
print('found 1')
for k in d: # if this gets rewritten to d.items(), we are back to the above problem
d[k] = d[k] + 1
if '1' in d[k]: # index lookup necessary here, do not emit error
print('found 1')
for k in d: # [consider-using-dict-items]
if '1' in d[k]: # index lookup necessary here, do not emit error
print('found 1')
| 1 | 19,988 | Are those necessary? I feel like `useless-suppression` could be avoided here? | PyCQA-pylint | py |
@@ -309,13 +309,11 @@ define(['events', 'datetime', 'appSettings', 'itemHelper', 'pluginManager', 'pla
if (codecProfile.Type === 'Audio') {
(codecProfile.Conditions || []).map(function (condition) {
if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioBitDepth') {
- maxAudioBitDepth = condition.Value;
- }
- if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioSampleRate') {
- maxAudioSampleRate = condition.Value;
- }
- if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioBitrate') {
- maxAudioBitrate = condition.Value;
+ return maxAudioBitDepth = condition.Value;
+ } else if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioSampleRate') {
+ return maxAudioSampleRate = condition.Value;
+ } else if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioBitrate') {
+ return maxAudioBitrate = condition.Value;
}
});
} | 1 | define(['events', 'datetime', 'appSettings', 'itemHelper', 'pluginManager', 'playQueueManager', 'userSettings', 'globalize', 'connectionManager', 'loading', 'apphost', 'screenfull'], function (events, datetime, appSettings, itemHelper, pluginManager, PlayQueueManager, userSettings, globalize, connectionManager, loading, apphost, screenfull) {
'use strict';
/** Delay time in ms for reportPlayback logging */
const reportPlaybackLogDelay = 1e3;
function enableLocalPlaylistManagement(player) {
if (player.getPlaylist) {
return false;
}
if (player.isLocalPlayer) {
return true;
}
return false;
}
function bindToFullscreenChange(player) {
if (screenfull.isEnabled) {
screenfull.on('change', function () {
events.trigger(player, 'fullscreenchange');
});
}
}
function triggerPlayerChange(playbackManagerInstance, newPlayer, newTarget, previousPlayer, previousTargetInfo) {
if (!newPlayer && !previousPlayer) {
return;
}
if (newTarget && previousTargetInfo) {
if (newTarget.id === previousTargetInfo.id) {
return;
}
}
events.trigger(playbackManagerInstance, 'playerchange', [newPlayer, newTarget, previousPlayer]);
}
/** Last invoked method */
let reportPlaybackLastMethod;
/** Last invoke time of method */
let reportPlaybackLastTime;
function reportPlayback(playbackManagerInstance, state, player, reportPlaylist, serverId, method, progressEventName) {
if (!serverId) {
// Not a server item
// We can expand on this later and possibly report them
return;
}
var info = Object.assign({}, state.PlayState);
info.ItemId = state.NowPlayingItem.Id;
if (progressEventName) {
info.EventName = progressEventName;
}
if (reportPlaylist) {
addPlaylistToPlaybackReport(playbackManagerInstance, info, player, serverId);
}
const now = (new Date).getTime();
if (method !== reportPlaybackLastMethod || now - (reportPlaybackLastTime || 0) >= reportPlaybackLogDelay) {
console.debug(method + '-' + JSON.stringify(info));
reportPlaybackLastMethod = method;
reportPlaybackLastTime = now;
}
var apiClient = connectionManager.getApiClient(serverId);
apiClient[method](info);
}
function getPlaylistSync(playbackManagerInstance, player) {
player = player || playbackManagerInstance._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getPlaylistSync();
}
return playbackManagerInstance._playQueueManager.getPlaylist();
}
function addPlaylistToPlaybackReport(playbackManagerInstance, info, player, serverId) {
info.NowPlayingQueue = getPlaylistSync(playbackManagerInstance, player).map(function (i) {
var itemInfo = {
Id: i.Id,
PlaylistItemId: i.PlaylistItemId
};
if (i.ServerId !== serverId) {
itemInfo.ServerId = i.ServerId;
}
return itemInfo;
});
}
function normalizeName(t) {
return t.toLowerCase().replace(' ', '');
}
function getItemsForPlayback(serverId, query) {
var apiClient = connectionManager.getApiClient(serverId);
if (query.Ids && query.Ids.split(',').length === 1) {
var itemId = query.Ids.split(',');
return apiClient.getItem(apiClient.getCurrentUserId(), itemId).then(function (item) {
return {
Items: [item],
TotalRecordCount: 1
};
});
} else {
query.Limit = query.Limit || 300;
query.Fields = 'Chapters';
query.ExcludeLocationTypes = 'Virtual';
query.EnableTotalRecordCount = false;
query.CollapseBoxSetItems = false;
return apiClient.getItems(apiClient.getCurrentUserId(), query);
}
}
function createStreamInfoFromUrlItem(item) {
// Check item.Path for games
return {
url: item.Url || item.Path,
playMethod: 'DirectPlay',
item: item,
textTracks: [],
mediaType: item.MediaType
};
}
function mergePlaybackQueries(obj1, obj2) {
var query = Object.assign(obj1, obj2);
var filters = query.Filters ? query.Filters.split(',') : [];
if (filters.indexOf('IsNotFolder') === -1) {
filters.push('IsNotFolder');
}
query.Filters = filters.join(',');
return query;
}
function backdropImageUrl(apiClient, item, options) {
options = options || {};
options.type = options.type || 'Backdrop';
// If not resizing, get the original image
if (!options.maxWidth && !options.width && !options.maxHeight && !options.height) {
options.quality = 100;
}
if (item.BackdropImageTags && item.BackdropImageTags.length) {
options.tag = item.BackdropImageTags[0];
return apiClient.getScaledImageUrl(item.Id, options);
}
if (item.ParentBackdropImageTags && item.ParentBackdropImageTags.length) {
options.tag = item.ParentBackdropImageTags[0];
return apiClient.getScaledImageUrl(item.ParentBackdropItemId, options);
}
return null;
}
function getMimeType(type, container) {
container = (container || '').toLowerCase();
if (type === 'audio') {
if (container === 'opus') {
return 'audio/ogg';
}
if (container === 'webma') {
return 'audio/webm';
}
if (container === 'm4a') {
return 'audio/mp4';
}
} else if (type === 'video') {
if (container === 'mkv') {
return 'video/x-matroska';
}
if (container === 'm4v') {
return 'video/mp4';
}
if (container === 'mov') {
return 'video/quicktime';
}
if (container === 'mpg') {
return 'video/mpeg';
}
if (container === 'flv') {
return 'video/x-flv';
}
}
return type + '/' + container;
}
function getParam(name, url) {
name = name.replace(/[\[]/, '\\\[').replace(/[\]]/, '\\\]');
var regexS = '[\\?&]' + name + '=([^&#]*)';
var regex = new RegExp(regexS, 'i');
var results = regex.exec(url);
if (results == null) {
return '';
} else {
return decodeURIComponent(results[1].replace(/\+/g, ' '));
}
}
function isAutomaticPlayer(player) {
if (player.isLocalPlayer) {
return true;
}
return false;
}
function getAutomaticPlayers(instance, forceLocalPlayer) {
if (!forceLocalPlayer) {
var player = instance._currentPlayer;
if (player && !isAutomaticPlayer(player)) {
return [player];
}
}
return instance.getPlayers().filter(isAutomaticPlayer);
}
function isServerItem(item) {
if (!item.Id) {
return false;
}
return true;
}
function enableIntros(item) {
if (item.MediaType !== 'Video') {
return false;
}
if (item.Type === 'TvChannel') {
return false;
}
// disable for in-progress recordings
if (item.Status === 'InProgress') {
return false;
}
return isServerItem(item);
}
function getIntros(firstItem, apiClient, options) {
if (options.startPositionTicks || options.startIndex || options.fullscreen === false || !enableIntros(firstItem) || !userSettings.enableCinemaMode()) {
return Promise.resolve({
Items: []
});
}
return apiClient.getIntros(firstItem.Id).then(function (result) {
return result;
}, function (err) {
return Promise.resolve({
Items: []
});
});
}
function getAudioMaxValues(deviceProfile) {
// TODO - this could vary per codec and should be done on the server using the entire profile
var maxAudioSampleRate = null;
var maxAudioBitDepth = null;
var maxAudioBitrate = null;
deviceProfile.CodecProfiles.map(function (codecProfile) {
if (codecProfile.Type === 'Audio') {
(codecProfile.Conditions || []).map(function (condition) {
if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioBitDepth') {
maxAudioBitDepth = condition.Value;
}
if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioSampleRate') {
maxAudioSampleRate = condition.Value;
}
if (condition.Condition === 'LessThanEqual' && condition.Property === 'AudioBitrate') {
maxAudioBitrate = condition.Value;
}
});
}
});
return {
maxAudioSampleRate: maxAudioSampleRate,
maxAudioBitDepth: maxAudioBitDepth,
maxAudioBitrate: maxAudioBitrate
};
}
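    // Illustrative note (assumption, not part of the original source): a codec profile
    // such as { Type: 'Audio', Conditions: [{ Condition: 'LessThanEqual',
    // Property: 'AudioSampleRate', Value: '48000' }] } would make getAudioMaxValues
    // return { maxAudioSampleRate: '48000', maxAudioBitDepth: null, maxAudioBitrate: null }.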
var startingPlaySession = new Date().getTime();
function getAudioStreamUrl(item, transcodingProfile, directPlayContainers, maxBitrate, apiClient, maxAudioSampleRate, maxAudioBitDepth, maxAudioBitrate, startPosition) {
var url = 'Audio/' + item.Id + '/universal';
startingPlaySession++;
return apiClient.getUrl(url, {
UserId: apiClient.getCurrentUserId(),
DeviceId: apiClient.deviceId(),
MaxStreamingBitrate: maxAudioBitrate || maxBitrate,
Container: directPlayContainers,
TranscodingContainer: transcodingProfile.Container || null,
TranscodingProtocol: transcodingProfile.Protocol || null,
AudioCodec: transcodingProfile.AudioCodec,
MaxAudioSampleRate: maxAudioSampleRate,
MaxAudioBitDepth: maxAudioBitDepth,
api_key: apiClient.accessToken(),
PlaySessionId: startingPlaySession,
StartTimeTicks: startPosition || 0,
EnableRedirection: true,
EnableRemoteMedia: apphost.supports('remoteaudio')
});
}
function getAudioStreamUrlFromDeviceProfile(item, deviceProfile, maxBitrate, apiClient, startPosition) {
var transcodingProfile = deviceProfile.TranscodingProfiles.filter(function (p) {
return p.Type === 'Audio' && p.Context === 'Streaming';
})[0];
var directPlayContainers = '';
deviceProfile.DirectPlayProfiles.map(function (p) {
if (p.Type === 'Audio') {
if (directPlayContainers) {
directPlayContainers += ',' + p.Container;
} else {
directPlayContainers = p.Container;
}
if (p.AudioCodec) {
directPlayContainers += '|' + p.AudioCodec;
}
}
});
var maxValues = getAudioMaxValues(deviceProfile);
return getAudioStreamUrl(item, transcodingProfile, directPlayContainers, maxBitrate, apiClient, maxValues.maxAudioSampleRate, maxValues.maxAudioBitDepth, maxValues.maxAudioBitrate, startPosition);
}
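    // Illustrative note (assumption, not part of the original source): with
    // DirectPlayProfiles such as [{ Type: 'Audio', Container: 'mp3' },
    // { Type: 'Audio', Container: 'flac', AudioCodec: 'flac' }], the loop above builds
    // the string 'mp3,flac|flac', which getAudioStreamUrl passes as the Container
    // parameter of the Audio/{id}/universal endpoint.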
function getStreamUrls(items, deviceProfile, maxBitrate, apiClient, startPosition) {
var audioTranscodingProfile = deviceProfile.TranscodingProfiles.filter(function (p) {
return p.Type === 'Audio' && p.Context === 'Streaming';
})[0];
var audioDirectPlayContainers = '';
deviceProfile.DirectPlayProfiles.map(function (p) {
if (p.Type === 'Audio') {
if (audioDirectPlayContainers) {
audioDirectPlayContainers += ',' + p.Container;
} else {
audioDirectPlayContainers = p.Container;
}
if (p.AudioCodec) {
audioDirectPlayContainers += '|' + p.AudioCodec;
}
}
});
var maxValues = getAudioMaxValues(deviceProfile);
var streamUrls = [];
for (var i = 0, length = items.length; i < length; i++) {
var item = items[i];
var streamUrl;
if (item.MediaType === 'Audio' && !itemHelper.isLocalItem(item)) {
streamUrl = getAudioStreamUrl(item, audioTranscodingProfile, audioDirectPlayContainers, maxBitrate, apiClient, maxValues.maxAudioSampleRate, maxValues.maxAudioBitDepth, maxValues.maxAudioBitrate, startPosition);
}
streamUrls.push(streamUrl || '');
if (i === 0) {
startPosition = 0;
}
}
return Promise.resolve(streamUrls);
}
function setStreamUrls(items, deviceProfile, maxBitrate, apiClient, startPosition) {
return getStreamUrls(items, deviceProfile, maxBitrate, apiClient, startPosition).then(function (streamUrls) {
for (var i = 0, length = items.length; i < length; i++) {
var item = items[i];
var streamUrl = streamUrls[i];
if (streamUrl) {
item.PresetMediaSource = {
StreamUrl: streamUrl,
Id: item.Id,
MediaStreams: [],
RunTimeTicks: item.RunTimeTicks
};
}
}
});
}
function getPlaybackInfo(player,
apiClient,
item,
deviceProfile,
maxBitrate,
startPosition,
isPlayback,
mediaSourceId,
audioStreamIndex,
subtitleStreamIndex,
liveStreamId,
enableDirectPlay,
enableDirectStream,
allowVideoStreamCopy,
allowAudioStreamCopy) {
if (!itemHelper.isLocalItem(item) && item.MediaType === 'Audio') {
return Promise.resolve({
MediaSources: [
{
StreamUrl: getAudioStreamUrlFromDeviceProfile(item, deviceProfile, maxBitrate, apiClient, startPosition),
Id: item.Id,
MediaStreams: [],
RunTimeTicks: item.RunTimeTicks
}]
});
}
if (item.PresetMediaSource) {
return Promise.resolve({
MediaSources: [item.PresetMediaSource]
});
}
var itemId = item.Id;
var query = {
UserId: apiClient.getCurrentUserId(),
StartTimeTicks: startPosition || 0
};
if (isPlayback) {
query.IsPlayback = true;
query.AutoOpenLiveStream = true;
} else {
query.IsPlayback = false;
query.AutoOpenLiveStream = false;
}
if (audioStreamIndex != null) {
query.AudioStreamIndex = audioStreamIndex;
}
if (subtitleStreamIndex != null) {
query.SubtitleStreamIndex = subtitleStreamIndex;
}
if (enableDirectPlay != null) {
query.EnableDirectPlay = enableDirectPlay;
}
if (enableDirectStream != null) {
query.EnableDirectStream = enableDirectStream;
}
if (allowVideoStreamCopy != null) {
query.AllowVideoStreamCopy = allowVideoStreamCopy;
}
if (allowAudioStreamCopy != null) {
query.AllowAudioStreamCopy = allowAudioStreamCopy;
}
if (mediaSourceId) {
query.MediaSourceId = mediaSourceId;
}
if (liveStreamId) {
query.LiveStreamId = liveStreamId;
}
if (maxBitrate) {
query.MaxStreamingBitrate = maxBitrate;
}
if (player.enableMediaProbe && !player.enableMediaProbe(item)) {
query.EnableMediaProbe = false;
}
// lastly, enforce player overrides for special situations
if (query.EnableDirectStream !== false) {
if (player.supportsPlayMethod && !player.supportsPlayMethod('DirectStream', item)) {
query.EnableDirectStream = false;
}
}
if (player.getDirectPlayProtocols) {
query.DirectPlayProtocols = player.getDirectPlayProtocols();
}
return apiClient.getPlaybackInfo(itemId, query, deviceProfile);
}
function getOptimalMediaSource(apiClient, item, versions) {
var promises = versions.map(function (v) {
return supportsDirectPlay(apiClient, item, v);
});
if (!promises.length) {
return Promise.reject();
}
return Promise.all(promises).then(function (results) {
for (var i = 0, length = versions.length; i < length; i++) {
versions[i].enableDirectPlay = results[i] || false;
}
var optimalVersion = versions.filter(function (v) {
return v.enableDirectPlay;
})[0];
if (!optimalVersion) {
optimalVersion = versions.filter(function (v) {
return v.SupportsDirectStream;
})[0];
}
optimalVersion = optimalVersion || versions.filter(function (s) {
return s.SupportsTranscoding;
})[0];
return optimalVersion || versions[0];
});
}
function getLiveStream(player, apiClient, item, playSessionId, deviceProfile, maxBitrate, startPosition, mediaSource, audioStreamIndex, subtitleStreamIndex) {
var postData = {
DeviceProfile: deviceProfile,
OpenToken: mediaSource.OpenToken
};
var query = {
UserId: apiClient.getCurrentUserId(),
StartTimeTicks: startPosition || 0,
ItemId: item.Id,
PlaySessionId: playSessionId
};
if (maxBitrate) {
query.MaxStreamingBitrate = maxBitrate;
}
if (audioStreamIndex != null) {
query.AudioStreamIndex = audioStreamIndex;
}
if (subtitleStreamIndex != null) {
query.SubtitleStreamIndex = subtitleStreamIndex;
}
// lastly, enforce player overrides for special situations
if (query.EnableDirectStream !== false) {
if (player.supportsPlayMethod && !player.supportsPlayMethod('DirectStream', item)) {
query.EnableDirectStream = false;
}
}
return apiClient.ajax({
url: apiClient.getUrl('LiveStreams/Open', query),
type: 'POST',
data: JSON.stringify(postData),
contentType: 'application/json',
dataType: 'json'
});
}
function isHostReachable(mediaSource, apiClient) {
if (mediaSource.IsRemote) {
return Promise.resolve(true);
}
return apiClient.getEndpointInfo().then(function (endpointInfo) {
if (endpointInfo.IsInNetwork) {
if (!endpointInfo.IsLocal) {
var path = (mediaSource.Path || '').toLowerCase();
if (path.indexOf('localhost') !== -1 || path.indexOf('127.0.0.1') !== -1) {
// This will only work if the app is on the same machine as the server
return Promise.resolve(false);
}
}
return Promise.resolve(true);
}
// media source is in network, but connection is out of network
return Promise.resolve(false);
});
}
function supportsDirectPlay(apiClient, item, mediaSource) {
// folder rip hacks due to not yet being supported by the stream building engine
var isFolderRip = mediaSource.VideoType === 'BluRay' || mediaSource.VideoType === 'Dvd' || mediaSource.VideoType === 'HdDvd';
if (mediaSource.SupportsDirectPlay || isFolderRip) {
if (mediaSource.IsRemote && !apphost.supports('remotevideo')) {
return Promise.resolve(false);
}
if (mediaSource.Protocol === 'Http' && !mediaSource.RequiredHttpHeaders.length) {
// If this is the only way it can be played, then allow it
if (!mediaSource.SupportsDirectStream && !mediaSource.SupportsTranscoding) {
return Promise.resolve(true);
} else {
return isHostReachable(mediaSource, apiClient);
}
} else if (mediaSource.Protocol === 'File') {
return new Promise(function (resolve, reject) {
// Determine if the file can be accessed directly
require(['filesystem'], function (filesystem) {
var method = isFolderRip ?
'directoryExists' :
'fileExists';
filesystem[method](mediaSource.Path).then(function () {
resolve(true);
}, function () {
resolve(false);
});
});
});
}
}
return Promise.resolve(false);
}
function validatePlaybackInfoResult(instance, result) {
if (result.ErrorCode) {
showPlaybackInfoErrorMessage(instance, result.ErrorCode);
return false;
}
return true;
}
function showPlaybackInfoErrorMessage(instance, errorCode, playNextTrack) {
require(['alert'], function (alert) {
alert({
text: globalize.translate('PlaybackError' + errorCode),
title: globalize.translate('HeaderPlaybackError')
}).then(function () {
if (playNextTrack) {
instance.nextTrack();
}
});
});
}
function normalizePlayOptions(playOptions) {
playOptions.fullscreen = playOptions.fullscreen !== false;
}
function truncatePlayOptions(playOptions) {
return {
fullscreen: playOptions.fullscreen,
mediaSourceId: playOptions.mediaSourceId,
audioStreamIndex: playOptions.audioStreamIndex,
subtitleStreamIndex: playOptions.subtitleStreamIndex,
startPositionTicks: playOptions.startPositionTicks
};
}
function getNowPlayingItemForReporting(player, item, mediaSource) {
var nowPlayingItem = Object.assign({}, item);
if (mediaSource) {
nowPlayingItem.RunTimeTicks = mediaSource.RunTimeTicks;
nowPlayingItem.MediaStreams = mediaSource.MediaStreams;
// not needed
nowPlayingItem.MediaSources = null;
}
nowPlayingItem.RunTimeTicks = nowPlayingItem.RunTimeTicks || player.duration() * 10000;
return nowPlayingItem;
}
function displayPlayerIndividually(player) {
return !player.isLocalPlayer;
}
function createTarget(instance, player) {
return {
name: player.name,
id: player.id,
playerName: player.name,
playableMediaTypes: ['Audio', 'Video', 'Photo', 'Book'].map(player.canPlayMediaType),
isLocalPlayer: player.isLocalPlayer,
supportedCommands: instance.getSupportedCommands(player)
};
}
function getPlayerTargets(player) {
if (player.getTargets) {
return player.getTargets();
}
return Promise.resolve([createTarget(player)]);
}
function sortPlayerTargets(a, b) {
var aVal = a.isLocalPlayer ? 0 : 1;
var bVal = b.isLocalPlayer ? 0 : 1;
aVal = aVal.toString() + a.name;
bVal = bVal.toString() + b.name;
return aVal.localeCompare(bVal);
}
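    // Illustrative note (assumption, not part of the original source): because the
    // comparison key is '0' + name for local players and '1' + name for remote ones,
    // local targets always sort ahead of remote targets, and each group is ordered
    // alphabetically by name.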
function PlaybackManager() {
var self = this;
var players = [];
var currentTargetInfo;
var lastLocalPlayer;
var currentPairingId = null;
this._playNextAfterEnded = true;
var playerStates = {};
this._playQueueManager = new PlayQueueManager();
self.currentItem = function (player) {
if (!player) {
throw new Error('player cannot be null');
}
if (player.currentItem) {
return player.currentItem();
}
var data = getPlayerData(player);
return data.streamInfo ? data.streamInfo.item : null;
};
self.currentMediaSource = function (player) {
if (!player) {
throw new Error('player cannot be null');
}
if (player.currentMediaSource) {
return player.currentMediaSource();
}
var data = getPlayerData(player);
return data.streamInfo ? data.streamInfo.mediaSource : null;
};
self.playMethod = function (player) {
if (!player) {
throw new Error('player cannot be null');
}
if (player.playMethod) {
return player.playMethod();
}
var data = getPlayerData(player);
return data.streamInfo ? data.streamInfo.playMethod : null;
};
self.playSessionId = function (player) {
if (!player) {
throw new Error('player cannot be null');
}
if (player.playSessionId) {
return player.playSessionId();
}
var data = getPlayerData(player);
return data.streamInfo ? data.streamInfo.playSessionId : null;
};
self.getPlayerInfo = function () {
var player = self._currentPlayer;
if (!player) {
return null;
}
var target = currentTargetInfo || {};
return {
name: player.name,
isLocalPlayer: player.isLocalPlayer,
id: target.id,
deviceName: target.deviceName,
playableMediaTypes: target.playableMediaTypes,
supportedCommands: target.supportedCommands
};
};
self.setActivePlayer = function (player, targetInfo) {
if (player === 'localplayer' || player.name === 'localplayer') {
if (self._currentPlayer && self._currentPlayer.isLocalPlayer) {
return;
}
setCurrentPlayerInternal(null, null);
return;
}
if (typeof (player) === 'string') {
player = players.filter(function (p) {
return p.name === player;
})[0];
}
if (!player) {
throw new Error('null player');
}
setCurrentPlayerInternal(player, targetInfo);
};
self.trySetActivePlayer = function (player, targetInfo) {
if (player === 'localplayer' || player.name === 'localplayer') {
if (self._currentPlayer && self._currentPlayer.isLocalPlayer) {
return;
}
return;
}
if (typeof (player) === 'string') {
player = players.filter(function (p) {
return p.name === player;
})[0];
}
if (!player) {
throw new Error('null player');
}
if (currentPairingId === targetInfo.id) {
return;
}
currentPairingId = targetInfo.id;
var promise = player.tryPair ?
player.tryPair(targetInfo) :
Promise.resolve();
events.trigger(self, 'pairing');
promise.then(function () {
events.trigger(self, 'paired');
setCurrentPlayerInternal(player, targetInfo);
}, function () {
events.trigger(self, 'pairerror');
if (currentPairingId === targetInfo.id) {
currentPairingId = null;
}
});
};
self.getTargets = function () {
var promises = players.filter(displayPlayerIndividually).map(getPlayerTargets);
return Promise.all(promises).then(function (responses) {
return connectionManager.currentApiClient().getCurrentUser().then(function (user) {
var targets = [];
targets.push({
name: globalize.translate('HeaderMyDevice'),
id: 'localplayer',
playerName: 'localplayer',
playableMediaTypes: ['Audio', 'Video', 'Photo', 'Book'],
isLocalPlayer: true,
supportedCommands: self.getSupportedCommands({
isLocalPlayer: true
}),
user: user
});
for (var i = 0; i < responses.length; i++) {
var subTargets = responses[i];
for (var j = 0; j < subTargets.length; j++) {
targets.push(subTargets[j]);
}
}
targets = targets.sort(sortPlayerTargets);
return targets;
});
});
};
function getCurrentSubtitleStream(player) {
if (!player) {
throw new Error('player cannot be null');
}
var index = getPlayerData(player).subtitleStreamIndex;
if (index == null || index === -1) {
return null;
}
return getSubtitleStream(player, index);
}
function getSubtitleStream(player, index) {
return self.subtitleTracks(player).filter(function (s) {
return s.Type === 'Subtitle' && s.Index === index;
})[0];
}
self.getPlaylist = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
if (player.getPlaylistSync) {
return Promise.resolve(player.getPlaylistSync());
}
return player.getPlaylist();
}
return Promise.resolve(self._playQueueManager.getPlaylist());
};
function removeCurrentPlayer(player) {
var previousPlayer = self._currentPlayer;
if (!previousPlayer || player.id === previousPlayer.id) {
setCurrentPlayerInternal(null);
}
}
function setCurrentPlayerInternal(player, targetInfo) {
var previousPlayer = self._currentPlayer;
var previousTargetInfo = currentTargetInfo;
if (player && !targetInfo && player.isLocalPlayer) {
targetInfo = createTarget(self, player);
}
if (player && !targetInfo) {
throw new Error('targetInfo cannot be null');
}
currentPairingId = null;
self._currentPlayer = player;
currentTargetInfo = targetInfo;
if (targetInfo) {
console.debug('Active player: ' + JSON.stringify(targetInfo));
}
if (player && player.isLocalPlayer) {
lastLocalPlayer = player;
}
if (previousPlayer) {
self.endPlayerUpdates(previousPlayer);
}
if (player) {
self.beginPlayerUpdates(player);
}
triggerPlayerChange(self, player, targetInfo, previousPlayer, previousTargetInfo);
}
self.isPlaying = function (player) {
player = player || self._currentPlayer;
if (player) {
if (player.isPlaying) {
return player.isPlaying();
}
}
return player != null && player.currentSrc() != null;
};
self.isPlayingMediaType = function (mediaType, player) {
player = player || self._currentPlayer;
if (player) {
if (player.isPlaying) {
return player.isPlaying(mediaType);
}
}
if (self.isPlaying(player)) {
var playerData = getPlayerData(player);
return playerData.streamInfo.mediaType === mediaType;
}
return false;
};
self.isPlayingLocally = function (mediaTypes, player) {
player = player || self._currentPlayer;
if (!player || !player.isLocalPlayer) {
return false;
}
return mediaTypes.filter(function (mediaType) {
return self.isPlayingMediaType(mediaType, player);
}).length > 0;
};
self.isPlayingVideo = function (player) {
return self.isPlayingMediaType('Video', player);
};
self.isPlayingAudio = function (player) {
return self.isPlayingMediaType('Audio', player);
};
self.getPlayers = function () {
return players;
};
function getDefaultPlayOptions() {
return {
fullscreen: true
};
}
self.canPlay = function (item) {
var itemType = item.Type;
if (itemType === 'PhotoAlbum' || itemType === 'MusicGenre' || itemType === 'Season' || itemType === 'Series' || itemType === 'BoxSet' || itemType === 'MusicAlbum' || itemType === 'MusicArtist' || itemType === 'Playlist') {
return true;
}
if (item.LocationType === 'Virtual') {
if (itemType !== 'Program') {
return false;
}
}
if (itemType === 'Program') {
if (!item.EndDate || !item.StartDate) {
return false;
}
if (new Date().getTime() > datetime.parseISO8601Date(item.EndDate).getTime() || new Date().getTime() < datetime.parseISO8601Date(item.StartDate).getTime()) {
return false;
}
}
//var mediaType = item.MediaType;
return getPlayer(item, getDefaultPlayOptions()) != null;
};
self.toggleAspectRatio = function (player) {
player = player || self._currentPlayer;
if (player) {
var current = self.getAspectRatio(player);
var supported = self.getSupportedAspectRatios(player);
var index = -1;
for (var i = 0, length = supported.length; i < length; i++) {
if (supported[i].id === current) {
index = i;
break;
}
}
index++;
if (index >= supported.length) {
index = 0;
}
self.setAspectRatio(supported[index].id, player);
}
};
self.setAspectRatio = function (val, player) {
player = player || self._currentPlayer;
if (player && player.setAspectRatio) {
player.setAspectRatio(val);
}
};
self.getSupportedAspectRatios = function (player) {
player = player || self._currentPlayer;
if (player && player.getSupportedAspectRatios) {
return player.getSupportedAspectRatios();
}
return [];
};
self.getAspectRatio = function (player) {
player = player || self._currentPlayer;
if (player && player.getAspectRatio) {
return player.getAspectRatio();
}
};
var brightnessOsdLoaded;
self.setBrightness = function (val, player) {
player = player || self._currentPlayer;
if (player) {
if (!brightnessOsdLoaded) {
brightnessOsdLoaded = true;
// TODO: Have this trigger an event instead to get the osd out of here
require(['brightnessOsd']);
}
player.setBrightness(val);
}
};
self.getBrightness = function (player) {
player = player || self._currentPlayer;
if (player) {
return player.getBrightness();
}
};
self.setVolume = function (val, player) {
player = player || self._currentPlayer;
if (player) {
player.setVolume(val);
}
};
self.getVolume = function (player) {
player = player || self._currentPlayer;
if (player) {
return player.getVolume();
}
};
self.volumeUp = function (player) {
player = player || self._currentPlayer;
if (player) {
player.volumeUp();
}
};
self.volumeDown = function (player) {
player = player || self._currentPlayer;
if (player) {
player.volumeDown();
}
};
self.changeAudioStream = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.changeAudioStream();
}
if (!player) {
return;
}
var currentMediaSource = self.currentMediaSource(player);
var mediaStreams = [];
var i;
var length;
for (i = 0, length = currentMediaSource.MediaStreams.length; i < length; i++) {
if (currentMediaSource.MediaStreams[i].Type === 'Audio') {
mediaStreams.push(currentMediaSource.MediaStreams[i]);
}
}
// Nothing to change
if (mediaStreams.length <= 1) {
return;
}
var currentStreamIndex = self.getAudioStreamIndex(player);
var indexInList = -1;
for (i = 0, length = mediaStreams.length; i < length; i++) {
if (mediaStreams[i].Index === currentStreamIndex) {
indexInList = i;
break;
}
}
var nextIndex = indexInList + 1;
if (nextIndex >= mediaStreams.length) {
nextIndex = 0;
}
nextIndex = nextIndex === -1 ? -1 : mediaStreams[nextIndex].Index;
self.setAudioStreamIndex(nextIndex, player);
};
self.changeSubtitleStream = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.changeSubtitleStream();
}
if (!player) {
return;
}
var currentMediaSource = self.currentMediaSource(player);
var mediaStreams = [];
var i;
var length;
for (i = 0, length = currentMediaSource.MediaStreams.length; i < length; i++) {
if (currentMediaSource.MediaStreams[i].Type === 'Subtitle') {
mediaStreams.push(currentMediaSource.MediaStreams[i]);
}
}
// No known streams, nothing to change
if (!mediaStreams.length) {
return;
}
var currentStreamIndex = self.getSubtitleStreamIndex(player);
var indexInList = -1;
for (i = 0, length = mediaStreams.length; i < length; i++) {
if (mediaStreams[i].Index === currentStreamIndex) {
indexInList = i;
break;
}
}
var nextIndex = indexInList + 1;
if (nextIndex >= mediaStreams.length) {
nextIndex = -1;
}
nextIndex = nextIndex === -1 ? -1 : mediaStreams[nextIndex].Index;
self.setSubtitleStreamIndex(nextIndex, player);
};
self.getAudioStreamIndex = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getAudioStreamIndex();
}
return getPlayerData(player).audioStreamIndex;
};
function isAudioStreamSupported(mediaSource, index, deviceProfile) {
var mediaStream;
var i;
var length;
var mediaStreams = mediaSource.MediaStreams;
for (i = 0, length = mediaStreams.length; i < length; i++) {
if (mediaStreams[i].Type === 'Audio' && mediaStreams[i].Index === index) {
mediaStream = mediaStreams[i];
break;
}
}
if (!mediaStream) {
return false;
}
var codec = (mediaStream.Codec || '').toLowerCase();
if (!codec) {
return false;
}
var profiles = deviceProfile.DirectPlayProfiles || [];
return profiles.filter(function (p) {
if (p.Type === 'Video') {
if (!p.AudioCodec) {
return true;
}
// This is an exclusion filter
if (p.AudioCodec.indexOf('-') === 0) {
return p.AudioCodec.toLowerCase().indexOf(codec) === -1;
}
return p.AudioCodec.toLowerCase().indexOf(codec) !== -1;
}
return false;
}).length > 0;
}
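        // Illustrative note (assumption, not part of the original source): a profile of
        // { Type: 'Video', AudioCodec: '-ac3' } takes the exclusion branch above, so an
        // 'ac3' stream is treated as unsupported, while { Type: 'Video', AudioCodec: 'aac,mp3' }
        // only accepts streams whose codec appears in that list.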
self.setAudioStreamIndex = function (index, player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.setAudioStreamIndex(index);
}
if (self.playMethod(player) === 'Transcode' || !player.canSetAudioStreamIndex()) {
changeStream(player, getCurrentTicks(player), { AudioStreamIndex: index });
getPlayerData(player).audioStreamIndex = index;
} else {
// See if the player supports the track without transcoding
player.getDeviceProfile(self.currentItem(player)).then(function (profile) {
if (isAudioStreamSupported(self.currentMediaSource(player), index, profile)) {
player.setAudioStreamIndex(index);
getPlayerData(player).audioStreamIndex = index;
} else {
changeStream(player, getCurrentTicks(player), { AudioStreamIndex: index });
getPlayerData(player).audioStreamIndex = index;
}
});
}
};
function getSavedMaxStreamingBitrate(apiClient, mediaType) {
if (!apiClient) {
// This should hopefully never happen
apiClient = connectionManager.currentApiClient();
}
var endpointInfo = apiClient.getSavedEndpointInfo() || {};
return appSettings.maxStreamingBitrate(endpointInfo.IsInNetwork, mediaType);
}
self.getMaxStreamingBitrate = function (player) {
player = player || self._currentPlayer;
if (player && player.getMaxStreamingBitrate) {
return player.getMaxStreamingBitrate();
}
var playerData = getPlayerData(player);
if (playerData.maxStreamingBitrate) {
return playerData.maxStreamingBitrate;
}
var mediaType = playerData.streamInfo ? playerData.streamInfo.mediaType : null;
var currentItem = self.currentItem(player);
var apiClient = currentItem ? connectionManager.getApiClient(currentItem.ServerId) : connectionManager.currentApiClient();
return getSavedMaxStreamingBitrate(apiClient, mediaType);
};
self.enableAutomaticBitrateDetection = function (player) {
player = player || self._currentPlayer;
if (player && player.enableAutomaticBitrateDetection) {
return player.enableAutomaticBitrateDetection();
}
var playerData = getPlayerData(player);
var mediaType = playerData.streamInfo ? playerData.streamInfo.mediaType : null;
var currentItem = self.currentItem(player);
var apiClient = currentItem ? connectionManager.getApiClient(currentItem.ServerId) : connectionManager.currentApiClient();
var endpointInfo = apiClient.getSavedEndpointInfo() || {};
return appSettings.enableAutomaticBitrateDetection(endpointInfo.IsInNetwork, mediaType);
};
self.setMaxStreamingBitrate = function (options, player) {
player = player || self._currentPlayer;
if (player && player.setMaxStreamingBitrate) {
return player.setMaxStreamingBitrate(options);
}
var apiClient = connectionManager.getApiClient(self.currentItem(player).ServerId);
apiClient.getEndpointInfo().then(function (endpointInfo) {
var playerData = getPlayerData(player);
var mediaType = playerData.streamInfo ? playerData.streamInfo.mediaType : null;
var promise;
if (options.enableAutomaticBitrateDetection) {
appSettings.enableAutomaticBitrateDetection(endpointInfo.IsInNetwork, mediaType, true);
promise = apiClient.detectBitrate(true);
} else {
appSettings.enableAutomaticBitrateDetection(endpointInfo.IsInNetwork, mediaType, false);
promise = Promise.resolve(options.maxBitrate);
}
promise.then(function (bitrate) {
appSettings.maxStreamingBitrate(endpointInfo.IsInNetwork, mediaType, bitrate);
changeStream(player, getCurrentTicks(player), {
MaxStreamingBitrate: bitrate
});
});
});
};
self.isFullscreen = function (player) {
player = player || self._currentPlayer;
if (!player.isLocalPlayer || player.isFullscreen) {
return player.isFullscreen();
}
return screenfull.isFullscreen;
};
self.toggleFullscreen = function (player) {
player = player || self._currentPlayer;
            if (!player.isLocalPlayer || player.toggleFullscreen) {
                return player.toggleFullscreen();
}
if (screenfull.isEnabled) {
screenfull.toggle();
}
};
self.togglePictureInPicture = function (player) {
player = player || self._currentPlayer;
return player.togglePictureInPicture();
};
self.toggleAirPlay = function (player) {
player = player || self._currentPlayer;
return player.toggleAirPlay();
};
self.getSubtitleStreamIndex = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getSubtitleStreamIndex();
}
if (!player) {
throw new Error('player cannot be null');
}
return getPlayerData(player).subtitleStreamIndex;
};
function getDeliveryMethod(subtitleStream) {
// This will be null for internal subs for local items
if (subtitleStream.DeliveryMethod) {
return subtitleStream.DeliveryMethod;
}
return subtitleStream.IsExternal ? 'External' : 'Embed';
}
self.setSubtitleStreamIndex = function (index, player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.setSubtitleStreamIndex(index);
}
var currentStream = getCurrentSubtitleStream(player);
var newStream = getSubtitleStream(player, index);
if (!currentStream && !newStream) {
return;
}
var selectedTrackElementIndex = -1;
var currentPlayMethod = self.playMethod(player);
if (currentStream && !newStream) {
if (getDeliveryMethod(currentStream) === 'Encode' || (getDeliveryMethod(currentStream) === 'Embed' && currentPlayMethod === 'Transcode')) {
// Need to change the transcoded stream to remove subs
changeStream(player, getCurrentTicks(player), { SubtitleStreamIndex: -1 });
}
} else if (!currentStream && newStream) {
if (getDeliveryMethod(newStream) === 'External') {
selectedTrackElementIndex = index;
} else if (getDeliveryMethod(newStream) === 'Embed' && currentPlayMethod !== 'Transcode') {
selectedTrackElementIndex = index;
} else {
// Need to change the transcoded stream to add subs
changeStream(player, getCurrentTicks(player), { SubtitleStreamIndex: index });
}
} else if (currentStream && newStream) {
// Switching tracks
// We can handle this clientside if the new track is external or the new track is embedded and we're not transcoding
if (getDeliveryMethod(newStream) === 'External' || (getDeliveryMethod(newStream) === 'Embed' && currentPlayMethod !== 'Transcode')) {
selectedTrackElementIndex = index;
// But in order to handle this client side, if the previous track is being added via transcoding, we'll have to remove it
if (getDeliveryMethod(currentStream) !== 'External' && getDeliveryMethod(currentStream) !== 'Embed') {
changeStream(player, getCurrentTicks(player), { SubtitleStreamIndex: -1 });
}
} else {
// Need to change the transcoded stream to add subs
changeStream(player, getCurrentTicks(player), { SubtitleStreamIndex: index });
}
}
player.setSubtitleStreamIndex(selectedTrackElementIndex);
getPlayerData(player).subtitleStreamIndex = index;
};
self.supportSubtitleOffset = function(player) {
player = player || self._currentPlayer;
return player && 'setSubtitleOffset' in player;
};
self.enableShowingSubtitleOffset = function(player) {
player = player || self._currentPlayer;
player.enableShowingSubtitleOffset();
};
self.disableShowingSubtitleOffset = function(player) {
player = player || self._currentPlayer;
if (player.disableShowingSubtitleOffset) {
player.disableShowingSubtitleOffset();
}
};
self.isShowingSubtitleOffsetEnabled = function(player) {
player = player || self._currentPlayer;
return player.isShowingSubtitleOffsetEnabled();
};
self.isSubtitleStreamExternal = function(index, player) {
var stream = getSubtitleStream(player, index);
return stream ? getDeliveryMethod(stream) === 'External' : false;
};
self.setSubtitleOffset = function (value, player) {
player = player || self._currentPlayer;
if (player.setSubtitleOffset) {
player.setSubtitleOffset(value);
}
};
self.getPlayerSubtitleOffset = function(player) {
player = player || self._currentPlayer;
if (player.getSubtitleOffset) {
return player.getSubtitleOffset();
}
};
self.canHandleOffsetOnCurrentSubtitle = function(player) {
var index = self.getSubtitleStreamIndex(player);
return index !== -1 && self.isSubtitleStreamExternal(index, player);
};
self.seek = function (ticks, player) {
ticks = Math.max(0, ticks);
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
if (player.isLocalPlayer) {
return player.seek((ticks || 0) / 10000);
} else {
return player.seek(ticks);
}
}
changeStream(player, ticks);
};
self.seekRelative = function (offsetTicks, player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player) && player.seekRelative) {
if (player.isLocalPlayer) {
                    return player.seekRelative((offsetTicks || 0) / 10000);
                } else {
                    return player.seekRelative(offsetTicks);
}
}
var ticks = getCurrentTicks(player) + offsetTicks;
return this.seek(ticks, player);
};
// Returns true if the player can seek using native client-side seeking functions
function canPlayerSeek(player) {
if (!player) {
throw new Error('player cannot be null');
}
var playerData = getPlayerData(player);
var currentSrc = (playerData.streamInfo.url || '').toLowerCase();
if (currentSrc.indexOf('.m3u8') !== -1) {
return true;
}
if (player.seekable) {
return player.seekable();
}
var isPlayMethodTranscode = self.playMethod(player) === 'Transcode';
if (isPlayMethodTranscode) {
return false;
}
return player.duration();
}
function changeStream(player, ticks, params) {
if (canPlayerSeek(player) && params == null) {
player.currentTime(parseInt(ticks / 10000));
return;
}
params = params || {};
var liveStreamId = getPlayerData(player).streamInfo.liveStreamId;
var lastMediaInfoQuery = getPlayerData(player).streamInfo.lastMediaInfoQuery;
var playSessionId = self.playSessionId(player);
var currentItem = self.currentItem(player);
player.getDeviceProfile(currentItem, {
isRetry: params.EnableDirectPlay === false
}).then(function (deviceProfile) {
var audioStreamIndex = params.AudioStreamIndex == null ? getPlayerData(player).audioStreamIndex : params.AudioStreamIndex;
var subtitleStreamIndex = params.SubtitleStreamIndex == null ? getPlayerData(player).subtitleStreamIndex : params.SubtitleStreamIndex;
var currentMediaSource = self.currentMediaSource(player);
var apiClient = connectionManager.getApiClient(currentItem.ServerId);
if (ticks) {
ticks = parseInt(ticks);
}
var maxBitrate = params.MaxStreamingBitrate || self.getMaxStreamingBitrate(player);
var currentPlayOptions = currentItem.playOptions || getDefaultPlayOptions();
getPlaybackInfo(player, apiClient, currentItem, deviceProfile, maxBitrate, ticks, true, currentMediaSource.Id, audioStreamIndex, subtitleStreamIndex, liveStreamId, params.EnableDirectPlay, params.EnableDirectStream, params.AllowVideoStreamCopy, params.AllowAudioStreamCopy).then(function (result) {
if (validatePlaybackInfoResult(self, result)) {
currentMediaSource = result.MediaSources[0];
var streamInfo = createStreamInfo(apiClient, currentItem.MediaType, currentItem, currentMediaSource, ticks);
streamInfo.fullscreen = currentPlayOptions.fullscreen;
streamInfo.lastMediaInfoQuery = lastMediaInfoQuery;
if (!streamInfo.url) {
showPlaybackInfoErrorMessage(self, 'NoCompatibleStream', true);
return;
}
getPlayerData(player).subtitleStreamIndex = subtitleStreamIndex;
getPlayerData(player).audioStreamIndex = audioStreamIndex;
getPlayerData(player).maxStreamingBitrate = maxBitrate;
changeStreamToUrl(apiClient, player, playSessionId, streamInfo);
}
});
});
}
function changeStreamToUrl(apiClient, player, playSessionId, streamInfo, newPositionTicks) {
var playerData = getPlayerData(player);
playerData.isChangingStream = true;
if (playerData.streamInfo && playSessionId) {
apiClient.stopActiveEncodings(playSessionId).then(function () {
// Stop the first transcoding afterwards because the player may still send requests to the original url
var afterSetSrc = function () {
apiClient.stopActiveEncodings(playSessionId);
};
setSrcIntoPlayer(apiClient, player, streamInfo).then(afterSetSrc, afterSetSrc);
});
} else {
setSrcIntoPlayer(apiClient, player, streamInfo);
}
}
function setSrcIntoPlayer(apiClient, player, streamInfo) {
return player.play(streamInfo).then(function () {
var playerData = getPlayerData(player);
playerData.isChangingStream = false;
playerData.streamInfo = streamInfo;
streamInfo.started = true;
streamInfo.ended = false;
sendProgressUpdate(player, 'timeupdate');
}, function (e) {
var playerData = getPlayerData(player);
playerData.isChangingStream = false;
onPlaybackError.call(player, e, {
type: 'mediadecodeerror',
streamInfo: streamInfo
});
});
}
function translateItemsForPlayback(items, options) {
if (items.length > 1 && options && options.ids) {
// Use the original request id array for sorting the result in the proper order
items.sort(function (a, b) {
return options.ids.indexOf(a.Id) - options.ids.indexOf(b.Id);
});
}
var firstItem = items[0];
var promise;
var serverId = firstItem.ServerId;
var queryOptions = options.queryOptions || {};
if (firstItem.Type === 'Program') {
promise = getItemsForPlayback(serverId, {
Ids: firstItem.ChannelId
});
} else if (firstItem.Type === 'Playlist') {
promise = getItemsForPlayback(serverId, {
ParentId: firstItem.Id,
SortBy: options.shuffle ? 'Random' : null
});
} else if (firstItem.Type === 'MusicArtist') {
promise = getItemsForPlayback(serverId, {
ArtistIds: firstItem.Id,
Filters: 'IsNotFolder',
Recursive: true,
SortBy: options.shuffle ? 'Random' : 'SortName',
MediaTypes: 'Audio'
});
} else if (firstItem.MediaType === 'Photo') {
promise = getItemsForPlayback(serverId, {
ParentId: firstItem.ParentId,
Filters: 'IsNotFolder',
// Setting this to true may cause some incorrect sorting
Recursive: false,
SortBy: options.shuffle ? 'Random' : 'SortName',
MediaTypes: 'Photo,Video',
Limit: 500
}).then(function (result) {
var items = result.Items;
var index = items.map(function (i) {
return i.Id;
}).indexOf(firstItem.Id);
if (index === -1) {
index = 0;
}
options.startIndex = index;
return Promise.resolve(result);
});
} else if (firstItem.Type === 'PhotoAlbum') {
promise = getItemsForPlayback(serverId, {
ParentId: firstItem.Id,
Filters: 'IsNotFolder',
// Setting this to true may cause some incorrect sorting
Recursive: false,
SortBy: options.shuffle ? 'Random' : 'SortName',
MediaTypes: 'Photo,Video',
Limit: 1000
});
} else if (firstItem.Type === 'MusicGenre') {
promise = getItemsForPlayback(serverId, {
GenreIds: firstItem.Id,
Filters: 'IsNotFolder',
Recursive: true,
SortBy: options.shuffle ? 'Random' : 'SortName',
MediaTypes: 'Audio'
});
} else if (firstItem.IsFolder) {
promise = getItemsForPlayback(serverId, mergePlaybackQueries({
ParentId: firstItem.Id,
Filters: 'IsNotFolder',
Recursive: true,
// These are pre-sorted
SortBy: options.shuffle ? 'Random' : (['BoxSet'].indexOf(firstItem.Type) === -1 ? 'SortName' : null),
MediaTypes: 'Audio,Video'
}, queryOptions));
} else if (firstItem.Type === 'Episode' && items.length === 1 && getPlayer(firstItem, options).supportsProgress !== false) {
promise = new Promise(function (resolve, reject) {
var apiClient = connectionManager.getApiClient(firstItem.ServerId);
apiClient.getCurrentUser().then(function (user) {
if (!user.Configuration.EnableNextEpisodeAutoPlay || !firstItem.SeriesId) {
resolve(null);
return;
}
apiClient.getEpisodes(firstItem.SeriesId, {
IsVirtualUnaired: false,
IsMissing: false,
UserId: apiClient.getCurrentUserId(),
Fields: 'Chapters'
}).then(function (episodesResult) {
var foundItem = false;
episodesResult.Items = episodesResult.Items.filter(function (e) {
if (foundItem) {
return true;
}
if (e.Id === firstItem.Id) {
foundItem = true;
return true;
}
return false;
});
episodesResult.TotalRecordCount = episodesResult.Items.length;
resolve(episodesResult);
}, reject);
});
});
}
if (promise) {
return promise.then(function (result) {
return result ? result.Items : items;
});
} else {
return Promise.resolve(items);
}
}
self.play = function (options) {
normalizePlayOptions(options);
if (self._currentPlayer) {
if (options.enableRemotePlayers === false && !self._currentPlayer.isLocalPlayer) {
return Promise.reject();
}
if (!self._currentPlayer.isLocalPlayer) {
return self._currentPlayer.play(options);
}
}
if (options.fullscreen) {
loading.show();
}
if (options.items) {
return translateItemsForPlayback(options.items, options).then(function (items) {
return playWithIntros(items, options);
});
} else {
if (!options.serverId) {
throw new Error('serverId required!');
}
return getItemsForPlayback(options.serverId, {
Ids: options.ids.join(',')
}).then(function (result) {
return translateItemsForPlayback(result.Items, options).then(function (items) {
return playWithIntros(items, options);
});
});
}
};
function getPlayerData(player) {
if (!player) {
throw new Error('player cannot be null');
}
if (!player.name) {
throw new Error('player name cannot be null');
}
var state = playerStates[player.name];
if (!state) {
playerStates[player.name] = {};
state = playerStates[player.name];
}
            return state;
}
self.getPlayerState = function (player, item, mediaSource) {
player = player || self._currentPlayer;
if (!player) {
throw new Error('player cannot be null');
}
if (!enableLocalPlaylistManagement(player) && player.getPlayerState) {
return player.getPlayerState();
}
item = item || self.currentItem(player);
mediaSource = mediaSource || self.currentMediaSource(player);
var state = {
PlayState: {}
};
if (player) {
state.PlayState.VolumeLevel = player.getVolume();
state.PlayState.IsMuted = player.isMuted();
state.PlayState.IsPaused = player.paused();
state.PlayState.RepeatMode = self.getRepeatMode(player);
state.PlayState.MaxStreamingBitrate = self.getMaxStreamingBitrate(player);
state.PlayState.PositionTicks = getCurrentTicks(player);
state.PlayState.PlaybackStartTimeTicks = self.playbackStartTime(player);
state.PlayState.SubtitleStreamIndex = self.getSubtitleStreamIndex(player);
state.PlayState.AudioStreamIndex = self.getAudioStreamIndex(player);
state.PlayState.BufferedRanges = self.getBufferedRanges(player);
state.PlayState.PlayMethod = self.playMethod(player);
if (mediaSource) {
state.PlayState.LiveStreamId = mediaSource.LiveStreamId;
}
state.PlayState.PlaySessionId = self.playSessionId(player);
state.PlayState.PlaylistItemId = self.getCurrentPlaylistItemId(player);
}
if (mediaSource) {
state.PlayState.MediaSourceId = mediaSource.Id;
state.NowPlayingItem = {
RunTimeTicks: mediaSource.RunTimeTicks
};
state.PlayState.CanSeek = (mediaSource.RunTimeTicks || 0) > 0 || canPlayerSeek(player);
}
if (item) {
state.NowPlayingItem = getNowPlayingItemForReporting(player, item, mediaSource);
}
state.MediaSource = mediaSource;
return state;
};
self.duration = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player) && !player.isLocalPlayer) {
return player.duration();
}
if (!player) {
throw new Error('player cannot be null');
}
var mediaSource = self.currentMediaSource(player);
if (mediaSource && mediaSource.RunTimeTicks) {
return mediaSource.RunTimeTicks;
}
var playerDuration = player.duration();
if (playerDuration) {
playerDuration *= 10000;
}
return playerDuration;
};
function getCurrentTicks(player) {
if (!player) {
throw new Error('player cannot be null');
}
var playerTime = Math.floor(10000 * (player || self._currentPlayer).currentTime());
var streamInfo = getPlayerData(player).streamInfo;
if (streamInfo) {
playerTime += getPlayerData(player).streamInfo.transcodingOffsetTicks || 0;
}
return playerTime;
}
// Only used internally
self.getCurrentTicks = getCurrentTicks;
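        // Illustrative note (assumption, not part of the original source): ticks are
        // 100-nanosecond units, so a player position reported in milliseconds is
        // converted with ms * 10000 (e.g. 90 seconds = 90000 ms = 900,000,000 ticks),
        // and any transcoding offset is added on top in getCurrentTicks above.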
function playPhotos(items, options, user) {
var playStartIndex = options.startIndex || 0;
var player = getPlayer(items[playStartIndex], options);
loading.hide();
options.items = items;
return player.play(options);
}
function playWithIntros(items, options, user) {
var playStartIndex = options.startIndex || 0;
var firstItem = items[playStartIndex];
// If index was bad, reset it
if (!firstItem) {
playStartIndex = 0;
firstItem = items[playStartIndex];
}
// If it's still null then there's nothing to play
if (!firstItem) {
showPlaybackInfoErrorMessage(self, 'NoCompatibleStream', false);
return Promise.reject();
}
if (firstItem.MediaType === 'Photo') {
return playPhotos(items, options, user);
}
var apiClient = connectionManager.getApiClient(firstItem.ServerId);
return getIntros(firstItem, apiClient, options).then(function (introsResult) {
var introItems = introsResult.Items;
var introPlayOptions;
firstItem.playOptions = truncatePlayOptions(options);
if (introItems.length) {
introPlayOptions = {
fullscreen: firstItem.playOptions.fullscreen
};
} else {
introPlayOptions = firstItem.playOptions;
}
items = introItems.concat(items);
// Needed by players that manage their own playlist
introPlayOptions.items = items;
introPlayOptions.startIndex = playStartIndex;
return playInternal(items[playStartIndex], introPlayOptions, function () {
self._playQueueManager.setPlaylist(items);
setPlaylistState(items[playStartIndex].PlaylistItemId, playStartIndex);
loading.hide();
});
});
}
// Set playlist state. Using a method allows for overloading in derived player implementations
function setPlaylistState(playlistItemId, index) {
if (!isNaN(index)) {
self._playQueueManager.setPlaylistState(playlistItemId, index);
}
}
function playInternal(item, playOptions, onPlaybackStartedFn) {
if (item.IsPlaceHolder) {
loading.hide();
showPlaybackInfoErrorMessage(self, 'PlaceHolder', true);
return Promise.reject();
}
// Normalize defaults to simplify checks throughout the process
normalizePlayOptions(playOptions);
if (playOptions.isFirstItem) {
playOptions.isFirstItem = false;
} else {
playOptions.isFirstItem = true;
}
return runInterceptors(item, playOptions).then(function () {
if (playOptions.fullscreen) {
loading.show();
}
// TODO: This should be the media type requested, not the original media type
var mediaType = item.MediaType;
var onBitrateDetectionFailure = function () {
return playAfterBitrateDetect(getSavedMaxStreamingBitrate(connectionManager.getApiClient(item.ServerId), mediaType), item, playOptions, onPlaybackStartedFn);
};
if (!isServerItem(item) || itemHelper.isLocalItem(item)) {
return onBitrateDetectionFailure();
}
var apiClient = connectionManager.getApiClient(item.ServerId);
apiClient.getEndpointInfo().then(function (endpointInfo) {
if ((mediaType === 'Video' || mediaType === 'Audio') && appSettings.enableAutomaticBitrateDetection(endpointInfo.IsInNetwork, mediaType)) {
return apiClient.detectBitrate().then(function (bitrate) {
appSettings.maxStreamingBitrate(endpointInfo.IsInNetwork, mediaType, bitrate);
return playAfterBitrateDetect(bitrate, item, playOptions, onPlaybackStartedFn);
}, onBitrateDetectionFailure);
} else {
onBitrateDetectionFailure();
}
}, onBitrateDetectionFailure);
}, onInterceptorRejection);
}
function onInterceptorRejection() {
var player = self._currentPlayer;
if (player) {
destroyPlayer(player);
removeCurrentPlayer(player);
}
events.trigger(self, 'playbackcancelled');
return Promise.reject();
}
function destroyPlayer(player) {
player.destroy();
}
function runInterceptors(item, playOptions) {
return new Promise(function (resolve, reject) {
var interceptors = pluginManager.ofType('preplayintercept');
interceptors.sort(function (a, b) {
return (a.order || 0) - (b.order || 0);
});
if (!interceptors.length) {
resolve();
return;
}
loading.hide();
var options = Object.assign({}, playOptions);
options.mediaType = item.MediaType;
options.item = item;
runNextPrePlay(interceptors, 0, options, resolve, reject);
});
}
function runNextPrePlay(interceptors, index, options, resolve, reject) {
if (index >= interceptors.length) {
resolve();
return;
}
var interceptor = interceptors[index];
interceptor.intercept(options).then(function () {
runNextPrePlay(interceptors, index + 1, options, resolve, reject);
}, reject);
}
function sendPlaybackListToPlayer(player, items, deviceProfile, maxBitrate, apiClient, startPositionTicks, mediaSourceId, audioStreamIndex, subtitleStreamIndex, startIndex) {
return setStreamUrls(items, deviceProfile, maxBitrate, apiClient, startPositionTicks).then(function () {
loading.hide();
return player.play({
items: items,
startPositionTicks: startPositionTicks || 0,
mediaSourceId: mediaSourceId,
audioStreamIndex: audioStreamIndex,
subtitleStreamIndex: subtitleStreamIndex,
startIndex: startIndex
});
});
}
function playAfterBitrateDetect(maxBitrate, item, playOptions, onPlaybackStartedFn) {
var startPosition = playOptions.startPositionTicks;
var player = getPlayer(item, playOptions);
var activePlayer = self._currentPlayer;
var promise;
if (activePlayer) {
// TODO: if changing players within the same playlist, this will cause nextItem to be null
self._playNextAfterEnded = false;
promise = onPlaybackChanging(activePlayer, player, item);
} else {
promise = Promise.resolve();
}
if (!isServerItem(item) || item.MediaType === 'Book') {
return promise.then(function () {
var streamInfo = createStreamInfoFromUrlItem(item);
streamInfo.fullscreen = playOptions.fullscreen;
getPlayerData(player).isChangingStream = false;
return player.play(streamInfo).then(function () {
loading.hide();
onPlaybackStartedFn();
onPlaybackStarted(player, playOptions, streamInfo);
}, function () {
// TODO: show error message
self.stop(player);
});
});
}
return Promise.all([promise, player.getDeviceProfile(item)]).then(function (responses) {
var deviceProfile = responses[1];
var apiClient = connectionManager.getApiClient(item.ServerId);
var mediaSourceId = playOptions.mediaSourceId;
var audioStreamIndex = playOptions.audioStreamIndex;
var subtitleStreamIndex = playOptions.subtitleStreamIndex;
if (player && !enableLocalPlaylistManagement(player)) {
return sendPlaybackListToPlayer(player, playOptions.items, deviceProfile, maxBitrate, apiClient, startPosition, mediaSourceId, audioStreamIndex, subtitleStreamIndex, playOptions.startIndex);
}
// this reference was only needed by sendPlaybackListToPlayer
playOptions.items = null;
return getPlaybackMediaSource(player, apiClient, deviceProfile, maxBitrate, item, startPosition, mediaSourceId, audioStreamIndex, subtitleStreamIndex).then(function (mediaSource) {
var streamInfo = createStreamInfo(apiClient, item.MediaType, item, mediaSource, startPosition);
streamInfo.fullscreen = playOptions.fullscreen;
getPlayerData(player).isChangingStream = false;
getPlayerData(player).maxStreamingBitrate = maxBitrate;
return player.play(streamInfo).then(function () {
loading.hide();
onPlaybackStartedFn();
onPlaybackStarted(player, playOptions, streamInfo, mediaSource);
}, function (err) {
// TODO: Improve this because it will report playback start on a failure
onPlaybackStartedFn();
onPlaybackStarted(player, playOptions, streamInfo, mediaSource);
setTimeout(function () {
onPlaybackError.call(player, err, {
type: 'mediadecodeerror',
streamInfo: streamInfo
});
}, 100);
});
});
});
}
self.getPlaybackInfo = function (item, options) {
options = options || {};
var startPosition = options.startPositionTicks || 0;
var mediaType = options.mediaType || item.MediaType;
var player = getPlayer(item, options);
var apiClient = connectionManager.getApiClient(item.ServerId);
// Call this just to ensure the value is recorded; it is needed by getSavedMaxStreamingBitrate
return apiClient.getEndpointInfo().then(function () {
var maxBitrate = getSavedMaxStreamingBitrate(connectionManager.getApiClient(item.ServerId), mediaType);
return player.getDeviceProfile(item).then(function (deviceProfile) {
return getPlaybackMediaSource(player, apiClient, deviceProfile, maxBitrate, item, startPosition, options.mediaSourceId, options.audioStreamIndex, options.subtitleStreamIndex).then(function (mediaSource) {
return createStreamInfo(apiClient, item.MediaType, item, mediaSource, startPosition);
});
});
});
};
self.getPlaybackMediaSources = function (item, options) {
options = options || {};
var startPosition = options.startPositionTicks || 0;
var mediaType = options.mediaType || item.MediaType;
// TODO: Remove the true forceLocalPlayer hack
var player = getPlayer(item, options, true);
var apiClient = connectionManager.getApiClient(item.ServerId);
// Call this just to ensure the value is recorded; it is needed by getSavedMaxStreamingBitrate
return apiClient.getEndpointInfo().then(function () {
var maxBitrate = getSavedMaxStreamingBitrate(connectionManager.getApiClient(item.ServerId), mediaType);
return player.getDeviceProfile(item).then(function (deviceProfile) {
return getPlaybackInfo(player, apiClient, item, deviceProfile, maxBitrate, startPosition, false, null, null, null, null).then(function (playbackInfoResult) {
return playbackInfoResult.MediaSources;
});
});
});
};
function createStreamInfo(apiClient, type, item, mediaSource, startPosition) {
var mediaUrl;
var contentType;
var transcodingOffsetTicks = 0;
var playerStartPositionTicks = startPosition;
var liveStreamId = mediaSource.LiveStreamId;
var playMethod = 'Transcode';
var mediaSourceContainer = (mediaSource.Container || '').toLowerCase();
var directOptions;
if (type === 'Video' || type === 'Audio') {
contentType = getMimeType(type.toLowerCase(), mediaSourceContainer);
if (mediaSource.enableDirectPlay) {
mediaUrl = mediaSource.Path;
playMethod = 'DirectPlay';
} else if (mediaSource.StreamUrl) {
// Only used for audio
playMethod = 'Transcode';
mediaUrl = mediaSource.StreamUrl;
} else if (mediaSource.SupportsDirectStream) {
directOptions = {
Static: true,
mediaSourceId: mediaSource.Id,
deviceId: apiClient.deviceId(),
api_key: apiClient.accessToken()
};
if (mediaSource.ETag) {
directOptions.Tag = mediaSource.ETag;
}
if (mediaSource.LiveStreamId) {
directOptions.LiveStreamId = mediaSource.LiveStreamId;
}
var prefix = type === 'Video' ? 'Videos' : 'Audio';
mediaUrl = apiClient.getUrl(prefix + '/' + item.Id + '/stream.' + mediaSourceContainer, directOptions);
playMethod = 'DirectStream';
} else if (mediaSource.SupportsTranscoding) {
mediaUrl = apiClient.getUrl(mediaSource.TranscodingUrl);
if (mediaSource.TranscodingSubProtocol === 'hls') {
contentType = 'application/x-mpegURL';
} else {
contentType = getMimeType(type.toLowerCase(), mediaSource.TranscodingContainer);
if (mediaUrl.toLowerCase().indexOf('copytimestamps=true') === -1) {
transcodingOffsetTicks = startPosition || 0;
}
}
}
} else {
// All other media types
mediaUrl = mediaSource.Path;
playMethod = 'DirectPlay';
}
// Fallback (used for offline items)
if (!mediaUrl && mediaSource.SupportsDirectPlay) {
mediaUrl = mediaSource.Path;
playMethod = 'DirectPlay';
}
var resultInfo = {
url: mediaUrl,
mimeType: contentType,
transcodingOffsetTicks: transcodingOffsetTicks,
playMethod: playMethod,
playerStartPositionTicks: playerStartPositionTicks,
item: item,
mediaSource: mediaSource,
textTracks: getTextTracks(apiClient, item, mediaSource),
// TODO: Deprecate
tracks: getTextTracks(apiClient, item, mediaSource),
mediaType: type,
liveStreamId: liveStreamId,
playSessionId: getParam('playSessionId', mediaUrl),
title: item.Name
};
var backdropUrl = backdropImageUrl(apiClient, item, {});
if (backdropUrl) {
resultInfo.backdropUrl = backdropUrl;
}
return resultInfo;
}
function getTextTracks(apiClient, item, mediaSource) {
var subtitleStreams = mediaSource.MediaStreams.filter(function (s) {
return s.Type === 'Subtitle';
});
var textStreams = subtitleStreams.filter(function (s) {
return s.DeliveryMethod === 'External';
});
var tracks = [];
for (var i = 0, length = textStreams.length; i < length; i++) {
var textStream = textStreams[i];
var textStreamUrl;
if (itemHelper.isLocalItem(item)) {
textStreamUrl = textStream.Path;
} else {
textStreamUrl = !textStream.IsExternalUrl ? apiClient.getUrl(textStream.DeliveryUrl) : textStream.DeliveryUrl;
}
tracks.push({
url: textStreamUrl,
language: (textStream.Language || 'und'),
isDefault: textStream.Index === mediaSource.DefaultSubtitleStreamIndex,
index: textStream.Index,
format: textStream.Codec
});
}
return tracks;
}
function getPlaybackMediaSource(player, apiClient, deviceProfile, maxBitrate, item, startPosition, mediaSourceId, audioStreamIndex, subtitleStreamIndex) {
return getPlaybackInfo(player, apiClient, item, deviceProfile, maxBitrate, startPosition, true, mediaSourceId, audioStreamIndex, subtitleStreamIndex, null).then(function (playbackInfoResult) {
if (validatePlaybackInfoResult(self, playbackInfoResult)) {
return getOptimalMediaSource(apiClient, item, playbackInfoResult.MediaSources).then(function (mediaSource) {
if (mediaSource) {
if (mediaSource.RequiresOpening && !mediaSource.LiveStreamId) {
return getLiveStream(player, apiClient, item, playbackInfoResult.PlaySessionId, deviceProfile, maxBitrate, startPosition, mediaSource, null, null).then(function (openLiveStreamResult) {
return supportsDirectPlay(apiClient, item, openLiveStreamResult.MediaSource).then(function (result) {
openLiveStreamResult.MediaSource.enableDirectPlay = result;
return openLiveStreamResult.MediaSource;
});
});
} else {
return mediaSource;
}
} else {
showPlaybackInfoErrorMessage(self, 'NoCompatibleStream');
return Promise.reject();
}
});
} else {
return Promise.reject();
}
});
}
function getPlayer(item, playOptions, forceLocalPlayers) {
var serverItem = isServerItem(item);
return getAutomaticPlayers(self, forceLocalPlayers).filter(function (p) {
if (p.canPlayMediaType(item.MediaType)) {
if (serverItem) {
if (p.canPlayItem) {
return p.canPlayItem(item, playOptions);
}
return true;
} else if (item.Url && p.canPlayUrl) {
return p.canPlayUrl(item.Url);
}
}
return false;
})[0];
}
self.setCurrentPlaylistItem = function (playlistItemId, player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.setCurrentPlaylistItem(playlistItemId);
}
var newItem;
var newItemIndex;
var playlist = self._playQueueManager.getPlaylist();
for (var i = 0, length = playlist.length; i < length; i++) {
if (playlist[i].PlaylistItemId === playlistItemId) {
newItem = playlist[i];
newItemIndex = i;
break;
}
}
if (newItem) {
var newItemPlayOptions = newItem.playOptions || getDefaultPlayOptions();
playInternal(newItem, newItemPlayOptions, function () {
setPlaylistState(newItem.PlaylistItemId, newItemIndex);
});
}
};
self.removeFromPlaylist = function (playlistItemIds, player) {
if (!playlistItemIds) {
throw new Error('Invalid playlistItemIds');
}
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.removeFromPlaylist(playlistItemIds);
}
var removeResult = self._playQueueManager.removeFromPlaylist(playlistItemIds);
if (removeResult.result === 'empty') {
return self.stop(player);
}
var isCurrentIndex = removeResult.isCurrentIndex;
events.trigger(player, 'playlistitemremove', [
{
playlistItemIds: playlistItemIds
}]);
if (isCurrentIndex) {
return self.setCurrentPlaylistItem(self._playQueueManager.getPlaylist()[0].PlaylistItemId, player);
}
return Promise.resolve();
};
self.movePlaylistItem = function (playlistItemId, newIndex, player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.movePlaylistItem(playlistItemId, newIndex);
}
var moveResult = self._playQueueManager.movePlaylistItem(playlistItemId, newIndex);
if (moveResult.result === 'noop') {
return;
}
events.trigger(player, 'playlistitemmove', [
{
playlistItemId: moveResult.playlistItemId,
newIndex: moveResult.newIndex
}]);
};
self.getCurrentPlaylistIndex = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getCurrentPlaylistIndex();
}
return self._playQueueManager.getCurrentPlaylistIndex();
};
self.getCurrentPlaylistItemId = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getCurrentPlaylistItemId();
}
return self._playQueueManager.getCurrentPlaylistItemId();
};
self.channelUp = function (player) {
player = player || self._currentPlayer;
return self.nextTrack(player);
};
self.channelDown = function (player) {
player = player || self._currentPlayer;
return self.previousTrack(player);
};
self.nextTrack = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.nextTrack();
}
var newItemInfo = self._playQueueManager.getNextItemInfo();
if (newItemInfo) {
console.debug('playing next track');
var newItemPlayOptions = newItemInfo.item.playOptions || getDefaultPlayOptions();
playInternal(newItemInfo.item, newItemPlayOptions, function () {
setPlaylistState(newItemInfo.item.PlaylistItemId, newItemInfo.index);
});
}
};
self.previousTrack = function (player) {
player = player || self._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.previousTrack();
}
var newIndex = self.getCurrentPlaylistIndex(player) - 1;
if (newIndex >= 0) {
var playlist = self._playQueueManager.getPlaylist();
var newItem = playlist[newIndex];
if (newItem) {
var newItemPlayOptions = newItem.playOptions || getDefaultPlayOptions();
newItemPlayOptions.startPositionTicks = 0;
playInternal(newItem, newItemPlayOptions, function () {
setPlaylistState(newItem.PlaylistItemId, newIndex);
});
}
}
};
self.queue = function (options, player) {
queue(options, '', player);
};
self.queueNext = function (options, player) {
queue(options, 'next', player);
};
function queue(options, mode, player) {
player = player || self._currentPlayer;
if (!player) {
return self.play(options);
}
if (options.items) {
return translateItemsForPlayback(options.items, options).then(function (items) {
// TODO: Handle options.startIndex for photos
queueAll(items, mode, player);
});
} else {
if (!options.serverId) {
throw new Error('serverId required!');
}
return getItemsForPlayback(options.serverId, {
Ids: options.ids.join(',')
}).then(function (result) {
return translateItemsForPlayback(result.Items, options).then(function (items) {
// TODO: Handle options.startIndex for photos
queueAll(items, mode, player);
});
});
}
}
function queueAll(items, mode, player) {
if (!items.length) {
return;
}
if (!player.isLocalPlayer) {
if (mode === 'next') {
player.queueNext({
items: items
});
} else {
player.queue({
items: items
});
}
return;
}
var queueDirectToPlayer = player && !enableLocalPlaylistManagement(player);
if (queueDirectToPlayer) {
var apiClient = connectionManager.getApiClient(items[0].ServerId);
player.getDeviceProfile(items[0]).then(function (profile) {
setStreamUrls(items, profile, self.getMaxStreamingBitrate(player), apiClient, 0).then(function () {
if (mode === 'next') {
player.queueNext(items);
} else {
player.queue(items);
}
});
});
return;
}
if (mode === 'next') {
self._playQueueManager.queueNext(items);
} else {
self._playQueueManager.queue(items);
}
}
function onPlayerProgressInterval() {
var player = this;
sendProgressUpdate(player, 'timeupdate');
}
function startPlaybackProgressTimer(player) {
stopPlaybackProgressTimer(player);
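// Report playback progress to the server every 10 seconds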
player._progressInterval = setInterval(onPlayerProgressInterval.bind(player), 10000);
}
function stopPlaybackProgressTimer(player) {
if (player._progressInterval) {
clearInterval(player._progressInterval);
player._progressInterval = null;
}
}
function onPlaybackStarted(player, playOptions, streamInfo, mediaSource) {
if (!player) {
throw new Error('player cannot be null');
}
setCurrentPlayerInternal(player);
var playerData = getPlayerData(player);
playerData.streamInfo = streamInfo;
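// Wall-clock start time expressed in ticks (Unix epoch milliseconds * 10,000)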
streamInfo.playbackStartTimeTicks = new Date().getTime() * 10000;
if (mediaSource) {
playerData.audioStreamIndex = mediaSource.DefaultAudioStreamIndex;
playerData.subtitleStreamIndex = mediaSource.DefaultSubtitleStreamIndex;
} else {
playerData.audioStreamIndex = null;
playerData.subtitleStreamIndex = null;
}
self._playNextAfterEnded = true;
var isFirstItem = playOptions.isFirstItem;
var fullscreen = playOptions.fullscreen;
var state = self.getPlayerState(player, streamInfo.item, streamInfo.mediaSource);
reportPlayback(self, state, player, true, state.NowPlayingItem.ServerId, 'reportPlaybackStart');
state.IsFirstItem = isFirstItem;
state.IsFullscreen = fullscreen;
events.trigger(player, 'playbackstart', [state]);
events.trigger(self, 'playbackstart', [player, state]);
// only used internally as a safeguard to avoid reporting other events to the server before playback start
streamInfo.started = true;
startPlaybackProgressTimer(player);
}
function onPlaybackStartedFromSelfManagingPlayer(e, item, mediaSource) {
var player = this;
setCurrentPlayerInternal(player);
var playOptions = item.playOptions || getDefaultPlayOptions();
var isFirstItem = playOptions.isFirstItem;
var fullscreen = playOptions.fullscreen;
playOptions.isFirstItem = false;
var playerData = getPlayerData(player);
playerData.streamInfo = {};
var streamInfo = playerData.streamInfo;
streamInfo.playbackStartTimeTicks = new Date().getTime() * 10000;
var state = self.getPlayerState(player, item, mediaSource);
reportPlayback(self, state, player, true, state.NowPlayingItem.ServerId, 'reportPlaybackStart');
state.IsFirstItem = isFirstItem;
state.IsFullscreen = fullscreen;
events.trigger(player, 'playbackstart', [state]);
events.trigger(self, 'playbackstart', [player, state]);
// only used internally as a safeguard to avoid reporting other events to the server before playback start
streamInfo.started = true;
startPlaybackProgressTimer(player);
}
function onPlaybackStoppedFromSelfManagingPlayer(e, playerStopInfo) {
var player = this;
stopPlaybackProgressTimer(player);
var state = self.getPlayerState(player, playerStopInfo.item, playerStopInfo.mediaSource);
var nextItem = playerStopInfo.nextItem;
var nextMediaType = playerStopInfo.nextMediaType;
var playbackStopInfo = {
player: player,
state: state,
nextItem: (nextItem ? nextItem.item : null),
nextMediaType: nextMediaType
};
state.NextMediaType = nextMediaType;
var streamInfo = getPlayerData(player).streamInfo;
// only used internally as a safeguard to avoid reporting other events to the server after playback stopped
streamInfo.ended = true;
if (isServerItem(playerStopInfo.item)) {
state.PlayState.PositionTicks = (playerStopInfo.positionMs || 0) * 10000;
reportPlayback(self, state, player, true, playerStopInfo.item.ServerId, 'reportPlaybackStopped');
}
state.NextItem = playbackStopInfo.nextItem;
events.trigger(player, 'playbackstop', [state]);
events.trigger(self, 'playbackstop', [playbackStopInfo]);
var nextItemPlayOptions = nextItem ? (nextItem.item.playOptions || getDefaultPlayOptions()) : getDefaultPlayOptions();
var newPlayer = nextItem ? getPlayer(nextItem.item, nextItemPlayOptions) : null;
if (newPlayer !== player) {
destroyPlayer(player);
removeCurrentPlayer(player);
}
}
function enablePlaybackRetryWithTranscoding(streamInfo, errorType, currentlyPreventsVideoStreamCopy, currentlyPreventsAudioStreamCopy) {
// mediadecodeerror, medianotsupported, network, servererror
if (streamInfo.mediaSource.SupportsTranscoding && (!currentlyPreventsVideoStreamCopy || !currentlyPreventsAudioStreamCopy)) {
return true;
}
return false;
}
function onPlaybackError(e, error) {
var player = this;
error = error || {};
// network
// mediadecodeerror
// medianotsupported
var errorType = error.type;
console.debug('playbackmanager playback error type: ' + (errorType || ''));
var streamInfo = error.streamInfo || getPlayerData(player).streamInfo;
if (streamInfo) {
var currentlyPreventsVideoStreamCopy = streamInfo.url.toLowerCase().indexOf('allowvideostreamcopy=false') !== -1;
var currentlyPreventsAudioStreamCopy = streamInfo.url.toLowerCase().indexOf('allowaudiostreamcopy=false') !== -1;
// Auto switch to transcoding
if (enablePlaybackRetryWithTranscoding(streamInfo, errorType, currentlyPreventsVideoStreamCopy, currentlyPreventsAudioStreamCopy)) {
var startTime = getCurrentTicks(player) || streamInfo.playerStartPositionTicks;
changeStream(player, startTime, {
// force transcoding
EnableDirectPlay: false,
EnableDirectStream: false,
AllowVideoStreamCopy: false,
AllowAudioStreamCopy: currentlyPreventsAudioStreamCopy || currentlyPreventsVideoStreamCopy ? false : null
});
return;
}
}
var displayErrorCode = 'NoCompatibleStream';
onPlaybackStopped.call(player, e, displayErrorCode);
}
function onPlaybackStopped(e, displayErrorCode) {
var player = this;
if (getPlayerData(player).isChangingStream) {
return;
}
stopPlaybackProgressTimer(player);
// User clicked stop or content ended
var state = self.getPlayerState(player);
var data = getPlayerData(player);
var streamInfo = data.streamInfo;
var nextItem = self._playNextAfterEnded ? self._playQueueManager.getNextItemInfo() : null;
var nextMediaType = (nextItem ? nextItem.item.MediaType : null);
var playbackStopInfo = {
player: player,
state: state,
nextItem: (nextItem ? nextItem.item : null),
nextMediaType: nextMediaType
};
state.NextMediaType = nextMediaType;
if (isServerItem(streamInfo.item)) {
if (player.supportsProgress === false && state.PlayState && !state.PlayState.PositionTicks) {
state.PlayState.PositionTicks = streamInfo.item.RunTimeTicks;
}
// only used internally as a safeguard to avoid reporting other events to the server after playback stopped
streamInfo.ended = true;
reportPlayback(self, state, player, true, streamInfo.item.ServerId, 'reportPlaybackStopped');
}
state.NextItem = playbackStopInfo.nextItem;
if (!nextItem) {
self._playQueueManager.reset();
}
events.trigger(player, 'playbackstop', [state]);
events.trigger(self, 'playbackstop', [playbackStopInfo]);
var nextItemPlayOptions = nextItem ? (nextItem.item.playOptions || getDefaultPlayOptions()) : getDefaultPlayOptions();
var newPlayer = nextItem ? getPlayer(nextItem.item, nextItemPlayOptions) : null;
if (newPlayer !== player) {
destroyPlayer(player);
removeCurrentPlayer(player);
}
if (displayErrorCode && typeof (displayErrorCode) === 'string') {
showPlaybackInfoErrorMessage(self, displayErrorCode, nextItem);
} else if (nextItem) {
self.nextTrack();
} else {
// Nothing more to play - clear data
data.streamInfo = null;
}
}
function onPlaybackChanging(activePlayer, newPlayer, newItem) {
var state = self.getPlayerState(activePlayer);
var serverId = self.currentItem(activePlayer).ServerId;
// User started playing something new while existing content is playing
var promise;
stopPlaybackProgressTimer(activePlayer);
unbindStopped(activePlayer);
if (activePlayer === newPlayer) {
// If we're staying with the same player, stop it
promise = activePlayer.stop(false);
} else {
// If we're switching players, tear down the current one
promise = activePlayer.stop(true);
}
return promise.then(function () {
bindStopped(activePlayer);
if (enableLocalPlaylistManagement(activePlayer)) {
reportPlayback(self, state, activePlayer, true, serverId, 'reportPlaybackStopped');
}
events.trigger(self, 'playbackstop', [{
player: activePlayer,
state: state,
nextItem: newItem,
nextMediaType: newItem.MediaType
}]);
});
}
function bindStopped(player) {
if (enableLocalPlaylistManagement(player)) {
events.off(player, 'stopped', onPlaybackStopped);
events.on(player, 'stopped', onPlaybackStopped);
}
}
function onPlaybackTimeUpdate(e) {
var player = this;
sendProgressUpdate(player, 'timeupdate');
}
function onPlaybackPause(e) {
var player = this;
sendProgressUpdate(player, 'pause');
}
function onPlaybackUnpause(e) {
var player = this;
sendProgressUpdate(player, 'unpause');
}
function onPlaybackVolumeChange(e) {
var player = this;
sendProgressUpdate(player, 'volumechange');
}
function onRepeatModeChange(e) {
var player = this;
sendProgressUpdate(player, 'repeatmodechange');
}
function onPlaylistItemMove(e) {
var player = this;
sendProgressUpdate(player, 'playlistitemmove', true);
}
function onPlaylistItemRemove(e) {
var player = this;
sendProgressUpdate(player, 'playlistitemremove', true);
}
function onPlaylistItemAdd(e) {
var player = this;
sendProgressUpdate(player, 'playlistitemadd', true);
}
function unbindStopped(player) {
events.off(player, 'stopped', onPlaybackStopped);
}
function initLegacyVolumeMethods(player) {
player.getVolume = function () {
return player.volume();
};
player.setVolume = function (val) {
return player.volume(val);
};
}
function initMediaPlayer(player) {
players.push(player);
players.sort(function (a, b) {
return (a.priority || 0) - (b.priority || 0);
});
if (player.isLocalPlayer !== false) {
player.isLocalPlayer = true;
}
player.currentState = {};
if (!player.getVolume || !player.setVolume) {
initLegacyVolumeMethods(player);
}
if (enableLocalPlaylistManagement(player)) {
events.on(player, 'error', onPlaybackError);
events.on(player, 'timeupdate', onPlaybackTimeUpdate);
events.on(player, 'pause', onPlaybackPause);
events.on(player, 'unpause', onPlaybackUnpause);
events.on(player, 'volumechange', onPlaybackVolumeChange);
events.on(player, 'repeatmodechange', onRepeatModeChange);
events.on(player, 'playlistitemmove', onPlaylistItemMove);
events.on(player, 'playlistitemremove', onPlaylistItemRemove);
events.on(player, 'playlistitemadd', onPlaylistItemAdd);
} else if (player.isLocalPlayer) {
events.on(player, 'itemstarted', onPlaybackStartedFromSelfManagingPlayer);
events.on(player, 'itemstopped', onPlaybackStoppedFromSelfManagingPlayer);
events.on(player, 'timeupdate', onPlaybackTimeUpdate);
events.on(player, 'pause', onPlaybackPause);
events.on(player, 'unpause', onPlaybackUnpause);
events.on(player, 'volumechange', onPlaybackVolumeChange);
events.on(player, 'repeatmodechange', onRepeatModeChange);
events.on(player, 'playlistitemmove', onPlaylistItemMove);
events.on(player, 'playlistitemremove', onPlaylistItemRemove);
events.on(player, 'playlistitemadd', onPlaylistItemAdd);
}
if (player.isLocalPlayer) {
bindToFullscreenChange(player);
}
bindStopped(player);
}
events.on(pluginManager, 'registered', function (e, plugin) {
if (plugin.type === 'mediaplayer') {
initMediaPlayer(plugin);
}
});
pluginManager.ofType('mediaplayer').map(initMediaPlayer);
function sendProgressUpdate(player, progressEventName, reportPlaylist) {
if (!player) {
throw new Error('player cannot be null');
}
var state = self.getPlayerState(player);
if (state.NowPlayingItem) {
var serverId = state.NowPlayingItem.ServerId;
var streamInfo = getPlayerData(player).streamInfo;
if (streamInfo && streamInfo.started && !streamInfo.ended) {
reportPlayback(self, state, player, reportPlaylist, serverId, 'reportPlaybackProgress', progressEventName);
}
if (streamInfo && streamInfo.liveStreamId) {
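// Refresh live stream media info at most once every 10 minutes (600,000 ms)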
if (new Date().getTime() - (streamInfo.lastMediaInfoQuery || 0) >= 600000) {
getLiveStreamMediaInfo(player, streamInfo, self.currentMediaSource(player), streamInfo.liveStreamId, serverId);
}
}
}
}
function getLiveStreamMediaInfo(player, streamInfo, mediaSource, liveStreamId, serverId) {
console.debug('getLiveStreamMediaInfo');
streamInfo.lastMediaInfoQuery = new Date().getTime();
var apiClient = connectionManager.getApiClient(serverId);
if (!apiClient.isMinServerVersion('3.2.70.7')) {
return;
}
connectionManager.getApiClient(serverId).getLiveStreamMediaInfo(liveStreamId).then(function (info) {
mediaSource.MediaStreams = info.MediaStreams;
events.trigger(player, 'mediastreamschange');
}, function () {
});
}
self.onAppClose = function () {
var player = this._currentPlayer;
// Try to report playback stopped before the app closes
if (player && this.isPlaying(player)) {
this._playNextAfterEnded = false;
onPlaybackStopped.call(player);
}
};
self.playbackStartTime = function (player) {
player = player || this._currentPlayer;
if (player && !enableLocalPlaylistManagement(player) && !player.isLocalPlayer) {
return player.playbackStartTime();
}
var streamInfo = getPlayerData(player).streamInfo;
return streamInfo ? streamInfo.playbackStartTimeTicks : null;
};
if (apphost.supports('remotecontrol')) {
require(['serverNotifications'], function (serverNotifications) {
events.on(serverNotifications, 'ServerShuttingDown', self.setDefaultPlayerActive.bind(self));
events.on(serverNotifications, 'ServerRestarting', self.setDefaultPlayerActive.bind(self));
});
}
}
PlaybackManager.prototype.getCurrentPlayer = function () {
return this._currentPlayer;
};
PlaybackManager.prototype.currentTime = function (player) {
player = player || this._currentPlayer;
if (player && !enableLocalPlaylistManagement(player) && !player.isLocalPlayer) {
return player.currentTime();
}
return this.getCurrentTicks(player);
};
PlaybackManager.prototype.nextItem = function (player) {
player = player || this._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.nextItem();
}
var nextItem = this._playQueueManager.getNextItemInfo();
if (!nextItem || !nextItem.item) {
return Promise.reject();
}
var apiClient = connectionManager.getApiClient(nextItem.item.ServerId);
return apiClient.getItem(apiClient.getCurrentUserId(), nextItem.item.Id);
};
PlaybackManager.prototype.canQueue = function (item) {
if (item.Type === 'MusicAlbum' || item.Type === 'MusicArtist' || item.Type === 'MusicGenre') {
return this.canQueueMediaType('Audio');
}
return this.canQueueMediaType(item.MediaType);
};
PlaybackManager.prototype.canQueueMediaType = function (mediaType) {
if (this._currentPlayer) {
return this._currentPlayer.canPlayMediaType(mediaType);
}
return false;
};
PlaybackManager.prototype.isMuted = function (player) {
player = player || this._currentPlayer;
if (player) {
return player.isMuted();
}
return false;
};
PlaybackManager.prototype.setMute = function (mute, player) {
player = player || this._currentPlayer;
if (player) {
player.setMute(mute);
}
};
PlaybackManager.prototype.toggleMute = function (mute, player) {
player = player || this._currentPlayer;
if (player) {
if (player.toggleMute) {
player.toggleMute();
} else {
player.setMute(!player.isMuted());
}
}
};
PlaybackManager.prototype.toggleDisplayMirroring = function () {
this.enableDisplayMirroring(!this.enableDisplayMirroring());
};
PlaybackManager.prototype.enableDisplayMirroring = function (enabled) {
if (enabled != null) {
var val = enabled ? '1' : '0';
appSettings.set('displaymirror', val);
return;
}
return (appSettings.get('displaymirror') || '') !== '0';
};
PlaybackManager.prototype.nextChapter = function (player) {
player = player || this._currentPlayer;
var item = this.currentItem(player);
var ticks = this.getCurrentTicks(player);
var nextChapter = (item.Chapters || []).filter(function (i) {
return i.StartPositionTicks > ticks;
})[0];
if (nextChapter) {
this.seek(nextChapter.StartPositionTicks, player);
} else {
this.nextTrack(player);
}
};
PlaybackManager.prototype.previousChapter = function (player) {
player = player || this._currentPlayer;
var item = this.currentItem(player);
var ticks = this.getCurrentTicks(player);
// Go back 10 seconds
ticks -= 100000000;
// If there's no previous track, then at least rewind to beginning
if (this.getCurrentPlaylistIndex(player) === 0) {
ticks = Math.max(ticks, 0);
}
var previousChapters = (item.Chapters || []).filter(function (i) {
return i.StartPositionTicks <= ticks;
});
if (previousChapters.length) {
this.seek(previousChapters[previousChapters.length - 1].StartPositionTicks, player);
} else {
this.previousTrack(player);
}
};
PlaybackManager.prototype.fastForward = function (player) {
player = player || this._currentPlayer;
if (player.fastForward != null) {
player.fastForward(userSettings.skipForwardLength());
return;
}
// Skip forward by the configured skip length
var offsetTicks = userSettings.skipForwardLength() * 10000;
this.seekRelative(offsetTicks, player);
};
PlaybackManager.prototype.rewind = function (player) {
player = player || this._currentPlayer;
if (player.rewind != null) {
player.rewind(userSettings.skipBackLength());
return;
}
// Skip back by the configured skip length
var offsetTicks = 0 - (userSettings.skipBackLength() * 10000);
this.seekRelative(offsetTicks, player);
};
PlaybackManager.prototype.seekPercent = function (percent, player) {
player = player || this._currentPlayer;
var ticks = this.duration(player) || 0;
percent /= 100;
ticks *= percent;
this.seek(parseInt(ticks), player);
};
PlaybackManager.prototype.playTrailers = function (item) {
var player = this._currentPlayer;
if (player && player.playTrailers) {
return player.playTrailers(item);
}
var apiClient = connectionManager.getApiClient(item.ServerId);
var instance = this;
if (item.LocalTrailerCount) {
return apiClient.getLocalTrailers(apiClient.getCurrentUserId(), item.Id).then(function (result) {
return instance.play({
items: result
});
});
} else {
var remoteTrailers = item.RemoteTrailers || [];
if (!remoteTrailers.length) {
return Promise.reject();
}
return this.play({
items: remoteTrailers.map(function (t) {
return {
Name: t.Name || (item.Name + ' Trailer'),
Url: t.Url,
MediaType: 'Video',
Type: 'Trailer',
ServerId: apiClient.serverId()
};
})
});
}
};
PlaybackManager.prototype.getSubtitleUrl = function (textStream, serverId) {
var apiClient = connectionManager.getApiClient(serverId);
var textStreamUrl = !textStream.IsExternalUrl ? apiClient.getUrl(textStream.DeliveryUrl) : textStream.DeliveryUrl;
return textStreamUrl;
};
PlaybackManager.prototype.stop = function (player) {
player = player || this._currentPlayer;
if (player) {
if (enableLocalPlaylistManagement(player)) {
this._playNextAfterEnded = false;
}
// TODO: remove second param
return player.stop(true, true);
}
return Promise.resolve();
};
PlaybackManager.prototype.getBufferedRanges = function (player) {
player = player || this._currentPlayer;
if (player) {
if (player.getBufferedRanges) {
return player.getBufferedRanges();
}
}
return [];
};
PlaybackManager.prototype.playPause = function (player) {
player = player || this._currentPlayer;
if (player) {
if (player.playPause) {
return player.playPause();
}
if (player.paused()) {
return this.unpause(player);
} else {
return this.pause(player);
}
}
};
PlaybackManager.prototype.paused = function (player) {
player = player || this._currentPlayer;
if (player) {
return player.paused();
}
};
PlaybackManager.prototype.pause = function (player) {
player = player || this._currentPlayer;
if (player) {
player.pause();
}
};
PlaybackManager.prototype.unpause = function (player) {
player = player || this._currentPlayer;
if (player) {
player.unpause();
}
};
PlaybackManager.prototype.instantMix = function (item, player) {
player = player || this._currentPlayer;
if (player && player.instantMix) {
return player.instantMix(item);
}
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {};
options.UserId = apiClient.getCurrentUserId();
options.Limit = 200;
var instance = this;
apiClient.getInstantMixFromItem(item.Id, options).then(function (result) {
instance.play({
items: result.Items
});
});
};
PlaybackManager.prototype.shuffle = function (shuffleItem, player, queryOptions) {
player = player || this._currentPlayer;
if (player && player.shuffle) {
return player.shuffle(shuffleItem);
}
return this.play({ items: [shuffleItem], shuffle: true });
};
PlaybackManager.prototype.audioTracks = function (player) {
player = player || this._currentPlayer;
if (player.audioTracks) {
var result = player.audioTracks();
if (result) {
return result;
}
}
var mediaSource = this.currentMediaSource(player);
var mediaStreams = (mediaSource || {}).MediaStreams || [];
return mediaStreams.filter(function (s) {
return s.Type === 'Audio';
});
};
PlaybackManager.prototype.subtitleTracks = function (player) {
player = player || this._currentPlayer;
if (player.subtitleTracks) {
var result = player.subtitleTracks();
if (result) {
return result;
}
}
var mediaSource = this.currentMediaSource(player);
var mediaStreams = (mediaSource || {}).MediaStreams || [];
return mediaStreams.filter(function (s) {
return s.Type === 'Subtitle';
});
};
PlaybackManager.prototype.getSupportedCommands = function (player) {
player = player || this._currentPlayer || { isLocalPlayer: true };
if (player.isLocalPlayer) {
var list = [
'GoHome',
'GoToSettings',
'VolumeUp',
'VolumeDown',
'Mute',
'Unmute',
'ToggleMute',
'SetVolume',
'SetAudioStreamIndex',
'SetSubtitleStreamIndex',
'SetMaxStreamingBitrate',
'DisplayContent',
'GoToSearch',
'DisplayMessage',
'SetRepeatMode',
'PlayMediaSource',
'PlayTrailers'
];
if (apphost.supports('fullscreenchange')) {
list.push('ToggleFullscreen');
}
if (player.supports) {
if (player.supports('PictureInPicture')) {
list.push('PictureInPicture');
}
if (player.supports('AirPlay')) {
list.push('AirPlay');
}
if (player.supports('SetBrightness')) {
list.push('SetBrightness');
}
if (player.supports('SetAspectRatio')) {
list.push('SetAspectRatio');
}
}
return list;
}
var info = this.getPlayerInfo();
return info ? info.supportedCommands : [];
};
PlaybackManager.prototype.setRepeatMode = function (value, player) {
player = player || this._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.setRepeatMode(value);
}
this._playQueueManager.setRepeatMode(value);
events.trigger(player, 'repeatmodechange');
};
PlaybackManager.prototype.getRepeatMode = function (player) {
player = player || this._currentPlayer;
if (player && !enableLocalPlaylistManagement(player)) {
return player.getRepeatMode();
}
return this._playQueueManager.getRepeatMode();
};
PlaybackManager.prototype.trySetActiveDeviceName = function (name) {
name = normalizeName(name);
var instance = this;
instance.getTargets().then(function (result) {
var target = result.filter(function (p) {
return normalizeName(p.name) === name;
})[0];
if (target) {
instance.trySetActivePlayer(target.playerName, target);
}
});
};
PlaybackManager.prototype.displayContent = function (options, player) {
player = player || this._currentPlayer;
if (player && player.displayContent) {
player.displayContent(options);
}
};
PlaybackManager.prototype.beginPlayerUpdates = function (player) {
if (player.beginPlayerUpdates) {
player.beginPlayerUpdates();
}
};
PlaybackManager.prototype.endPlayerUpdates = function (player) {
if (player.endPlayerUpdates) {
player.endPlayerUpdates();
}
};
PlaybackManager.prototype.setDefaultPlayerActive = function () {
this.setActivePlayer('localplayer');
};
PlaybackManager.prototype.removeActivePlayer = function (name) {
var playerInfo = this.getPlayerInfo();
if (playerInfo) {
if (playerInfo.name === name) {
this.setDefaultPlayerActive();
}
}
};
PlaybackManager.prototype.removeActiveTarget = function (id) {
var playerInfo = this.getPlayerInfo();
if (playerInfo) {
if (playerInfo.id === id) {
this.setDefaultPlayerActive();
}
}
};
PlaybackManager.prototype.sendCommand = function (cmd, player) {
console.debug('MediaController received command: ' + cmd.Name);
switch (cmd.Name) {
case 'SetRepeatMode':
this.setRepeatMode(cmd.Arguments.RepeatMode, player);
break;
case 'VolumeUp':
this.volumeUp(player);
break;
case 'VolumeDown':
this.volumeDown(player);
break;
case 'Mute':
this.setMute(true, player);
break;
case 'Unmute':
this.setMute(false, player);
break;
case 'ToggleMute':
this.toggleMute(player);
break;
case 'SetVolume':
this.setVolume(cmd.Arguments.Volume, player);
break;
case 'SetAspectRatio':
this.setAspectRatio(cmd.Arguments.AspectRatio, player);
break;
case 'SetBrightness':
this.setBrightness(cmd.Arguments.Brightness, player);
break;
case 'SetAudioStreamIndex':
this.setAudioStreamIndex(parseInt(cmd.Arguments.Index), player);
break;
case 'SetSubtitleStreamIndex':
this.setSubtitleStreamIndex(parseInt(cmd.Arguments.Index), player);
break;
case 'SetMaxStreamingBitrate':
// todo
//this.setMaxStreamingBitrate(parseInt(cmd.Arguments.Bitrate), player);
break;
case 'ToggleFullscreen':
this.toggleFullscreen(player);
break;
default:
if (player.sendCommand) {
player.sendCommand(cmd);
}
break;
}
};
return new PlaybackManager();
});
| 1 | 14,664 | Is this equivalent in JavaScript? | jellyfin-jellyfin-web | js |
@@ -16,8 +16,8 @@ void GetEdgeIndexProcessor::process(const cpp2::GetEdgeIndexReq& req) {
auto edgeIndexIDRet = getIndexID(spaceID, indexName);
if (!nebula::ok(edgeIndexIDRet)) {
auto retCode = nebula::error(edgeIndexIDRet);
- LOG(ERROR) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName
- << " failed, error: " << apache::thrift::util::enumNameSafe(retCode);
+ LOG(INFO) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName
+ << " failed, error: " << apache::thrift::util::enumNameSafe(retCode);
handleErrorCode(retCode);
onFinished();
return; | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "meta/processors/index/GetEdgeIndexProcessor.h"
namespace nebula {
namespace meta {
void GetEdgeIndexProcessor::process(const cpp2::GetEdgeIndexReq& req) {
auto spaceID = req.get_space_id();
CHECK_SPACE_ID_AND_RETURN(spaceID);
auto indexName = req.get_index_name();
folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeIndexLock());
auto edgeIndexIDRet = getIndexID(spaceID, indexName);
if (!nebula::ok(edgeIndexIDRet)) {
auto retCode = nebula::error(edgeIndexIDRet);
LOG(ERROR) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName
<< " failed, error: " << apache::thrift::util::enumNameSafe(retCode);
handleErrorCode(retCode);
onFinished();
return;
}
auto indexId = nebula::value(edgeIndexIDRet);
LOG(INFO) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName;
const auto& indexKey = MetaKeyUtils::indexKey(spaceID, indexId);
auto indexItemRet = doGet(indexKey);
if (!nebula::ok(indexItemRet)) {
auto retCode = nebula::error(indexItemRet);
if (retCode == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) {
retCode = nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND;
}
LOG(ERROR) << "Get Edge Index Failed: SpaceID " << spaceID << " Index Name: " << indexName
<< " error: " << apache::thrift::util::enumNameSafe(retCode);
handleErrorCode(retCode);
onFinished();
return;
}
auto item = MetaKeyUtils::parseIndex(nebula::value(indexItemRet));
if (item.get_schema_id().getType() != nebula::cpp2::SchemaID::Type::edge_type) {
LOG(ERROR) << "Get Edge Index Failed: Index Name " << indexName << " is not EdgeIndex";
resp_.code_ref() = nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND;
onFinished();
return;
}
handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED);
resp_.item_ref() = std::move(item);
onFinished();
}
} // namespace meta
} // namespace nebula
| 1 | 33,170 | get/list operation is not schema change, it is supposed to use VLOG | vesoft-inc-nebula | cpp |
@@ -92,7 +92,7 @@ namespace System.Diagnostics.Metrics
T delta,
params KeyValuePair<string, object?>[] tags)
{
- this.RecordMeasurement(delta, tags);
+ this.RecordMeasurement(delta, new ReadOnlySpan<KeyValuePair<string, object?>>(tags));
}
}
} | 1 | // <copyright file="Counter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Collections.Generic;
#nullable enable
namespace System.Diagnostics.Metrics
{
/// <summary>
/// The counter is a non-observable Instrument that supports non-negative increments.
/// e.g. Number of completed requests.
/// </summary>
/// <typeparam name="T">TBD.</typeparam>
public sealed class Counter<T> : Instrument<T>
where T : struct
{
internal Counter(Meter meter, string name, string? unit, string? description)
: base(meter, name, unit, description)
{
this.Publish();
}
/// <summary>
/// TBD.
/// </summary>
public void Add(T delta)
{
this.RecordMeasurement(delta);
}
/// <summary>
/// TBD.
/// </summary>
public void Add(
T delta,
KeyValuePair<string, object?> tag1)
{
this.RecordMeasurement(delta, tag1);
}
/// <summary>
/// TBD.
/// </summary>
public void Add(
T delta,
KeyValuePair<string, object?> tag1,
KeyValuePair<string, object?> tag2)
{
this.RecordMeasurement(delta, tag1, tag2);
}
/// <summary>
/// TBD.
/// </summary>
public void Add(
T delta,
KeyValuePair<string, object?> tag1,
KeyValuePair<string, object?> tag2,
KeyValuePair<string, object?> tag3)
{
this.RecordMeasurement(delta, tag1, tag2, tag3);
}
/// <summary>
/// TBD.
/// </summary>
public void Add(
T delta,
ReadOnlySpan<KeyValuePair<string, object?>> tags)
{
this.RecordMeasurement(delta, tags);
}
/// <summary>
/// TBD.
/// </summary>
public void Add(
T delta,
params KeyValuePair<string, object?>[] tags)
{
this.RecordMeasurement(delta, tags);
}
}
}
| 1 | 20,240 | we'll need to delete this whole file, right? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -41,6 +41,13 @@ public interface Table {
/**
* Refresh the current table metadata.
+ *
+ * <p>If this table is associated with a TransactionalCatalog, this refresh will be bounded by
+ * the visibility that the {@code IsolationLevel} of that transaction exposes. For example, if
+ * we are in a context of {@code READ_COMMITTED}, this refresh will update to the latest state
+ * of the table. However, in the case of {@code SERIALIZABLE} where this table hasn't mutated
+ * within this transaction, calling refresh will have no impact as the isolation level
+ * constrains all observations to within the transactional snapshot.
*/
void refresh();
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Return the full name for this table.
*
* @return this table's name
*/
default String name() {
return toString();
}
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of {@link PartitionSpec partition specs} for this table.
*
* @return this table's partition specs map
*/
Map<Integer, PartitionSpec> specs();
/**
* Return the {@link SortOrder sort order} for this table.
*
* @return this table's sort order
*/
SortOrder sortOrder();
/**
* Return a map of sort order IDs to {@link SortOrder sort orders} for this table.
*
* @return this table's sort orders map
*/
Map<Integer, SortOrder> sortOrders();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data files.
*
* @return a new {@link RowDelta}
*/
RowDelta newRowDelta();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
* @deprecated Replaced by {@link #manageSnapshots()}
*/
@Deprecated
Rollback rollback();
/**
* Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and commit.
* @return a new {@link ManageSnapshots}
*/
ManageSnapshots manageSnapshots();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* Returns a {@link FileIO} to read and write table data and metadata files.
*/
FileIO io();
/**
* Returns an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data files.
*/
EncryptionManager encryption();
/**
* Returns a {@link LocationProvider} to provide locations for new data files.
*/
LocationProvider locationProvider();
}
| 1 | 29,003 | "this table hasn't mutated within this transaction" may sound like implying that if this transaction contains table mutation changes, `refresh` may have impact, which I think is not true? I guess what you were saying was if other transactions committed to this table successfully when this transaction is half way through, refresh in this transaction will still return the same state as when this transaction begins. | apache-iceberg | java |
@@ -143,6 +143,18 @@ const (
VenafiPickupIDAnnotationKey = "venafi.cert-manager.io/pickup-id"
)
+// TODO
+const (
+ // TODO
+ CertificateSigningRequestDurationAnnotationKey = "cert-manager.io/request-duration"
+
+ // TODO
+ CertificateSigningRequestIsCAAnnotationKey = "cert-manager.io/request-is-ca"
+
+ // TODO
+ CertificateSigningRequestCAAnnotationKey = "cert-manager.io/ca"
+)
+
// KeyUsage specifies valid usage contexts for keys.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// Common annotation keys added to resources.
const (
// Annotation key for DNS subjectAltNames.
AltNamesAnnotationKey = "cert-manager.io/alt-names"
// Annotation key for IP subjectAltNames.
IPSANAnnotationKey = "cert-manager.io/ip-sans"
// Annotation key for URI subjectAltNames.
URISANAnnotationKey = "cert-manager.io/uri-sans"
// Annotation key for certificate common name.
CommonNameAnnotationKey = "cert-manager.io/common-name"
	// Annotation key for certificate duration.
DurationAnnotationKey = "cert-manager.io/duration"
// Annotation key for certificate renewBefore.
RenewBeforeAnnotationKey = "cert-manager.io/renew-before"
// Annotation key for certificate key usages.
UsagesAnnotationKey = "cert-manager.io/usages"
	// Annotation key for the 'name' of the Issuer resource.
IssuerNameAnnotationKey = "cert-manager.io/issuer-name"
// Annotation key for the 'kind' of the Issuer resource.
IssuerKindAnnotationKey = "cert-manager.io/issuer-kind"
// Annotation key for the 'group' of the Issuer resource.
IssuerGroupAnnotationKey = "cert-manager.io/issuer-group"
// Annotation key for the name of the certificate that a resource is related to.
CertificateNameKey = "cert-manager.io/certificate-name"
// Annotation key used to denote whether a Secret is named on a Certificate
// as a 'next private key' Secret resource.
IsNextPrivateKeySecretLabelKey = "cert-manager.io/next-private-key"
)
const (
// issuerNameAnnotation can be used to override the issuer specified on the
// created Certificate resource.
IngressIssuerNameAnnotationKey = "cert-manager.io/issuer"
// clusterIssuerNameAnnotation can be used to override the issuer specified on the
// created Certificate resource. The Certificate will reference the
// specified *ClusterIssuer* instead of normal issuer.
IngressClusterIssuerNameAnnotationKey = "cert-manager.io/cluster-issuer"
// acmeIssuerHTTP01IngressClassAnnotation can be used to override the http01 ingressClass
// if the challenge type is set to http01
IngressACMEIssuerHTTP01IngressClassAnnotationKey = "acme.cert-manager.io/http01-ingress-class"
// IngressClassAnnotationKey picks a specific "class" for the Ingress. The
// controller only processes Ingresses with this annotation either unset, or
// set to either the configured value or the empty string.
IngressClassAnnotationKey = "kubernetes.io/ingress.class"
)
// Annotation names for CertificateRequests
const (
// Annotation added to CertificateRequest resources to denote the name of
// a Secret resource containing the private key used to sign the CSR stored
// on the resource.
// This annotation *may* not be present, and is used by the 'self signing'
// issuer type to self-sign certificates.
CertificateRequestPrivateKeyAnnotationKey = "cert-manager.io/private-key-secret-name"
// Annotation to declare the CertificateRequest "revision", belonging to a Certificate Resource
CertificateRequestRevisionAnnotationKey = "cert-manager.io/certificate-revision"
)
const (
// IssueTemporaryCertificateAnnotation is an annotation that can be added to
// Certificate resources.
// If it is present, a temporary internally signed certificate will be
// stored in the target Secret resource whilst the real Issuer is processing
// the certificate request.
IssueTemporaryCertificateAnnotation = "cert-manager.io/issue-temporary-certificate"
)
// Common/known resource kinds.
const (
ClusterIssuerKind = "ClusterIssuer"
IssuerKind = "Issuer"
CertificateKind = "Certificate"
CertificateRequestKind = "CertificateRequest"
)
const (
// WantInjectAnnotation is the annotation that specifies that a particular
// object wants injection of CAs. It takes the form of a reference to a certificate
// as namespace/name. The certificate is expected to have the is-serving-for annotations.
WantInjectAnnotation = "cert-manager.io/inject-ca-from"
// WantInjectAPIServerCAAnnotation, if set to "true", will make the cainjector
// inject the CA certificate for the Kubernetes apiserver into the resource.
// It discovers the apiserver's CA by inspecting the service account credentials
// mounted into the cainjector pod.
WantInjectAPIServerCAAnnotation = "cert-manager.io/inject-apiserver-ca"
// WantInjectFromSecretAnnotation is the annotation that specifies that a particular
// object wants injection of CAs. It takes the form of a reference to a Secret
// as namespace/name.
WantInjectFromSecretAnnotation = "cert-manager.io/inject-ca-from-secret"
// AllowsInjectionFromSecretAnnotation is an annotation that must be added
	// to Secret resources to denote that they can be directly
	// injected into injectables that have an `inject-ca-from-secret` annotation.
// If an injectable references a Secret that does NOT have this annotation,
// the cainjector will refuse to inject the secret.
AllowsInjectionFromSecretAnnotation = "cert-manager.io/allow-direct-injection"
)
// Issuer specific Annotations
const (
// VenafiCustomFieldsAnnotationKey is the annotation that passes on JSON encoded custom fields to the Venafi issuer
// This will only work with Venafi TPP v19.3 and higher
// The value is an array with objects containing the name and value keys
// for example: `[{"name": "custom-field", "value": "custom-value"}]`
VenafiCustomFieldsAnnotationKey = "venafi.cert-manager.io/custom-fields"
// VenafiPickupIDAnnotationKey is the annotation key used to record the
// Venafi Pickup ID of a certificate signing request that has been submitted
// to the Venafi API for collection later.
VenafiPickupIDAnnotationKey = "venafi.cert-manager.io/pickup-id"
)
// KeyUsage specifies valid usage contexts for keys.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
// Valid KeyUsage values are as follows:
// "signing",
// "digital signature",
// "content commitment",
// "key encipherment",
// "key agreement",
// "data encipherment",
// "cert sign",
// "crl sign",
// "encipher only",
// "decipher only",
// "any",
// "server auth",
// "client auth",
// "code signing",
// "email protection",
// "s/mime",
// "ipsec end system",
// "ipsec tunnel",
// "ipsec user",
// "timestamping",
// "ocsp signing",
// "microsoft sgc",
// "netscape sgc"
// +kubebuilder:validation:Enum="signing";"digital signature";"content commitment";"key encipherment";"key agreement";"data encipherment";"cert sign";"crl sign";"encipher only";"decipher only";"any";"server auth";"client auth";"code signing";"email protection";"s/mime";"ipsec end system";"ipsec tunnel";"ipsec user";"timestamping";"ocsp signing";"microsoft sgc";"netscape sgc"
type KeyUsage string
const (
UsageSigning KeyUsage = "signing"
UsageDigitalSignature KeyUsage = "digital signature"
UsageContentCommittment KeyUsage = "content commitment"
UsageKeyEncipherment KeyUsage = "key encipherment"
UsageKeyAgreement KeyUsage = "key agreement"
UsageDataEncipherment KeyUsage = "data encipherment"
UsageCertSign KeyUsage = "cert sign"
UsageCRLSign KeyUsage = "crl sign"
UsageEncipherOnly KeyUsage = "encipher only"
UsageDecipherOnly KeyUsage = "decipher only"
UsageAny KeyUsage = "any"
UsageServerAuth KeyUsage = "server auth"
UsageClientAuth KeyUsage = "client auth"
UsageCodeSigning KeyUsage = "code signing"
UsageEmailProtection KeyUsage = "email protection"
UsageSMIME KeyUsage = "s/mime"
UsageIPsecEndSystem KeyUsage = "ipsec end system"
UsageIPsecTunnel KeyUsage = "ipsec tunnel"
UsageIPsecUser KeyUsage = "ipsec user"
UsageTimestamping KeyUsage = "timestamping"
UsageOCSPSigning KeyUsage = "ocsp signing"
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
UsageNetscapeSGC KeyUsage = "netscape sgc"
)
// DefaultKeyUsages contains the default list of key usages
func DefaultKeyUsages() []KeyUsage {
// The serverAuth EKU is required as of Mac OS Catalina: https://support.apple.com/en-us/HT210176
// Without this usage, certificates will _always_ flag a warning in newer Mac OS browsers.
// We don't explicitly add it here as it leads to strange behaviour when a user sets isCA: true
// (in which case, 'serverAuth' on the CA can break a lot of clients).
// CAs can (and often do) opt to automatically add usages.
return []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment}
}
| 1 | 26,919 | Can we move these into some kind of experimental API group? I don't think we want to call them `v1` just yet :) | jetstack-cert-manager | go |
@@ -31,6 +31,8 @@ class Section < ActiveRecord::Base
include ActsAsSortable
include VersionableModel
+ # Sort order: Number ASC
+ default_scope { order(number: :asc) }
# ================
# = Associations = | 1 | # frozen_string_literal: true
# == Schema Information
#
# Table name: sections
#
# id :integer not null, primary key
# description :text
# modifiable :boolean
# number :integer
# title :string
# created_at :datetime
# updated_at :datetime
# phase_id :integer
# versionable_id :string(36)
#
# Indexes
#
# index_sections_on_phase_id (phase_id)
# index_sections_on_versionable_id (versionable_id)
#
# Foreign Keys
#
# fk_rails_... (phase_id => phases.id)
#
class Section < ActiveRecord::Base
include ValidationMessages
include ValidationValues
include ActsAsSortable
include VersionableModel
# ================
# = Associations =
# ================
belongs_to :phase
belongs_to :organisation
has_many :questions, dependent: :destroy
has_one :template, through: :phase
# ===============
# = Validations =
# ===============
validates :phase, presence: { message: PRESENCE_MESSAGE }
validates :title, presence: { message: PRESENCE_MESSAGE }
# validates :description, presence: { message: PRESENCE_MESSAGE }
validates :number, presence: { message: PRESENCE_MESSAGE },
uniqueness: { scope: :phase_id,
message: UNIQUENESS_MESSAGE }
validates :modifiable, inclusion: { in: BOOLEAN_VALUES,
message: INCLUSION_MESSAGE }
# =============
# = Callbacks =
# =============
# TODO: Move this down to DB constraints
before_validation :set_modifiable
before_validation :set_number, if: :phase_id_changed?
# =====================
# = Nested Attributes =
# =====================
accepts_nested_attributes_for :questions,
reject_if: -> (a) { a[:text].blank? },
allow_destroy: true
# ==========
# = Scopes =
# ==========
# The sections for this Phase that have been added by the admin
#
# Returns ActiveRecord::Relation
scope :modifiable, -> { where(modifiable: true) }
# The sections for this Phase that were part of the original Template
#
# Returns ActiveRecord::Relation
scope :not_modifiable, -> { where(modifiable: false) }
# ===========================
# = Public instance methods =
# ===========================
# The title of the Section
#
# Returns String
def to_s
"#{title}"
end
# Returns the number of answered questions for a given plan
def num_answered_questions(plan)
return 0 if plan.nil?
plan.answers.includes({ question: :question_format }, :question_options)
.where(question_id: question_ids)
.to_a
.count(&:is_valid?)
end
def deep_copy(**options)
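    # Build a copy of this Section and its Questions. The :modifiable and
    # :phase_id options override the copied attributes, and save: true
    # persists the copy (skipping validations) before the questions are copied.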
copy = self.dup
copy.modifiable = options.fetch(:modifiable, self.modifiable)
copy.phase_id = options.fetch(:phase_id, nil)
copy.save!(validate: false) if options.fetch(:save, false)
options[:section_id] = copy.id
self.questions.map { |question| copy.questions << question.deep_copy(options) }
copy
end
  # Can't be modified as it was duplicated over from another Phase.
def unmodifiable?
!modifiable?
end
private
# ============================
# = Private instance methods =
# ============================
def set_modifiable
self.modifiable = true if modifiable.nil?
end
def set_number
return if phase.nil?
self.number = phase.sections.where.not(id: id).maximum(:number).to_i + 1
end
end
| 1 | 18,781 | I think this makes a lot of sense but we may want to highlight the change for people doing UAT in case the ordering of sections is off anywhere when customizing or using the drag-drop feature | DMPRoadmap-roadmap | rb |
@@ -0,0 +1,4 @@
+var script = document.createElement('script')
+script.type = 'text/javascript'
+script.src = '{}'
+document.head.appendChild(script) | 1 | 1 | 18,012 | These files should in `/javascript/brython` | SeleniumHQ-selenium | rb |
|
@@ -265,7 +265,7 @@ runLoop:
continue
}
s.close(err)
- break runLoop
+ continue
}
// This is a bit unclean, but works properly, since the packet always
// begins with the public header and we never copy it. | 1 | package quic
import (
"errors"
"fmt"
"net"
"sync/atomic"
"time"
"github.com/lucas-clemente/quic-go/ackhandler"
"github.com/lucas-clemente/quic-go/congestion"
"github.com/lucas-clemente/quic-go/flowcontrol"
"github.com/lucas-clemente/quic-go/frames"
"github.com/lucas-clemente/quic-go/handshake"
"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"
"github.com/lucas-clemente/quic-go/utils"
)
type unpacker interface {
Unpack(publicHeaderBinary []byte, hdr *PublicHeader, data []byte) (*unpackedPacket, error)
}
type receivedPacket struct {
remoteAddr net.Addr
publicHeader *PublicHeader
data []byte
rcvTime time.Time
}
var (
errRstStreamOnInvalidStream = errors.New("RST_STREAM received for unknown stream")
errWindowUpdateOnClosedStream = errors.New("WINDOW_UPDATE received for an already closed stream")
errSessionAlreadyClosed = errors.New("cannot close session; it was already closed before")
)
// cryptoChangeCallback is called every time the encryption level changes
// Once the callback has been called with isForwardSecure = true, it is guaranteed to not be called with isForwardSecure = false after that
type cryptoChangeCallback func(session Session, isForwardSecure bool)
type closeError struct {
err error
remote bool
}
// A Session is a QUIC session
type session struct {
connectionID protocol.ConnectionID
perspective protocol.Perspective
version protocol.VersionNumber
config *Config
cryptoChangeCallback cryptoChangeCallback
conn connection
streamsMap *streamsMap
rttStats *congestion.RTTStats
sentPacketHandler ackhandler.SentPacketHandler
receivedPacketHandler ackhandler.ReceivedPacketHandler
streamFramer *streamFramer
flowControlManager flowcontrol.FlowControlManager
unpacker unpacker
packer *packetPacker
cryptoSetup handshake.CryptoSetup
receivedPackets chan *receivedPacket
sendingScheduled chan struct{}
// closeChan is used to notify the run loop that it should terminate.
closeChan chan closeError
runClosed chan struct{}
closed uint32 // atomic bool
// when we receive too many undecryptable packets during the handshake, we send a Public reset
// but only after a time of protocol.PublicResetTimeout has passed
undecryptablePackets []*receivedPacket
receivedTooManyUndecrytablePacketsTime time.Time
// this channel is passed to the CryptoSetup and receives the current encryption level
// it is closed as soon as the handshake is complete
aeadChanged <-chan protocol.EncryptionLevel
handshakeComplete bool
nextAckScheduledTime time.Time
connectionParameters handshake.ConnectionParametersManager
lastRcvdPacketNumber protocol.PacketNumber
// Used to calculate the next packet number from the truncated wire
// representation, and sent back in public reset packets
largestRcvdPacketNumber protocol.PacketNumber
sessionCreationTime time.Time
lastNetworkActivityTime time.Time
timer *time.Timer
currentDeadline time.Time
timerRead bool
}
var _ Session = &session{}
// newSession makes a new session
func newSession(
conn connection,
v protocol.VersionNumber,
connectionID protocol.ConnectionID,
sCfg *handshake.ServerConfig,
cryptoChangeCallback cryptoChangeCallback,
config *Config,
) (packetHandler, error) {
s := &session{
conn: conn,
connectionID: connectionID,
perspective: protocol.PerspectiveServer,
version: v,
config: config,
cryptoChangeCallback: cryptoChangeCallback,
connectionParameters: handshake.NewConnectionParamatersManager(protocol.PerspectiveServer, v),
}
s.setup()
cryptoStream, _ := s.GetOrOpenStream(1)
_, _ = s.AcceptStream() // don't expose the crypto stream
var sourceAddr []byte
if udpAddr, ok := conn.RemoteAddr().(*net.UDPAddr); ok {
sourceAddr = udpAddr.IP
} else {
sourceAddr = []byte(conn.RemoteAddr().String())
}
aeadChanged := make(chan protocol.EncryptionLevel, 2)
s.aeadChanged = aeadChanged
var err error
s.cryptoSetup, err = handshake.NewCryptoSetup(connectionID, sourceAddr, v, sCfg, cryptoStream, s.connectionParameters, config.Versions, aeadChanged)
if err != nil {
return nil, err
}
s.packer = newPacketPacker(connectionID, s.cryptoSetup, s.connectionParameters, s.streamFramer, s.perspective, s.version)
s.unpacker = &packetUnpacker{aead: s.cryptoSetup, version: s.version}
return s, err
}
func newClientSession(
conn connection,
hostname string,
v protocol.VersionNumber,
connectionID protocol.ConnectionID,
cryptoChangeCallback cryptoChangeCallback,
config *Config,
negotiatedVersions []protocol.VersionNumber,
) (*session, error) {
s := &session{
conn: conn,
connectionID: connectionID,
perspective: protocol.PerspectiveClient,
version: v,
config: config,
cryptoChangeCallback: cryptoChangeCallback,
connectionParameters: handshake.NewConnectionParamatersManager(protocol.PerspectiveClient, v),
}
s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.ackAlarmChanged)
s.setup()
aeadChanged := make(chan protocol.EncryptionLevel, 2)
s.aeadChanged = aeadChanged
cryptoStream, _ := s.OpenStream()
var err error
s.cryptoSetup, err = handshake.NewCryptoSetupClient(
hostname,
connectionID,
v,
cryptoStream,
config.TLSConfig,
s.connectionParameters,
aeadChanged,
&handshake.TransportParameters{RequestConnectionIDTruncation: config.RequestConnectionIDTruncation},
negotiatedVersions,
)
if err != nil {
return nil, err
}
s.packer = newPacketPacker(connectionID, s.cryptoSetup, s.connectionParameters, s.streamFramer, s.perspective, s.version)
s.unpacker = &packetUnpacker{aead: s.cryptoSetup, version: s.version}
return s, err
}
// setup is called from newSession and newClientSession and initializes values that are independent of the perspective
func (s *session) setup() {
s.rttStats = &congestion.RTTStats{}
flowControlManager := flowcontrol.NewFlowControlManager(s.connectionParameters, s.rttStats)
sentPacketHandler := ackhandler.NewSentPacketHandler(s.rttStats)
now := time.Now()
s.sentPacketHandler = sentPacketHandler
s.flowControlManager = flowControlManager
s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.ackAlarmChanged)
s.receivedPackets = make(chan *receivedPacket, protocol.MaxSessionUnprocessedPackets)
s.closeChan = make(chan closeError, 1)
s.sendingScheduled = make(chan struct{}, 1)
s.undecryptablePackets = make([]*receivedPacket, 0, protocol.MaxUndecryptablePackets)
s.aeadChanged = make(chan protocol.EncryptionLevel, 2)
s.runClosed = make(chan struct{})
s.timer = time.NewTimer(0)
s.lastNetworkActivityTime = now
s.sessionCreationTime = now
s.streamsMap = newStreamsMap(s.newStream, s.perspective, s.connectionParameters)
s.streamFramer = newStreamFramer(s.streamsMap, s.flowControlManager)
}
// run the session main loop
func (s *session) run() error {
// Start the crypto stream handler
go func() {
if err := s.cryptoSetup.HandleCryptoStream(); err != nil {
s.Close(err)
}
}()
var closeErr closeError
aeadChanged := s.aeadChanged
runLoop:
for {
// Close immediately if requested
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
s.maybeResetTimer()
select {
case closeErr = <-s.closeChan:
break runLoop
case <-s.timer.C:
s.timerRead = true
// We do all the interesting stuff after the switch statement, so
// nothing to see here.
case <-s.sendingScheduled:
// We do all the interesting stuff after the switch statement, so
// nothing to see here.
case p := <-s.receivedPackets:
err := s.handlePacketImpl(p)
if err != nil {
if qErr, ok := err.(*qerr.QuicError); ok && qErr.ErrorCode == qerr.DecryptionFailure {
s.tryQueueingUndecryptablePacket(p)
continue
}
s.close(err)
break runLoop
}
// This is a bit unclean, but works properly, since the packet always
// begins with the public header and we never copy it.
putPacketBuffer(p.publicHeader.Raw)
case l, ok := <-aeadChanged:
if !ok {
s.handshakeComplete = true
aeadChanged = nil // prevent this case from ever being selected again
} else {
if l == protocol.EncryptionForwardSecure {
s.packer.SetForwardSecure()
}
s.tryDecryptingQueuedPackets()
s.cryptoChangeCallback(s, l == protocol.EncryptionForwardSecure)
}
}
now := time.Now()
if s.sentPacketHandler.GetAlarmTimeout().Before(now) {
// This could cause packets to be retransmitted, so check it before trying
// to send packets.
s.sentPacketHandler.OnAlarm()
}
if err := s.sendPacket(); err != nil {
s.close(err)
}
if !s.receivedTooManyUndecrytablePacketsTime.IsZero() && s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout).Before(now) && len(s.undecryptablePackets) != 0 {
s.close(qerr.Error(qerr.DecryptionFailure, "too many undecryptable packets received"))
}
if now.Sub(s.lastNetworkActivityTime) >= s.idleTimeout() {
s.close(qerr.Error(qerr.NetworkIdleTimeout, "No recent network activity."))
}
if !s.handshakeComplete && now.Sub(s.sessionCreationTime) >= protocol.MaxTimeForCryptoHandshake {
s.close(qerr.Error(qerr.NetworkIdleTimeout, "Crypto handshake did not complete in time."))
}
s.garbageCollectStreams()
}
s.handleCloseError(closeErr)
close(s.runClosed)
return closeErr.err
}
func (s *session) maybeResetTimer() {
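	// Rearm the run-loop timer to the earliest pending deadline: idle timeout,
	// scheduled ACK, loss-detection alarm, handshake timeout, or the delayed
	// public reset after too many undecryptable packets.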
nextDeadline := s.lastNetworkActivityTime.Add(s.idleTimeout())
if !s.nextAckScheduledTime.IsZero() {
nextDeadline = utils.MinTime(nextDeadline, s.nextAckScheduledTime)
}
if lossTime := s.sentPacketHandler.GetAlarmTimeout(); !lossTime.IsZero() {
nextDeadline = utils.MinTime(nextDeadline, lossTime)
}
if !s.handshakeComplete {
handshakeDeadline := s.sessionCreationTime.Add(protocol.MaxTimeForCryptoHandshake)
nextDeadline = utils.MinTime(nextDeadline, handshakeDeadline)
}
if !s.receivedTooManyUndecrytablePacketsTime.IsZero() {
nextDeadline = utils.MinTime(nextDeadline, s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout))
}
if nextDeadline.Equal(s.currentDeadline) {
// No need to reset the timer
return
}
// We need to drain the timer if the value from its channel was not read yet.
// See https://groups.google.com/forum/#!topic/golang-dev/c9UUfASVPoU
if !s.timer.Stop() && !s.timerRead {
<-s.timer.C
}
s.timer.Reset(nextDeadline.Sub(time.Now()))
s.timerRead = false
s.currentDeadline = nextDeadline
}
func (s *session) idleTimeout() time.Duration {
if s.handshakeComplete {
return s.connectionParameters.GetIdleConnectionStateLifetime()
}
return protocol.InitialIdleTimeout
}
func (s *session) handlePacketImpl(p *receivedPacket) error {
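	// Process one received packet on the run loop: infer the full packet
	// number, unpack (decrypt) it, update the received-packet state and
	// dispatch the contained frames.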
if s.perspective == protocol.PerspectiveClient {
diversificationNonce := p.publicHeader.DiversificationNonce
if len(diversificationNonce) > 0 {
s.cryptoSetup.SetDiversificationNonce(diversificationNonce)
}
}
if p.rcvTime.IsZero() {
// To simplify testing
p.rcvTime = time.Now()
}
s.lastNetworkActivityTime = p.rcvTime
hdr := p.publicHeader
data := p.data
// Calculate packet number
hdr.PacketNumber = protocol.InferPacketNumber(
hdr.PacketNumberLen,
s.largestRcvdPacketNumber,
hdr.PacketNumber,
)
packet, err := s.unpacker.Unpack(hdr.Raw, hdr, data)
if utils.Debug() {
if err != nil {
utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, time.Now().Format("15:04:05.000"))
} else {
utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x, %s @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, packet.encryptionLevel, time.Now().Format("15:04:05.000"))
}
}
// if the decryption failed, this might be a packet sent by an attacker
// don't update the remote address
if quicErr, ok := err.(*qerr.QuicError); ok && quicErr.ErrorCode == qerr.DecryptionFailure {
return err
}
if s.perspective == protocol.PerspectiveServer {
// update the remote address, even if unpacking failed for any other reason than a decryption error
s.conn.SetCurrentRemoteAddr(p.remoteAddr)
}
if err != nil {
return err
}
s.lastRcvdPacketNumber = hdr.PacketNumber
// Only do this after decrypting, so we are sure the packet is not attacker-controlled
s.largestRcvdPacketNumber = utils.MaxPacketNumber(s.largestRcvdPacketNumber, hdr.PacketNumber)
err = s.receivedPacketHandler.ReceivedPacket(hdr.PacketNumber, packet.IsRetransmittable())
// ignore duplicate packets
if err == ackhandler.ErrDuplicatePacket {
utils.Infof("Ignoring packet 0x%x due to ErrDuplicatePacket", hdr.PacketNumber)
return nil
}
// ignore packets with packet numbers smaller than the LeastUnacked of a StopWaiting
if err == ackhandler.ErrPacketSmallerThanLastStopWaiting {
utils.Infof("Ignoring packet 0x%x due to ErrPacketSmallerThanLastStopWaiting", hdr.PacketNumber)
return nil
}
if err != nil {
return err
}
return s.handleFrames(packet.frames)
}
func (s *session) handleFrames(fs []frames.Frame) error {
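	// Dispatch every frame of an unpacked packet to its handler. Some
	// per-frame errors (late RST_STREAMs, duplicate ACKs, WindowUpdates for
	// closed streams) are tolerated and do not terminate the session.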
for _, ff := range fs {
var err error
frames.LogFrame(ff, false)
switch frame := ff.(type) {
case *frames.StreamFrame:
err = s.handleStreamFrame(frame)
case *frames.AckFrame:
err = s.handleAckFrame(frame)
case *frames.ConnectionCloseFrame:
s.registerClose(qerr.Error(frame.ErrorCode, frame.ReasonPhrase), true)
case *frames.GoawayFrame:
err = errors.New("unimplemented: handling GOAWAY frames")
case *frames.StopWaitingFrame:
err = s.receivedPacketHandler.ReceivedStopWaiting(frame)
case *frames.RstStreamFrame:
err = s.handleRstStreamFrame(frame)
case *frames.WindowUpdateFrame:
err = s.handleWindowUpdateFrame(frame)
case *frames.BlockedFrame:
case *frames.PingFrame:
default:
return errors.New("Session BUG: unexpected frame type")
}
if err != nil {
switch err {
case ackhandler.ErrDuplicateOrOutOfOrderAck:
// Can happen e.g. when packets thought missing arrive late
case errRstStreamOnInvalidStream:
// Can happen when RST_STREAMs arrive early or late (?)
utils.Errorf("Ignoring error in session: %s", err.Error())
case errWindowUpdateOnClosedStream:
// Can happen when we already sent the last StreamFrame with the FinBit, but the client already sent a WindowUpdate for this Stream
default:
return err
}
}
}
return nil
}
// handlePacket is called by the server with a new packet
func (s *session) handlePacket(p *receivedPacket) {
// Discard packets once the amount of queued packets is larger than
// the channel size, protocol.MaxSessionUnprocessedPackets
select {
case s.receivedPackets <- p:
default:
}
}
func (s *session) handleStreamFrame(frame *frames.StreamFrame) error {
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// Stream is closed and already garbage collected
// ignore this StreamFrame
return nil
}
return str.AddStreamFrame(frame)
}
func (s *session) handleWindowUpdateFrame(frame *frames.WindowUpdateFrame) error {
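	// A WINDOW_UPDATE for stream 0 updates the connection-level flow control
	// window; otherwise the window of the referenced stream is updated.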
if frame.StreamID != 0 {
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
return errWindowUpdateOnClosedStream
}
}
_, err := s.flowControlManager.UpdateWindow(frame.StreamID, frame.ByteOffset)
return err
}
func (s *session) handleRstStreamFrame(frame *frames.RstStreamFrame) error {
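	// Cancel the affected stream with a remote error and reset its flow
	// control state to the byte offset reported by the peer.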
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
return errRstStreamOnInvalidStream
}
str.RegisterRemoteError(fmt.Errorf("RST_STREAM received with code %d", frame.ErrorCode))
return s.flowControlManager.ResetStream(frame.StreamID, frame.ByteOffset)
}
func (s *session) handleAckFrame(frame *frames.AckFrame) error {
return s.sentPacketHandler.ReceivedAck(frame, s.lastRcvdPacketNumber, s.lastNetworkActivityTime)
}
func (s *session) registerClose(e error, remoteClose bool) error {
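	// Mark the session as closed exactly once and pass the close reason to the
	// run loop via closeChan; the run loop performs the actual teardown.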
// Only close once
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
return errSessionAlreadyClosed
}
if e == nil {
e = qerr.PeerGoingAway
}
if e == errCloseSessionForNewVersion {
s.streamsMap.CloseWithError(e)
s.closeStreamsWithError(e)
}
s.closeChan <- closeError{err: e, remote: remoteClose}
return nil
}
// Close the connection. If err is nil it will be set to qerr.PeerGoingAway.
// It waits until the run loop has stopped before returning
func (s *session) Close(e error) error {
err := s.registerClose(e, false)
if err == errSessionAlreadyClosed {
return nil
}
// wait for the run loop to finish
<-s.runClosed
return err
}
// close the connection. Use this when called from the run loop
func (s *session) close(e error) error {
err := s.registerClose(e, false)
if err == errSessionAlreadyClosed {
return nil
}
return err
}
func (s *session) handleCloseError(closeErr closeError) error {
var quicErr *qerr.QuicError
var ok bool
if quicErr, ok = closeErr.err.(*qerr.QuicError); !ok {
quicErr = qerr.ToQuicError(closeErr.err)
}
// Don't log 'normal' reasons
if quicErr.ErrorCode == qerr.PeerGoingAway || quicErr.ErrorCode == qerr.NetworkIdleTimeout {
utils.Infof("Closing connection %x", s.connectionID)
} else {
utils.Errorf("Closing session with error: %s", closeErr.err.Error())
}
if closeErr.err == errCloseSessionForNewVersion {
return nil
}
s.streamsMap.CloseWithError(quicErr)
s.closeStreamsWithError(quicErr)
// If this is a remote close we're done here
if closeErr.remote {
return nil
}
if quicErr.ErrorCode == qerr.DecryptionFailure || quicErr == handshake.ErrHOLExperiment {
return s.sendPublicReset(s.lastRcvdPacketNumber)
}
return s.sendConnectionClose(quicErr)
}
func (s *session) closeStreamsWithError(err error) {
s.streamsMap.Iterate(func(str *stream) (bool, error) {
str.Cancel(err)
return true, nil
})
}
func (s *session) sendPacket() error {
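	// Pack and send packets for as long as the congestion controller allows:
	// queued retransmissions are handled first, then new data together with
	// pending ACK, WindowUpdate and other control frames.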
// Repeatedly try sending until we don't have any more data, or run out of the congestion window
for {
if !s.sentPacketHandler.SendingAllowed() {
return nil
}
var controlFrames []frames.Frame
// get WindowUpdate frames
// this call triggers the flow controller to increase the flow control windows, if necessary
windowUpdateFrames := s.getWindowUpdateFrames()
for _, wuf := range windowUpdateFrames {
controlFrames = append(controlFrames, wuf)
}
// check for retransmissions first
for {
retransmitPacket := s.sentPacketHandler.DequeuePacketForRetransmission()
if retransmitPacket == nil {
break
}
utils.Debugf("\tDequeueing retransmission for packet 0x%x", retransmitPacket.PacketNumber)
if retransmitPacket.EncryptionLevel != protocol.EncryptionForwardSecure {
utils.Debugf("\tDequeueing handshake retransmission for packet 0x%x", retransmitPacket.PacketNumber)
stopWaitingFrame := s.sentPacketHandler.GetStopWaitingFrame(true)
var packet *packedPacket
packet, err := s.packer.RetransmitNonForwardSecurePacket(stopWaitingFrame, retransmitPacket)
if err != nil {
return err
}
if packet == nil {
continue
}
err = s.sendPackedPacket(packet)
if err != nil {
return err
}
continue
} else {
// resend the frames that were in the packet
for _, frame := range retransmitPacket.GetFramesForRetransmission() {
switch frame.(type) {
case *frames.StreamFrame:
s.streamFramer.AddFrameForRetransmission(frame.(*frames.StreamFrame))
case *frames.WindowUpdateFrame:
					// only retransmit WindowUpdates if the stream is not yet closed and we haven't sent another WindowUpdate with a higher ByteOffset for the stream
var currentOffset protocol.ByteCount
f := frame.(*frames.WindowUpdateFrame)
currentOffset, err := s.flowControlManager.GetReceiveWindow(f.StreamID)
if err == nil && f.ByteOffset >= currentOffset {
controlFrames = append(controlFrames, frame)
}
default:
controlFrames = append(controlFrames, frame)
}
}
}
}
ack := s.receivedPacketHandler.GetAckFrame()
if ack != nil {
controlFrames = append(controlFrames, ack)
}
hasRetransmission := s.streamFramer.HasFramesForRetransmission()
var stopWaitingFrame *frames.StopWaitingFrame
if ack != nil || hasRetransmission {
stopWaitingFrame = s.sentPacketHandler.GetStopWaitingFrame(hasRetransmission)
}
packet, err := s.packer.PackPacket(stopWaitingFrame, controlFrames, s.sentPacketHandler.GetLeastUnacked())
if err != nil {
return err
}
if packet == nil {
return nil
}
// send every window update twice
for _, f := range windowUpdateFrames {
s.packer.QueueControlFrameForNextPacket(f)
}
err = s.sendPackedPacket(packet)
if err != nil {
return err
}
s.nextAckScheduledTime = time.Time{}
}
}
func (s *session) sendPackedPacket(packet *packedPacket) error {
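	// Register the packet with the SentPacketHandler, log it, write it to the
	// connection and return the buffer to the packet-buffer pool.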
err := s.sentPacketHandler.SentPacket(&ackhandler.Packet{
PacketNumber: packet.number,
Frames: packet.frames,
Length: protocol.ByteCount(len(packet.raw)),
EncryptionLevel: packet.encryptionLevel,
})
if err != nil {
return err
}
s.logPacket(packet)
err = s.conn.Write(packet.raw)
putPacketBuffer(packet.raw)
return err
}
func (s *session) sendConnectionClose(quicErr *qerr.QuicError) error {
packet, err := s.packer.PackConnectionClose(&frames.ConnectionCloseFrame{ErrorCode: quicErr.ErrorCode, ReasonPhrase: quicErr.ErrorMessage}, s.sentPacketHandler.GetLeastUnacked())
if err != nil {
return err
}
if packet == nil {
return errors.New("Session BUG: expected packet not to be nil")
}
s.logPacket(packet)
return s.conn.Write(packet.raw)
}
func (s *session) logPacket(packet *packedPacket) {
if !utils.Debug() {
// We don't need to allocate the slices for calling the format functions
return
}
if utils.Debug() {
utils.Debugf("-> Sending packet 0x%x (%d bytes), %s, @ %s", packet.number, len(packet.raw), packet.encryptionLevel, time.Now().Format("15:04:05.000"))
for _, frame := range packet.frames {
frames.LogFrame(frame, true)
}
}
}
// GetOrOpenStream either returns an existing stream, a newly opened stream, or nil if a stream with the provided ID is already closed.
// Newly opened streams should only originate from the client. To open a stream from the server, OpenStream should be used.
func (s *session) GetOrOpenStream(id protocol.StreamID) (Stream, error) {
str, err := s.streamsMap.GetOrOpenStream(id)
if str != nil {
return str, err
}
	// make sure to return an actual nil value here, not a Stream with value nil
return nil, err
}
// AcceptStream returns the next stream opened by the peer
func (s *session) AcceptStream() (Stream, error) {
return s.streamsMap.AcceptStream()
}
// OpenStream opens a stream
func (s *session) OpenStream() (Stream, error) {
return s.streamsMap.OpenStream()
}
func (s *session) OpenStreamSync() (Stream, error) {
return s.streamsMap.OpenStreamSync()
}
func (s *session) queueResetStreamFrame(id protocol.StreamID, offset protocol.ByteCount) {
s.packer.QueueControlFrameForNextPacket(&frames.RstStreamFrame{
StreamID: id,
ByteOffset: offset,
})
s.scheduleSending()
}
func (s *session) newStream(id protocol.StreamID) (*stream, error) {
stream, err := newStream(id, s.scheduleSending, s.queueResetStreamFrame, s.flowControlManager)
if err != nil {
return nil, err
}
// TODO: find a better solution for determining which streams contribute to connection level flow control
if id == 1 || id == 3 {
s.flowControlManager.NewStream(id, false)
} else {
s.flowControlManager.NewStream(id, true)
}
return stream, nil
}
// garbageCollectStreams goes through all streams and removes EOF'ed streams
// from the streams map.
func (s *session) garbageCollectStreams() {
s.streamsMap.Iterate(func(str *stream) (bool, error) {
id := str.StreamID()
if str.finished() {
err := s.streamsMap.RemoveStream(id)
if err != nil {
return false, err
}
s.flowControlManager.RemoveStream(id)
}
return true, nil
})
}
func (s *session) sendPublicReset(rejectedPacketNumber protocol.PacketNumber) error {
utils.Infof("Sending public reset for connection %x, packet number %d", s.connectionID, rejectedPacketNumber)
return s.conn.Write(writePublicReset(s.connectionID, rejectedPacketNumber, 0))
}
// scheduleSending signals that we have data for sending
func (s *session) scheduleSending() {
select {
case s.sendingScheduled <- struct{}{}:
default:
}
}
func (s *session) tryQueueingUndecryptablePacket(p *receivedPacket) {
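	// Buffer packets that cannot be decrypted yet so they can be retried once
	// the encryption level changes; if the queue is full, drop the packet and
	// schedule a public reset.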
if s.handshakeComplete {
return
}
if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
// if this is the first time the undecryptablePackets runs full, start the timer to send a Public Reset
if s.receivedTooManyUndecrytablePacketsTime.IsZero() {
s.receivedTooManyUndecrytablePacketsTime = time.Now()
s.maybeResetTimer()
}
utils.Infof("Dropping undecrytable packet 0x%x (undecryptable packet queue full)", p.publicHeader.PacketNumber)
return
}
utils.Infof("Queueing packet 0x%x for later decryption", p.publicHeader.PacketNumber)
s.undecryptablePackets = append(s.undecryptablePackets, p)
}
func (s *session) tryDecryptingQueuedPackets() {
for _, p := range s.undecryptablePackets {
s.handlePacket(p)
}
s.undecryptablePackets = s.undecryptablePackets[:0]
}
func (s *session) getWindowUpdateFrames() []*frames.WindowUpdateFrame {
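	// Ask the flow controller which receive windows should grow and convert
	// the new offsets into WINDOW_UPDATE frames.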
updates := s.flowControlManager.GetWindowUpdates()
res := make([]*frames.WindowUpdateFrame, len(updates))
for i, u := range updates {
res[i] = &frames.WindowUpdateFrame{StreamID: u.StreamID, ByteOffset: u.Offset}
}
return res
}
func (s *session) ackAlarmChanged(t time.Time) {
s.nextAckScheduledTime = t
s.maybeResetTimer()
}
func (s *session) LocalAddr() net.Addr {
return s.conn.LocalAddr()
}
// RemoteAddr returns the net.Addr of the client
func (s *session) RemoteAddr() net.Addr {
return s.conn.RemoteAddr()
}
| 1 | 6,121 | I'm not sure this is the right fix - I'd be more happy with a `continue`. That way, we don't enter the code at the bottom of the run loop (e.g. sending packets). Keep in mind that this error here may be triggered by a peer doing something security-relevant, so I don't think we should do much more work other than sending the close frame. | lucas-clemente-quic-go | go |
@@ -107,7 +107,11 @@ bool load_model::load_model_weights(const std::string& ckpt_dir,
closedir(weight_dir);
// load weights that appear in weight list.
- m->reload_weights(active_ckpt_dir, weight_list);
+ // load weights that appear in weight list.
+ // for(auto&& w : m_weights) {
+ // w->load_from_save(latest,weight_list);
+ // }
+ // m->reload_weights(active_ckpt_dir, weight_list);
return true;
}
| 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// load_model .hpp .cpp - Callbacks to load pretrained model(s)
////////////////////////////////////////////////////////////////////////////////
#include "lbann/callbacks/load_model.hpp"
#include "lbann/callbacks/checkpoint.hpp"
#include "lbann/training_algorithms/training_algorithm.hpp"
#include <callbacks.pb.h>
#include <model.pb.h>
#include <unistd.h>
#include <dirent.h>
#include <cstdlib>
#include <fstream>
#include <string>
namespace lbann {
namespace callback {
void load_model::on_train_begin(model *m) {
if(!m_loaded) {
for (const auto& d : m_dirs) {
m_loaded = load_model_weights(d, "", m, true);
if(!m_loaded) LBANN_ERROR("Unable to reload model on train begin");
}
}
}
void load_model::on_test_begin(model *m) {
if(!m_loaded) {
for (const auto& d : m_dirs) {
m_loaded = load_model_weights(d, "", m, true);
if(!m_loaded) LBANN_ERROR("Unable to reload model on test begin");
}
}
}
bool load_model::load_model_weights(const std::string& ckpt_dir,
const std::string& alg_name,
model *m,
bool ckptdir_is_fullpath) {
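  // Resolve the checkpoint directory (used verbatim or derived from the most
  // recent shared checkpoint), collect the names of the stored weight files,
  // and reload those weights into the model.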
std::vector<std::string> weight_list = std::vector<std::string>();
std::string active_ckpt_dir;
if(ckptdir_is_fullpath) {
active_ckpt_dir = add_delimiter(ckpt_dir);
}else {
    size_t epochLast = std::numeric_limits<size_t>::max();
    size_t stepLast = std::numeric_limits<size_t>::max();
execution_mode mode = execution_mode::invalid;
active_ckpt_dir = get_last_shared_checkpoint_filename(alg_name, ckpt_dir);
// get last epoch and step saved.
int success = read_latest(active_ckpt_dir, &mode, &epochLast, &stepLast);
if(!success) {
LBANN_WARNING("Unable to find the latest checkpoint ", active_ckpt_dir);
return false;
}
active_ckpt_dir = get_shared_checkpoint_dirname(alg_name, ckpt_dir, mode, epochLast, stepLast) + m->get_name() + '/';
}
lbann_comm *comm = m->get_comm();
if(comm->am_trainer_master()) {
std::cout << "Loading model weights from " << active_ckpt_dir << std::endl;
}
DIR *weight_dir = opendir(active_ckpt_dir.c_str());
if(weight_dir == nullptr)
{
LBANN_WARNING("error opening ", active_ckpt_dir);
return false;
}
// Populate weight list
struct dirent *weight_file;
while ((weight_file = readdir(weight_dir)) != nullptr){
if(!strncmp(weight_file->d_name,"model_weights_",14))
weight_list.push_back(std::string(weight_file->d_name));
}
closedir(weight_dir);
// load weights that appear in weight list.
m->reload_weights(active_ckpt_dir, weight_list);
return true;
}
std::unique_ptr<callback_base>
build_load_model_callback_from_pbuf(
const google::protobuf::Message& proto_msg, const std::shared_ptr<lbann_summary>&) {
const auto& params =
dynamic_cast<const lbann_data::Callback::CallbackLoadModel&>(proto_msg);
if(params.extension().size() != 0) {
return make_unique<load_model>(
parse_list<std::string>(params.dirs()),
params.extension());
}
else {
return make_unique<load_model>(
parse_list<std::string>(params.dirs()));
}
}
} // namespace callback
} // namespace lbann
| 1 | 15,934 | Can we fix this, or is it going to stay broken. | LLNL-lbann | cpp |
@@ -57,7 +57,6 @@ except Exception:
# to avoid copying:
-@attr('postgres')
class CopyToTestDB(postgres.CopyToTable):
host = host
database = database | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from helpers import unittest
from nose.plugins.attrib import attr
import luigi
import luigi.notifications
from luigi import postgres
"""
Typical use cases that should be tested:
* Daily overwrite of all data in table
* Daily inserts of new segment in table
* (Daily insertion/creation of new table)
* Daily insertion of multiple (different) new segments into table
"""
host = 'localhost'
database = 'spotify'
user = os.getenv('POSTGRES_USER', 'spotify')
password = 'guest'
try:
import psycopg2
conn = psycopg2.connect(
user=user,
host=host,
database=database,
password=password,
)
conn.close()
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
except Exception:
raise unittest.SkipTest('Unable to connect to postgres')
# to avoid copying:
@attr('postgres')
class CopyToTestDB(postgres.CopyToTable):
host = host
database = database
user = user
password = password
class TestPostgresTask(CopyToTestDB):
table = 'test_table'
columns = (('test_text', 'text'),
('test_int', 'int'),
('test_float', 'float'))
def create_table(self, connection):
connection.cursor().execute(
"CREATE TABLE {table} (id SERIAL PRIMARY KEY, test_text TEXT, test_int INT, test_float FLOAT)"
.format(table=self.table))
def rows(self):
yield 'foo', 123, 123.45
yield None, '-100', '5143.213'
yield '\t\n\r\\N', 0, 0
yield u'éцү我', 0, 0
        yield '', 0, r'\N'  # Test working default null character
class MetricBase(CopyToTestDB):
table = 'metrics'
columns = [('metric', 'text'),
('value', 'int')
]
class Metric1(MetricBase):
param = luigi.Parameter()
def rows(self):
yield 'metric1', 1
yield 'metric1', 2
yield 'metric1', 3
class Metric2(MetricBase):
param = luigi.Parameter()
def rows(self):
yield 'metric2', 1
yield 'metric2', 4
yield 'metric2', 3
class TestPostgresImportTask(unittest.TestCase):
def test_default_escape(self):
self.assertEqual(postgres.default_escape('foo'), 'foo')
self.assertEqual(postgres.default_escape('\n'), '\\n')
self.assertEqual(postgres.default_escape('\\\n'), '\\\\\\n')
self.assertEqual(postgres.default_escape('\n\r\\\t\\N\\'),
'\\n\\r\\\\\\t\\\\N\\\\')
def test_repeat(self):
task = TestPostgresTask()
conn = task.output().connect()
conn.autocommit = True
cursor = conn.cursor()
cursor.execute('DROP TABLE IF EXISTS {table}'.format(table=task.table))
cursor.execute('DROP TABLE IF EXISTS {marker_table}'.format(marker_table=postgres.PostgresTarget.marker_table))
luigi.build([task], local_scheduler=True)
luigi.build([task], local_scheduler=True) # try to schedule twice
cursor.execute("""SELECT test_text, test_int, test_float
FROM test_table
ORDER BY id ASC""")
rows = tuple(cursor)
self.assertEqual(rows, (
('foo', 123, 123.45),
(None, -100, 5143.213),
('\t\n\r\\N', 0.0, 0),
(u'éцү我', 0, 0),
            (u'', 0, None),  # Test working default null character
))
def test_multimetric(self):
metrics = MetricBase()
conn = metrics.output().connect()
conn.autocommit = True
conn.cursor().execute('DROP TABLE IF EXISTS {table}'.format(table=metrics.table))
conn.cursor().execute('DROP TABLE IF EXISTS {marker_table}'.format(marker_table=postgres.PostgresTarget.marker_table))
luigi.build([Metric1(20), Metric1(21), Metric2("foo")], local_scheduler=True)
cursor = conn.cursor()
cursor.execute('select count(*) from {table}'.format(table=metrics.table))
self.assertEqual(tuple(cursor), ((9,),))
def test_clear(self):
class Metric2Copy(Metric2):
def init_copy(self, connection):
query = "TRUNCATE {0}".format(self.table)
connection.cursor().execute(query)
clearer = Metric2Copy(21)
conn = clearer.output().connect()
conn.autocommit = True
conn.cursor().execute('DROP TABLE IF EXISTS {table}'.format(table=clearer.table))
conn.cursor().execute('DROP TABLE IF EXISTS {marker_table}'.format(marker_table=postgres.PostgresTarget.marker_table))
luigi.build([Metric1(0), Metric1(1)], local_scheduler=True)
luigi.build([clearer], local_scheduler=True)
cursor = conn.cursor()
cursor.execute('select count(*) from {table}'.format(table=clearer.table))
self.assertEqual(tuple(cursor), ((3,),))
| 1 | 13,668 | Is there a reason to remove this attr? | spotify-luigi | py |
@@ -0,0 +1,19 @@
+package config
+
+import (
+ "github.com/kubeedge/beehive/pkg/common/config"
+ "github.com/kubeedge/beehive/pkg/common/log"
+ "github.com/kubeedge/kubeedge/cloud/edgecontroller/pkg/devicecontroller/constants"
+)
+
+// MessageLayer used, context or ssmq, default is context
+var MessageLayer string
+
+func init() {
+ if ml, err := config.CONFIG.GetValue("devicecontroller.message-layer").ToString(); err != nil {
+ MessageLayer = constants.DefaultMessageLayer
+ } else {
+ MessageLayer = ml
+ }
+ log.LOGGER.Infof("message layer: %s", MessageLayer)
+} | 1 | 1 | 9,878 | log message should be started with upper-case word. | kubeedge-kubeedge | go |
|
@@ -17,6 +17,9 @@ const (
// aim to create chunks of 20 bits or about 1MiB on average.
averageBits = 20
+ // default buffer size
+ bufSize = 512 * KiB
+
// MinSize is the minimal size of a chunk.
MinSize = 512 * KiB
// MaxSize is the maximal size of a chunk. | 1 | package chunker
import (
"errors"
"hash"
"io"
"sync"
)
const (
KiB = 1024
MiB = 1024 * KiB
// WindowSize is the size of the sliding window.
windowSize = 64
// aim to create chunks of 20 bits or about 1MiB on average.
averageBits = 20
// MinSize is the minimal size of a chunk.
MinSize = 512 * KiB
// MaxSize is the maximal size of a chunk.
MaxSize = 8 * MiB
splitmask = (1 << averageBits) - 1
)
type tables struct {
out [256]Pol
mod [256]Pol
}
// cache precomputed tables, these are read-only anyway
var cache struct {
entries map[Pol]*tables
sync.Mutex
}
func init() {
cache.entries = make(map[Pol]*tables)
}
// Chunk is one content-dependent chunk of bytes whose end was cut when the
// Rabin Fingerprint had the value stored in Cut.
type Chunk struct {
Start uint
Length uint
Cut uint64
Digest []byte
}
func (c Chunk) Reader(r io.ReaderAt) io.Reader {
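	// Return a reader restricted to this chunk's byte range within r.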
return io.NewSectionReader(r, int64(c.Start), int64(c.Length))
}
// Chunker splits content with Rabin Fingerprints.
type Chunker struct {
pol Pol
polShift uint
tables *tables
rd io.Reader
closed bool
window [windowSize]byte
wpos int
buf []byte
bpos uint
bmax uint
start uint
count uint
pos uint
pre uint // wait for this many bytes before start calculating an new chunk
digest uint64
h hash.Hash
}
// New returns a new Chunker based on polynomial p that reads from data from rd
// with bufsize and pass all data to hash along the way.
func New(rd io.Reader, p Pol, bufsize int, hash hash.Hash) *Chunker {
c := &Chunker{
buf: make([]byte, bufsize),
h: hash,
}
c.Reset(rd, p)
return c
}
// Reset restarts a chunker so that it can be reused with a different
// polynomial and reader.
func (c *Chunker) Reset(rd io.Reader, p Pol) {
c.pol = p
c.polShift = uint(p.Deg() - 8)
c.fillTables()
c.rd = rd
for i := 0; i < windowSize; i++ {
c.window[i] = 0
}
c.closed = false
c.digest = 0
c.wpos = 0
c.pos = 0
c.start = 0
c.count = 0
if p != 0 {
c.slide(1)
}
if c.h != nil {
c.h.Reset()
}
// do not start a new chunk unless at least MinSize bytes have been read
c.pre = MinSize - windowSize
}
// Calculate out_table and mod_table for optimization. Must be called only
// once. This implementation uses a cache in the global variable cache.
func (c *Chunker) fillTables() {
// if polynomial hasn't been specified, do not compute anything for now
if c.pol == 0 {
return
}
// test if the tables are cached for this polynomial
cache.Lock()
defer cache.Unlock()
if t, ok := cache.entries[c.pol]; ok {
c.tables = t
return
}
// else create a new entry
c.tables = &tables{}
cache.entries[c.pol] = c.tables
// calculate table for sliding out bytes. The byte to slide out is used as
// the index for the table, the value contains the following:
// out_table[b] = Hash(b || 0 || ... || 0)
// \ windowsize-1 zero bytes /
// To slide out byte b_0 for window size w with known hash
// H := H(b_0 || ... || b_w), it is sufficient to add out_table[b_0]:
// H(b_0 || ... || b_w) + H(b_0 || 0 || ... || 0)
// = H(b_0 + b_0 || b_1 + 0 || ... || b_w + 0)
// = H( 0 || b_1 || ... || b_w)
//
// Afterwards a new byte can be shifted in.
for b := 0; b < 256; b++ {
var h Pol
h = appendByte(h, byte(b), c.pol)
for i := 0; i < windowSize-1; i++ {
h = appendByte(h, 0, c.pol)
}
c.tables.out[b] = h
}
// calculate table for reduction mod Polynomial
k := c.pol.Deg()
for b := 0; b < 256; b++ {
// mod_table[b] = A | B, where A = (b(x) * x^k mod pol) and B = b(x) * x^k
//
// The 8 bits above deg(Polynomial) determine what happens next and so
// these bits are used as a lookup to this table. The value is split in
// two parts: Part A contains the result of the modulus operation, part
// B is used to cancel out the 8 top bits so that one XOR operation is
// enough to reduce modulo Polynomial
c.tables.mod[b] = Pol(uint64(b)<<uint(k)).Mod(c.pol) | (Pol(b) << uint(k))
}
}
// Next returns the position and length of the next chunk of data. If an error
// occurs while reading, the error is returned with a nil chunk. The state of
// the current chunk is undefined. When the last chunk has been returned, all
// subsequent calls yield a nil chunk and an io.EOF error.
func (c *Chunker) Next() (*Chunk, error) {
if c.tables == nil {
return nil, errors.New("polynomial is not set")
}
for {
if c.bpos >= c.bmax {
n, err := io.ReadFull(c.rd, c.buf[:])
if err == io.ErrUnexpectedEOF {
err = nil
}
// io.ReadFull only returns io.EOF when no bytes could be read. If
// this is the case and we're in this branch, there are no more
// bytes to buffer, so this was the last chunk. If a different
// error has occurred, return that error and abandon the current
// chunk.
if err == io.EOF && !c.closed {
c.closed = true
// return current chunk, if any bytes have been processed
if c.count > 0 {
return &Chunk{
Start: c.start,
Length: c.count,
Cut: c.digest,
Digest: c.hashDigest(),
}, nil
}
}
if err != nil {
return nil, err
}
c.bpos = 0
c.bmax = uint(n)
}
// check if bytes have to be dismissed before starting a new chunk
if c.pre > 0 {
n := c.bmax - c.bpos
if c.pre > uint(n) {
c.pre -= uint(n)
c.updateHash(c.buf[c.bpos:c.bmax])
c.count += uint(n)
c.pos += uint(n)
c.bpos = c.bmax
continue
}
c.updateHash(c.buf[c.bpos : c.bpos+c.pre])
c.bpos += c.pre
c.count += c.pre
c.pos += c.pre
c.pre = 0
}
add := c.count
for _, b := range c.buf[c.bpos:c.bmax] {
// inline c.slide(b) and append(b) to increase performance
out := c.window[c.wpos]
c.window[c.wpos] = b
c.digest ^= uint64(c.tables.out[out])
c.wpos = (c.wpos + 1) % windowSize
// c.append(b)
index := c.digest >> c.polShift
c.digest <<= 8
c.digest |= uint64(b)
c.digest ^= uint64(c.tables.mod[index])
// end inline
add++
if add < MinSize {
continue
}
if (c.digest&splitmask) == 0 || add >= MaxSize {
i := add - c.count - 1
c.updateHash(c.buf[c.bpos : c.bpos+uint(i)+1])
c.count = add
c.pos += uint(i) + 1
c.bpos += uint(i) + 1
chunk := &Chunk{
Start: c.start,
Length: c.count,
Cut: c.digest,
Digest: c.hashDigest(),
}
if c.h != nil {
c.h.Reset()
}
// reset chunker, but keep position
pos := c.pos
c.Reset(c.rd, c.pol)
c.pos = pos
c.start = pos
c.pre = MinSize - windowSize
return chunk, nil
}
}
steps := c.bmax - c.bpos
if steps > 0 {
c.updateHash(c.buf[c.bpos : c.bpos+steps])
}
c.count += steps
c.pos += steps
c.bpos = c.bmax
}
}
func (c *Chunker) updateHash(data []byte) {
if c.h != nil {
// the hashes from crypto/sha* do not return an error
_, err := c.h.Write(data)
if err != nil {
panic(err)
}
}
}
func (c *Chunker) hashDigest() []byte {
if c.h == nil {
return nil
}
return c.h.Sum(nil)
}
func (c *Chunker) append(b byte) {
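	// Feed one byte into the rolling hash, reducing it modulo the chunker
	// polynomial via the precomputed mod table.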
index := c.digest >> c.polShift
c.digest <<= 8
c.digest |= uint64(b)
c.digest ^= uint64(c.tables.mod[index])
}
func (c *Chunker) slide(b byte) {
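	// Slide the window forward by one byte: remove the oldest byte's
	// contribution using the out table, then append the new byte.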
out := c.window[c.wpos]
c.window[c.wpos] = b
c.digest ^= uint64(c.tables.out[out])
c.wpos = (c.wpos + 1) % windowSize
c.append(b)
}
func appendByte(hash Pol, b byte, pol Pol) Pol {
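	// Append one byte to the fingerprint and reduce the result modulo pol.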
hash <<= 8
hash |= Pol(b)
return hash.Mod(pol)
}
| 1 | 6,486 | I guess this is not needed anymore now? | restic-restic | go |
@@ -24,7 +24,7 @@
#include "h2o.h"
static ssize_t add_header(h2o_mem_pool_t *pool, h2o_headers_t *headers, h2o_iovec_t *name, const char *orig_name, const char *value,
- size_t value_len, h2o_header_flags_t flags)
+ size_t value_len)
{
h2o_header_t *slot;
| 1 | /*
* Copyright (c) 2014 DeNA Co., Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stddef.h>
#include <stdio.h>
#include "h2o.h"
static ssize_t add_header(h2o_mem_pool_t *pool, h2o_headers_t *headers, h2o_iovec_t *name, const char *orig_name, const char *value,
size_t value_len, h2o_header_flags_t flags)
{
h2o_header_t *slot;
h2o_vector_reserve(pool, headers, headers->size + 1);
slot = headers->entries + headers->size++;
slot->name = name;
slot->value.base = (char *)value;
slot->value.len = value_len;
slot->orig_name = orig_name ? h2o_strdup(pool, orig_name, name->len).base : NULL;
slot->flags = flags;
return headers->size - 1;
}
ssize_t h2o_find_header(const h2o_headers_t *headers, const h2o_token_t *token, ssize_t cursor)
{
for (++cursor; cursor < headers->size; ++cursor) {
if (headers->entries[cursor].name == &token->buf) {
return cursor;
}
}
return -1;
}
ssize_t h2o_find_header_by_str(const h2o_headers_t *headers, const char *name, size_t name_len, ssize_t cursor)
{
for (++cursor; cursor < headers->size; ++cursor) {
h2o_header_t *t = headers->entries + cursor;
if (h2o_memis(t->name->base, t->name->len, name, name_len)) {
return cursor;
}
}
return -1;
}
ssize_t h2o_add_header(h2o_mem_pool_t *pool, h2o_headers_t *headers, const h2o_token_t *token, const char *orig_name,
const char *value, size_t value_len)
{
return add_header(pool, headers, (h2o_iovec_t *)&token->buf, orig_name, value, value_len, (h2o_header_flags_t){0});
}
ssize_t h2o_add_header_by_str(h2o_mem_pool_t *pool, h2o_headers_t *headers, const char *name, size_t name_len, int maybe_token,
const char *orig_name, const char *value, size_t value_len)
{
h2o_iovec_t *name_buf;
if (maybe_token) {
const h2o_token_t *token = h2o_lookup_token(name, name_len);
if (token != NULL) {
return add_header(pool, headers, (h2o_iovec_t *)token, orig_name, value, value_len, (h2o_header_flags_t){0});
}
}
name_buf = h2o_mem_alloc_pool(pool, *name_buf, 1);
name_buf->base = (char *)name;
name_buf->len = name_len;
return add_header(pool, headers, name_buf, orig_name, value, value_len, (h2o_header_flags_t){0});
}
ssize_t h2o_set_header(h2o_mem_pool_t *pool, h2o_headers_t *headers, const h2o_token_t *token, const char *value, size_t value_len,
int overwrite_if_exists)
{
ssize_t cursor = h2o_find_header(headers, token, -1);
if (cursor != -1) {
if (overwrite_if_exists) {
h2o_iovec_t *slot = &headers->entries[cursor].value;
slot->base = (char *)value;
slot->len = value_len;
}
return cursor;
} else {
return h2o_add_header(pool, headers, token, NULL, value, value_len);
}
}
ssize_t h2o_set_header_by_str(h2o_mem_pool_t *pool, h2o_headers_t *headers, const char *name, size_t name_len, int maybe_token,
const char *value, size_t value_len, int overwrite_if_exists)
{
ssize_t cursor;
if (maybe_token) {
const h2o_token_t *token = h2o_lookup_token(name, name_len);
if (token != NULL) {
return h2o_set_header(pool, headers, token, value, value_len, overwrite_if_exists);
}
}
cursor = h2o_find_header_by_str(headers, name, name_len, -1);
if (cursor != -1) {
if (overwrite_if_exists) {
h2o_iovec_t *slot = &headers->entries[cursor].value;
slot->base = (char *)value;
slot->len = value_len;
}
return cursor;
} else {
h2o_iovec_t *name_buf = h2o_mem_alloc_pool(pool, *name_buf, 1);
name_buf->base = (char *)name;
name_buf->len = name_len;
return add_header(pool, headers, name_buf, NULL, value, value_len, (h2o_header_flags_t){0});
}
}
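/* adds `value` to a comma-separated list header identified by `token`; returns
 * -1 without modifying anything if the value is already present, otherwise
 * appends ", value" to the existing entry or creates a new header */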
ssize_t h2o_set_header_token(h2o_mem_pool_t *pool, h2o_headers_t *headers, const h2o_token_t *token, const char *value,
size_t value_len)
{
size_t found = -1;
size_t i;
for (i = 0; i != headers->size; ++i) {
if (headers->entries[i].name == &token->buf) {
if (h2o_contains_token(headers->entries[i].value.base, headers->entries[i].value.len, value, value_len, ','))
return -1;
found = i;
}
}
if (found != -1) {
h2o_header_t *dest = headers->entries + found;
dest->value = h2o_concat(pool, dest->value, h2o_iovec_init(H2O_STRLIT(", ")), h2o_iovec_init(value, value_len));
return found;
} else {
return h2o_add_header(pool, headers, token, NULL, value, value_len);
}
}
ssize_t h2o_delete_header(h2o_headers_t *headers, ssize_t cursor)
{
assert(cursor != -1);
--headers->size;
memmove(headers->entries + cursor, headers->entries + cursor + 1, sizeof(h2o_header_t) * (headers->size - cursor));
return cursor;
}
| 1 | 13,671 | I think I would prefer retaining the argument. It's true that we are not using it now, but it's harmless, it's good to have a constructor function that accepts all the field values as arguments. Performance-wise, it does not matter. | h2o-h2o | c |
@@ -836,8 +836,11 @@ Blockly.Gesture.prototype.duplicateOnDrag_ = function() {
try {
// Note: targetBlock_ should have no children. If it has children we would
// need to update shadow block IDs to avoid problems in the VM.
+ // Resizes will be reenabled at the end of the drag.
+ this.startWorkspace_.setResizesEnabled(false);
var xmlBlock = Blockly.Xml.blockToDom(this.targetBlock_);
newBlock = Blockly.Xml.domToBlock(xmlBlock, this.startWorkspace_);
+
// Move the duplicate to original position.
var xy = this.targetBlock_.getRelativeToSurfaceXY();
newBlock.moveBy(xy.x, xy.y); | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2017 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview The class representing an in-progress gesture, usually a drag
* or a tap.
* @author [email protected] (Rachel Fenichel)
*/
'use strict';
goog.provide('Blockly.Gesture');
goog.require('Blockly.BlockDragger');
goog.require('Blockly.constants');
goog.require('Blockly.Events');
goog.require('Blockly.FlyoutDragger');
goog.require('Blockly.Tooltip');
goog.require('Blockly.Touch');
goog.require('Blockly.WorkspaceDragger');
goog.require('goog.asserts');
goog.require('goog.math.Coordinate');
/**
* NB: In this file "start" refers to touchstart, mousedown, and pointerstart
* events. "End" refers to touchend, mouseup, and pointerend events.
* TODO: Consider touchcancel/pointercancel.
*/
/**
* Class for one gesture.
* @param {!Event} e The event that kicked off this gesture.
* @param {!Blockly.WorkspaceSvg} creatorWorkspace The workspace that created
* this gesture and has a reference to it.
* @constructor
*/
Blockly.Gesture = function(e, creatorWorkspace) {
/**
* The position of the mouse when the gesture started. Units are css pixels,
* with (0, 0) at the top left of the browser window (mouseEvent clientX/Y).
* @type {goog.math.Coordinate}
*/
this.mouseDownXY_ = null;
/**
* How far the mouse has moved during this drag, in pixel units.
* (0, 0) is at this.mouseDownXY_.
* @type {goog.math.Coordinate}
   * @private
*/
this.currentDragDeltaXY_ = new goog.math.Coordinate(0, 0);
/**
* The field that the gesture started on, or null if it did not start on a
* field.
* @type {Blockly.Field}
* @private
*/
this.startField_ = null;
/**
* The block that the gesture started on, or null if it did not start on a
* block.
* @type {Blockly.BlockSvg}
* @private
*/
this.startBlock_ = null;
/**
* The block that this gesture targets. If the gesture started on a
* shadow block, this is the first non-shadow parent of the block. If the
* gesture started in the flyout, this is the root block of the block group
* that was clicked or dragged.
* @type {Blockly.BlockSvg}
* @private
*/
this.targetBlock_ = null;
/**
* The workspace that the gesture started on. There may be multiple
* workspaces on a page; this is more accurate than using
* Blockly.getMainWorkspace().
* @type {Blockly.WorkspaceSvg}
* @private
*/
this.startWorkspace_ = null;
/**
* The workspace that created this gesture. This workspace keeps a reference
* to the gesture, which will need to be cleared at deletion.
* This may be different from the start workspace. For instance, a flyout is
* a workspace, but its parent workspace manages gestures for it.
* @type {Blockly.WorkspaceSvg}
* @private
*/
this.creatorWorkspace_ = creatorWorkspace;
/**
* Whether the pointer has at any point moved out of the drag radius.
* A gesture that exceeds the drag radius is a drag even if it ends exactly at
* its start point.
* @type {boolean}
* @private
*/
this.hasExceededDragRadius_ = false;
/**
* Whether the workspace is currently being dragged.
* @type {boolean}
* @private
*/
this.isDraggingWorkspace_ = false;
/**
* Whether the block is currently being dragged.
* @type {boolean}
* @private
*/
this.isDraggingBlock_ = false;
/**
* The event that most recently updated this gesture.
* @type {!Event}
* @private
*/
this.mostRecentEvent_ = e;
/**
* A handle to use to unbind a mouse move listener at the end of a drag.
* Opaque data returned from Blockly.bindEventWithChecks_.
* @type {Array.<!Array>}
* @private
*/
this.onMoveWrapper_ = null;
/**
* A handle to use to unbind a mouse up listener at the end of a drag.
* Opaque data returned from Blockly.bindEventWithChecks_.
* @type {Array.<!Array>}
* @private
*/
this.onUpWrapper_ = null;
/**
* The object tracking a block drag, or null if none is in progress.
* @type {Blockly.BlockDragger}
* @private
*/
this.blockDragger_ = null;
/**
* The object tracking a workspace or flyout workspace drag, or null if none
* is in progress.
* @type {Blockly.WorkspaceDragger}
* @private
*/
this.workspaceDragger_ = null;
/**
* The flyout a gesture started in, if any.
* @type {Blockly.Flyout}
* @private
*/
this.flyout_ = null;
/**
* Boolean for sanity-checking that some code is only called once.
* @type {boolean}
* @private
*/
this.calledUpdateIsDragging_ = false;
/**
* Boolean for sanity-checking that some code is only called once.
* @type {boolean}
* @private
*/
this.hasStarted_ = false;
/**
* Boolean used internally to break a cycle in disposal.
* @type {boolean}
* @private
*/
this.isEnding_ = false;
/**
* True if dragging from the target block should duplicate the target block
* and drag the duplicate instead. This has a lot of side effects.
* @type {boolean}
* @private
*/
this.shouldDuplicateOnDrag_ = false;
};
/**
* Sever all links from this object.
* @package
*/
Blockly.Gesture.prototype.dispose = function() {
Blockly.Touch.clearTouchIdentifier();
Blockly.Tooltip.unblock();
// Clear the owner's reference to this gesture.
this.creatorWorkspace_.clearGesture();
if (this.onMoveWrapper_) {
Blockly.unbindEvent_(this.onMoveWrapper_);
}
if (this.onUpWrapper_) {
Blockly.unbindEvent_(this.onUpWrapper_);
}
this.startField_ = null;
this.startBlock_ = null;
this.targetBlock_ = null;
this.startWorkspace_ = null;
this.flyout_ = null;
if (this.blockDragger_) {
this.blockDragger_.dispose();
this.blockDragger_ = null;
}
if (this.workspaceDragger_) {
this.workspaceDragger_.dispose();
this.workspaceDragger_ = null;
}
};
/**
* Update internal state based on an event.
* @param {!Event} e The most recent mouse or touch event.
* @private
*/
Blockly.Gesture.prototype.updateFromEvent_ = function(e) {
var currentXY = new goog.math.Coordinate(e.clientX, e.clientY);
var changed = this.updateDragDelta_(currentXY);
// Exceeded the drag radius for the first time.
if (changed){
this.updateIsDragging_();
Blockly.longStop_();
}
this.mostRecentEvent_ = e;
};
/**
* DO MATH to set currentDragDeltaXY_ based on the most recent mouse position.
* @param {!goog.math.Coordinate} currentXY The most recent mouse/pointer
* position, in pixel units, with (0, 0) at the window's top left corner.
* @return {boolean} True if the drag just exceeded the drag radius for the
* first time.
* @private
*/
Blockly.Gesture.prototype.updateDragDelta_ = function(currentXY) {
this.currentDragDeltaXY_ = goog.math.Coordinate.difference(currentXY,
this.mouseDownXY_);
if (!this.hasExceededDragRadius_) {
var currentDragDelta = goog.math.Coordinate.magnitude(
this.currentDragDeltaXY_);
// The flyout has a different drag radius from the rest of Blockly.
var limitRadius = this.flyout_ ? Blockly.FLYOUT_DRAG_RADIUS :
Blockly.DRAG_RADIUS;
this.hasExceededDragRadius_ = currentDragDelta > limitRadius;
return this.hasExceededDragRadius_;
}
return false;
};
/**
* Update this gesture to record whether a block is being dragged from the
* flyout.
* This function should be called on a mouse/touch move event the first time the
* drag radius is exceeded. It should be called no more than once per gesture.
* If a block should be dragged from the flyout this function creates the new
* block on the main workspace and updates targetBlock_ and startWorkspace_.
* @return {boolean} True if a block is being dragged from the flyout.
* @private
*/
Blockly.Gesture.prototype.updateIsDraggingFromFlyout_ = function() {
// Disabled blocks may not be dragged from the flyout.
if (this.targetBlock_.disabled) {
return false;
}
if (!this.flyout_.isScrollable() ||
this.flyout_.isDragTowardWorkspace(this.currentDragDeltaXY_)) {
this.startWorkspace_ = this.flyout_.targetWorkspace_;
this.startWorkspace_.updateScreenCalculationsIfScrolled();
// Start the event group now, so that the same event group is used for block
// creation and block dragging.
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
// The start block is no longer relevant, because this is a drag.
this.startBlock_ = null;
this.targetBlock_ = this.flyout_.createBlock(this.targetBlock_);
this.targetBlock_.select();
return true;
}
return false;
};
/**
* Update this gesture to record whether a block is being dragged.
* This function should be called on a mouse/touch move event the first time the
* drag radius is exceeded. It should be called no more than once per gesture.
* If a block should be dragged, either from the flyout or in the workspace,
* this function creates the necessary BlockDragger and starts the drag.
* @return {boolean} true if a block is being dragged.
* @private
*/
Blockly.Gesture.prototype.updateIsDraggingBlock_ = function() {
if (!this.targetBlock_) {
return false;
}
if (this.flyout_) {
this.isDraggingBlock_ = this.updateIsDraggingFromFlyout_();
} else if (this.targetBlock_.isMovable() || this.shouldDuplicateOnDrag_){
this.isDraggingBlock_ = true;
}
if (this.isDraggingBlock_) {
this.startDraggingBlock_();
return true;
}
return false;
};
/**
* Update this gesture to record whether a workspace is being dragged.
* This function should be called on a mouse/touch move event the first time the
* drag radius is exceeded. It should be called no more than once per gesture.
* If a workspace is being dragged this function creates the necessary
* WorkspaceDragger or FlyoutDragger and starts the drag.
* @private
*/
Blockly.Gesture.prototype.updateIsDraggingWorkspace_ = function() {
var wsMovable = this.flyout_ ? this.flyout_.isScrollable() :
this.startWorkspace_ && this.startWorkspace_.isDraggable();
if (!wsMovable) {
return;
}
if (this.flyout_) {
this.workspaceDragger_ = new Blockly.FlyoutDragger(this.flyout_);
} else {
this.workspaceDragger_ = new Blockly.WorkspaceDragger(this.startWorkspace_);
}
this.isDraggingWorkspace_ = true;
this.workspaceDragger_.startDrag();
};
/**
* Update this gesture to record whether anything is being dragged.
* This function should be called on a mouse/touch move event the first time the
* drag radius is exceeded. It should be called no more than once per gesture.
* @private
*/
Blockly.Gesture.prototype.updateIsDragging_ = function() {
// Sanity check.
goog.asserts.assert(!this.calledUpdateIsDragging_,
'updateIsDragging_ should only be called once per gesture.');
this.calledUpdateIsDragging_ = true;
// First check if it was a block drag.
if (this.updateIsDraggingBlock_()) {
return;
}
// Then check if it's a workspace drag.
this.updateIsDraggingWorkspace_();
};
/**
* Create a block dragger and start dragging the selected block.
* @private
*/
Blockly.Gesture.prototype.startDraggingBlock_ = function() {
if (this.shouldDuplicateOnDrag_) {
this.duplicateOnDrag_();
}
this.blockDragger_ = new Blockly.BlockDragger(this.targetBlock_,
this.startWorkspace_);
this.blockDragger_.startBlockDrag(this.currentDragDeltaXY_);
this.blockDragger_.dragBlock(this.mostRecentEvent_,
this.currentDragDeltaXY_);
};
/**
* Start a gesture: update the workspace to indicate that a gesture is in
* progress and bind mousemove and mouseup handlers.
* @param {!Event} e A mouse down or touch start event.
* @package
*/
Blockly.Gesture.prototype.doStart = function(e) {
if (Blockly.utils.isTargetInput(e)) {
this.cancel();
return;
}
this.hasStarted_ = true;
Blockly.BlockSvg.disconnectUiStop_();
this.startWorkspace_.updateScreenCalculationsIfScrolled();
if (this.startWorkspace_.isMutator) {
// Mutator's coordinate system could be out of date because the bubble was
// dragged, the block was moved, the parent workspace zoomed, etc.
this.startWorkspace_.resize();
}
this.startWorkspace_.markFocused();
this.mostRecentEvent_ = e;
// Hide chaff also hides the flyout, so don't do it if the click is in a flyout.
Blockly.hideChaff(!!this.flyout_);
Blockly.Tooltip.block();
if (this.targetBlock_) {
this.targetBlock_.select();
}
if (Blockly.utils.isRightButton(e)) {
this.handleRightClick(e);
return;
}
if (goog.string.caseInsensitiveEquals(e.type, 'touchstart')) {
Blockly.longStart_(e, this);
}
this.mouseDownXY_ = new goog.math.Coordinate(e.clientX, e.clientY);
this.onMoveWrapper_ = Blockly.bindEventWithChecks_(
document, 'mousemove', null, this.handleMove.bind(this));
this.onUpWrapper_ = Blockly.bindEventWithChecks_(
document, 'mouseup', null, this.handleUp.bind(this));
e.preventDefault();
e.stopPropagation();
};
/**
* Handle a mouse move or touch move event.
* @param {!Event} e A mouse move or touch move event.
* @package
*/
Blockly.Gesture.prototype.handleMove = function(e) {
this.updateFromEvent_(e);
if (this.isDraggingWorkspace_) {
this.workspaceDragger_.drag(this.currentDragDeltaXY_);
} else if (this.isDraggingBlock_) {
this.blockDragger_.dragBlock(this.mostRecentEvent_,
this.currentDragDeltaXY_);
}
e.preventDefault();
e.stopPropagation();
};
/**
* Handle a mouse up or touch end event.
* @param {!Event} e A mouse up or touch end event.
* @package
*/
Blockly.Gesture.prototype.handleUp = function(e) {
this.updateFromEvent_(e);
Blockly.longStop_();
if (this.isEnding_) {
console.log('Trying to end a gesture recursively.');
return;
}
this.isEnding_ = true;
// The ordering of these checks is important: drags have higher priority than
// clicks. Fields have higher priority than blocks; blocks have higher
// priority than workspaces.
if (this.isDraggingBlock_) {
this.blockDragger_.endBlockDrag(e, this.currentDragDeltaXY_);
} else if (this.isDraggingWorkspace_) {
this.workspaceDragger_.endDrag(this.currentDragDeltaXY_);
} else if (this.isFieldClick_()) {
this.doFieldClick_();
} else if (this.isBlockClick_()) {
this.doBlockClick_();
} else if (this.isWorkspaceClick_()) {
this.doWorkspaceClick_();
}
e.preventDefault();
e.stopPropagation();
this.dispose();
};
/**
* Cancel an in-progress gesture. If a workspace or block drag is in progress,
* end the drag at the most recent location.
* @package
*/
Blockly.Gesture.prototype.cancel = function() {
// Disposing of a block cancels in-progress drags, but dragging to a delete
// area disposes of a block and leads to recursive disposal. Break that cycle.
if (this.isEnding_) {
console.log('Trying to cancel a gesture recursively.');
return;
}
this.isEnding_ = true;
Blockly.longStop_();
if (this.isDraggingBlock_) {
this.blockDragger_.endBlockDrag(this.mostRecentEvent_,
this.currentDragDeltaXY_);
} else if (this.isDraggingWorkspace_) {
this.workspaceDragger_.endDrag(this.currentDragDeltaXY_);
}
this.dispose();
};
/**
* Handle a real or faked right-click event by showing a context menu.
* @param {!Event} e A mouse move or touch move event.
* @package
*/
Blockly.Gesture.prototype.handleRightClick = function(e) {
if (this.targetBlock_) {
this.bringBlockToFront_();
Blockly.hideChaff(this.flyout_);
this.targetBlock_.showContextMenu_(e);
} else if (this.startWorkspace_ && !this.flyout_) {
Blockly.hideChaff();
this.startWorkspace_.showContextMenu_(e);
}
e.preventDefault();
e.stopPropagation();
this.dispose();
};
/**
* Handle a mousedown/touchstart event on a workspace.
* @param {!Event} e A mouse down or touch start event.
* @param {!Blockly.Workspace} ws The workspace the event hit.
* @package
*/
Blockly.Gesture.prototype.handleWsStart = function(e, ws) {
goog.asserts.assert(!this.hasStarted_,
'Tried to call gesture.handleWsStart, but the gesture had already been ' +
'started.');
this.setStartWorkspace_(ws);
this.mostRecentEvent_ = e;
this.doStart(e);
};
/**
* Handle a mousedown/touchstart event on a flyout.
* @param {!Event} e A mouse down or touch start event.
* @param {!Blockly.Flyout} flyout The flyout the event hit.
* @package
*/
Blockly.Gesture.prototype.handleFlyoutStart = function(e, flyout) {
goog.asserts.assert(!this.hasStarted_,
'Tried to call gesture.handleFlyoutStart, but the gesture had already been ' +
'started.');
this.setStartFlyout_(flyout);
this.handleWsStart(e, flyout.getWorkspace());
};
/**
* Handle a mousedown/touchstart event on a block.
* @param {!Event} e A mouse down or touch start event.
* @param {!Blockly.BlockSvg} block The block the event hit.
* @package
*/
Blockly.Gesture.prototype.handleBlockStart = function(e, block) {
goog.asserts.assert(!this.hasStarted_,
'Tried to call gesture.handleBlockStart, but the gesture had already been ' +
'started.');
this.setStartBlock(block);
this.mostRecentEvent_ = e;
};
/* Begin functions defining what actions to take to execute clicks on each type
* of target. Any developer wanting to add behaviour on clicks should modify
* only this code. */
/**
* Execute a field click.
* @private
*/
Blockly.Gesture.prototype.doFieldClick_ = function() {
this.startField_.showEditor_();
this.bringBlockToFront_();
};
/**
* Execute a block click.
* @private
*/
Blockly.Gesture.prototype.doBlockClick_ = function() {
// Block click in an autoclosing flyout.
if (this.flyout_ && this.flyout_.autoClose) {
if (!this.targetBlock_.disabled) {
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
var newBlock = this.flyout_.createBlock(this.targetBlock_);
newBlock.scheduleSnapAndBump();
}
} else {
// A field is being edited if either the WidgetDiv or DropDownDiv is currently open.
// If a field is being edited, don't fire any click events.
var fieldEditing = Blockly.WidgetDiv.isVisible() || Blockly.DropDownDiv.isVisible();
if (!fieldEditing) {
Blockly.Events.fire(
new Blockly.Events.Ui(this.startBlock_, 'click', undefined, undefined));
// Scratch-specific: also fire a "stack click" event for this stack.
// This is used to toggle the stack when any block in the stack is clicked.
var rootBlock = this.startBlock_.getRootBlock();
Blockly.Events.fire(
new Blockly.Events.Ui(rootBlock, 'stackclick', undefined, undefined));
}
}
this.bringBlockToFront_();
Blockly.Events.setGroup(false);
};
/**
* Execute a workspace click.
* @private
*/
Blockly.Gesture.prototype.doWorkspaceClick_ = function() {
if (Blockly.selected) {
Blockly.selected.unselect();
}
};
/* End functions defining what actions to take to execute clicks on each type
* of target. */
/**
* Move the dragged/clicked block to the front of the workspace so that it is
* not occluded by other blocks.
* @private
*/
Blockly.Gesture.prototype.bringBlockToFront_ = function() {
// Blocks in the flyout don't overlap, so skip the work.
if (this.targetBlock_ && !this.flyout_) {
this.targetBlock_.bringToFront();
}
};
/* Begin functions for populating a gesture at mouse down. */
/**
* Record the field that a gesture started on.
* @param {Blockly.Field} field The field the gesture started on.
* @package
*/
Blockly.Gesture.prototype.setStartField = function(field) {
goog.asserts.assert(!this.hasStarted_,
'Tried to call gesture.setStartField, but the gesture had already been ' +
'started.');
if (!this.startField_) {
this.startField_ = field;
}
};
/**
* Record the block that a gesture started on, and set the target block
* appropriately.
* @param {Blockly.BlockSvg} block The block the gesture started on.
* @package
*/
Blockly.Gesture.prototype.setStartBlock = function(block) {
if (!this.startBlock_) {
this.startBlock_ = block;
this.shouldDuplicateOnDrag_ = Blockly.utils.isShadowArgumentReporter(block);
if (block.isInFlyout && block != block.getRootBlock()) {
this.setTargetBlock_(block.getRootBlock());
} else {
this.setTargetBlock_(block);
}
}
};
/**
* Record the block that a gesture targets, meaning the block that will be
* dragged if this turns into a drag. If this block is a shadow, that will be
* its first non-shadow parent.
* @param {Blockly.BlockSvg} block The block the gesture targets.
* @private
*/
Blockly.Gesture.prototype.setTargetBlock_ = function(block) {
if (block.isShadow() && !this.shouldDuplicateOnDrag_) {
this.setTargetBlock_(block.getParent());
} else {
this.targetBlock_ = block;
}
};
/**
* Record the workspace that a gesture started on.
* @param {Blockly.WorkspaceSvg} ws The workspace the gesture started on.
* @private
*/
Blockly.Gesture.prototype.setStartWorkspace_ = function(ws) {
if (!this.startWorkspace_) {
this.startWorkspace_ = ws;
}
};
/**
* Record the flyout that a gesture started on.
* @param {Blockly.Flyout} flyout The flyout the gesture started on.
* @private
*/
Blockly.Gesture.prototype.setStartFlyout_ = function(flyout) {
if (!this.flyout_) {
this.flyout_ = flyout;
}
};
/* End functions for populating a gesture at mouse down. */
/* Begin helper functions defining types of clicks. Any developer wanting
* to change the definition of a click should modify only this code. */
/**
* Whether this gesture is a click on a block. This should only be called when
* ending a gesture (mouse up, touch end).
* @return {boolean} whether this gesture was a click on a block.
* @private
*/
Blockly.Gesture.prototype.isBlockClick_ = function() {
// A block click starts on a block, never escapes the drag radius, and is not
// a field click.
var hasStartBlock = !!this.startBlock_;
return hasStartBlock && !this.hasExceededDragRadius_ && !this.isFieldClick_();
};
/**
* Whether this gesture is a click on a field. This should only be called when
* ending a gesture (mouse up, touch end).
* @return {boolean} whether this gesture was a click on a field.
* @private
*/
Blockly.Gesture.prototype.isFieldClick_ = function() {
var fieldEditable = this.startField_ ?
this.startField_.isCurrentlyEditable() : false;
return fieldEditable && !this.hasExceededDragRadius_;
};
/**
* Whether this gesture is a click on a workspace. This should only be called
* when ending a gesture (mouse up, touch end).
* @return {boolean} whether this gesture was a click on a workspace.
* @private
*/
Blockly.Gesture.prototype.isWorkspaceClick_ = function() {
var onlyTouchedWorkspace = !this.startBlock_ && !this.startField_;
return onlyTouchedWorkspace && !this.hasExceededDragRadius_;
};
/* End helper functions defining types of clicks. */
/**
* Whether this gesture is a drag of either a workspace or block.
* This function is called externally to block actions that cannot be taken
* mid-drag (e.g. using the keyboard to delete the selected blocks).
* @return {boolean} true if this gesture is a drag of a workspace or block.
* @package
*/
Blockly.Gesture.prototype.isDragging = function() {
return this.isDraggingWorkspace_ || this.isDraggingBlock_;
};
/**
* Whether this gesture has already been started. In theory every mouse down
* has a corresponding mouse up, but in reality it is possible to lose a
* mouse up, leaving an in-process gesture hanging.
 * @return {boolean} whether this gesture has already been started.
* @package
*/
Blockly.Gesture.prototype.hasStarted = function() {
return this.hasStarted_;
};
/* Scratch-specific */
/**
* Don't even think about using this function before talking to rachel-fenichel.
*
* Force a drag to start without clicking and dragging the block itself. Used
* to attach duplicated blocks to the mouse pointer.
* @param {!Object} fakeEvent An object with the properties needed to start a
* drag, including clientX and clientY.
* @param {!Blockly.BlockSvg} block The block to start dragging.
* @package
*/
Blockly.Gesture.prototype.forceStartBlockDrag = function(fakeEvent, block) {
this.handleBlockStart(fakeEvent, block);
this.handleWsStart(fakeEvent, block.workspace);
this.isDraggingBlock_ = true;
this.hasExceededDragRadius_ = true;
this.startDraggingBlock_();
};
/**
* Duplicate the target block and start dragging the duplicated block.
* This should be done once we are sure that it is a block drag, and no earlier.
 * Specifically for argument reporters in custom block definitions.
* @private
*/
Blockly.Gesture.prototype.duplicateOnDrag_ = function() {
var newBlock = null;
try {
// Note: targetBlock_ should have no children. If it has children we would
// need to update shadow block IDs to avoid problems in the VM.
var xmlBlock = Blockly.Xml.blockToDom(this.targetBlock_);
newBlock = Blockly.Xml.domToBlock(xmlBlock, this.startWorkspace_);
// Move the duplicate to original position.
var xy = this.targetBlock_.getRelativeToSurfaceXY();
newBlock.moveBy(xy.x, xy.y);
newBlock.setShadow(false);
} finally {
Blockly.Events.enable();
}
if (!newBlock) {
// Something went wrong.
console.error('Something went wrong while duplicating a block.');
return;
}
if (Blockly.Events.isEnabled()) {
Blockly.Events.fire(new Blockly.Events.BlockCreate(newBlock));
}
newBlock.select();
this.targetBlock_ = newBlock;
};
| 1 | 8,776 | Is this a different fix? | LLK-scratch-blocks | js |
@@ -45,10 +45,12 @@ func (s *DaemonServer) SetDNSServer(ctx context.Context,
}
// backup the /etc/resolv.conf
- cmd := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("ls %s.chaos.bak || cp %s %s.chaos.bak", DNSServerConfFile, DNSServerConfFile, DNSServerConfFile)).
- SetNS(pid, bpm.MountNS).
- SetContext(ctx).
- Build()
+ processBuilder := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("ls %s.chaos.bak || cp %s %s.chaos.bak", DNSServerConfFile, DNSServerConfFile, DNSServerConfFile)).SetContext(ctx)
+ if !req.WithoutNS {
+ processBuilder = processBuilder.SetNS(pid, bpm.MountNS)
+ }
+
+ cmd := processBuilder.Build()
output, err := cmd.CombinedOutput()
if err != nil {
log.Error(err, "execute command error", "command", cmd.String(), "output", output) | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package chaosdaemon
import (
"context"
"fmt"
"github.com/golang/protobuf/ptypes/empty"
"github.com/chaos-mesh/chaos-mesh/pkg/bpm"
pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb"
)
const (
// DNSServerConfFile is the default config file for DNS server
DNSServerConfFile = "/etc/resolv.conf"
)
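// SetDNSServer rewrites /etc/resolv.conf inside the target container's mount
// namespace: when enabling, the file is first backed up to resolv.conf.chaos.bak
// and every nameserver line is pointed at the chaos DNS server; when disabling,
// the backup (if any) is restored.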
func (s *DaemonServer) SetDNSServer(ctx context.Context,
req *pb.SetDNSServerRequest) (*empty.Empty, error) {
log.Info("SetDNSServer", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "GetPidFromContainerID")
return nil, err
}
if req.Enable {
// set dns server to the chaos dns server's address
if len(req.DnsServer) == 0 {
return &empty.Empty{}, fmt.Errorf("invalid set dns server request %v", req)
}
// backup the /etc/resolv.conf
cmd := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("ls %s.chaos.bak || cp %s %s.chaos.bak", DNSServerConfFile, DNSServerConfFile, DNSServerConfFile)).
SetNS(pid, bpm.MountNS).
SetContext(ctx).
Build()
output, err := cmd.CombinedOutput()
if err != nil {
log.Error(err, "execute command error", "command", cmd.String(), "output", output)
return nil, err
}
if len(output) != 0 {
log.Info("command output", "output", string(output))
}
// add chaos dns server to the first line of /etc/resolv.conf
// Note: can not replace the /etc/resolv.conf like `mv temp resolv.conf`, will execute with error `Device or resource busy`
cmd = bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("cp %s temp && sed -i 's/.*nameserver.*/nameserver %s/' temp && cat temp > %s", DNSServerConfFile, req.DnsServer, DNSServerConfFile)).
SetNS(pid, bpm.MountNS).
SetContext(ctx).
Build()
output, err = cmd.CombinedOutput()
if err != nil {
log.Error(err, "execute command error", "command", cmd.String(), "output", output)
return nil, err
}
if len(output) != 0 {
log.Info("command output", "output", string(output))
}
} else {
// recover the dns server's address
cmd := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("ls %s.chaos.bak && cat %s.chaos.bak > %s || true", DNSServerConfFile, DNSServerConfFile, DNSServerConfFile)).
SetNS(pid, bpm.MountNS).
SetContext(ctx).
Build()
output, err := cmd.CombinedOutput()
if err != nil {
log.Error(err, "execute command error", "command", cmd.String(), "output", output)
return nil, err
}
if len(output) != 0 {
log.Info("command output", "output", string(output))
}
}
return &empty.Empty{}, nil
}
| 1 | 19,117 | Why not add a `EnterNS` filed instead of `WithoutNS`? | chaos-mesh-chaos-mesh | go |
@@ -27,6 +27,13 @@ import (
)
var _ = Describe("Endpoints", func() {
+ const (
+ ProtoUDP = 17
+ ProtoIPIP = 4
+ VXLANPort = 0
+ VXLANVNI = 0
+ )
+
for _, trueOrFalse := range []bool{true, false} {
kubeIPVSEnabled := trueOrFalse
var rrConfigNormalMangleReturn = Config{ | 1 | // Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules_test
import (
"strings"
. "github.com/projectcalico/felix/rules"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/projectcalico/felix/ipsets"
. "github.com/projectcalico/felix/iptables"
)
var _ = Describe("Endpoints", func() {
for _, trueOrFalse := range []bool{true, false} {
kubeIPVSEnabled := trueOrFalse
var rrConfigNormalMangleReturn = Config{
IPIPEnabled: true,
IPIPTunnelAddress: nil,
IPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, "cali", nil, nil),
IPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, "cali", nil, nil),
IptablesMarkAccept: 0x8,
IptablesMarkPass: 0x10,
IptablesMarkScratch0: 0x20,
IptablesMarkScratch1: 0x40,
IptablesMarkEndpoint: 0xff00,
IptablesMarkNonCaliEndpoint: 0x0100,
KubeIPVSSupportEnabled: kubeIPVSEnabled,
IptablesMangleAllowAction: "RETURN",
}
var rrConfigConntrackDisabledReturnAction = Config{
IPIPEnabled: true,
IPIPTunnelAddress: nil,
IPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, "cali", nil, nil),
IPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, "cali", nil, nil),
IptablesMarkAccept: 0x8,
IptablesMarkPass: 0x10,
IptablesMarkScratch0: 0x20,
IptablesMarkScratch1: 0x40,
IptablesMarkEndpoint: 0xff00,
IptablesMarkNonCaliEndpoint: 0x0100,
KubeIPVSSupportEnabled: kubeIPVSEnabled,
DisableConntrackInvalid: true,
IptablesFilterAllowAction: "RETURN",
}
var renderer RuleRenderer
var epMarkMapper EndpointMarkMapper
Context("with normal config", func() {
BeforeEach(func() {
renderer = NewRenderer(rrConfigNormalMangleReturn)
epMarkMapper = NewEndpointMarkMapper(rrConfigNormalMangleReturn.IptablesMarkEndpoint,
rrConfigNormalMangleReturn.IptablesMarkNonCaliEndpoint)
})
It("Song should render a minimal workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234", epMarkMapper,
true,
nil,
nil,
nil)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a disabled workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234", epMarkMapper,
false,
nil,
nil,
nil,
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
{Action: DropAction{},
Comment: "Endpoint admin disabled"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
{Action: DropAction{},
Comment: "Endpoint admin disabled"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a fully-loaded workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234",
epMarkMapper,
true,
[]string{"ai", "bi"},
[]string{"ae", "be"},
[]string{"prof1", "prof2"},
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-ai"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pri-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pri-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-ae"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-be"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pro-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pro-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render a host endpoint", func() {
Expect(renderer.HostEndpointToFilterChains("eth0",
epMarkMapper,
[]string{"ai", "bi"}, []string{"ae", "be"},
[]string{"afi", "bfi"}, []string{"afe", "bfe"},
[]string{"prof1", "prof2"})).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-th-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-out"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-ae"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-be"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pro-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pro-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-ai"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
{Action: JumpAction{Target: "cali-pri-prof1"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: JumpAction{Target: "cali-pri-prof2"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if profile accepted"},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-thfw-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-afe"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-bfe"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
},
},
{
Name: "cali-fhfw-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-afi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-bfi"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
{Match: Match().MarkClear(0x10),
Action: DropAction{},
Comment: "Drop if no policies passed packet"},
},
},
{
Name: "cali-sm-eth0",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xa200, Mask: 0xff00}},
},
},
})))
})
It("should render host endpoint raw chains with untracked policies", func() {
Expect(renderer.HostEndpointToRawChains("eth0", []string{"c"}, []string{"c"})).To(Equal([]*Chain{
{
Name: "cali-th-eth0",
Rules: []Rule{
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-out"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-po-c"}},
// Extra NOTRACK action before returning in raw table.
{Match: Match().MarkSingleBitSet(0x8),
Action: NoTrackAction{}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
{
Name: "cali-fh-eth0",
Rules: []Rule{
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
// Extra NOTRACK action before returning in raw table.
{Match: Match().MarkSingleBitSet(0x8),
Action: NoTrackAction{}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
It("should render host endpoint mangle chains with pre-DNAT policies", func() {
Expect(renderer.HostEndpointToMangleChains(
"eth0",
[]string{"c"},
)).To(Equal([]*Chain{
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Match: Match().ConntrackState("INVALID"),
Action: DropAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
})
Describe("with ctstate=INVALID disabled", func() {
BeforeEach(func() {
renderer = NewRenderer(rrConfigConntrackDisabledReturnAction)
epMarkMapper = NewEndpointMarkMapper(rrConfigConntrackDisabledReturnAction.IptablesMarkEndpoint,
rrConfigConntrackDisabledReturnAction.IptablesMarkNonCaliEndpoint)
})
It("should render a minimal workload endpoint", func() {
Expect(renderer.WorkloadEndpointToIptablesChains(
"cali1234",
epMarkMapper,
true,
nil,
nil,
nil,
)).To(Equal(trimSMChain(kubeIPVSEnabled, []*Chain{
{
Name: "cali-tw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-fw-cali1234",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: 0x8}},
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: ReturnAction{}},
{Action: ClearMarkAction{Mark: 0x8}},
{Action: DropAction{},
Comment: "Drop if no profiles matched"},
},
},
{
Name: "cali-sm-cali1234",
Rules: []Rule{
{Action: SetMaskedMarkAction{Mark: 0xd400, Mask: 0xff00}},
},
},
})))
})
It("should render host endpoint mangle chains with pre-DNAT policies", func() {
Expect(renderer.HostEndpointToMangleChains(
"eth0",
[]string{"c"},
)).To(Equal([]*Chain{
{
Name: "cali-fh-eth0",
Rules: []Rule{
// conntrack rules.
{Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: AcceptAction{}},
// Host endpoints get extra failsafe rules.
{Action: JumpAction{Target: "cali-failsafe-in"}},
{Action: ClearMarkAction{Mark: 0x8}},
{Comment: "Start of policies",
Action: ClearMarkAction{Mark: 0x10}},
{Match: Match().MarkClear(0x10),
Action: JumpAction{Target: "cali-pi-c"}},
{Match: Match().MarkSingleBitSet(0x8),
Action: ReturnAction{},
Comment: "Return if policy accepted"},
// No drop actions or profiles in raw table.
},
},
}))
})
})
}
})
func trimSMChain(ipvsEnable bool, chains []*Chain) []*Chain {
result := []*Chain{}
for _, chain := range chains {
if !ipvsEnable && strings.HasPrefix(chain.Name, "cali-sm") {
continue
}
result = append(result, chain)
}
return result
}
| 1 | 16,990 | Same points as in other test file. | projectcalico-felix | go |
@@ -158,6 +158,16 @@ void SettingsStruct_tmpl<N_TASKS>::ApDontForceSetup(bool value) {
bitWrite(VariousBits1, 14, value);
}
+template<unsigned int N_TASKS>
+bool SettingsStruct_tmpl<N_TASKS>::JSONBoolWithQuotes() const {
+ return bitRead(VariousBits1, 15);
+}
+
+template<unsigned int N_TASKS>
+void SettingsStruct_tmpl<N_TASKS>::JSONBoolWithQuotes(bool value) {
+ bitWrite(VariousBits1, 15, value);
+}
+
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::CombineTaskValues_SingleEvent(taskIndex_t taskIndex) const {
if (validTaskIndex(taskIndex)) | 1 | #include "../DataStructs/SettingsStruct.h"
#include "../Globals/Plugins.h"
#include "../Globals/CPlugins.h"
#include "../CustomBuild/ESPEasyLimits.h"
#include "../DataStructs/DeviceStruct.h"
#include "../../ESPEasy_common.h"
template<unsigned int N_TASKS>
SettingsStruct_tmpl<N_TASKS>::SettingsStruct_tmpl() : ResetFactoryDefaultPreference(0) {
clearAll();
clearNetworkSettings();
}
// VariousBits1 defaults to 0, keep in mind when adding bit lookups.
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::appendUnitToHostname() const {
return !bitRead(VariousBits1, 1);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::appendUnitToHostname(bool value) {
bitWrite(VariousBits1, 1, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::uniqueMQTTclientIdReconnect_unused() const {
return bitRead(VariousBits1, 2);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::uniqueMQTTclientIdReconnect_unused(bool value) {
bitWrite(VariousBits1, 2, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::OldRulesEngine() const {
#ifdef WEBSERVER_NEW_RULES
return !bitRead(VariousBits1, 3);
#else
return true;
#endif
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::OldRulesEngine(bool value) {
bitWrite(VariousBits1, 3, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::ForceWiFi_bg_mode() const {
return bitRead(VariousBits1, 4);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::ForceWiFi_bg_mode(bool value) {
bitWrite(VariousBits1, 4, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::WiFiRestart_connection_lost() const {
return bitRead(VariousBits1, 5);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::WiFiRestart_connection_lost(bool value) {
bitWrite(VariousBits1, 5, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::EcoPowerMode() const {
return bitRead(VariousBits1, 6);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::EcoPowerMode(bool value) {
bitWrite(VariousBits1, 6, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::WifiNoneSleep() const {
return bitRead(VariousBits1, 7);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::WifiNoneSleep(bool value) {
bitWrite(VariousBits1, 7, value);
}
// Enable send gratuitous ARP by default, so invert the values (default = 0)
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::gratuitousARP() const {
return !bitRead(VariousBits1, 8);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::gratuitousARP(bool value) {
bitWrite(VariousBits1, 8, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::TolerantLastArgParse() const {
return bitRead(VariousBits1, 9);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::TolerantLastArgParse(bool value) {
bitWrite(VariousBits1, 9, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::SendToHttp_ack() const {
return bitRead(VariousBits1, 10);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::SendToHttp_ack(bool value) {
bitWrite(VariousBits1, 10, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseESPEasyNow() const {
return bitRead(VariousBits1, 11);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseESPEasyNow(bool value) {
bitWrite(VariousBits1, 11, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::IncludeHiddenSSID() const {
return bitRead(VariousBits1, 12);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::IncludeHiddenSSID(bool value) {
bitWrite(VariousBits1, 12, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseMaxTXpowerForSending() const {
return bitRead(VariousBits1, 13);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseMaxTXpowerForSending(bool value) {
bitWrite(VariousBits1, 13, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::ApDontForceSetup() const {
return bitRead(VariousBits1, 14);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::ApDontForceSetup(bool value) {
bitWrite(VariousBits1, 14, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::CombineTaskValues_SingleEvent(taskIndex_t taskIndex) const {
if (validTaskIndex(taskIndex))
return bitRead(TaskDeviceSendDataFlags[taskIndex], 0);
return false;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::CombineTaskValues_SingleEvent(taskIndex_t taskIndex, bool value) {
if (validTaskIndex(taskIndex))
bitWrite(TaskDeviceSendDataFlags[taskIndex], 0, value);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::validate() {
if (UDPPort > 65535) { UDPPort = 0; }
if ((Latitude < -90.0f) || (Latitude > 90.0f)) { Latitude = 0.0f; }
if ((Longitude < -180.0f) || (Longitude > 180.0f)) { Longitude = 0.0f; }
if (VariousBits1 > (1 << 30)) { VariousBits1 = 0; }
ZERO_TERMINATE(Name);
ZERO_TERMINATE(NTPHost);
if ((I2C_clockSpeed == 0) || (I2C_clockSpeed > 3400000)) { I2C_clockSpeed = DEFAULT_I2C_CLOCK_SPEED; }
if (WebserverPort == 0) { WebserverPort = 80;}
if (SyslogPort == 0) { SyslogPort = 514; }
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::networkSettingsEmpty() const {
return IP[0] == 0 && Gateway[0] == 0 && Subnet[0] == 0 && DNS[0] == 0;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearNetworkSettings() {
for (byte i = 0; i < 4; ++i) {
IP[i] = 0;
Gateway[i] = 0;
Subnet[i] = 0;
DNS[i] = 0;
ETH_IP[i] = 0;
ETH_Gateway[i] = 0;
ETH_Subnet[i] = 0;
ETH_DNS[i] = 0;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTimeSettings() {
UseNTP = false;
ZERO_FILL(NTPHost);
TimeZone = 0;
DST = false;
DST_Start = 0;
DST_End = 0;
Latitude = 0.0f;
Longitude = 0.0f;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearNotifications() {
for (byte i = 0; i < NOTIFICATION_MAX; ++i) {
Notification[i] = 0;
NotificationEnabled[i] = false;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearControllers() {
for (controllerIndex_t i = 0; i < CONTROLLER_MAX; ++i) {
Protocol[i] = 0;
ControllerEnabled[i] = false;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTasks() {
for (taskIndex_t task = 0; task < N_TASKS; ++task) {
clearTask(task);
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearLogSettings() {
SyslogLevel = 0;
SerialLogLevel = 0;
WebLogLevel = 0;
SDLogLevel = 0;
SyslogFacility = DEFAULT_SYSLOG_FACILITY;
for (byte i = 0; i < 4; ++i) { Syslog_IP[i] = 0; }
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearUnitNameSettings() {
Unit = 0;
ZERO_FILL(Name);
UDPPort = 0;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearMisc() {
PID = 0;
Version = 0;
Build = 0;
IP_Octet = 0;
Delay = 0;
Pin_i2c_sda = DEFAULT_PIN_I2C_SDA;
Pin_i2c_scl = DEFAULT_PIN_I2C_SCL;
Pin_status_led = DEFAULT_PIN_STATUS_LED;
Pin_sd_cs = -1;
ETH_Phy_Addr = DEFAULT_ETH_PHY_ADDR;
ETH_Pin_mdc = DEFAULT_ETH_PIN_MDC;
ETH_Pin_mdio = DEFAULT_ETH_PIN_MDIO;
ETH_Pin_power = DEFAULT_ETH_PIN_POWER;
ETH_Phy_Type = DEFAULT_ETH_PHY_TYPE;
ETH_Clock_Mode = DEFAULT_ETH_CLOCK_MODE;
NetworkMedium = DEFAULT_NETWORK_MEDIUM;
I2C_clockSpeed_Slow = DEFAULT_I2C_CLOCK_SPEED_SLOW;
I2C_Multiplexer_Type = I2C_MULTIPLEXER_NONE;
I2C_Multiplexer_Addr = -1;
for (taskIndex_t x = 0; x < TASKS_MAX; x++) {
I2C_Multiplexer_Channel[x] = -1;
}
I2C_Multiplexer_ResetPin = -1;
{
// Here we initialize all data to 0, so this is the ONLY reason why PinBootStates
// can now be directly accessed.
// In all other use cases, use the get and set functions for it.
constexpr byte maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
for (byte i = 0; i < maxStates; ++i) {
PinBootStates[i] = 0;
}
#ifdef ESP32
constexpr byte maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
for (byte i = 0; i < maxStatesesp32; ++i) {
PinBootStates_ESP32[i] = 0;
}
#endif
}
BaudRate = 0;
MessageDelay_unused = 0;
deepSleep_wakeTime = 0;
CustomCSS = false;
WDI2CAddress = 0;
UseRules = false;
UseSerial = true;
UseSSDP = false;
WireClockStretchLimit = 0;
I2C_clockSpeed = 400000;
WebserverPort = 80;
SyslogPort = 514;
GlobalSync = false;
ConnectionFailuresThreshold = 0;
MQTTRetainFlag_unused = false;
InitSPI = DEFAULT_SPI;
Pin_status_led_Inversed = false;
deepSleepOnFail = false;
UseValueLogger = false;
ArduinoOTAEnable = false;
UseRTOSMultitasking = false;
Pin_Reset = -1;
StructSize = sizeof(SettingsStruct_tmpl<N_TASKS>);
MQTTUseUnitNameAsClientId_unused = 0;
VariousBits1 = 0;
OldRulesEngine(DEFAULT_RULES_OLDENGINE);
ForceWiFi_bg_mode(DEFAULT_WIFI_FORCE_BG_MODE);
WiFiRestart_connection_lost(DEFAULT_WIFI_RESTART_WIFI_CONN_LOST);
EcoPowerMode(DEFAULT_ECO_MODE);
WifiNoneSleep(DEFAULT_WIFI_NONE_SLEEP);
gratuitousARP(DEFAULT_GRATUITOUS_ARP);
TolerantLastArgParse(DEFAULT_TOLERANT_LAST_ARG_PARSE);
SendToHttp_ack(DEFAULT_SEND_TO_HTTP_ACK);
ApDontForceSetup(DEFAULT_AP_DONT_FORCE_SETUP);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearAll() {
clearMisc();
clearTimeSettings();
clearNetworkSettings();
clearNotifications();
clearControllers();
clearTasks();
clearLogSettings();
clearUnitNameSettings();
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTask(taskIndex_t task) {
if (task >= N_TASKS) { return; }
for (controllerIndex_t i = 0; i < CONTROLLER_MAX; ++i) {
TaskDeviceID[i][task] = 0;
TaskDeviceSendData[i][task] = false;
}
TaskDeviceNumber[task] = 0;
OLD_TaskDeviceID[task] = 0; // UNUSED: this can be removed
TaskDevicePin1[task] = -1;
TaskDevicePin2[task] = -1;
TaskDevicePin3[task] = -1;
TaskDevicePort[task] = 0;
TaskDevicePin1PullUp[task] = false;
for (byte cv = 0; cv < PLUGIN_CONFIGVAR_MAX; ++cv) {
TaskDevicePluginConfig[task][cv] = 0;
}
TaskDevicePin1Inversed[task] = false;
for (byte cv = 0; cv < PLUGIN_CONFIGFLOATVAR_MAX; ++cv) {
TaskDevicePluginConfigFloat[task][cv] = 0.0f;
}
for (byte cv = 0; cv < PLUGIN_CONFIGLONGVAR_MAX; ++cv) {
TaskDevicePluginConfigLong[task][cv] = 0;
}
TaskDeviceSendDataFlags[task] = 0;
OLD_TaskDeviceGlobalSync[task]= 0;
TaskDeviceDataFeed[task] = 0;
TaskDeviceTimer[task] = 0;
TaskDeviceEnabled[task] = false;
I2C_Multiplexer_Channel[task] = -1;
}
template<unsigned int N_TASKS>
String SettingsStruct_tmpl<N_TASKS>::getHostname() const {
return this->getHostname(this->appendUnitToHostname());
}
template<unsigned int N_TASKS>
String SettingsStruct_tmpl<N_TASKS>::getHostname(bool appendUnit) const {
String hostname = this->Name;
if ((this->Unit != 0) && appendUnit) { // only append non-zero unit number
hostname += '_';
hostname += this->Unit;
}
return hostname;
}
template<unsigned int N_TASKS>
PinBootState SettingsStruct_tmpl<N_TASKS>::getPinBootState(uint8_t gpio_pin) const {
constexpr byte maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
if (gpio_pin < maxStates) {
return static_cast<PinBootState>(PinBootStates[gpio_pin]);
}
#ifdef ESP32
constexpr byte maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
const uint8_t addr = gpio_pin - maxStates;
if (addr < maxStatesesp32) {
return static_cast<PinBootState>(PinBootStates_ESP32[addr]);
}
#endif
return PinBootState::Default_state;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::setPinBootState(uint8_t gpio_pin, PinBootState state) {
constexpr byte maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
if (gpio_pin < maxStates) {
PinBootStates[gpio_pin] = static_cast<int8_t>(state);
}
#ifdef ESP32
constexpr byte maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
const uint8_t addr = gpio_pin - maxStates;
if (addr < maxStatesesp32) {
PinBootStates_ESP32[addr] = static_cast<int8_t>(state);
}
#endif
}
template<unsigned int N_TASKS>
float SettingsStruct_tmpl<N_TASKS>::getWiFi_TX_power() const {
return WiFi_TX_power / 4.0f;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::setWiFi_TX_power(float dBm) {
WiFi_TX_power = dBm * 4.0f;
}
| 1 | 21,651 | I have a PR pending, can you change this bit index to `16`? | letscontrolit-ESPEasy | cpp |
@@ -310,11 +310,12 @@ public class JavaParserInterfaceDeclaration extends AbstractTypeDeclaration impl
private ResolvedReferenceType toReferenceType(ClassOrInterfaceType classOrInterfaceType) {
SymbolReference<? extends ResolvedTypeDeclaration> ref = null;
- if (classOrInterfaceType.toString().indexOf('.') > -1) {
- ref = typeSolver.tryToSolveType(classOrInterfaceType.toString());
+ String typeName = classOrInterfaceType.getNameAsString();
+ if (typeName.indexOf('.') > -1) {
+ ref = typeSolver.tryToSolveType(typeName);
}
if (ref == null || !ref.isSolved()) {
- ref = solveType(classOrInterfaceType.toString(), typeSolver);
+ ref = solveType(typeName, typeSolver);
}
if (!ref.isSolved()) {
ref = solveType(classOrInterfaceType.getName().getId(), typeSolver); | 1 | /*
* Copyright 2016 Federico Tomassetti
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.javaparser.symbolsolver.javaparsermodel.declarations;
import com.github.javaparser.ast.AccessSpecifier;
import com.github.javaparser.ast.Node;
import com.github.javaparser.ast.body.BodyDeclaration;
import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration;
import com.github.javaparser.ast.expr.AnnotationExpr;
import com.github.javaparser.ast.type.ClassOrInterfaceType;
import com.github.javaparser.resolution.UnsolvedSymbolException;
import com.github.javaparser.resolution.declarations.*;
import com.github.javaparser.resolution.types.ResolvedReferenceType;
import com.github.javaparser.resolution.types.ResolvedType;
import com.github.javaparser.symbolsolver.core.resolution.Context;
import com.github.javaparser.symbolsolver.javaparsermodel.JavaParserFacade;
import com.github.javaparser.symbolsolver.javaparsermodel.JavaParserFactory;
import com.github.javaparser.symbolsolver.logic.AbstractTypeDeclaration;
import com.github.javaparser.symbolsolver.model.resolution.SymbolReference;
import com.github.javaparser.symbolsolver.model.resolution.TypeSolver;
import com.github.javaparser.symbolsolver.model.typesystem.LazyType;
import com.github.javaparser.symbolsolver.model.typesystem.ReferenceTypeImpl;
import com.github.javaparser.symbolsolver.resolution.SymbolSolver;
import java.util.*;
import java.util.stream.Collectors;
/**
* @author Federico Tomassetti
*/
public class JavaParserInterfaceDeclaration extends AbstractTypeDeclaration implements ResolvedInterfaceDeclaration {
private TypeSolver typeSolver;
private ClassOrInterfaceDeclaration wrappedNode;
private JavaParserTypeAdapter<ClassOrInterfaceDeclaration> javaParserTypeAdapter;
public JavaParserInterfaceDeclaration(ClassOrInterfaceDeclaration wrappedNode, TypeSolver typeSolver) {
if (!wrappedNode.isInterface()) {
throw new IllegalArgumentException();
}
this.wrappedNode = wrappedNode;
this.typeSolver = typeSolver;
this.javaParserTypeAdapter = new JavaParserTypeAdapter<>(wrappedNode, typeSolver);
}
@Override
public Set<ResolvedMethodDeclaration> getDeclaredMethods() {
Set<ResolvedMethodDeclaration> methods = new HashSet<>();
for (BodyDeclaration<?> member : wrappedNode.getMembers()) {
if (member instanceof com.github.javaparser.ast.body.MethodDeclaration) {
methods.add(new JavaParserMethodDeclaration((com.github.javaparser.ast.body.MethodDeclaration) member, typeSolver));
}
}
return methods;
}
public Context getContext() {
return JavaParserFactory.getContext(wrappedNode, typeSolver);
}
public ResolvedType getUsage(Node node) {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JavaParserInterfaceDeclaration that = (JavaParserInterfaceDeclaration) o;
if (!wrappedNode.equals(that.wrappedNode)) return false;
return true;
}
@Override
public int hashCode() {
return wrappedNode.hashCode();
}
@Override
public String getName() {
return wrappedNode.getName().getId();
}
@Override
public ResolvedInterfaceDeclaration asInterface() {
return this;
}
@Override
public boolean hasDirectlyAnnotation(String canonicalName) {
for (AnnotationExpr annotationExpr : wrappedNode.getAnnotations()) {
if (solveType(annotationExpr.getName().getId(), typeSolver).getCorrespondingDeclaration().getQualifiedName().equals(canonicalName)) {
return true;
}
}
return false;
}
@Override
public boolean isInterface() {
return true;
}
@Override
public List<ResolvedReferenceType> getInterfacesExtended() {
List<ResolvedReferenceType> interfaces = new ArrayList<>();
for (ClassOrInterfaceType t : wrappedNode.getExtendedTypes()) {
interfaces.add(new ReferenceTypeImpl(solveType(t.getName().getId(), typeSolver).getCorrespondingDeclaration().asInterface(), typeSolver));
}
return interfaces;
}
@Override
public String getPackageName() {
return javaParserTypeAdapter.getPackageName();
}
@Override
public String getClassName() {
return javaParserTypeAdapter.getClassName();
}
@Override
public String getQualifiedName() {
return javaParserTypeAdapter.getQualifiedName();
}
@Override
public boolean isAssignableBy(ResolvedReferenceTypeDeclaration other) {
return javaParserTypeAdapter.isAssignableBy(other);
}
@Override
public boolean isAssignableBy(ResolvedType type) {
return javaParserTypeAdapter.isAssignableBy(type);
}
@Override
public boolean canBeAssignedTo(ResolvedReferenceTypeDeclaration other) {
// TODO consider generic types
if (this.getQualifiedName().equals(other.getQualifiedName())) {
return true;
}
if (this.wrappedNode.getExtendedTypes() != null) {
for (ClassOrInterfaceType type : wrappedNode.getExtendedTypes()) {
ResolvedReferenceTypeDeclaration ancestor = (ResolvedReferenceTypeDeclaration) new SymbolSolver(typeSolver).solveType(type);
if (ancestor.canBeAssignedTo(other)) {
return true;
}
}
}
if (this.wrappedNode.getImplementedTypes() != null) {
for (ClassOrInterfaceType type : wrappedNode.getImplementedTypes()) {
ResolvedReferenceTypeDeclaration ancestor = (ResolvedReferenceTypeDeclaration) new SymbolSolver(typeSolver).solveType(type);
if (ancestor.canBeAssignedTo(other)) {
return true;
}
}
}
return false;
}
@Override
public boolean isTypeParameter() {
return false;
}
@Override
public List<ResolvedFieldDeclaration> getAllFields() {
List<ResolvedFieldDeclaration> fields = javaParserTypeAdapter.getFieldsForDeclaredVariables();
getAncestors().forEach(ancestor -> ancestor.getTypeDeclaration().getAllFields().forEach(f -> {
fields.add(new ResolvedFieldDeclaration() {
@Override
public AccessSpecifier accessSpecifier() {
return f.accessSpecifier();
}
@Override
public String getName() {
return f.getName();
}
@Override
public ResolvedType getType() {
return ancestor.useThisTypeParametersOnTheGivenType(f.getType());
}
@Override
public boolean isStatic() {
return f.isStatic();
}
@Override
public ResolvedTypeDeclaration declaringType() {
return f.declaringType();
}
});
}));
return fields;
}
@Override
public String toString() {
return "JavaParserInterfaceDeclaration{" +
"wrappedNode=" + wrappedNode +
'}';
}
@Deprecated
public SymbolReference<ResolvedTypeDeclaration> solveType(String name, TypeSolver typeSolver) {
if (this.wrappedNode.getName().getId().equals(name)) {
return SymbolReference.solved(this);
}
SymbolReference<ResolvedTypeDeclaration> ref = javaParserTypeAdapter.solveType(name, typeSolver);
if (ref.isSolved()) {
return ref;
}
String prefix = wrappedNode.getName() + ".";
if (name.startsWith(prefix) && name.length() > prefix.length()) {
return new JavaParserInterfaceDeclaration(this.wrappedNode, typeSolver).solveType(name.substring(prefix.length()), typeSolver);
}
return getContext().getParent().solveType(name, typeSolver);
}
@Override
public List<ResolvedReferenceType> getAncestors() {
List<ResolvedReferenceType> ancestors = new ArrayList<>();
if (wrappedNode.getExtendedTypes() != null) {
for (ClassOrInterfaceType extended : wrappedNode.getExtendedTypes()) {
ancestors.add(toReferenceType(extended));
}
}
if (wrappedNode.getImplementedTypes() != null) {
for (ClassOrInterfaceType implemented : wrappedNode.getImplementedTypes()) {
ancestors.add(toReferenceType(implemented));
}
}
return ancestors;
}
@Override
public List<ResolvedTypeParameterDeclaration> getTypeParameters() {
if (this.wrappedNode.getTypeParameters() == null) {
return Collections.emptyList();
} else {
return this.wrappedNode.getTypeParameters().stream().map(
(tp) -> new JavaParserTypeParameter(tp, typeSolver)
).collect(Collectors.toList());
}
}
/**
* Returns the JavaParser node associated with this JavaParserInterfaceDeclaration.
*
* @return A visitable JavaParser node wrapped by this object.
*/
public ClassOrInterfaceDeclaration getWrappedNode() {
return wrappedNode;
}
@Override
public AccessSpecifier accessSpecifier() {
return Helper.toAccessLevel(wrappedNode.getModifiers());
}
@Override
public Set<ResolvedReferenceTypeDeclaration> internalTypes() {
Set<ResolvedReferenceTypeDeclaration> res = new HashSet<>();
for (BodyDeclaration<?> member : this.wrappedNode.getMembers()) {
if (member instanceof com.github.javaparser.ast.body.TypeDeclaration) {
res.add(JavaParserFacade.get(typeSolver).getTypeDeclaration((com.github.javaparser.ast.body.TypeDeclaration)member));
}
}
return res;
}
@Override
public Optional<ResolvedReferenceTypeDeclaration> containerType() {
return javaParserTypeAdapter.containerType();
}
///
/// Private methods
///
private ResolvedReferenceType toReferenceType(ClassOrInterfaceType classOrInterfaceType) {
SymbolReference<? extends ResolvedTypeDeclaration> ref = null;
if (classOrInterfaceType.toString().indexOf('.') > -1) {
ref = typeSolver.tryToSolveType(classOrInterfaceType.toString());
}
if (ref == null || !ref.isSolved()) {
ref = solveType(classOrInterfaceType.toString(), typeSolver);
}
if (!ref.isSolved()) {
ref = solveType(classOrInterfaceType.getName().getId(), typeSolver);
}
if (!ref.isSolved()) {
throw new UnsolvedSymbolException(classOrInterfaceType.getName().getId());
}
if (!classOrInterfaceType.getTypeArguments().isPresent()) {
return new ReferenceTypeImpl(ref.getCorrespondingDeclaration().asReferenceType(), typeSolver);
}
List<ResolvedType> superClassTypeParameters = classOrInterfaceType.getTypeArguments().get()
.stream().map(ta -> new LazyType(v -> JavaParserFacade.get(typeSolver).convert(ta, ta)))
.collect(Collectors.toList());
return new ReferenceTypeImpl(ref.getCorrespondingDeclaration().asReferenceType(), superClassTypeParameters, typeSolver);
}
}
| 1 | 12,345 | we should probably have some utility class for this sort of things | javaparser-javaparser | java |
@@ -1,4 +1,4 @@
-#pylint: disable=missing-docstring, no-else-return, invalid-name, unused-variable, superfluous-parens
+#pylint: disable=missing-docstring, no-else-return, invalid-name, unused-variable, superfluous-parens, try-except-raise
"""Testing inconsistent returns"""
import math
import sys | 1 | #pylint: disable=missing-docstring, no-else-return, invalid-name, unused-variable, superfluous-parens
"""Testing inconsistent returns"""
import math
import sys
# These ones are consistent
def explicit_returns(var):
if var >= 0:
return math.sqrt(var)
else:
return None
def explicit_returns2(var):
if var < 0:
return None
return math.sqrt(var)
def empty_implicit_returns(var):
if var < 0:
return
def returns_in_exceptions():
try:
raise ValueError('test')
except ValueError:
return 1
except (OSError, TypeError):
return 2
def returns_and_exceptions(var):
if var < 10:
return var**2
else:
raise ValueError("Incorrect value")
def returns_and_exceptions_issue1770(var):
try:
if var == 1:
return 'a'
elif var == 2:
return 'b'
else:
raise ValueError
except AssertionError:
return None
def explicit_returns3(arg):
if arg:
return False
else:
if arg < 3:
print('arg < 3')
return True
def explicit_returns4(arg):
if arg:
if arg > 2:
print('arg > 2')
return False
else:
if arg < 3:
print('arg < 3')
return True
def explicit_returns5(arg):
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def nested_function():
def dummy_return():
return True
return dummy_return
def explicit_returns6(x, y, z):
if x: # pylint: disable=no-else-return
a = 1
if y: # pylint: disable=no-else-return
b = 2
return y
else:
c = 3
return x
else:
d = 4
return z
def explicit_returns7(arg):
if arg < 0:
arg = 2 * arg
return 'below 0'
elif arg == 0:
print("Null arg")
return '0'
else:
arg = 3 * arg
return 'above 0'
def bug_1772():
"""Don't check inconsistent return statements inside while loop"""
counter = 1
while True:
counter += 1
if counter == 100:
return 7
def bug_1771(var):
if var == 1:
sys.exit(1)
else:
return var * 2
def bug_1771_with_user_config(var):
# sys.getdefaultencoding is considered as a never
# returning function in the inconsistent_returns.rc file.
if var == 1:
sys.getdefaultencoding()
else:
return var * 2
def bug_1794_inner_func_in_if(var):
# pylint: disable = no-else-return,useless-return
if var:
def _inner():
return None
return None
else:
return None
try:
import ConfigParser as configparser
except ImportError:
import configparser
# Due to the try/except import above, astroid cannot safely
# infer the exception type. It doesn't matter here, because
# as the raise statement is not inside a try/except one, there
# is no need to infer the exception type. It is just an exception
# that is raised.
def bug_1794(a):
for x in range(a):
if x == 100:
return a
raise configparser.NoSectionError('toto')
# Next ones are not consistent
def explicit_implicit_returns(var): # [inconsistent-return-statements]
if var >= 0:
return math.sqrt(var)
def empty_explicit_returns(var): # [inconsistent-return-statements]
if var < 0:
return
return math.sqrt(var)
def explicit_implicit_returns2(arg): # [inconsistent-return-statements]
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def explicit_implicit_returns3(arg): # [inconsistent-return-statements]
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def returns_missing_in_catched_exceptions(arg): # [inconsistent-return-statements]
try:
arg = arg**2
raise ValueError('test')
except ValueError:
print('ValueError')
arg = 0
except (OSError, TypeError):
return 2
def complex_func(arg): # [inconsistent-return-statements]
for i in range(arg):
if i > arg / 2:
break
else:
return arg
def inconsistent_returns_in_nested_function():
def not_consistent_returns_inner(arg): # [inconsistent-return-statements]
for i in range(arg):
if i > arg / 2:
break
else:
return arg
return not_consistent_returns_inner
def bug_1771_counter_example(var): # [inconsistent-return-statements]
if var == 1:
inconsistent_returns_in_nested_function()
else:
return var * 2
class BlargException(Exception):
pass
def blarg(someval):
try:
if someval:
raise BlargException()
return 5
except BlargException:
raise
def bug_1772_counter_example(): # [inconsistent-return-statements]
counter = 1
if counter == 1:
while True:
counter += 1
if counter == 100:
return 7
def bug_1794_inner_func_in_if_counter_example_1(var): # [inconsistent-return-statements]
# pylint: disable = no-else-return,useless-return
if var:
def _inner():
return None
return None
else:
return
def bug_1794_inner_func_in_if_counter_example_2(var): # [inconsistent-return-statements]
# pylint: disable = no-else-return,useless-return
if var:
def _inner():
return
return None
else:
return
def bug_1794_inner_func_in_if_counter_example_3(var): # [inconsistent-return-statements]
# pylint: disable = no-else-return,useless-return
if var:
def _inner():
return None
return None
else:
def _inner2(var_bis): # [inconsistent-return-statements]
if var_bis:
return True
return
| 1 | 9,926 | Curious that I'm not seeing any occurrence of the new error check in this file. Why was it disabled? | PyCQA-pylint | py |
@@ -341,6 +341,13 @@ class Realm {
*/
static deleteFile(config) { }
+ /**
+ * Checks if the Realm already exists on disk.
+ * @param {Realm~Configuration} config The configuration for the Realm.
+ * @throws {Error} if anything in the provided `config` is invalid.
+ */
+ static exists(config) { }
+
/**
* Copy all bundled Realm files to app's default file folder.
* This is only implemented for React Native. | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
/* eslint getter-return: "off" */
/**
* A Realm instance represents a Realm database.
*
* ```js
* const Realm = require('realm');
* ```
*
*/
class Realm {
/**
* Indicates if this Realm contains any objects.
* @type {boolean}
* @readonly
* @since 1.10.0
*/
get empty() { }
/**
* The path to the file where this Realm is stored.
* @type {string}
* @readonly
* @since 0.12.0
*/
get path() { }
/**
* Indicates if this Realm was opened as read-only.
* @type {boolean}
* @readonly
* @since 0.12.0
*/
get readOnly() { }
/**
* A normalized representation of the schema provided in the
* {@link Realm~Configuration Configuration} when this Realm was constructed.
* @type {Realm~ObjectSchema[]}
* @readonly
* @since 0.12.0
*/
get schema() { }
/**
* The current schema version of this Realm.
* @type {number}
* @readonly
* @since 0.12.0
*/
get schemaVersion() { }
/**
* Indicates if this Realm is in a write transaction.
* @type {boolean}
* @readonly
* @since 1.10.3
*/
get isInTransaction() { }
/**
* Indicates if this Realm has been closed.
* @type {boolean}
* @readonly
* @since 2.1.0
*/
get isClosed() { }
/**
* Gets the sync session if this is a synced Realm
* @type {Session}
*/
get syncSession() { }
/**
* Create a new `Realm` instance using the provided `config`. If a Realm does not yet exist
* at `config.path` (or {@link Realm.defaultPath} if not provided), then this constructor
* will create it with the provided `config.schema` (which is _required_ in this case).
* Otherwise, the instance will access the existing Realm from the file at that path.
     * In this case, `config.schema` is _optional_ and, if provided, must not have changed, unless
* `config.schemaVersion` is incremented, in which case the Realm will be automatically
* migrated to use the new schema.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} [config] - **Required** when first creating the Realm.
* @throws {Error} If anything in the provided `config` is invalid.
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
constructor(config) { }
/**
* Open a Realm asynchronously with a promise. If the Realm is synced, it will be fully
* synchronized before it is available.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} config - if no config is defined, it will open the default realm
* @returns {ProgressPromise} - a promise that will be resolved with the Realm instance when it's available.
* @throws {Error} If anything in the provided `config` is invalid.
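     * @example
     * // Illustrative usage sketch only (not part of the original documentation); the
     * // `Dog` schema below is a hypothetical example, not an API requirement.
     * Realm.open({schema: [{name: 'Dog', properties: {name: 'string'}}]})
     *   .then((realm) => {
     *     realm.write(() => {
     *       realm.create('Dog', {name: 'Rex'});
     *     });
     *   });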
*/
static open(config) { }
/**
* Open a Realm asynchronously with a callback. If the Realm is synced, it will be fully
* synchronized before it is available.
* @param {Realm~Configuration} config
* @param {callback(error, realm)} - will be called when the Realm is ready.
* @param {callback(transferred, transferable)} [progressCallback] - an optional callback for download progress notifications
* @throws {Error} If anything in the provided `config` is invalid
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
static openAsync(config, callback, progressCallback) { }
/**
* Return a configuration for a default synced Realm. The server URL for the user will be used as base for
* the URL for the synced Realm. If no user is supplied, the current user will be used.
* @param {Realm.Sync.User} - an optional sync user
* @throws {Error} if zero or multiple users are logged in
* @returns {Realm~Configuration} - a configuration matching a default synced Realm.
* @since 2.3.0
* @deprecated use {@link Sync.User.createConfiguration()} instead.
*/
static automaticSyncConfiguration(user) { }
/**
* Creates a template object for a Realm model class where all optional fields are `undefined` and all required
* fields have the default value for the given data type, either the value set by the `default` property in the
* schema or the default value for the datatype if the schema doesn't specify one, i.e. `0`, false and `""`.
*
* @param {Realm~ObjectSchema} schema object describing the class
*/
static createTemplateObject(objectSchema) { }
/**
* Closes this Realm so it may be re-opened with a newer schema version.
* All objects and collections from this Realm are no longer valid after calling this method.
*/
close() { }
/**
* Returns the granted privileges.
*
* This combines all privileges granted on the Realm/Class/Object by all Roles which
* the current User is a member of into the final privileges which will
* be enforced by the server.
*
* The privilege calculation is done locally using cached data, and inherently may
* be stale. It is possible that this method may indicate that an operation is
* permitted but the server will still reject it if permission is revoked before
* the changes have been integrated on the server.
*
* Non-synchronized Realms always have permission to perform all operations.
*
* @param {(Realm~ObjectType|Realm.Object)} arg - the object type or the object to compute privileges from. If no
* argument is given, the privileges for the Realm is returned.
* @returns {Realm.Permissions.RealmPrivileges|Realm.Permissions.ClassPrivileges|Realm.Permissions.ObjectPrivileges} as the computed privileges as properties
* @since 2.3.0
* @see {Realm.Permissions} for details of privileges and roles.
*/
privileges(arg) { }
/**
* Returns the fine-grained permissions object associated with either the Realm itself or a Realm model class.
*
* @param {Realm~ObjectType} [arg] - If no argument is provided, the Realm-level permissions are returned.
* Otherwise, the Class-level permissions for the provided type is returned.
* @returns {Realm.Permissions.Realm|Realm.Permissions.Class} The permissions object
* @since 2.18.0
     * @see {Realm.Permissions} for details of privileges and roles.
*/
permissions(arg) { }
/**
* Create a new Realm object of the given type and with the specified properties.
* @param {Realm~ObjectType} type - The type of Realm object to create.
* @param {Object} properties - Property values for all required properties without a
* default value.
* @param {boolean|string} [updateMode='never'] - Optional update mode. It can be one of the following values
* - 'never': Objects are only created. If an existing object exists, an exception is thrown. This is the
* default value.
* - 'all': If an existing object is found, all properties provided will be updated, any other properties will
* remain unchanged.
     * - 'modified': If an existing object exists, only properties where the value has actually changed will be
     *   updated. This improves notifications and server side performance but also has implications for how changes
* across devices are merged. For most use cases, the behaviour will match the intuitive behaviour of how
* changes should be merged, but if updating an entire object is considered an atomic operation, this mode
* should not be used.
* @returns {Realm.Object}
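     * @example
     * // Illustrative sketch only (not from the original documentation); the `Person`
     * // type and its `id` primary key are hypothetical.
     * realm.write(() => {
     *   // 'never' (the default) throws if a Person with id 1 already exists.
     *   realm.create('Person', {id: 1, name: 'Ada'});
     *   // 'modified' only writes the properties whose values actually changed.
     *   realm.create('Person', {id: 1, name: 'Ada Lovelace'}, 'modified');
     * });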
*/
create(type, properties, updateMode) {}
/**
* Deletes the provided Realm object, or each one inside the provided collection.
* @param {Realm.Object|Realm.Object[]|Realm.List|Realm.Results} object
*/
delete(object) { }
/**
* Deletes a Realm model, including all of its objects.
* @param {string} name - the model name
*/
deleteModel(name) { }
/**
* **WARNING:** This will delete **all** objects in the Realm!
*/
deleteAll() { }
/**
* Returns all objects of the given `type` in the Realm.
* @param {Realm~ObjectType} type - The type of Realm objects to retrieve.
* @throws {Error} If type passed into this method is invalid.
* @returns {Realm.Results} that will live-update as objects are created and destroyed.
*/
objects(type) { }
/**
* Searches for a Realm object by its primary key.
* @param {Realm~ObjectType} type - The type of Realm object to search for.
* @param {number|string} key - The primary key value of the object to search for.
* @throws {Error} If type passed into this method is invalid or if the object type did
* not have a `primaryKey` specified in its {@link Realm~ObjectSchema ObjectSchema}.
* @returns {Realm.Object|undefined} if no object is found.
* @since 0.14.0
*/
objectForPrimaryKey(type, key) { }
/**
* Add a listener `callback` for the specified event `name`.
* @param {string} name - The name of event that should cause the callback to be called.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function to be called when a change event occurs.
* Each callback will only be called once per event, regardless of the number of times
* it was added.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
addListener(name, callback) { }
/**
* Remove the listener `callback` for the specfied event `name`.
* @param {string} name - The event name.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function that was previously added as a
* listener for this event through the {@link Realm#addListener addListener} method.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
removeListener(name, callback) { }
/**
* Remove all event listeners (restricted to the event `name`, if provided).
* @param {string} [name] - The name of the event whose listeners should be removed.
* _Currently, only the "change" and "schema" events are supported_.
* @throws {Error} When invalid event `name` is supplied
*/
removeAllListeners(name) { }
/**
* Synchronously call the provided `callback` inside a write transaction.
* @param {function()} callback
*/
write(callback) { }
/**
* Initiate a write transaction.
* @throws {Error} When already in write transaction
*/
beginTransaction() { }
/**
* Commit a write transaction.
*/
commitTransaction() { }
/**
* Cancel a write transaction.
*/
cancelTransaction() { }
/**
* Replaces all string columns in this Realm with a string enumeration column and compacts the
* database file.
*
* Cannot be called from a write transaction.
*
* Compaction will not occur if other `Realm` instances exist.
*
* While compaction is in progress, attempts by other threads or processes to open the database will
* wait.
*
* Be warned that resource requirements for compaction is proportional to the amount of live data in
* the database. Compaction works by writing the database contents to a temporary database file and
* then replacing the database with the temporary one.
* @returns {true} if compaction succeeds.
*/
compact() { }
/**
* Writes a compacted copy of the Realm to the given path.
*
* The destination file cannot already exist.
*
* Note that if this method is called from within a write transaction, the current data is written,
* not the data from the point when the previous write transaction was committed.
* @param {string} path path to save the Realm to
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Optional 64-byte encryption key to encrypt the new file with.
*/
writeCopyTo(path, encryptionKey) { }
/**
* Get the current schema version of the Realm at the given path.
* @param {string} path - The path to the file where the
* Realm database is stored.
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Required only when
* accessing encrypted Realms.
* @throws {Error} When passing an invalid or non-matching encryption key.
* @returns {number} version of the schema, or `-1` if no Realm exists at `path`.
*/
static schemaVersion(path, encryptionKey) { }
/**
* Delete the Realm file for the given configuration.
* @param {Realm~Configuration} config
* @throws {Error} If anything in the provided `config` is invalid.
*/
static deleteFile(config) { }
/**
* Copy all bundled Realm files to app's default file folder.
* This is only implemented for React Native.
* @throws {Error} If an I/O error occured or method is not implemented.
*/
static copyBundledRealmFiles() { }
/**
* Get a list of subscriptions. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - Optional parameter to query for either a specific name or pattern (using
     * wildcards `?` and `*`).
* @throws {Error} If `name` is not a string.
* @returns {Realm.Results} containing all current {@link Realm.Sync.NamedSubscription}s.
*/
subscriptions(name) { }
/**
* Unsubscribe a named subscription. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - The name of the subscription.
* @throws {Error} If `name` is not a string or an empty string.
*/
unsubscribe(name) { }
}
/**
* This describes the different options used to create a {@link Realm} instance.
* @typedef Realm~Configuration
* @type {Object}
* @property {ArrayBuffer|ArrayBufferView} [encryptionKey] - The 512-bit (64-byte) encryption
* key used to encrypt and decrypt all data in the Realm.
* @property {callback(Realm, Realm)} [migration] - The function to run if a migration is needed.
* This function should provide all the logic for converting data models from previous schemas
* to the new schema.
* This function takes two arguments:
* - `oldRealm` - The Realm before migration is performed.
* - `newRealm` - The Realm that uses the latest `schema`, which should be modified as necessary.
* @property {boolean} [deleteRealmIfMigrationNeeded=false] - Specifies if this Realm should be deleted
* if a migration is needed.
* @property {callback(number, number)} [shouldCompactOnLaunch] - The function called when opening
* a Realm for the first time during the life of a process to determine if it should be compacted
* before being returned to the user. The function takes two arguments:
* - `totalSize` - The total file size (data + free space)
* - `usedSize` - The total bytes used by data in the file.
* It returns `true` to indicate that an attempt to compact the file should be made. The compaction
* will be skipped if another process is accessing it.
* @property {string} [path={@link Realm.defaultPath}] - The path to the file where the
* Realm database should be stored.
* @property {string} [fifoFilesFallbackPath] - Opening a Realm creates a number of FIFO special files in order to
* coordinate access to the Realm across threads and processes. If the Realm file is stored in a location
* that does not allow the creation of FIFO special files (e.g. FAT32 filesystems), then the Realm cannot be opened.
* In that case Realm needs a different location to store these files and this property defines that location.
* The FIFO special files are very lightweight and the main Realm file will still be stored in the location defined
 * by the `path` property. This property is ignored if the directory defined by `path` allows FIFO special files.
* @property {boolean} [inMemory=false] - Specifies if this Realm should be opened in-memory. This
* still requires a path (can be the default path) to identify the Realm so other processes can
* open the same Realm. The file will also be used as swap space if the Realm becomes bigger than
* what fits in memory, but it is not persistent and will be removed when the last instance
* is closed.
* @property {boolean} [readOnly=false] - Specifies if this Realm should be opened as read-only.
* @property {boolean} [disableFormatUpgrade=false] - Specifies if this Realm's file format should
* be automatically upgraded if it was created with an older version of the Realm library.
* If set to `true` and a file format upgrade is required, an error will be thrown instead.
* @property {Array<Realm~ObjectClass|Realm~ObjectSchema>} [schema] - Specifies all the
* object types in this Realm. **Required** when first creating a Realm at this `path`.
* If omitted, the schema will be read from the existing Realm file.
* @property {number} [schemaVersion] - **Required** (and must be incremented) after
* changing the `schema`.
* @property {Realm.Sync~SyncConfiguration} [sync] - Sync configuration parameters.
*/
/**
* Realm objects will inherit methods, getters, and setters from the `prototype` of this
* constructor. It is **highly recommended** that this constructor inherit from
* {@link Realm.Object}.
* @typedef Realm~ObjectClass
* @type {Class}
* @property {Realm~ObjectSchema} schema - Static property specifying object schema information.
*/
/**
* @typedef Realm~ObjectSchema
* @type {Object}
* @property {string} name - Represents the object type.
* @property {string} [primaryKey] - The name of a `"string"` or `"int"` property
* that must be unique across all objects of this type within the same Realm.
* @property {Object<string, (Realm~PropertyType|Realm~ObjectSchemaProperty)>} properties -
* An object where the keys are property names and the values represent the property type.
*
* @example
* let MyClassSchema = {
* name: 'MyClass',
* primaryKey: 'pk',
* properties: {
* pk: 'int',
* optionalFloatValue: 'float?' // or {type: 'float', optional: true}
* listOfStrings: 'string[]',
* listOfOptionalDates: 'date?[]',
* indexedInt: {type: 'int', indexed: true}
*
* linkToObject: 'MyClass',
* listOfObjects: 'MyClass[]', // or {type: 'list', objectType: 'MyClass'}
* objectsLinkingToThisObject: {type: 'linkingObjects', objectType: 'MyClass', property: 'linkToObject'}
* }
* };
*/
/**
* @typedef Realm~ObjectSchemaProperty
* @type {Object}
* @property {Realm~PropertyType} type - The type of this property.
* @property {Realm~PropertyType} [objectType] - **Required** when `type` is `"list"` or `"linkingObjects"`,
* and must match the type of an object in the same schema, or, for `"list"`
* only, any other type which may be stored as a Realm property.
* @property {string} [property] - **Required** when `type` is `"linkingObjects"`, and must match
* the name of a property on the type specified in `objectType` that links to the type this property belongs to.
* @property {any} [default] - The default value for this property on creation when not
* otherwise specified.
* @property {boolean} [optional] - Signals if this property may be assigned `null` or `undefined`.
* For `"list"` properties of non-object types, this instead signals whether the values inside the list may be assigned `null` or `undefined`.
* This is not supported for `"list"` properties of object types and `"linkingObjects"` properties.
* @property {boolean} [indexed] - Signals if this property should be indexed. Only supported for
* `"string"`, `"int"`, and `"bool"` properties.
* @property {string} [mapTo] - Set this to the name of the underlying property in the Realm file if the Javascript property
* name is different than the name used in the Realm file. This can e.g. be used to have different naming convention in
* Javascript than what is being used in the Realm file. Reading and writing properties must be done using the public
* name. Queries can be done using both the public and the underlying property name.
*/
/**
* The type of an object may either be specified as a string equal to the `name` in a
* {@link Realm~ObjectSchema ObjectSchema} definition, **or** a constructor that was specified
* in the {@link Realm~Configuration configuration} `schema`.
* @typedef Realm~ObjectType
* @type {string|Realm~ObjectClass}
*/
/**
* A property type may be specified as one of the standard builtin types, or as
* an object type inside the same schema.
*
* When specifying property types in an {@linkplain Realm~ObjectSchema object schema}, you
* may append `?` to any of the property types to indicate that it is optional
* (i.e. it can be `null` in addition to the normal values) and `[]` to
* indicate that it is instead a list of that type. For example,
* `optionalIntList: 'int?[]'` would declare a property which is a list of
* nullable integers. The property types reported by {@linkplain Realm.Collection
* collections} and in a Realm's schema will never
* use these forms.
*
* @typedef Realm~PropertyType
* @type {("bool"|"int"|"float"|"double"|"string"|"date"|"data"|"list"|"linkingObjects"|"<ObjectType>")}
*
* @property {boolean} "bool" - Property value may either be `true` or `false`.
* @property {number} "int" - Property may be assigned any number, but will be stored as a
* round integer, meaning anything after the decimal will be truncated.
* @property {number} "float" - Property may be assigned any number, but will be stored as a
* `float`, which may result in a loss of precision.
* @property {number} "double" - Property may be assigned any number, and will have no loss
* of precision.
* @property {string} "string" - Property value may be any arbitrary string.
* @property {Date} "date" - Property may be assigned any `Date` instance.
* @property {ArrayBuffer} "data" - Property may either be assigned an `ArrayBuffer`
* or `ArrayBufferView` (e.g. `DataView`, `Int8Array`, `Float32Array`, etc.) instance,
* but will always be returned as an `ArrayBuffer`.
* @property {Realm.List} "list" - Property may be assigned any ordered collection
* (e.g. `Array`, {@link Realm.List}, {@link Realm.Results}) of objects all matching the
* `objectType` specified in the {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Results} "linkingObjects" - Property is read-only and always returns a {@link Realm.Results}
* of all the objects matching the `objectType` that are linking to the current object
* through the `property` relationship specified in {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Object} "<ObjectType>" - A string that matches the `name` of an object in the
* same schema (see {@link Realm~ObjectSchema ObjectSchema}) – this property may be assigned
* any object of this type from inside the same Realm, and will always be _optional_
* (meaning it may also be assigned `null` or `undefined`).
*/
| 1 | 17,866 | does it return something? | realm-realm-js | js |
@@ -265,7 +265,7 @@ const withData = (
// If we have an error, display the DataErrorComponent.
if ( error ) {
- return ( 'string' !== typeof error ) ? error : getDataErrorComponent( moduleName, error, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
+ return ( 'string' !== typeof error ) ? error : getDataErrorComponent( moduleName, error, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid, data );
}
// If we have zeroData, display the NoDataComponent. | 1 | /**
* withData higher-order component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { each } from 'lodash';
/**
* WordPress dependencies
*/
import { addFilter, addAction } from '@wordpress/hooks';
import { Component } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import { getModulesData } from '../../util';
import getNoDataComponent from '../notifications/nodata';
import getDataErrorComponent from '../notifications/data-error';
import getSetupIncompleteComponent from '../notifications/setup-incomplete';
/**
* A Higher order Component that provides data functionality to Components.
*
* This function takes a React Component that is data dependent, resolving via the data API.
*
* Automatically detects data errors, displaying an error CTA Component. Components can extend the default
* error handling to enable custom error messaging or data shapes.
*
* Components can provide a callback that checks if the data is "zero" - typically when an account is newly established and not yet providing data. In most cases the API returns all 0s, however some APIs may return empty strings or null.
*
* Components can optionally include `handleDataError` and `handleDataSuccess` function as props. `handleDataError` will be
* called with the error message string if there is a data error and called with no string if the data is empty.
* `handleDataSuccess` will be called when data resolves correctly.
*
* @param {WPElement} DataDependentComponent The React Component to render once we have its required data.
* @param {Array} selectData An array of data objects to resolve.
* Each object includes the following properties:
* {string} type The data type. Either 'core' or 'modules'.
* {string} identifier The data identifier, for example a module slug.
* {string} datapoint The datapoint.
* {Object?} data Optional arguments to pass along.
* {number} priority The data request priority, used for batching.
* {number} maxAge How long to cache the data results.
* {string | array} context The context(s) to resolve data, eg 'Dashboard'.
*
* @param {WPElement} loadingComponent A React Component to render while the data is resolving.
* @param {Object} layoutOptions An object with layout options that are passed to the getNoDataComponent and getDataErrorComponent components.
* @param {Function} isDataZero A callback function that is passed the resolved data and returns true
* if the data is "zero".
* @param {Function} getDataError A callback function that is passed the resolved data and returns the
* error message.
*
* @return {WPElement} Component Returns React.Components based on data and state.
* If has data Return DataDependentComponent with data.
* has no data Fallback message when no data.
* in loading state Return loadingComponent.
* has an error Returns error.
*
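 * @example
 * // Illustrative usage sketch only (not part of the original documentation); the
 * // component name, module identifier and datapoint below are hypothetical and only
 * // show the expected shape of `selectData`.
 * export default withData(
 *   MyWidget,
 *   [ {
 *     type: 'modules',
 *     identifier: 'search-console',
 *     datapoint: 'searchanalytics',
 *     data: {},
 *     priority: 1,
 *     maxAge: 3600,
 *     context: 'Dashboard',
 *   } ],
 *   null,
 *   { inGrid: true, fullWidth: false, createGrid: false },
 *   ( returnedData ) => ! returnedData.length
 * );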
*/
const withData = (
DataDependentComponent,
selectData,
loadingComponent = null,
layoutOptions = {
inGrid: false,
fullWidth: false,
createGrid: false,
},
// The default isDataZero handler always returns false, Components must define when data is zero.
	// `isDataZero` is passed `returnedData` and `datapoint`.
isDataZero = () => {
return false;
},
// The default getDataError handler detects data.error and extracts the message from data.error.message or data.error.errors[0].message.
getDataError = ( data ) => {
if ( data && data.error ) {
if ( data.error.message ) {
return data.error.message;
}
if ( data.error.errors && data.error.errors[ 0 ] && data.error.errors[ 0 ].message ) {
return data.error.errors[ 0 ].message;
}
return __( 'Unidentified error', 'google-site-kit' );
}
if ( data && data.errors && data.errors[ 0 ] && data.errors[ 0 ].message ) {
return data.errors[ 0 ].message;
}
if ( data && data.error_data ) {
const errors = Object.values( data.error_data );
// Catch RateLimitExceeded specifically.
if ( errors[ 0 ] && 'RateLimitExceeded' === errors[ 0 ].reason ) {
return __( 'Too many requests have been sent within a given time span. Please reload this page again in a few seconds', 'google-site-kit' );
}
}
if ( data && data.errors ) {
const errors = Object.values( data.errors );
if ( errors[ 0 ] && errors[ 0 ][ 0 ] ) {
return errors[ 0 ][ 0 ];
}
}
// If error is the root of the response, ensure all expected parts are
// present, just to "be sure" that it is an error. All above error
// handlers are legacy and are likely never hit, but let's keep them
// because nobody will ever know.
if ( data.code && data.message && data.data && data.data.status ) {
return data.message;
}
// No error.
return false;
}
) => {
// ...and returns another component...
return class NewComponent extends Component {
constructor( props ) {
super( props );
this.state = {
data: false,
zeroData: false,
error: false,
};
addAction(
'googlesitekit.moduleDataReset',
'googlesitekit.moduleDataResetHandler',
() => {
this.setState( { data: false } );
}
);
/**
* Handle a single datapoint returned from the data API.
*
* Each resolved data point is passed thru this handler to detect errors and zero data conditions, and
* to trigger `handleDataError` and `handleDataSuccess` helpers.
*
* @param {Object} returnedData The data returned from the API.
* @param {Object} requestData The data object for the request.
*/
const handleReturnedData = ( returnedData, requestData ) => {
// If available, `handleDataError` will be called for errors (with a string) and empty data.
const {
handleDataError,
handleDataSuccess,
} = this.props;
const { datapoint, identifier, toState } = requestData;
// Check to see if the returned data is an error. If so, getDataError will return a string.
const error = getDataError( returnedData );
if ( error ) {
// Set an error state on the Component.
this.setState( {
error,
module: identifier,
} );
// If the Component included a `handleDataError` helper, pass it the error message.
if ( handleDataError ) {
handleDataError( error );
}
} else if ( isDataZero( returnedData, datapoint, requestData ) ) { // No data error, next check for zero data.
// If we have a `handleDataError` call it without any parameters (indicating empty data).
if ( handleDataError ) {
handleDataError( error );
}
// Set a zeroData state on the Component.
this.setState( { zeroData: true } );
} else if ( handleDataSuccess ) {
// Success! `handleDataSuccess` will be called (ie. not error or zero).
handleDataSuccess();
}
				// Resolve the returned data by setting state on the Component.
this.setState( {
requestDataToState: toState,
data: returnedData,
datapoint,
module: identifier,
} );
};
// Resolve all selectedData.
each( selectData, ( data ) => {
// Handle single contexts, or arrays of contexts.
if ( Array.isArray( data.context ) ) {
each( data.context, ( acontext ) => {
/**
* Request data for the context.
*/
addFilter( `googlesitekit.module${ acontext }DataRequest`,
`googlesitekit.data${ acontext }`, ( moduleData ) => {
data.callback = ( returnedData ) => {
handleReturnedData( returnedData, data );
};
moduleData.push( data );
return moduleData;
} );
} );
} else {
/**
* Request data for the context.
*/
addFilter( `googlesitekit.module${ data.context }DataRequest`,
`googlesitekit.data${ data.context }`, ( moduleData ) => {
data.callback = ( returnedData ) => {
handleReturnedData( returnedData, data );
};
moduleData.push( data );
return moduleData;
} );
}
} );
}
render() {
const {
data,
datapoint,
module,
zeroData,
error,
requestDataToState,
} = this.state;
// Render the loading component until we have data.
if ( ! data ) {
return loadingComponent;
}
const modulesData = getModulesData();
const moduleName = module ? modulesData[ module ].name : __( 'Site Kit', 'google-site-kit' );
// If module is active but setup not complete.
if ( module && modulesData[ module ].active && ! modulesData[ module ].setupComplete ) {
return getSetupIncompleteComponent( module, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
}
// If we have an error, display the DataErrorComponent.
if ( error ) {
return ( 'string' !== typeof error ) ? error : getDataErrorComponent( moduleName, error, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
}
// If we have zeroData, display the NoDataComponent.
if ( zeroData ) {
return getNoDataComponent( moduleName, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
}
// Render the Component when we have data, passing the datapoint.
return (
<DataDependentComponent
data={ data }
datapoint={ datapoint }
requestDataToState={ requestDataToState }
{ ...this.props }
/>
);
}
};
};
export default withData;
| 1 | 30,436 | This function is also called in various Site Kit module components, where the error object (here `data`) also needs to be passed, otherwise the link to fix the issue won't appear. | google-site-kit-wp | js |
@@ -14,6 +14,8 @@ var log = logging.MustGetLogger("scm")
// An SCM represents an SCM implementation that we can ask for various things.
type SCM interface {
+ // DescribeIdentifier returns the string that is a "human-readable" identifier of the given revision.
+ DescribeIdentifier(sha string) string
// CurrentRevIdentifier returns the string that specifies what the current revision is.
CurrentRevIdentifier() string
// ChangesIn returns a list of modified files in the given diffSpec. | 1 | // Package scm abstracts operations on various tools like git
// Currently, only git is supported.
package scm
import (
"path"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/fs"
)
var log = logging.MustGetLogger("scm")
// An SCM represents an SCM implementation that we can ask for various things.
type SCM interface {
// CurrentRevIdentifier returns the string that specifies what the current revision is.
CurrentRevIdentifier() string
// ChangesIn returns a list of modified files in the given diffSpec.
ChangesIn(diffSpec string, relativeTo string) []string
// ChangedFiles returns a list of modified files since the given commit, optionally including untracked files.
ChangedFiles(fromCommit string, includeUntracked bool, relativeTo string) []string
// IgnoreFile marks a file to be ignored by the SCM.
IgnoreFile(name string) error
// Remove deletes the given files from the SCM.
Remove(names []string) error
// ChangedLines returns the set of lines that have been modified,
// as a map of filename -> affected line numbers.
ChangedLines() (map[string][]int, error)
// Checkout checks out the given revision.
Checkout(revision string) error
}
// New returns a new SCM instance for this repo root.
// It returns nil if there is no known implementation there.
func New(repoRoot string) SCM {
if fs.PathExists(path.Join(repoRoot, ".git")) {
return &git{repoRoot: repoRoot}
}
return nil
}
// NewFallback returns a new SCM instance for this repo root.
// If there is no known implementation it returns a stub.
func NewFallback(repoRoot string) SCM {
if scm := New(repoRoot); scm != nil {
return scm
}
log.Warning("Cannot determine SCM, revision identifiers will be unavailable and `plz query changes/changed` will not work correctly.")
return &stub{}
}
// MustNew returns a new SCM instance for this repo root. It dies on any errors.
func MustNew(repoRoot string) SCM {
scm := New(repoRoot)
if scm == nil {
log.Fatalf("Cannot determine SCM implementation")
}
return scm
}
| 1 | 8,931 | super nit: `revision string` (the passed value might not be a SHA hash). | thought-machine-please | go |
@@ -54,7 +54,7 @@ namespace NLog.Layouts
/// Is this layout initialized? See <see cref="Initialize(NLog.Config.LoggingConfiguration)"/>
/// </summary>
internal bool IsInitialized;
- private bool _scannedForObjects;
+ internal bool _scannedForObjects;
/// <summary>
/// Gets a value indicating whether this layout is thread-agnostic (can be rendered on any thread). | 1 | //
// Copyright (c) 2004-2021 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Layouts
{
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using NLog.Config;
using NLog.Internal;
using NLog.Common;
using JetBrains.Annotations;
/// <summary>
/// Abstract interface that layouts must implement.
/// </summary>
[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Naming", "CA1724:TypeNamesShouldNotMatchNamespaces", Justification = "Few people will see this conflict.")]
[NLogConfigurationItem]
public abstract class Layout : ISupportsInitialize, IRenderable
{
/// <summary>
/// Is this layout initialized? See <see cref="Initialize(NLog.Config.LoggingConfiguration)"/>
/// </summary>
internal bool IsInitialized;
private bool _scannedForObjects;
/// <summary>
/// Gets a value indicating whether this layout is thread-agnostic (can be rendered on any thread).
/// </summary>
/// <remarks>
/// Layout is thread-agnostic if it has been marked with [ThreadAgnostic] attribute and all its children are
/// like that as well.
///
/// Thread-agnostic layouts only use contents of <see cref="LogEventInfo"/> for its output.
/// </remarks>
internal bool ThreadAgnostic { get; set; }
internal bool ThreadSafe { get; set; }
internal bool MutableUnsafe { get; set; }
/// <summary>
/// Gets the level of stack trace information required for rendering.
/// </summary>
internal StackTraceUsage StackTraceUsage { get; private set; }
private const int MaxInitialRenderBufferLength = 16384;
private int _maxRenderedLength;
/// <summary>
/// Gets the logging configuration this target is part of.
/// </summary>
[CanBeNull]
protected internal LoggingConfiguration LoggingConfiguration { get; private set; }
/// <summary>
/// Converts a given text to a <see cref="Layout" />.
/// </summary>
/// <param name="text">Text to be converted.</param>
/// <returns><see cref="SimpleLayout"/> object represented by the text.</returns>
public static implicit operator Layout([Localizable(false)] string text)
{
return FromString(text, ConfigurationItemFactory.Default);
}
/// <summary>
/// Implicitly converts the specified string to a <see cref="SimpleLayout"/>.
/// </summary>
/// <param name="layoutText">The layout string.</param>
/// <returns>Instance of <see cref="SimpleLayout"/>.</returns>'
public static Layout FromString(string layoutText)
{
return FromString(layoutText, ConfigurationItemFactory.Default);
}
/// <summary>
/// Implicitly converts the specified string to a <see cref="SimpleLayout"/>.
/// </summary>
/// <param name="layoutText">The layout string.</param>
/// <param name="configurationItemFactory">The NLog factories to use when resolving layout renderers.</param>
/// <returns>Instance of <see cref="SimpleLayout"/>.</returns>
public static Layout FromString(string layoutText, ConfigurationItemFactory configurationItemFactory)
{
return new SimpleLayout(layoutText, configurationItemFactory);
}
/// <summary>
/// Implicitly converts the specified string to a <see cref="SimpleLayout"/>.
/// </summary>
/// <param name="layoutText">The layout string.</param>
/// <param name="throwConfigExceptions">Whether <see cref="NLogConfigurationException"/> should be thrown on parse errors (false = replace unrecognized tokens with a space).</param>
/// <returns>Instance of <see cref="SimpleLayout"/>.</returns>
public static Layout FromString(string layoutText, bool throwConfigExceptions)
{
try
{
return new SimpleLayout(layoutText, ConfigurationItemFactory.Default, throwConfigExceptions);
}
catch (NLogConfigurationException)
{
throw;
}
catch (Exception ex)
{
if (!throwConfigExceptions || ex.MustBeRethrownImmediately())
throw;
throw new NLogConfigurationException($"Invalid Layout: {layoutText}", ex);
}
}
/// <summary>
/// Create a <see cref="SimpleLayout"/> from a lambda method.
/// </summary>
/// <param name="layoutMethod">Method that renders the layout.</param>
/// <param name="options">Tell if method is safe for concurrent threading.</param>
/// <returns>Instance of <see cref="SimpleLayout"/>.</returns>
public static Layout FromMethod(Func<LogEventInfo, object> layoutMethod, LayoutRenderOptions options = LayoutRenderOptions.None)
{
if (layoutMethod == null)
throw new ArgumentNullException(nameof(layoutMethod));
#if !NETSTANDARD1_3 && !NETSTANDARD1_5
var name = $"{layoutMethod.Method?.DeclaringType?.ToString()}.{layoutMethod.Method?.Name}";
#else
var name = $"{layoutMethod.Target?.ToString()}";
#endif
var layoutRenderer = CreateFuncLayoutRenderer((l, c) => layoutMethod(l), options, name);
return new SimpleLayout(new[] { layoutRenderer }, layoutRenderer.LayoutRendererName, ConfigurationItemFactory.Default);
}
internal static LayoutRenderers.FuncLayoutRenderer CreateFuncLayoutRenderer(Func<LogEventInfo, LoggingConfiguration, object> layoutMethod, LayoutRenderOptions options, string name)
{
if ((options & LayoutRenderOptions.ThreadAgnostic) == LayoutRenderOptions.ThreadAgnostic)
return new LayoutRenderers.FuncThreadAgnosticLayoutRenderer(name, layoutMethod);
else if ((options & LayoutRenderOptions.ThreadSafe) != 0)
return new LayoutRenderers.FuncThreadSafeLayoutRenderer(name, layoutMethod);
else
return new LayoutRenderers.FuncLayoutRenderer(name, layoutMethod);
}
/// <summary>
/// Precalculates the layout for the specified log event and stores the result
/// in per-log event cache.
///
/// Only if the layout doesn't have [ThreadAgnostic] and doesn't contain layouts with [ThreadAgnostic].
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <remarks>
/// Calling this method enables you to store the log event in a buffer
/// and/or potentially evaluate it in another thread even though the
/// layout may contain thread-dependent renderer.
/// </remarks>
public virtual void Precalculate(LogEventInfo logEvent)
{
if (!ThreadAgnostic || MutableUnsafe)
{
Render(logEvent);
}
}
/// <summary>
/// Renders the event info in layout.
/// </summary>
/// <param name="logEvent">The event info.</param>
/// <returns>String representing log event.</returns>
public string Render(LogEventInfo logEvent)
{
if (!IsInitialized)
{
Initialize(LoggingConfiguration);
}
if (!ThreadAgnostic || MutableUnsafe)
{
object cachedValue;
if (logEvent.TryGetCachedLayoutValue(this, out cachedValue))
{
return cachedValue?.ToString() ?? string.Empty;
}
}
string layoutValue = GetFormattedMessage(logEvent) ?? string.Empty;
if (!ThreadAgnostic || MutableUnsafe)
{
// Would be nice to only do this in Precalculate(), but we need to ensure internal cache
// is updated for for custom Layouts that overrides Precalculate (without calling base.Precalculate)
logEvent.AddCachedLayoutValue(this, layoutValue);
}
return layoutValue;
}
internal virtual void PrecalculateBuilder(LogEventInfo logEvent, StringBuilder target)
{
Precalculate(logEvent); // Allow custom Layouts to also work
}
/// <summary>
/// Optimized version of <see cref="Render(LogEventInfo)"/> for internal Layouts. Works best
/// when override of <see cref="RenderFormattedMessage(LogEventInfo, StringBuilder)"/> is available.
/// </summary>
/// <param name="logEvent">The event info.</param>
/// <param name="target">Appends the string representing log event to target</param>
/// <param name="cacheLayoutResult">Should rendering result be cached on LogEventInfo</param>
internal void RenderAppendBuilder(LogEventInfo logEvent, StringBuilder target, bool cacheLayoutResult = false)
{
if (!IsInitialized)
{
Initialize(LoggingConfiguration);
}
if (!ThreadAgnostic || MutableUnsafe)
{
object cachedValue;
if (logEvent.TryGetCachedLayoutValue(this, out cachedValue))
{
target.Append(cachedValue?.ToString() ?? string.Empty);
return;
}
}
else
{
cacheLayoutResult = false;
}
using (var localTarget = new AppendBuilderCreator(target, cacheLayoutResult))
{
RenderFormattedMessage(logEvent, localTarget.Builder);
if (cacheLayoutResult)
{
// when needed as it generates garbage
logEvent.AddCachedLayoutValue(this, localTarget.Builder.ToString());
}
}
}
/// <summary>
/// Valid default implementation of <see cref="GetFormattedMessage" />, when having implemented the optimized <see cref="RenderFormattedMessage"/>
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <param name="reusableBuilder">StringBuilder to help minimize allocations [optional].</param>
/// <returns>The rendered layout.</returns>
internal string RenderAllocateBuilder(LogEventInfo logEvent, StringBuilder reusableBuilder = null)
{
int initialLength = _maxRenderedLength;
if (initialLength > MaxInitialRenderBufferLength)
{
initialLength = MaxInitialRenderBufferLength;
}
var sb = reusableBuilder ?? new StringBuilder(initialLength);
RenderFormattedMessage(logEvent, sb);
if (sb.Length > _maxRenderedLength)
{
_maxRenderedLength = sb.Length;
}
return sb.ToString();
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <param name="target"><see cref="StringBuilder"/> for the result</param>
protected virtual void RenderFormattedMessage(LogEventInfo logEvent, StringBuilder target)
{
target.Append(GetFormattedMessage(logEvent) ?? string.Empty);
}
/// <summary>
/// Initializes this instance.
/// </summary>
/// <param name="configuration">The configuration.</param>
void ISupportsInitialize.Initialize(LoggingConfiguration configuration)
{
Initialize(configuration);
}
/// <summary>
/// Closes this instance.
/// </summary>
void ISupportsInitialize.Close()
{
Close();
}
/// <summary>
/// Initializes this instance.
/// </summary>
/// <param name="configuration">The configuration.</param>
internal void Initialize(LoggingConfiguration configuration)
{
if (!IsInitialized)
{
LoggingConfiguration = configuration;
IsInitialized = true;
_scannedForObjects = false;
PropertyHelper.CheckRequiredParameters(this);
InitializeLayout();
if (!_scannedForObjects)
{
InternalLogger.Debug("{0} Initialized Layout done but not scanned for objects", GetType());
PerformObjectScanning();
}
}
}
internal void PerformObjectScanning()
{
var objectGraphScannerList = ObjectGraphScanner.FindReachableObjects<IRenderable>(true, this);
var objectGraphTypes = new HashSet<Type>(objectGraphScannerList.Select(o => o.GetType()));
objectGraphTypes.Remove(typeof(SimpleLayout));
objectGraphTypes.Remove(typeof(NLog.LayoutRenderers.LiteralLayoutRenderer));
// determine whether the layout is thread-agnostic
// layout is thread agnostic if it is thread-agnostic and
// all its nested objects are thread-agnostic.
ThreadAgnostic = objectGraphTypes.All(t => t.IsDefined(typeof(ThreadAgnosticAttribute), true));
ThreadSafe = objectGraphTypes.All(t => t.IsDefined(typeof(ThreadSafeAttribute), true));
MutableUnsafe = objectGraphTypes.Any(t => t.IsDefined(typeof(MutableUnsafeAttribute), true));
if ((ThreadAgnostic || !MutableUnsafe) && objectGraphScannerList.Count > 1 && objectGraphTypes.Count > 0)
{
foreach (var nestedLayout in objectGraphScannerList.OfType<Layout>())
{
if (!ReferenceEquals(nestedLayout, this))
{
nestedLayout.Initialize(LoggingConfiguration);
ThreadAgnostic = nestedLayout.ThreadAgnostic && ThreadAgnostic;
MutableUnsafe = nestedLayout.MutableUnsafe || MutableUnsafe;
}
}
}
// determine the max StackTraceUsage, to decide if Logger needs to capture callsite
StackTraceUsage = StackTraceUsage.None; // In case this Layout should implement IUsesStackTrace
StackTraceUsage = objectGraphScannerList.OfType<IUsesStackTrace>().DefaultIfEmpty().Max(item => item?.StackTraceUsage ?? StackTraceUsage.None);
_scannedForObjects = true;
}
/// <summary>
/// Closes this instance.
/// </summary>
internal void Close()
{
if (IsInitialized)
{
LoggingConfiguration = null;
IsInitialized = false;
CloseLayout();
}
}
/// <summary>
/// Initializes the layout.
/// </summary>
protected virtual void InitializeLayout()
{
PerformObjectScanning();
}
/// <summary>
/// Closes the layout.
/// </summary>
protected virtual void CloseLayout()
{
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <returns>The rendered layout.</returns>
protected abstract string GetFormattedMessage(LogEventInfo logEvent);
/// <summary>
/// Register a custom Layout.
/// </summary>
/// <remarks>Short-cut for registering to default <see cref="ConfigurationItemFactory"/></remarks>
/// <typeparam name="T"> Type of the Layout.</typeparam>
/// <param name="name"> Name of the Layout.</param>
public static void Register<T>(string name)
where T : Layout
{
var layoutRendererType = typeof(T);
Register(name, layoutRendererType);
}
/// <summary>
/// Register a custom Layout.
/// </summary>
/// <remarks>Short-cut for registering to default <see cref="ConfigurationItemFactory"/></remarks>
/// <param name="layoutType"> Type of the Layout.</param>
/// <param name="name"> Name of the Layout.</param>
public static void Register(string name, Type layoutType)
{
ConfigurationItemFactory.Default.Layouts
.RegisterDefinition(name, layoutType);
}
/// <summary>
/// Optimized version of <see cref="Precalculate(LogEventInfo)"/> for internal Layouts, when
/// override of <see cref="RenderFormattedMessage(LogEventInfo, StringBuilder)"/> is available.
/// </summary>
internal void PrecalculateBuilderInternal(LogEventInfo logEvent, StringBuilder target)
{
if (!ThreadAgnostic || MutableUnsafe)
{
RenderAppendBuilder(logEvent, target, true);
}
}
internal string ToStringWithNestedItems<T>(IList<T> nestedItems, Func<T, string> nextItemToString)
{
if (nestedItems?.Count > 0)
{
var nestedNames = nestedItems.Select(nextItemToString).ToArray();
return string.Concat(GetType().Name, "=", string.Join("|", nestedNames));
}
return base.ToString();
}
/// <summary>
/// Try get value
/// </summary>
/// <param name="logEvent"></param>
/// <param name="rawValue">rawValue if return result is true</param>
/// <returns>false if we could not determine the rawValue</returns>
internal virtual bool TryGetRawValue(LogEventInfo logEvent, out object rawValue)
{
rawValue = null;
return false;
}
/// <summary>
/// Resolve from DI <see cref="LogFactory.ServiceRepository"/>
/// </summary>
/// <remarks>Avoid calling this while handling a LogEvent, since random deadlocks can occur</remarks>
protected T ResolveService<T>() where T : class
{
return LoggingConfiguration.GetServiceProvider().ResolveService<T>(IsInitialized);
}
}
}
| 1 | 22,595 | @snakefoot why is internal needed? | NLog-NLog | .cs |
@@ -190,11 +190,14 @@ func (s *server) startService() common.Daemon {
}
params.PublicClient, err = sdkclient.NewClient(sdkclient.Options{
- HostPort: s.cfg.PublicClient.HostPort,
- Namespace: common.SystemLocalNamespace,
- MetricsScope: params.MetricScope,
- Logger: l.NewZapAdapter(zapLogger),
- ConnectionOptions: sdkclient.ConnectionOptions{TLS: options},
+ HostPort: s.cfg.PublicClient.HostPort,
+ Namespace: common.SystemLocalNamespace,
+ MetricsScope: params.MetricScope,
+ Logger: l.NewZapAdapter(zapLogger),
+ ConnectionOptions: sdkclient.ConnectionOptions{
+ TLS: options,
+ DisableHealthCheck: true,
+ },
})
if err != nil {
log.Fatalf("failed to create public client: %v", err) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package temporal
import (
"log"
"time"
sdkclient "go.temporal.io/sdk/client"
"go.uber.org/zap"
"go.temporal.io/server/api/persistenceblobs/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/archiver"
"go.temporal.io/server/common/archiver/provider"
"go.temporal.io/server/common/authorization"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/elasticsearch"
l "go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/loggerimpl"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/messaging"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
persistenceClient "go.temporal.io/server/common/persistence/client"
"go.temporal.io/server/common/primitives"
"go.temporal.io/server/common/resource"
"go.temporal.io/server/common/rpc"
"go.temporal.io/server/common/rpc/encryption"
"go.temporal.io/server/common/service/config"
"go.temporal.io/server/common/service/config/ringpop"
"go.temporal.io/server/common/service/dynamicconfig"
"go.temporal.io/server/service/frontend"
"go.temporal.io/server/service/history"
"go.temporal.io/server/service/matching"
"go.temporal.io/server/service/worker"
)
type (
server struct {
name string
cfg *config.Config
doneC chan struct{}
daemon common.Daemon
}
)
// newServer returns a new instance of a daemon
// that represents a temporal service
func newServer(service string, cfg *config.Config) common.Daemon {
return &server{
cfg: cfg,
name: service,
doneC: make(chan struct{}),
}
}
// Start starts the server
func (s *server) Start() {
if _, ok := s.cfg.Services[s.name]; !ok {
log.Fatalf("`%v` service missing config", s)
}
s.daemon = s.startService()
}
// Stop stops the server
func (s *server) Stop() {
if s.daemon == nil {
return
}
select {
case <-s.doneC:
default:
s.daemon.Stop()
select {
case <-s.doneC:
case <-time.After(time.Minute):
log.Printf("timed out waiting for server %v to exit\n", s.name)
}
}
}
// startService starts a service with the given name and config
func (s *server) startService() common.Daemon {
var err error
params := resource.BootstrapParams{}
params.Name = s.name
params.Logger = loggerimpl.NewLogger(s.cfg.Log.NewZapLogger())
params.PersistenceConfig = s.cfg.Persistence
params.DynamicConfig, err = dynamicconfig.NewFileBasedClient(&s.cfg.DynamicConfigClient, params.Logger.WithTags(tag.Service(params.Name)), s.doneC)
if err != nil {
log.Printf("error creating file based dynamic config client, use no-op config client instead. error: %v", err)
params.DynamicConfig = dynamicconfig.NewNopClient()
}
dc := dynamicconfig.NewCollection(params.DynamicConfig, params.Logger)
err = ringpop.ValidateRingpopConfig(&s.cfg.Global.Membership)
if err != nil {
log.Fatalf("Ringpop config validation error - %v", err)
}
tlsFactory, err := encryption.NewTLSConfigProviderFromConfig(s.cfg.Global.TLS)
if err != nil {
log.Fatalf("error initializing TLS provider: %v", err)
}
svcCfg := s.cfg.Services[s.name]
params.MetricScope = svcCfg.Metrics.NewScope(params.Logger)
params.RPCFactory = rpc.NewFactory(&svcCfg.RPC, params.Name, params.Logger, tlsFactory)
// Ringpop uses a different port to register handlers, this map is needed to resolve
// services to correct addresses used by clients through ServiceResolver lookup API
servicePortMap := make(map[string]int)
for roleName, svcCfg := range s.cfg.Services {
serviceName := roleName
servicePortMap[serviceName] = svcCfg.RPC.GRPCPort
}
params.MembershipFactoryInitializer =
func(persistenceBean persistenceClient.Bean, logger l.Logger) (resource.MembershipMonitorFactory, error) {
return ringpop.NewRingpopFactory(
&s.cfg.Global.Membership,
params.RPCFactory.GetRingpopChannel(),
params.Name,
servicePortMap,
logger,
persistenceBean.GetClusterMetadataManager(),
)
}
params.DCRedirectionPolicy = s.cfg.DCRedirectionPolicy
params.MetricsClient = metrics.NewClient(params.MetricScope, metrics.GetMetricsServiceIdx(params.Name, params.Logger))
clusterMetadata := s.cfg.ClusterMetadata
// This call performs a config check against the configured persistence store for immutable cluster metadata.
// If there is a mismatch, the persisted values take precedence and will be written over in the config objects.
// This is to keep this check hidden from independent downstream daemons and keep this in a single place.
immutableClusterMetadataInitialization(params.Logger, dc, ¶ms.PersistenceConfig, ¶ms.AbstractDatastoreFactory, ¶ms.MetricsClient, clusterMetadata)
params.ClusterMetadata = cluster.NewMetadata(
params.Logger,
dc.GetBoolProperty(dynamicconfig.EnableGlobalNamespace, clusterMetadata.EnableGlobalNamespace),
clusterMetadata.FailoverVersionIncrement,
clusterMetadata.MasterClusterName,
clusterMetadata.CurrentClusterName,
clusterMetadata.ClusterInformation,
clusterMetadata.ReplicationConsumer,
)
if s.cfg.PublicClient.HostPort == "" {
log.Fatal("need to provide an endpoint config for PublicClient")
} else {
zapLogger, err := zap.NewProduction()
if err != nil {
log.Fatalf("failed to initialize zap logger: %v", err)
}
options, err := tlsFactory.GetFrontendClientConfig()
if err != nil {
log.Fatalf("unable to load frontend tls configuration: %v", err)
}
params.PublicClient, err = sdkclient.NewClient(sdkclient.Options{
HostPort: s.cfg.PublicClient.HostPort,
Namespace: common.SystemLocalNamespace,
MetricsScope: params.MetricScope,
Logger: l.NewZapAdapter(zapLogger),
ConnectionOptions: sdkclient.ConnectionOptions{TLS: options},
})
if err != nil {
log.Fatalf("failed to create public client: %v", err)
}
}
advancedVisMode := dc.GetStringProperty(
dynamicconfig.AdvancedVisibilityWritingMode,
common.GetDefaultAdvancedVisibilityWritingMode(params.PersistenceConfig.IsAdvancedVisibilityConfigExist()),
)()
isAdvancedVisEnabled := advancedVisMode != common.AdvancedVisibilityWritingModeOff
if params.ClusterMetadata.IsGlobalNamespaceEnabled() {
params.MessagingClient = messaging.NewKafkaClient(&s.cfg.Kafka, params.MetricsClient, zap.NewNop(), params.Logger, params.MetricScope, true, isAdvancedVisEnabled)
} else if isAdvancedVisEnabled {
params.MessagingClient = messaging.NewKafkaClient(&s.cfg.Kafka, params.MetricsClient, zap.NewNop(), params.Logger, params.MetricScope, false, isAdvancedVisEnabled)
} else {
params.MessagingClient = nil
}
if isAdvancedVisEnabled {
// verify config of advanced visibility store
advancedVisStoreKey := s.cfg.Persistence.AdvancedVisibilityStore
advancedVisStore, ok := s.cfg.Persistence.DataStores[advancedVisStoreKey]
if !ok {
log.Fatalf("not able to find advanced visibility store in config: %v", advancedVisStoreKey)
}
params.ESConfig = advancedVisStore.ElasticSearch
esClient, err := elasticsearch.NewClient(params.ESConfig)
if err != nil {
log.Fatalf("error creating elastic search client: %v", err)
}
params.ESClient = esClient
// verify index name
indexName, ok := params.ESConfig.Indices[common.VisibilityAppName]
if !ok || len(indexName) == 0 {
log.Fatalf("elastic search config missing visibility index")
}
}
params.ArchivalMetadata = archiver.NewArchivalMetadata(
dc,
s.cfg.Archival.History.State,
s.cfg.Archival.History.EnableRead,
s.cfg.Archival.Visibility.State,
s.cfg.Archival.Visibility.EnableRead,
&s.cfg.NamespaceDefaults.Archival,
)
params.ArchiverProvider = provider.NewArchiverProvider(s.cfg.Archival.History.Provider, s.cfg.Archival.Visibility.Provider)
params.PersistenceConfig.TransactionSizeLimit = dc.GetIntProperty(dynamicconfig.TransactionSizeLimit, common.DefaultTransactionSizeLimit)
params.Authorizer = authorization.NewNopAuthorizer()
params.Logger.Info("Starting service " + s.name)
var daemon common.Daemon
switch s.name {
case primitives.FrontendService:
daemon, err = frontend.NewService(¶ms)
case primitives.HistoryService:
daemon, err = history.NewService(¶ms)
case primitives.MatchingService:
daemon, err = matching.NewService(¶ms)
case primitives.WorkerService:
daemon, err = worker.NewService(¶ms)
}
if err != nil {
params.Logger.Fatal("Fail to start "+s.name+" service ", tag.Error(err))
}
go execute(daemon, s.doneC)
return daemon
}
func immutableClusterMetadataInitialization(
logger l.Logger,
dc *dynamicconfig.Collection,
persistenceConfig *config.Persistence,
abstractDatastoreFactory *persistenceClient.AbstractDataStoreFactory,
metricsClient *metrics.Client,
clusterMetadata *config.ClusterMetadata) {
logger = logger.WithTags(tag.ComponentMetadataInitializer)
factory := persistenceClient.NewFactory(
persistenceConfig,
dc.GetIntProperty(dynamicconfig.HistoryPersistenceMaxQPS, 3000),
*abstractDatastoreFactory,
clusterMetadata.CurrentClusterName,
*metricsClient,
logger,
)
clusterMetadataManager, err := factory.NewClusterMetadataManager()
if err != nil {
log.Fatalf("Error initializing cluster metadata manager: %v", err)
}
defer clusterMetadataManager.Close()
resp, err := clusterMetadataManager.InitializeImmutableClusterMetadata(
&persistence.InitializeImmutableClusterMetadataRequest{
ImmutableClusterMetadata: persistenceblobs.ImmutableClusterMetadata{
HistoryShardCount: int32(persistenceConfig.NumHistoryShards),
ClusterName: clusterMetadata.CurrentClusterName,
}})
if err != nil {
log.Fatalf("Error while fetching or persisting immutable cluster metadata: %v", err)
}
if resp.RequestApplied {
logger.Info("Successfully applied immutable cluster metadata.")
} else {
if clusterMetadata.CurrentClusterName != resp.PersistedImmutableData.ClusterName {
logImmutableMismatch(logger,
"ClusterMetadata.CurrentClusterName",
clusterMetadata.CurrentClusterName,
resp.PersistedImmutableData.ClusterName)
clusterMetadata.CurrentClusterName = resp.PersistedImmutableData.ClusterName
}
var persistedShardCount = int(resp.PersistedImmutableData.HistoryShardCount)
if persistenceConfig.NumHistoryShards != persistedShardCount {
logImmutableMismatch(logger,
"Persistence.NumHistoryShards",
persistenceConfig.NumHistoryShards,
persistedShardCount)
persistenceConfig.NumHistoryShards = persistedShardCount
}
}
metadataManager, err := factory.NewMetadataManager()
if err != nil {
log.Fatalf("Error initializing metadata manager: %v", err)
}
defer metadataManager.Close()
if err := metadataManager.InitializeSystemNamespaces(clusterMetadata.CurrentClusterName); err != nil {
log.Fatalf("failed to register system namespace: %v", err)
}
}
func logImmutableMismatch(l l.Logger, key string, ignored interface{}, value interface{}) {
l.Error(
"Supplied configuration key/value mismatches persisted ImmutableClusterMetadata."+
"Continuing with the persisted value as this value cannot be changed once initialized.",
tag.Key(key),
tag.IgnoredValue(ignored),
tag.Value(value))
}
// execute runs the daemon in a separate go routine
func execute(d common.Daemon, doneC chan struct{}) {
d.Start()
close(doneC)
}
| 1 | 9,986 | Why were we disabling health checks before? | temporalio-temporal | go |
@@ -22,7 +22,7 @@ def check_src_files_have_test():
def check_test_files_have_src():
unknown_test_files = []
- excluded = ['test/mitmproxy/data/', 'test/mitmproxy/net/data/', '/tservers.py']
+ excluded = ['test/mitmproxy/data/', 'test/mitmproxy/net/data/', '/tservers.py', '/conftest.py']
test_files = glob.glob('test/mitmproxy/**/*.py', recursive=True) + glob.glob('test/pathod/**/*.py', recursive=True)
test_files = [f for f in test_files if os.path.basename(f) != '__init__.py']
test_files = [f for f in test_files if not any(os.path.normpath(p) in f for p in excluded)] | 1 | import os
import re
import glob
import sys
def check_src_files_have_test():
missing_test_files = []
excluded = ['mitmproxy/contrib/', 'mitmproxy/test/', 'mitmproxy/tools/', 'mitmproxy/platform/']
src_files = glob.glob('mitmproxy/**/*.py', recursive=True) + glob.glob('pathod/**/*.py', recursive=True)
src_files = [f for f in src_files if os.path.basename(f) != '__init__.py']
src_files = [f for f in src_files if not any(os.path.normpath(p) in f for p in excluded)]
for f in src_files:
p = os.path.join("test", os.path.dirname(f), "test_" + os.path.basename(f))
if not os.path.isfile(p):
missing_test_files.append((f, p))
return missing_test_files
def check_test_files_have_src():
unknown_test_files = []
excluded = ['test/mitmproxy/data/', 'test/mitmproxy/net/data/', '/tservers.py']
test_files = glob.glob('test/mitmproxy/**/*.py', recursive=True) + glob.glob('test/pathod/**/*.py', recursive=True)
test_files = [f for f in test_files if os.path.basename(f) != '__init__.py']
test_files = [f for f in test_files if not any(os.path.normpath(p) in f for p in excluded)]
for f in test_files:
p = os.path.join(re.sub('^test/', '', os.path.dirname(f)), re.sub('^test_', '', os.path.basename(f)))
if not os.path.isfile(p):
unknown_test_files.append((f, p))
return unknown_test_files
def main():
exitcode = 0
missing_test_files = check_src_files_have_test()
if missing_test_files:
exitcode += 1
for f, p in sorted(missing_test_files):
print("{} MUST have a matching test file: {}".format(f, p))
unknown_test_files = check_test_files_have_src()
if unknown_test_files:
# TODO: enable this in the future
# exitcode += 1
for f, p in sorted(unknown_test_files):
print("{} DOES NOT MATCH a source file! Expected to find: {}".format(f, p))
sys.exit(exitcode)
if __name__ == '__main__':
main()
| 1 | 13,481 | did `conftest.py` actually show up for you? `test_files` should never contain it... | mitmproxy-mitmproxy | py |
@@ -561,7 +561,8 @@ StmtDDLDropSchema::StmtDDLDropSchema(//const SchemaName & schemaName,
// If the schema name specified is reserved name, users cannot drop them.
// They can only be dropped internally.
if ((! Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) &&
- (ComIsTrafodionReservedSchemaName(schemaQualName_.getSchemaName())))
+ (ComIsTrafodionReservedSchemaName(schemaQualName_.getSchemaName())) &&
+ (!ComIsTrafodionExternalSchemaName(schemaQualName_.getSchemaName())))
{
// error.
*SqlParser_Diags << DgSqlCode(-1430) | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: StmtDDLDrop.C
* Description: definitions of methods associating with DDL Drop
* statements
*
*
* Created: 11/11/95
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include "AllStmtDDLDrop.h"
#ifndef SQLPARSERGLOBALS_CONTEXT_AND_DIAGS
#define SQLPARSERGLOBALS_CONTEXT_AND_DIAGS
#endif
#include "SqlParserGlobals.h"
// -----------------------------------------------------------------------
// methods for class StmtDDLDropCatalog
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropCatalog::StmtDDLDropCatalog(const NAString & catalogName, ComDropBehavior dropBehavior)
: StmtDDLNode(DDL_DROP_CATALOG),
catalogName_(catalogName, PARSERHEAP()),
dropBehavior_(dropBehavior)
{
}
//
// virtual destructor
//
StmtDDLDropCatalog::~StmtDDLDropCatalog()
{
}
//
// cast
//
StmtDDLDropCatalog *
StmtDDLDropCatalog::castToStmtDDLDropCatalog()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropCatalog::displayLabel1() const
{
return NAString("Catalog name: ") + getCatalogName();
}
const NAString
StmtDDLDropCatalog::getText() const
{
return "StmtDDLDropCatalog";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropComponentPrivilege
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropComponentPrivilege::StmtDDLDropComponentPrivilege(
const NAString & aComponentPrivilegeName,
const NAString & aComponentName,
ComDropBehavior dropBehavior,
CollHeap * heap) // default is PARSERHEAP()
: StmtDDLNode(DDL_DROP_COMPONENT_PRIVILEGE),
componentPrivilegeName_(aComponentPrivilegeName, heap),
dropBehavior_(dropBehavior),
componentName_(aComponentName, heap)
{
}
//
// virtual destructor
//
StmtDDLDropComponentPrivilege::~StmtDDLDropComponentPrivilege()
{
}
//
// virtual safe cast-down function
//
StmtDDLDropComponentPrivilege *
StmtDDLDropComponentPrivilege::castToStmtDDLDropComponentPrivilege()
{
return this;
}
//
// methods for tracing
//
// LCOV_EXCL_START
const NAString
StmtDDLDropComponentPrivilege::displayLabel1() const
{
NAString aLabel("Component privilege name: ");
aLabel += getComponentPrivilegeName();
return aLabel;
}
const NAString
StmtDDLDropComponentPrivilege::displayLabel2() const
{
NAString aLabel("Component name: ");
aLabel += getComponentName();
aLabel += " Drop behavior: ";
if (dropBehavior_ == COM_CASCADE_DROP_BEHAVIOR)
aLabel += "CASCADE";
else
aLabel += "RESTRICT";
return aLabel;
}
const NAString
StmtDDLDropComponentPrivilege::getText() const
{
return "StmtDDLDropComponentPrivilege";
}
// LCOV_EXCL_STOP
//----------------------------------------------------------------------------
// MV - RG
// methods for class StmtDDLDropMvRGroup - refresh groups
// ---------------------------------------------------------------------------
// initialize constructor
StmtDDLDropMvRGroup::StmtDDLDropMvRGroup(const QualifiedName & mvGroupName)
:StmtDDLNode(DDL_DROP_MV_REFRESH_GROUP),
mvRGroupQualName_(mvGroupName, PARSERHEAP())
{
// XXXXXXXXXMVSXXXXXXXXXXXXXXX
}
StmtDDLDropMvRGroup::~StmtDDLDropMvRGroup()
{
}
StmtDDLDropMvRGroup *
StmtDDLDropMvRGroup::castToStmtDDLDropMvRGroup()
{
return this;
}
const NAString
StmtDDLDropMvRGroup::displayLabel1() const
{
return NAString("MV name: ") + getMvRGroupName();
}
const NAString
StmtDDLDropMvRGroup::getText() const
{
return "StmtDDLDropMvRGroup";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropTrigger
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropTrigger::StmtDDLDropTrigger(const QualifiedName & triggerQualName,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString * pLogFile)
: StmtDDLNode(DDL_DROP_TRIGGER),
triggerQualName_(triggerQualName, PARSERHEAP()),
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile)
{}
//
// virtual destructor
//
StmtDDLDropTrigger::~StmtDDLDropTrigger()
{
if (pLogFile_)
delete pLogFile_;
}
//
// cast
//
StmtDDLDropTrigger *
StmtDDLDropTrigger::castToStmtDDLDropTrigger()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropTrigger::displayLabel1() const
{
return NAString("Trigger name: ") + getTriggerName();
}
const NAString
StmtDDLDropTrigger::getText() const
{
return "StmtDDLDropTrigger";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropIndex
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropIndex::StmtDDLDropIndex(const QualifiedName & indexName,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString * pLogFile)
: StmtDDLNode(DDL_DROP_INDEX),
origIndexQualName_(PARSERHEAP()),
indexQualName_(indexName, PARSERHEAP()),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile)
{}
//
// virtual destructor
//
StmtDDLDropIndex::~StmtDDLDropIndex()
{
if (pLogFile_)
delete pLogFile_;
}
void
StmtDDLDropIndex::synthesize()
{
}
//
// cast
//
StmtDDLDropIndex *
StmtDDLDropIndex::castToStmtDDLDropIndex()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropIndex::displayLabel1() const
{
return NAString("Index name: ") + getIndexName();
}
const NAString
StmtDDLDropIndex::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropIndex::getText() const
{
return "StmtDDLDropIndex";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropLibrary
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropLibrary::StmtDDLDropLibrary(
const QualifiedName & libraryName,
ComDropBehavior dropBehavior)
: StmtDDLNode(DDL_DROP_LIBRARY),
libraryName_(libraryName, PARSERHEAP()),
dropBehavior_(dropBehavior)
{
}
//
// virtual destructor
//
StmtDDLDropLibrary::~StmtDDLDropLibrary()
{}
//
// safe cast
//
StmtDDLDropLibrary *
StmtDDLDropLibrary::castToStmtDDLDropLibrary()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropLibrary::displayLabel1() const
{
return NAString("Library name: ") + getLibraryName();
}
const NAString
StmtDDLDropLibrary::getText() const
{
return "StmtDDLDropLibrary";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropModule
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropModule::StmtDDLDropModule(const QualifiedName & modulename)
: StmtDDLNode(DDL_DROP_MODULE),
moduleName_(PARSERHEAP()),
moduleQualName_(modulename, PARSERHEAP())
{
moduleName_ = moduleQualName_.getQualifiedNameAsAnsiString();
}
//
// virtual destructor
//
StmtDDLDropModule::~StmtDDLDropModule()
{}
//
// safe cast
//
StmtDDLDropModule *
StmtDDLDropModule::castToStmtDDLDropModule()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropModule::displayLabel1() const
{
return NAString("Module name: ") + getModuleName();
}
const NAString
StmtDDLDropModule::getText() const
{
return "StmtDDLDropModule";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropRoutine
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropRoutine::StmtDDLDropRoutine(ComRoutineType routineType,
const QualifiedName & routineName,
const QualifiedName & routineActionName,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString * pLogFile,
CollHeap * heap)
: StmtDDLNode(DDL_DROP_ROUTINE),
routineType_(routineType),
routineQualName_(routineName, heap),
routineActionQualName_(routineActionName, heap),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile)
{
}
//
// virtual destructor
//
StmtDDLDropRoutine::~StmtDDLDropRoutine()
{
if (pLogFile_)
delete pLogFile_;
}
//
// cast
//
StmtDDLDropRoutine *
StmtDDLDropRoutine::castToStmtDDLDropRoutine()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropRoutine::displayLabel1() const
{
return NAString("Routine name: ") + getRoutineName();
}
const NAString
StmtDDLDropRoutine::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropRoutine::getText() const
{
return "StmtDDLDropRoutine";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropSchema
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropSchema::StmtDDLDropSchema(//const SchemaName & schemaName,
const ElemDDLSchemaName & aSchemaNameParseNode,
ComDropBehavior dropBehavior,
ComBoolean cleanupMode,
ComBoolean dropObjectsOnly)
: StmtDDLNode(DDL_DROP_SCHEMA),
schemaQualName_(aSchemaNameParseNode.getSchemaName(), PARSERHEAP()),
dropBehavior_(dropBehavior),
cleanupMode_(cleanupMode),
dropObjectsOnly_(dropObjectsOnly),
dropIfExists_(FALSE),
schemaName_(PARSERHEAP())
{
if (schemaQualName_.getCatalogName().isNull())
{
schemaName_ = ToAnsiIdentifier(schemaQualName_.getSchemaName());
}
else
{
schemaName_ = ToAnsiIdentifier(schemaQualName_.getCatalogName()) + "." +
ToAnsiIdentifier(schemaQualName_.getSchemaName());
}
// If the schema name specified is reserved name, users cannot drop them.
// They can only be dropped internally.
if ((! Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) &&
(ComIsTrafodionReservedSchemaName(schemaQualName_.getSchemaName())))
{
// error.
*SqlParser_Diags << DgSqlCode(-1430)
<< DgSchemaName(schemaName_);
}
}
//
// virtual destructor
//
StmtDDLDropSchema::~StmtDDLDropSchema()
{
}
//
// cast
//
StmtDDLDropSchema *
StmtDDLDropSchema::castToStmtDDLDropSchema()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropSchema::displayLabel1() const
{
return NAString("Schema name: ") + getSchemaName();
}
const NAString
StmtDDLDropSchema::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropSchema::getText() const
{
return "StmtDDLDropSchema";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropSequence
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropSequence::StmtDDLDropSequence(const QualifiedName & seqQualName,
ElemDDLNode * pSequenceOptionList,
CollHeap * heap)
: StmtDDLNode(DDL_DROP_SEQUENCE),
seqQualName_(seqQualName, heap)
{
}
StmtDDLDropSequence::~StmtDDLDropSequence()
{
}
//
// cast virtual function
//
StmtDDLDropSequence *
StmtDDLDropSequence::castToStmtDDLDropSequence()
{
return this;
}
//
// accessors
//
Int32
StmtDDLDropSequence::getArity() const
{
return 0;
}
ExprNode *
StmtDDLDropSequence::getChild(Lng32 index)
{
return NULL;
}
//
// methods for tracing
//
const NAString
StmtDDLDropSequence::displayLabel1() const
{
return NAString("Sequence name: ") + seqQualName_.getQualifiedNameAsAnsiString();
}
NATraceList
StmtDDLDropSequence::getDetailInfo() const
{
NAString detailText;
NATraceList detailTextList;
//
// table name
//
detailTextList.append(displayLabel1());
return detailTextList;
}
const NAString
StmtDDLDropSequence::getText() const
{
return "StmtDDLDropSequence";
}
// method for collecting information
void StmtDDLDropSequence::synthesize()
{
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropSQL
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropSQL::StmtDDLDropSQL(ComDropBehavior dropBehavior)
: StmtDDLNode(DDL_DROP_SQL),
dropBehavior_(dropBehavior)
{}
//
// virtual destructor
//
StmtDDLDropSQL::~StmtDDLDropSQL()
{}
StmtDDLDropSQL *
StmtDDLDropSQL::castToStmtDDLDropSQL()
{
return this;
}
//
// for tracing
//
const NAString
StmtDDLDropSQL::getText() const
{
return "StmtDDLDropSQL";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropTable
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropTable::StmtDDLDropTable(const QualifiedName & tableQualName,
ComDropBehavior dropBehavior)
: StmtDDLNode(DDL_DROP_TABLE),
origTableQualName_(PARSERHEAP()),
tableQualName_(tableQualName, PARSERHEAP()),
dropBehavior_(dropBehavior),
tableType_(ExtendedQualName::NORMAL_TABLE), //++ MV
isSpecialTypeSpecified_(FALSE), //++ MV
isCleanupSpec_(FALSE),
isValidateSpec_(FALSE),
pLogFile_(NULL),
dropIfExists_(FALSE)
{
}
//
// constructor for CLEANUP
//
StmtDDLDropTable::StmtDDLDropTable(const QualifiedName & tableQualName,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString *pLogFile)
: StmtDDLNode(DDL_DROP_TABLE),
origTableQualName_(PARSERHEAP()),
tableQualName_(tableQualName, PARSERHEAP()),
dropBehavior_(dropBehavior),
tableType_(ExtendedQualName::NORMAL_TABLE), //++ MV
isSpecialTypeSpecified_(FALSE), //++ MV
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile),
dropIfExists_(FALSE)
{
}
//
// virtual destructor
//
StmtDDLDropTable::~StmtDDLDropTable()
{
if (pLogFile_)
delete pLogFile_;
}
void
StmtDDLDropTable::synthesize()
{
}
//
// cast
//
StmtDDLDropTable *
StmtDDLDropTable::castToStmtDDLDropTable()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropTable::displayLabel1() const
{
return NAString("Table name: ") + getTableName();
}
const NAString
StmtDDLDropTable::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropTable::getText() const
{
return "StmtDDLDropTable";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropHbaseTable
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropHbaseTable::StmtDDLDropHbaseTable(const QualifiedName & tableQualName)
: StmtDDLNode(DDL_DROP_HBASE_TABLE),
origTableQualName_(PARSERHEAP()),
tableQualName_(tableQualName, PARSERHEAP())
{
}
//
// virtual destructor
//
StmtDDLDropHbaseTable::~StmtDDLDropHbaseTable()
{
}
//
// cast
//
StmtDDLDropHbaseTable *
StmtDDLDropHbaseTable::castToStmtDDLDropHbaseTable()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropHbaseTable::displayLabel1() const
{
return NAString("Table name: ") + getTableName();
}
const NAString
StmtDDLDropHbaseTable::displayLabel2() const
{
return NAString();
}
const NAString
StmtDDLDropHbaseTable::getText() const
{
return "StmtDDLDropHbaseTable";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropView
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropView::StmtDDLDropView(const QualifiedName & viewQualName,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString * pLogFile)
: StmtDDLNode(DDL_DROP_VIEW),
viewQualName_(viewQualName, PARSERHEAP()),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile)
{
}
//
// virtual destructor
//
StmtDDLDropView::~StmtDDLDropView()
{
if (pLogFile_)
delete pLogFile_;
}
//
// cast
//
StmtDDLDropView *
StmtDDLDropView::castToStmtDDLDropView()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropView::displayLabel1() const
{
return NAString("View name: ") + getViewName();
}
const NAString
StmtDDLDropView::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropView::getText() const
{
return "StmtDDLDropView";
}
// -----------------------------------------------------------------------
// methods for class StmtDDLDropMV
// -----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropMV::StmtDDLDropMV(const QualifiedName & MVQualName,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NABoolean validateSpec,
NAString * pLogFile)
: StmtDDLNode(DDL_DROP_MV),
MVQualName_(MVQualName, PARSERHEAP()),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
isValidateSpec_(validateSpec),
pLogFile_(pLogFile)
{
}
//
// virtual destructor
//
StmtDDLDropMV::~StmtDDLDropMV()
{
if (pLogFile_)
delete pLogFile_;
}
//
// cast
//
StmtDDLDropMV *
StmtDDLDropMV::castToStmtDDLDropMV()
{
return this;
}
//
// methods for tracing
//
const NAString
StmtDDLDropMV::displayLabel1() const
{
return NAString("Materialized View name: ") + getMVName();
}
const NAString
StmtDDLDropMV::displayLabel2() const
{
NAString label2("Drop behavior: ");
switch (getDropBehavior())
{
case COM_CASCADE_DROP_BEHAVIOR :
return label2 + "Cascade";
case COM_RESTRICT_DROP_BEHAVIOR :
return label2 + "Restrict";
default :
NAAbort("StmtDDLDrop.C", __LINE__, "internal logic error");
return NAString();
}
}
const NAString
StmtDDLDropMV::getText() const
{
return "StmtDDLDropMV";
}
//-----------------------------------------------------------------------
// methods for class StmtDDLDropSynonym
//-----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropSynonym::StmtDDLDropSynonym(const QualifiedName & synonymName)
: StmtDDLNode (DDL_DROP_SYNONYM),
synonymName_(synonymName, PARSERHEAP())
{
}
//
// Virtual destructor
//
StmtDDLDropSynonym::~StmtDDLDropSynonym()
{}
//
// cast
//
StmtDDLDropSynonym *
StmtDDLDropSynonym::castToStmtDDLDropSynonym()
{
return this;
}
//
// for tracing
//
const NAString
StmtDDLDropSynonym::displayLabel1() const
{
return NAString ("Synonym name: ") + getSynonymName();
}
const NAString
StmtDDLDropSynonym::getText() const
{
return "StmtDropSynonym";
}
//-----------------------------------------------------------------------
// methods for class StmtDDLDropExceptionTable
//-----------------------------------------------------------------------
//
// constructor
//
StmtDDLDropExceptionTable::StmtDDLDropExceptionTable(const QualifiedName & exceptionName,
const QualifiedName & objectReference,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NAString * pLogFile)
: StmtDDLNode (DDL_DROP_EXCEPTION_TABLE),
exceptionName_(exceptionName, PARSERHEAP()),
objectReference_(objectReference, PARSERHEAP()),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
dropType_(COM_DROP_SINGLE),
pLogFile_(pLogFile)
{
}
StmtDDLDropExceptionTable::StmtDDLDropExceptionTable(const QualifiedName & objectReference,
ComDropBehavior dropBehavior,
NABoolean cleanupSpec,
NAString * pLogFile)
: StmtDDLNode (DDL_DROP_EXCEPTION_TABLE),
exceptionName_(NULL),
objectReference_(objectReference, PARSERHEAP()),
dropBehavior_(dropBehavior),
isCleanupSpec_(cleanupSpec),
dropType_(COM_DROP_ALL),
pLogFile_(pLogFile)
{
}
//
// Virtual destructor
//
StmtDDLDropExceptionTable::~StmtDDLDropExceptionTable()
{}
//
// cast
//
StmtDDLDropExceptionTable *
StmtDDLDropExceptionTable::castToStmtDDLDropExceptionTable()
{
return this;
}
//
// for tracing
//
const NAString
StmtDDLDropExceptionTable::displayLabel1() const
{
return NAString ("Exception table name: ") + getExceptionName();
}
const NAString
StmtDDLDropExceptionTable::displayLabel2() const
{
return NAString ("Table name: ") + getObjectReference();
}
const NAString
StmtDDLDropExceptionTable::getText() const
{
return "StmtDropExceptionTable";
}
//
// End of File
//
| 1 | 7,449 | Should this be a ! on line 565? I would have expected the condition to be similar to line 564. If I am wrong please excuse my mistake. | apache-trafodion | cpp |
@@ -288,6 +288,10 @@ class DBUpgrader {
db.execSQL("DROP TABLE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES);
}
+ if (oldVersion < 1070296) {
+ db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ + " ADD COLUMN " + PodDBAdapter.KEY_FEED_VOLUME_REDUCTION + " INTEGER DEFAULT 0");
+ }
}
} | 1 | package de.danoeh.antennapod.core.storage;
import android.content.ContentValues;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.media.MediaMetadataRetriever;
import android.util.Log;
import de.danoeh.antennapod.core.feed.FeedItem;
class DBUpgrader {
/**
* Upgrades the given database to a new schema version, applying in order
* every migration step between the old and the new schema version.
*/
static void upgrade(final SQLiteDatabase db, final int oldVersion, final int newVersion) {
if (oldVersion <= 1) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_TYPE + " TEXT");
}
if (oldVersion <= 2) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_LINK + " TEXT");
}
if (oldVersion <= 3) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_ITEM_IDENTIFIER + " TEXT");
}
if (oldVersion <= 4) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_FEED_IDENTIFIER + " TEXT");
}
if (oldVersion <= 5) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_REASON_DETAILED + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_DOWNLOADSTATUS_TITLE + " TEXT");
}
if (oldVersion <= 6) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_CHAPTER_TYPE + " INTEGER");
}
if (oldVersion <= 7) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE
+ " INTEGER");
}
if (oldVersion <= 8) {
final int KEY_ID_POSITION = 0;
final int KEY_MEDIA_POSITION = 1;
// Add feeditem column to feedmedia table
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_FEEDITEM
+ " INTEGER");
Cursor feeditemCursor = db.query(PodDBAdapter.TABLE_NAME_FEED_ITEMS,
new String[]{PodDBAdapter.KEY_ID, PodDBAdapter.KEY_MEDIA}, "? > 0",
new String[]{PodDBAdapter.KEY_MEDIA}, null, null, null);
if (feeditemCursor.moveToFirst()) {
db.beginTransaction();
ContentValues contentValues = new ContentValues();
do {
long mediaId = feeditemCursor.getLong(KEY_MEDIA_POSITION);
contentValues.put(PodDBAdapter.KEY_FEEDITEM, feeditemCursor.getLong(KEY_ID_POSITION));
db.update(PodDBAdapter.TABLE_NAME_FEED_MEDIA, contentValues, PodDBAdapter.KEY_ID + "=?", new String[]{String.valueOf(mediaId)});
contentValues.clear();
} while (feeditemCursor.moveToNext());
db.setTransactionSuccessful();
db.endTransaction();
}
feeditemCursor.close();
}
if (oldVersion <= 9) {
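// Per-feed auto-download flag, enabled by default for existing feeds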
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " INTEGER DEFAULT 1");
}
if (oldVersion <= 10) {
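// Flattr status columns for feeds and items, plus the total played duration for media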
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYED_DURATION
+ " INTEGER");
}
if (oldVersion <= 11) {
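// Username/password credentials for protected feeds and an image reference per item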
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_USERNAME
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_PASSWORD
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE
+ " INTEGER");
}
if (oldVersion <= 12) {
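// Support for paged feeds: a flag plus the link to the next page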
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IS_PAGED + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_NEXT_PAGE_LINK + " TEXT");
}
if (oldVersion <= 13) {
// remove duplicate rows in "Chapters" table that were created because of a bug.
db.execSQL(String.format("DELETE FROM %s WHERE %s NOT IN " +
"(SELECT MIN(%s) as %s FROM %s GROUP BY %s,%s,%s,%s,%s)",
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_TITLE,
PodDBAdapter.KEY_START,
PodDBAdapter.KEY_FEEDITEM,
PodDBAdapter.KEY_LINK,
PodDBAdapter.KEY_CHAPTER_TYPE));
}
if (oldVersion <= 14) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " INTEGER");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " = "
+ "(SELECT " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " FROM " + PodDBAdapter.TABLE_NAME_FEEDS
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEEDS + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_FEED + ")");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_HIDE + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_UPDATE_FAILED + " INTEGER DEFAULT 0");
// create indexes
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_FEED);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDMEDIA_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_QUEUE_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM);
}
if (oldVersion <= 15) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + " INTEGER DEFAULT -1");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=0");
Cursor c = db.rawQuery("SELECT " + PodDBAdapter.KEY_FILE_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=1 "
+ " AND " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=-1", null);
if (c.moveToFirst()) {
MediaMetadataRetriever mmr = new MediaMetadataRetriever();
do {
String fileUrl = c.getString(0);
try {
mmr.setDataSource(fileUrl);
byte[] image = mmr.getEmbeddedPicture();
if (image != null) {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=1"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
} else {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
}
} catch (Exception e) {
e.printStackTrace();
}
} while (c.moveToNext());
}
c.close();
}
if (oldVersion <= 16) {
String selectNew = "SELECT " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + PodDBAdapter.TABLE_NAME_FEED_MEDIA + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_FEEDITEM
+ " LEFT OUTER JOIN " + PodDBAdapter.TABLE_NAME_QUEUE + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_FEEDITEM
+ " WHERE "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_READ + " = 0 AND " // unplayed
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_DOWNLOADED + " = 0 AND " // undownloaded
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_POSITION + " = 0 AND " // not partially played
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_ID + " IS NULL"; // not in queue
String sql = "UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_READ + "=" + FeedItem.NEW
+ " WHERE " + PodDBAdapter.KEY_ID + " IN (" + selectNew + ")";
Log.d("Migration", "SQL: " + sql);
db.execSQL(sql);
}
if (oldVersion <= 17) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DELETE_ACTION + " INTEGER DEFAULT 0");
}
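        // Note: from this point on the schema versions jump from small sequential
        // numbers to values that appear to mirror the app's version code scheme
        // (e.g. 1030005), so very old databases pass through both ranges of checks.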
if (oldVersion < 1030005) {
db.execSQL("UPDATE FeedItems SET auto_download=0 WHERE " +
"(read=1 OR id IN (SELECT feeditem FROM FeedMedia WHERE position>0 OR downloaded=1)) " +
"AND id NOT IN (SELECT feeditem FROM Queue)");
}
if (oldVersion < 1040001) {
db.execSQL(PodDBAdapter.CREATE_TABLE_FAVORITES);
}
if (oldVersion < 1040002) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_PLAYED_TIME + " INTEGER DEFAULT 0");
}
if (oldVersion < 1040013) {
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_PUBDATE);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_READ);
}
if (oldVersion < 1050003) {
// Migrates feed list filter data
db.beginTransaction();
// Change to intermediate values to avoid overwriting in the following find/replace
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'unplayed', 'noplay')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_queued', 'noqueue')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_downloaded', 'nodl')");
// Replace played, queued, and downloaded with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'played', 'unplayed')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'queued', 'not_queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'downloaded', 'not_downloaded')");
// Now replace intermediates for unplayed, not queued, etc. with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noplay', 'played')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noqueue', 'queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'nodl', 'downloaded')");
// Paused doesn't have an opposite, so unplayed is the next best option
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'paused', 'unplayed')");
db.setTransactionSuccessful();
db.endTransaction();
// and now get ready for autodownload filters
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_INCLUDE_FILTER + " TEXT DEFAULT ''");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_EXCLUDE_FILTER + " TEXT DEFAULT ''");
// and now auto refresh
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_KEEP_UPDATED + " INTEGER DEFAULT 1");
}
if (oldVersion < 1050004) {
            // prevent old timestamps from being misinterpreted as ETags
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " SET " + PodDBAdapter.KEY_LASTUPDATE + "=NULL");
}
if (oldVersion < 1060200) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_CUSTOM_TITLE + " TEXT");
}
if (oldVersion < 1060596) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_IMAGE + ")");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEEDS + "." + PodDBAdapter.KEY_IMAGE + ")");
db.execSQL("DROP TABLE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES);
}
}
}
| 1 | 14,867 | Please change to `1070400`. I promise to look into this PR in more detail before the 1.7.4 release ;) | AntennaPod-AntennaPod | java |
@@ -68,15 +68,15 @@ public class FlinkParquetWriters {
}
@Override
- public ParquetValueWriter<?> message(RowType sStruct, MessageType message, List<ParquetValueWriter<?>> fields) {
+ public ParquetValueWriter<?> message(LogicalType sStruct, MessageType message, List<ParquetValueWriter<?>> fields) {
return struct(sStruct, message.asGroupType(), fields);
}
@Override
- public ParquetValueWriter<?> struct(RowType sStruct, GroupType struct,
+ public ParquetValueWriter<?> struct(LogicalType fStruct, GroupType struct,
List<ParquetValueWriter<?>> fieldWriters) {
List<Type> fields = struct.getFields();
- List<RowField> flinkFields = sStruct.getFields();
+ List<RowField> flinkFields = ((RowType) fStruct).getFields();
List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size());
List<LogicalType> flinkTypes = Lists.newArrayList();
for (int i = 0; i < fields.size(); i += 1) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.data;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.flink.table.data.ArrayData;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.MapData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.logical.ArrayType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.MapType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.RowType.RowField;
import org.apache.flink.table.types.logical.SmallIntType;
import org.apache.flink.table.types.logical.TinyIntType;
import org.apache.iceberg.parquet.ParquetValueReaders;
import org.apache.iceberg.parquet.ParquetValueWriter;
import org.apache.iceberg.parquet.ParquetValueWriters;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.util.DecimalUtil;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.LogicalTypeAnnotation.DecimalLogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
public class FlinkParquetWriters {
private FlinkParquetWriters() {
}
@SuppressWarnings("unchecked")
public static <T> ParquetValueWriter<T> buildWriter(LogicalType schema, MessageType type) {
return (ParquetValueWriter<T>) ParquetWithFlinkSchemaVisitor.visit(schema, type, new WriteBuilder(type));
}
private static class WriteBuilder extends ParquetWithFlinkSchemaVisitor<ParquetValueWriter<?>> {
private final MessageType type;
WriteBuilder(MessageType type) {
this.type = type;
}
@Override
public ParquetValueWriter<?> message(RowType sStruct, MessageType message, List<ParquetValueWriter<?>> fields) {
return struct(sStruct, message.asGroupType(), fields);
}
@Override
public ParquetValueWriter<?> struct(RowType sStruct, GroupType struct,
List<ParquetValueWriter<?>> fieldWriters) {
List<Type> fields = struct.getFields();
List<RowField> flinkFields = sStruct.getFields();
List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size());
List<LogicalType> flinkTypes = Lists.newArrayList();
for (int i = 0; i < fields.size(); i += 1) {
writers.add(newOption(struct.getType(i), fieldWriters.get(i)));
flinkTypes.add(flinkFields.get(i).getType());
}
return new RowDataWriter(writers, flinkTypes);
}
@Override
public ParquetValueWriter<?> list(ArrayType sArray, GroupType array, ParquetValueWriter<?> elementWriter) {
GroupType repeated = array.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
return new ArrayDataWriter<>(repeatedD, repeatedR,
newOption(repeated.getType(0), elementWriter),
sArray.getElementType());
}
@Override
public ParquetValueWriter<?> map(MapType sMap, GroupType map,
ParquetValueWriter<?> keyWriter, ParquetValueWriter<?> valueWriter) {
GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
return new MapDataWriter<>(repeatedD, repeatedR,
newOption(repeatedKeyValue.getType(0), keyWriter),
newOption(repeatedKeyValue.getType(1), valueWriter),
sMap.getKeyType(), sMap.getValueType());
}
private ParquetValueWriter<?> newOption(org.apache.parquet.schema.Type fieldType, ParquetValueWriter<?> writer) {
int maxD = type.getMaxDefinitionLevel(path(fieldType.getName()));
return ParquetValueWriters.option(fieldType, maxD, writer);
}
@Override
public ParquetValueWriter<?> primitive(LogicalType fType, PrimitiveType primitive) {
ColumnDescriptor desc = type.getColumnDescription(currentPath());
if (primitive.getOriginalType() != null) {
switch (primitive.getOriginalType()) {
case ENUM:
case JSON:
case UTF8:
return strings(desc);
case DATE:
case INT_8:
case INT_16:
case INT_32:
return ints(fType, desc);
case INT_64:
return ParquetValueWriters.longs(desc);
case TIME_MICROS:
return timeMicros(desc);
case TIMESTAMP_MICROS:
return timestamps(desc);
case DECIMAL:
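            // Decimals are written in the smallest physical type that can hold the
            // precision: INT32 (precision <= 9), INT64 (precision <= 18), otherwise
            // a binary / fixed-length byte array.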
DecimalLogicalTypeAnnotation decimal = (DecimalLogicalTypeAnnotation) primitive.getLogicalTypeAnnotation();
switch (primitive.getPrimitiveTypeName()) {
case INT32:
return decimalAsInteger(desc, decimal.getPrecision(), decimal.getScale());
case INT64:
return decimalAsLong(desc, decimal.getPrecision(), decimal.getScale());
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return decimalAsFixed(desc, decimal.getPrecision(), decimal.getScale());
default:
throw new UnsupportedOperationException(
"Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
}
case BSON:
return byteArrays(desc);
default:
throw new UnsupportedOperationException(
"Unsupported logical type: " + primitive.getOriginalType());
}
}
switch (primitive.getPrimitiveTypeName()) {
case FIXED_LEN_BYTE_ARRAY:
case BINARY:
return byteArrays(desc);
case BOOLEAN:
return ParquetValueWriters.booleans(desc);
case INT32:
return ints(fType, desc);
case INT64:
return ParquetValueWriters.longs(desc);
case FLOAT:
return ParquetValueWriters.floats(desc);
case DOUBLE:
return ParquetValueWriters.doubles(desc);
default:
throw new UnsupportedOperationException("Unsupported type: " + primitive);
}
}
}
private static ParquetValueWriters.PrimitiveWriter<?> ints(LogicalType type, ColumnDescriptor desc) {
if (type instanceof TinyIntType) {
return ParquetValueWriters.tinyints(desc);
} else if (type instanceof SmallIntType) {
return ParquetValueWriters.shorts(desc);
}
return ParquetValueWriters.ints(desc);
}
private static ParquetValueWriters.PrimitiveWriter<StringData> strings(ColumnDescriptor desc) {
return new StringDataWriter(desc);
}
private static ParquetValueWriters.PrimitiveWriter<Integer> timeMicros(ColumnDescriptor desc) {
return new TimeMicrosWriter(desc);
}
private static ParquetValueWriters.PrimitiveWriter<DecimalData> decimalAsInteger(ColumnDescriptor desc,
int precision, int scale) {
Preconditions.checkArgument(precision <= 9, "Cannot write decimal value as integer with precision larger than 9," +
" wrong precision %s", precision);
return new IntegerDecimalWriter(desc, precision, scale);
}
private static ParquetValueWriters.PrimitiveWriter<DecimalData> decimalAsLong(ColumnDescriptor desc,
int precision, int scale) {
Preconditions.checkArgument(precision <= 18, "Cannot write decimal value as long with precision larger than 18, " +
" wrong precision %s", precision);
return new LongDecimalWriter(desc, precision, scale);
}
private static ParquetValueWriters.PrimitiveWriter<DecimalData> decimalAsFixed(ColumnDescriptor desc,
int precision, int scale) {
return new FixedDecimalWriter(desc, precision, scale);
}
private static ParquetValueWriters.PrimitiveWriter<TimestampData> timestamps(ColumnDescriptor desc) {
return new TimestampDataWriter(desc);
}
private static ParquetValueWriters.PrimitiveWriter<byte[]> byteArrays(ColumnDescriptor desc) {
return new ByteArrayWriter(desc);
}
private static class StringDataWriter extends ParquetValueWriters.PrimitiveWriter<StringData> {
private StringDataWriter(ColumnDescriptor desc) {
super(desc);
}
@Override
public void write(int repetitionLevel, StringData value) {
column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(value.toBytes()));
}
}
private static class TimeMicrosWriter extends ParquetValueWriters.PrimitiveWriter<Integer> {
private TimeMicrosWriter(ColumnDescriptor desc) {
super(desc);
}
@Override
public void write(int repetitionLevel, Integer value) {
long micros = value.longValue() * 1000;
column.writeLong(repetitionLevel, micros);
}
}
private static class IntegerDecimalWriter extends ParquetValueWriters.PrimitiveWriter<DecimalData> {
private final int precision;
private final int scale;
private IntegerDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
}
@Override
public void write(int repetitionLevel, DecimalData decimal) {
Preconditions.checkArgument(decimal.scale() == scale,
"Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
Preconditions.checkArgument(decimal.precision() <= precision,
"Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);
column.writeInteger(repetitionLevel, (int) decimal.toUnscaledLong());
}
}
private static class LongDecimalWriter extends ParquetValueWriters.PrimitiveWriter<DecimalData> {
private final int precision;
private final int scale;
private LongDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
}
@Override
public void write(int repetitionLevel, DecimalData decimal) {
Preconditions.checkArgument(decimal.scale() == scale,
"Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
Preconditions.checkArgument(decimal.precision() <= precision,
"Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);
column.writeLong(repetitionLevel, decimal.toUnscaledLong());
}
}
private static class FixedDecimalWriter extends ParquetValueWriters.PrimitiveWriter<DecimalData> {
private final int precision;
private final int scale;
private final ThreadLocal<byte[]> bytes;
private FixedDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
this.bytes = ThreadLocal.withInitial(() -> new byte[TypeUtil.decimalRequiredBytes(precision)]);
}
@Override
public void write(int repetitionLevel, DecimalData decimal) {
byte[] binary = DecimalUtil.toReusedFixLengthBytes(precision, scale, decimal.toBigDecimal(), bytes.get());
column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(binary));
}
}
private static class TimestampDataWriter extends ParquetValueWriters.PrimitiveWriter<TimestampData> {
private TimestampDataWriter(ColumnDescriptor desc) {
super(desc);
}
@Override
public void write(int repetitionLevel, TimestampData value) {
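      // TimestampData stores milliseconds plus nanos-of-millisecond; combine them
      // into the microsecond precision used by the TIMESTAMP_MICROS column.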
column.writeLong(repetitionLevel, value.getMillisecond() * 1000 + value.getNanoOfMillisecond() / 1000);
}
}
private static class ByteArrayWriter extends ParquetValueWriters.PrimitiveWriter<byte[]> {
private ByteArrayWriter(ColumnDescriptor desc) {
super(desc);
}
@Override
public void write(int repetitionLevel, byte[] bytes) {
column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(bytes));
}
}
private static class ArrayDataWriter<E> extends ParquetValueWriters.RepeatedWriter<ArrayData, E> {
private final LogicalType elementType;
private ArrayDataWriter(int definitionLevel, int repetitionLevel,
ParquetValueWriter<E> writer, LogicalType elementType) {
super(definitionLevel, repetitionLevel, writer);
this.elementType = elementType;
}
@Override
protected Iterator<E> elements(ArrayData list) {
return new ElementIterator<>(list);
}
private class ElementIterator<E> implements Iterator<E> {
private final int size;
private final ArrayData list;
private final ArrayData.ElementGetter getter;
private int index;
private ElementIterator(ArrayData list) {
this.list = list;
size = list.size();
getter = ArrayData.createElementGetter(elementType);
index = 0;
}
@Override
public boolean hasNext() {
return index != size;
}
@Override
@SuppressWarnings("unchecked")
public E next() {
if (index >= size) {
throw new NoSuchElementException();
}
E element = (E) getter.getElementOrNull(list, index);
index += 1;
return element;
}
}
}
private static class MapDataWriter<K, V> extends ParquetValueWriters.RepeatedKeyValueWriter<MapData, K, V> {
private final LogicalType keyType;
private final LogicalType valueType;
private MapDataWriter(int definitionLevel, int repetitionLevel,
ParquetValueWriter<K> keyWriter, ParquetValueWriter<V> valueWriter,
LogicalType keyType, LogicalType valueType) {
super(definitionLevel, repetitionLevel, keyWriter, valueWriter);
this.keyType = keyType;
this.valueType = valueType;
}
@Override
protected Iterator<Map.Entry<K, V>> pairs(MapData map) {
return new EntryIterator<>(map);
}
private class EntryIterator<K, V> implements Iterator<Map.Entry<K, V>> {
private final int size;
private final ArrayData keys;
private final ArrayData values;
private final ParquetValueReaders.ReusableEntry<K, V> entry;
private final ArrayData.ElementGetter keyGetter;
private final ArrayData.ElementGetter valueGetter;
private int index;
private EntryIterator(MapData map) {
size = map.size();
keys = map.keyArray();
values = map.valueArray();
entry = new ParquetValueReaders.ReusableEntry<>();
keyGetter = ArrayData.createElementGetter(keyType);
valueGetter = ArrayData.createElementGetter(valueType);
index = 0;
}
@Override
public boolean hasNext() {
return index != size;
}
@Override
@SuppressWarnings("unchecked")
public Map.Entry<K, V> next() {
if (index >= size) {
throw new NoSuchElementException();
}
entry.set((K) keyGetter.getElementOrNull(keys, index), (V) valueGetter.getElementOrNull(values, index));
index += 1;
return entry;
}
}
}
private static class RowDataWriter extends ParquetValueWriters.StructWriter<RowData> {
private final RowData.FieldGetter[] fieldGetter;
RowDataWriter(List<ParquetValueWriter<?>> writers, List<LogicalType> types) {
super(writers);
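      // Create one typed FieldGetter per column up front so get() can read values
      // (including nulls) from a RowData without per-row type dispatch.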
fieldGetter = new RowData.FieldGetter[types.size()];
for (int i = 0; i < types.size(); i += 1) {
fieldGetter[i] = RowData.createFieldGetter(types.get(i), i);
}
}
@Override
protected Object get(RowData struct, int index) {
return fieldGetter[index].getFieldOrNull(struct);
}
}
}
| 1 | 23,726 | Can we change to use `LogicalType.getChildren`? | apache-iceberg | java |
@@ -1,10 +1,11 @@
require 'spec_helper'
describe '#create' do
- it 'sets the payment_method on Purchase to free' do
+ it 'sets the payment_method on Purchase to subscription' do
user = create(:user, :with_subscription)
create_subscriber_purchase(create(:book_product), user)
- user.purchases.last.payment_method.should == 'free'
+ user.purchases.last.payment_method.should eq 'free'
+ user.purchases.last[:payment_method].should eq 'subscription'
end
it 'sets the comments on the purchase if provided' do | 1 | require 'spec_helper'
describe '#create' do
it 'sets the payment_method on Purchase to free' do
user = create(:user, :with_subscription)
create_subscriber_purchase(create(:book_product), user)
user.purchases.last.payment_method.should == 'free'
end
it 'sets the comments on the purchase if provided' do
user = create(:user, :with_subscription)
product = create(:workshop_product)
subscriber_purchase = SubscriberPurchase.new(product, user, 'test')
purchase = subscriber_purchase.create
purchase.comments.should == 'test'
end
context 'when the purchaseable is a github fulfilled product' do
it 'enqueues a job to add the subscriber to the repo' do
GithubFulfillmentJob.stubs(:enqueue)
user = create(:user, :with_subscription, github_username: 'github_username')
product = create(:github_book_product)
create_subscriber_purchase(product, user)
GithubFulfillmentJob.should have_received(:enqueue).
with(product.github_team, [user.github_username], Purchase.last.id)
end
end
def create_subscriber_purchase(product, user)
SubscriberPurchase.new(product, user).create
end
end
| 1 | 7,438 | I'm confused as to how these are both passing. | thoughtbot-upcase | rb |
@@ -0,0 +1,17 @@
+module DashboardsHelper
+ def learn_live_link(&block)
+ if current_user_has_access_to?(:office_hours)
+ link_to OfficeHours.url, target: "_blank", &block
+ else
+ content_tag "a", &block
+ end
+ end
+
+ def learn_repo_link(&block)
+ if current_user_has_access_to?(:source_code)
+ link_to ENV['LEARN_REPO_URL'], target: "_blank", &block
+ else
+ content_tag "a", &block
+ end
+ end
+end | 1 | 1 | 9,871 | Prefer single-quoted strings when you don't need string interpolation or special symbols. | thoughtbot-upcase | rb |
|
@@ -28,7 +28,7 @@ class CategoryData
public $seoH1s;
/**
- * @var string[]
+ * @var string[]|null[]
*/
public $descriptions;
| 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Category;
use Shopsys\FrameworkBundle\Component\FileUpload\ImageUploadData;
use Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\UrlListData;
class CategoryData
{
/**
* @var string[]
*/
public $name;
/**
* @var string[]|null[]
*/
public $seoTitles;
/**
* @var string[]|null[]
*/
public $seoMetaDescriptions;
/**
* @var string[]|null[]
*/
public $seoH1s;
/**
* @var string[]
*/
public $descriptions;
/**
* @var \Shopsys\FrameworkBundle\Model\Category\Category|null
*/
public $parent;
/**
* @var bool[]
*/
public $enabled;
/**
* @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\UrlListData
*/
public $urls;
/**
* @var \Shopsys\FrameworkBundle\Component\FileUpload\ImageUploadData
*/
public $image;
/**
* @var array
*/
public $pluginData;
public function __construct()
{
$this->name = [];
$this->seoTitles = [];
$this->seoMetaDescriptions = [];
$this->seoH1s = [];
$this->descriptions = [];
$this->enabled = [];
$this->urls = new UrlListData();
$this->image = new ImageUploadData();
$this->pluginData = [];
}
}
| 1 | 14,143 | there is `"` sign in commit message and colon. can you rename also `descrition` to plural `descriptions` and add there `$` ? | shopsys-shopsys | php |
@@ -108,7 +108,9 @@ func (ti *TelemetryInterceptor) Intercept(
resp, err := handler(ctx, req)
if val, ok := metrics.ContextCounterGet(ctx, metrics.HistoryWorkflowExecutionCacheLatency); ok {
- timerNoUserLatency.Subtract(time.Duration(val))
+ userLatencyDuration := time.Duration(val)
+ timerNoUserLatency.Subtract(userLatencyDuration)
+ metricsScope.RecordTimer(metrics.ServiceLatencyUserLatency, userLatencyDuration)
}
if err != nil { | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package interceptor
import (
"context"
"time"
"go.temporal.io/api/serviceerror"
"go.temporal.io/api/workflowservice/v1"
"google.golang.org/grpc"
"go.temporal.io/server/common"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
serviceerrors "go.temporal.io/server/common/serviceerror"
)
type (
metricsContextKey struct{}
)
var (
metricsCtxKey = metricsContextKey{}
)
type (
TelemetryInterceptor struct {
namespaceCache cache.NamespaceCache
metricsClient metrics.Client
scopes map[string]int
logger log.Logger
}
)
var _ grpc.UnaryServerInterceptor = (*TelemetryInterceptor)(nil).Intercept
func NewTelemetryInterceptor(
namespaceCache cache.NamespaceCache,
metricsClient metrics.Client,
scopes map[string]int,
logger log.Logger,
) *TelemetryInterceptor {
return &TelemetryInterceptor{
namespaceCache: namespaceCache,
metricsClient: metricsClient,
scopes: scopes,
logger: logger,
}
}
// Use this method to override scope used for reporting a metric.
// Ideally this method should never be used.
func (ti *TelemetryInterceptor) overrideScope(scope int, req interface{}) int {
// GetWorkflowExecutionHistory method handles both long poll and regular calls.
// Current plan is to eventually split GetWorkflowExecutionHistory into two APIs,
	// remove this special case when that is done.
if scope == metrics.FrontendGetWorkflowExecutionHistoryScope {
request := req.(*workflowservice.GetWorkflowExecutionHistoryRequest)
if request.GetWaitNewEvent() {
return metrics.FrontendPollWorkflowExecutionHistoryScope
}
}
return scope
}
func (ti *TelemetryInterceptor) Intercept(
ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler,
) (interface{}, error) {
_, methodName := splitMethodName(info.FullMethod)
metricsScope, logTags := ti.metricsScopeLogTags(req, methodName)
ctx = context.WithValue(ctx, metricsCtxKey, metricsScope)
metricsScope.IncCounter(metrics.ServiceRequests)
timer := metricsScope.StartTimer(metrics.ServiceLatency)
defer timer.Stop()
timerNoUserLatency := metricsScope.StartTimer(metrics.ServiceLatencyNoUserLatency)
defer timerNoUserLatency.Stop()
resp, err := handler(ctx, req)
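	// If the handler recorded time spent acquiring the workflow execution cache
	// (treated as user-attributable latency), subtract it so this timer reflects
	// only server-side processing time.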
if val, ok := metrics.ContextCounterGet(ctx, metrics.HistoryWorkflowExecutionCacheLatency); ok {
timerNoUserLatency.Subtract(time.Duration(val))
}
if err != nil {
ti.handleError(metricsScope, logTags, err)
return nil, err
}
return resp, nil
}
func (ti *TelemetryInterceptor) metricsScopeLogTags(
req interface{},
methodName string,
) (metrics.Scope, []tag.Tag) {
	// if the method name is not defined, this will default to the
	// unknown scope, which carries value 0
scopeDef, _ := ti.scopes[methodName]
scopeDef = ti.overrideScope(scopeDef, req)
namespace := GetNamespace(ti.namespaceCache, req)
if namespace == "" {
return ti.metricsClient.Scope(scopeDef).Tagged(metrics.NamespaceUnknownTag()), []tag.Tag{tag.Operation(methodName)}
}
return ti.metricsClient.Scope(scopeDef).Tagged(metrics.NamespaceTag(namespace)), []tag.Tag{
tag.Operation(methodName),
tag.WorkflowNamespace(namespace),
}
}
func (ti *TelemetryInterceptor) handleError(
scope metrics.Scope,
logTags []tag.Tag,
err error,
) {
if common.IsContextDeadlineExceededErr(err) {
scope.IncCounter(metrics.ServiceErrContextTimeoutCounter)
return
}
if common.IsContextCanceledErr(err) {
scope.IncCounter(metrics.ServiceErrContextCancelledCounter)
return
}
switch err := err.(type) {
case *serviceerrors.ShardOwnershipLost:
scope.IncCounter(metrics.ServiceErrShardOwnershipLostCounter)
case *serviceerrors.TaskAlreadyStarted:
scope.IncCounter(metrics.ServiceErrTaskAlreadyStartedCounter)
case *serviceerror.InvalidArgument:
scope.IncCounter(metrics.ServiceErrInvalidArgumentCounter)
case *serviceerror.NamespaceNotActive:
scope.IncCounter(metrics.ServiceErrInvalidArgumentCounter)
case *serviceerror.WorkflowExecutionAlreadyStarted:
scope.IncCounter(metrics.ServiceErrExecutionAlreadyStartedCounter)
case *serviceerror.NotFound:
scope.IncCounter(metrics.ServiceErrNotFoundCounter)
case *serviceerror.ResourceExhausted:
scope.IncCounter(metrics.ServiceErrResourceExhaustedCounter)
case *serviceerrors.RetryReplication:
scope.IncCounter(metrics.ServiceErrRetryTaskCounter)
case *serviceerror.NamespaceAlreadyExists:
scope.IncCounter(metrics.ServiceErrNamespaceAlreadyExistsCounter)
case *serviceerror.QueryFailed:
scope.IncCounter(metrics.ServiceErrQueryFailedCounter)
case *serviceerror.ClientVersionNotSupported:
scope.IncCounter(metrics.ServiceErrClientVersionNotSupportedCounter)
case *serviceerror.DataLoss:
scope.IncCounter(metrics.ServiceFailures)
ti.logger.Error("internal service error, data loss", append(logTags, tag.Error(err))...)
case *serviceerror.Internal:
scope.IncCounter(metrics.ServiceFailures)
ti.logger.Error("internal service error", append(logTags, tag.Error(err))...)
default:
scope.IncCounter(metrics.ServiceFailures)
ti.logger.Error("uncategorized error", append(logTags, tag.Error(err))...)
}
}
func MetricsScope(
ctx context.Context,
logger log.Logger,
) metrics.Scope {
scope, ok := ctx.Value(metricsCtxKey).(metrics.Scope)
if !ok {
logger.Error("unable to get metrics scope")
return metrics.NoopScope(metrics.Frontend)
}
return scope
}
| 1 | 12,270 | timerNoUserLatency is not being used? | temporalio-temporal | go |
@@ -603,6 +603,8 @@ class Serializer(object):
:param Date attr: Object to be serialized.
:rtype: str
"""
+ if isinstance(attr, str):
+ attr = isodate.parse_date(attr)
t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
return t
| 1 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from base64 import b64decode, b64encode
import calendar
import datetime
import decimal
from enum import Enum
import json
import logging
import re
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
import chardet
import isodate
from .exceptions import (
ValidationError,
SerializationError,
DeserializationError,
raise_with_traceback)
try:
basestring
except NameError:
basestring = str
_LOGGER = logging.getLogger(__name__)
class UTC(datetime.tzinfo):
"""Time Zone info for handling UTC"""
def utcoffset(self, dt):
"""UTF offset for UTC is 0."""
return datetime.timedelta(0)
def tzname(self, dt):
"""Timestamp representation."""
return "Z"
def dst(self, dt):
"""No daylight saving for UTC."""
        return datetime.timedelta(0)
try:
from datetime import timezone
TZ_UTC = timezone.utc
except ImportError:
TZ_UTC = UTC()
class Model(object):
"""Mixin for all client request body/response body models to support
serialization and deserialization.
"""
_subtype_map = {}
_attribute_map = {}
_validation = {}
def __init__(self, *args, **kwargs):
"""Allow attribute setting via kwargs on initialization."""
for k in kwargs:
setattr(self, k, kwargs[k])
def __eq__(self, other):
"""Compare objects by comparing all attributes."""
if isinstance(other, self.__class__):
return self.__class__.__dict__ == other.__class__.__dict__
return False
def __ne__(self, other):
"""Compare objects by comparing all attributes."""
return not self.__eq__(other)
def __str__(self):
return str(self.__dict__)
@classmethod
def _get_subtype_map(cls):
attr = '_subtype_map'
parents = cls.__bases__
for base in parents:
if hasattr(base, attr) and base._subtype_map:
return base._subtype_map
return {}
@classmethod
def _classify(cls, response, objects):
"""Check the class _subtype_map for any child classes.
        We want to ignore any inherited _subtype_maps.
"""
try:
map = cls.__dict__.get('_subtype_map', {})
for _type, _classes in map.items():
classification = response.get(_type)
try:
return objects[_classes[classification]]
except KeyError:
pass
for c in _classes:
try:
_cls = objects[_classes[c]]
return _cls._classify(response, objects)
except (KeyError, TypeError):
continue
raise TypeError("Object cannot be classified futher.")
except AttributeError:
raise TypeError("Object cannot be classified futher.")
def _convert_to_datatype(data, data_type, localtypes):
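    # Recursively coerce plain dicts/lists into the model classes named by
    # data_type ('[...]' for lists, '{...}' for dicts), using localtypes as the
    # lookup table of known models; values that already match pass through.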
if data is None:
return data
data_obj = localtypes.get(data_type.strip('{[]}'))
if data_obj:
if data_type.startswith('['):
data = [
_convert_to_datatype(
param, data_type[1:-1], localtypes) for param in data
]
elif data_type.startswith('{'):
data = {
key: _convert_to_datatype(
data[key], data_type[1:-1], localtypes) for key in data
}
elif issubclass(data_obj, Enum):
return data
elif not isinstance(data, data_obj):
result = {
key: _convert_to_datatype(
data[key],
data_obj._attribute_map[key]['type'],
localtypes) for key in data
}
data = data_obj(**result)
else:
try:
for attr, map in data._attribute_map.items():
setattr(data, attr, _convert_to_datatype(
getattr(data, attr), map['type'], localtypes))
except AttributeError:
pass
return data
class Serializer(object):
"""Request object model serializer."""
basic_types = {str: 'str', int: 'int', bool: 'bool', float: 'float'}
days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu",
4: "Fri", 5: "Sat", 6: "Sun"}
months = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
validation = {
"min_length": lambda x, y: len(x) < y,
"max_length": lambda x, y: len(x) > y,
"minimum": lambda x, y: x < y,
"maximum": lambda x, y: x > y,
"minimum_ex": lambda x, y: x <= y,
"maximum_ex": lambda x, y: x >= y,
"min_items": lambda x, y: len(x) < y,
"max_items": lambda x, y: len(x) > y,
"pattern": lambda x, y: not re.match(y, x),
"unique": lambda x, y: len(x) != len(set(x)),
"multiple": lambda x, y: x % y != 0
}
flattten = re.compile(r"(?<!\\)\.")
def __init__(self, classes=None):
self.serialize_type = {
'iso-8601': Serializer.serialize_iso,
'rfc-1123': Serializer.serialize_rfc,
'unix-time': Serializer.serialize_unix,
'duration': Serializer.serialize_duration,
'date': Serializer.serialize_date,
'decimal': Serializer.serialize_decimal,
'long': Serializer.serialize_long,
'bytearray': Serializer.serialize_bytearray,
'base64': Serializer.serialize_base64,
'object': self.serialize_object,
'[]': self.serialize_iter,
'{}': self.serialize_dict
}
self.dependencies = dict(classes) if classes else {}
def _serialize(self, target_obj, data_type=None, **kwargs):
"""Serialize data into a string according to type.
:param target_obj: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str, dict
:raises: SerializationError if serialization fails.
"""
if target_obj is None:
return None
serialized = {}
attr_name = None
class_name = target_obj.__class__.__name__
if data_type:
return self.serialize_data(
target_obj, data_type, **kwargs)
if not hasattr(target_obj, "_attribute_map"):
data_type = type(target_obj).__name__
if data_type in self.basic_types.values():
return self.serialize_data(
target_obj, data_type, **kwargs)
try:
attributes = target_obj._attribute_map
self._classify_data(target_obj, class_name, serialized)
for attr, map in attributes.items():
attr_name = attr
try:
keys = self.flattten.split(map['key'])
keys = [k.replace('\\.', '.') for k in keys]
attr_type = map['type']
orig_attr = getattr(target_obj, attr)
validation = target_obj._validation.get(attr_name, {})
orig_attr = self.validate(
orig_attr, attr_name, **validation)
new_attr = self.serialize_data(
orig_attr, attr_type, **kwargs)
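                    # Re-nest the flattened attribute: build nested dicts from the
                    # dotted key path, then merge them into the serialized result.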
for k in reversed(keys):
unflattened = {k: new_attr}
new_attr = unflattened
_new_attr = new_attr
_serialized = serialized
for k in keys:
if k not in _serialized:
_serialized.update(_new_attr)
_new_attr = _new_attr[k]
_serialized = _serialized[k]
except ValueError:
continue
except (AttributeError, KeyError, TypeError) as err:
msg = "Attribute {} in object {} cannot be serialized.".format(
attr_name, class_name)
raise_with_traceback(SerializationError, msg, err)
else:
return serialized
def _classify_data(self, target_obj, class_name, serialized):
"""Check whether this object is a child and therefor needs to be
classified in the message.
"""
try:
for _type, _classes in target_obj._get_subtype_map().items():
for ref, name in _classes.items():
if name == class_name:
serialized[_type] = ref
except AttributeError:
pass # TargetObj has no _subtype_map so we don't need to classify.
def body(self, data, data_type, **kwargs):
"""Serialize data intended for a request body.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: dict
:raises: SerializationError if serialization fails.
:raises: ValueError if data is None
"""
if data is None:
raise ValidationError("required", "body", True)
data = _convert_to_datatype(data, data_type, self.dependencies)
return self._serialize(data, data_type, **kwargs)
def url(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL path.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
if kwargs.get('skip_quote') is True:
output = str(output)
else:
output = quote(str(output), safe='')
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return output
def query(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL query.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
if data_type in ['[str]']:
data = ["" if d is None else d for d in data]
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
if kwargs.get('skip_quote') is True:
output = str(output)
else:
output = quote(str(output), safe='')
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return str(output)
def header(self, name, data, data_type, **kwargs):
"""Serialize data intended for a request header.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
if data_type in ['[str]']:
data = ["" if d is None else d for d in data]
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return str(output)
def validate(self, data, name, **kwargs):
"""Validate that a piece of data meets certain conditions"""
required = kwargs.get('required', False)
if required and data is None:
raise ValidationError("required", name, True)
elif data is None:
return
elif kwargs.get('readonly'):
return
try:
for key, value in kwargs.items():
validator = self.validation.get(key, lambda x, y: False)
if validator(data, value):
raise ValidationError(key, name, value)
except TypeError:
raise ValidationError("unknown", name)
else:
return data
def serialize_data(self, data, data_type, **kwargs):
"""Serialize generic data according to supplied data type.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:param bool required: Whether it's essential that the data not be
empty or None
:raises: AttributeError if required data is None.
:raises: ValueError if data is None
:raises: SerializationError if serialization fails.
"""
if data is None:
raise ValueError("No value for given attribute")
try:
if data_type in self.basic_types.values():
return self.serialize_basic(data, data_type)
elif data_type in self.serialize_type:
return self.serialize_type[data_type](data, **kwargs)
enum_type = self.dependencies.get(data_type)
if enum_type and issubclass(enum_type, Enum):
return Serializer.serialize_enum(data, enum_obj=enum_type)
iter_type = data_type[0] + data_type[-1]
if iter_type in self.serialize_type:
return self.serialize_type[iter_type](
data, data_type[1:-1], **kwargs)
except (ValueError, TypeError) as err:
msg = "Unable to serialize value: {!r} as type: {!r}."
raise_with_traceback(
SerializationError, msg.format(data, data_type), err)
else:
return self._serialize(data, **kwargs)
def serialize_basic(self, data, data_type):
"""Serialize basic builting data type.
Serializes objects to str, int, float or bool.
:param data: Object to be serialized.
:param str data_type: Type of object in the iterable.
"""
if data_type == 'str':
return self.serialize_unicode(data)
return eval(data_type)(data)
def serialize_unicode(self, data):
"""Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
:param data: Object to be serialized.
:rtype: str
"""
try:
return data.value
except AttributeError:
pass
try:
if isinstance(data, unicode):
return data.encode(encoding='utf-8')
except NameError:
return str(data)
else:
return str(data)
def serialize_iter(self, data, iter_type, div=None, **kwargs):
"""Serialize iterable.
:param list attr: Object to be serialized.
:param str iter_type: Type of object in the iterable.
:param bool required: Whether the objects in the iterable must
not be None or empty.
:param str div: If set, this str will be used to combine the elements
in the iterable into a combined string. Default is 'None'.
:rtype: list, str
"""
serialized = []
for d in data:
try:
serialized.append(
self.serialize_data(d, iter_type, **kwargs))
except ValueError:
serialized.append(None)
if div:
serialized = ['' if s is None else s for s in serialized]
serialized = div.join(serialized)
return serialized
def serialize_dict(self, attr, dict_type, **kwargs):
"""Serialize a dictionary of objects.
:param dict attr: Object to be serialized.
:param str dict_type: Type of object in the dictionary.
:param bool required: Whether the objects in the dictionary must
not be None or empty.
:rtype: dict
"""
serialized = {}
for key, value in attr.items():
try:
serialized[str(key)] = self.serialize_data(
value, dict_type, **kwargs)
except ValueError:
serialized[str(key)] = None
return serialized
def serialize_object(self, attr, **kwargs):
"""Serialize a generic object.
        This will be handled as a dictionary. If the object passed in is not
a basic type (str, int, float, dict, list) it will simply be
cast to str.
:param dict attr: Object to be serialized.
:rtype: dict or str
"""
obj_type = type(attr)
if obj_type in self.basic_types:
return self.serialize_basic(attr, self.basic_types[obj_type])
if obj_type == dict:
serialized = {}
for key, value in attr.items():
try:
serialized[str(key)] = self.serialize_object(
value, **kwargs)
except ValueError:
serialized[str(key)] = None
return serialized
if obj_type == list:
serialized = []
for obj in attr:
try:
serialized.append(self.serialize_object(
obj, **kwargs))
except ValueError:
pass
return serialized
else:
return str(attr)
@staticmethod
def serialize_enum(attr, enum_obj=None):
try:
return attr.value
except AttributeError:
pass
try:
enum_obj(attr)
return attr
except ValueError:
for enum_value in enum_obj:
if enum_value.value.lower() == str(attr).lower():
return enum_value.value
error = "{!r} is not valid value for enum {!r}"
raise SerializationError(error.format(attr, enum_obj))
@staticmethod
def serialize_bytearray(attr, **kwargs):
"""Serialize bytearray into base-64 string.
:param attr: Object to be serialized.
:rtype: str
"""
return b64encode(attr).decode()
@staticmethod
def serialize_base64(attr, **kwargs):
"""Serialize str into base-64 string.
:param attr: Object to be serialized.
:rtype: str
"""
encoded = b64encode(attr).decode('ascii')
return encoded.strip('=').replace('+', '-').replace('/', '_')
@staticmethod
def serialize_decimal(attr, **kwargs):
"""Serialize Decimal object to float.
:param attr: Object to be serialized.
:rtype: float
"""
return float(attr)
@staticmethod
def serialize_long(attr, **kwargs):
"""Serialize long (Py2) or int (Py3).
:param attr: Object to be serialized.
:rtype: int/long
"""
try:
return long(attr)
except NameError:
return int(attr)
@staticmethod
def serialize_date(attr, **kwargs):
"""Serialize Date object into ISO-8601 formatted string.
:param Date attr: Object to be serialized.
:rtype: str
"""
t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
return t
@staticmethod
def serialize_duration(attr, **kwargs):
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
"""
return isodate.duration_isoformat(attr)
@staticmethod
def serialize_rfc(attr, **kwargs):
"""Serialize Datetime object into RFC-1123 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: TypeError if format invalid.
"""
try:
if not attr.tzinfo:
_LOGGER.warning(
"Datetime with no tzinfo will be considered UTC.")
utc = attr.utctimetuple()
except AttributeError:
raise TypeError("RFC1123 object must be valid Datetime object.")
return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
Serializer.days[utc.tm_wday], utc.tm_mday,
Serializer.months[utc.tm_mon], utc.tm_year,
utc.tm_hour, utc.tm_min, utc.tm_sec)
@staticmethod
def serialize_iso(attr, **kwargs):
"""Serialize Datetime object into ISO-8601 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: SerializationError if format invalid.
"""
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
try:
if not attr.tzinfo:
_LOGGER.warning(
"Datetime with no tzinfo will be considered UTC.")
utc = attr.utctimetuple()
if utc.tm_year > 9999 or utc.tm_year < 1:
raise OverflowError("Hit max or min date")
microseconds = str(float(attr.microsecond)*1e-6)[1:].ljust(4, '0')
date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
utc.tm_year, utc.tm_mon, utc.tm_mday,
utc.tm_hour, utc.tm_min, utc.tm_sec)
return date + microseconds + 'Z'
except (ValueError, OverflowError) as err:
msg = "Unable to serialize datetime object."
raise_with_traceback(SerializationError, msg, err)
except AttributeError as err:
msg = "ISO-8601 object must be valid Datetime object."
raise_with_traceback(TypeError, msg, err)
@staticmethod
def serialize_unix(attr, **kwargs):
"""Serialize Datetime object into IntTime format.
This is represented as seconds.
:param Datetime attr: Object to be serialized.
:rtype: int
:raises: SerializationError if format invalid
"""
if isinstance(attr, int):
return attr
try:
if not attr.tzinfo:
_LOGGER.warning(
"Datetime with no tzinfo will be considered UTC.")
return int(calendar.timegm(attr.utctimetuple()))
except AttributeError:
raise TypeError("Unix time object must be valid Datetime object.")
class Deserializer(object):
"""Response object model deserializer.
:param dict classes: Class type dictionary for deserializing
complex types.
"""
basic_types = {str: 'str', int: 'int', bool: 'bool', float: 'float'}
valid_date = re.compile(
r'\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}'
'\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?')
flatten = re.compile(r"(?<!\\)\.")
def __init__(self, classes=None):
self.deserialize_type = {
'iso-8601': Deserializer.deserialize_iso,
'rfc-1123': Deserializer.deserialize_rfc,
'unix-time': Deserializer.deserialize_unix,
'duration': Deserializer.deserialize_duration,
'date': Deserializer.deserialize_date,
'decimal': Deserializer.deserialize_decimal,
'long': Deserializer.deserialize_long,
'bytearray': Deserializer.deserialize_bytearray,
'base64': Deserializer.deserialize_base64,
'object': self.deserialize_object,
'[]': self.deserialize_iter,
'{}': self.deserialize_dict
}
self.dependencies = dict(classes) if classes else {}
def __call__(self, target_obj, response_data):
"""Call the deserializer to process a REST response.
:param str target_obj: Target data type to deserialize to.
:param requests.Response response_data: REST response object.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
"""
data = self._unpack_content(response_data)
response, class_name = self._classify_target(target_obj, data)
if isinstance(response, basestring):
return self.deserialize_data(data, response)
elif isinstance(response, type) and issubclass(response, Enum):
return self.deserialize_enum(data, response)
if data is None:
return data
try:
attributes = response._attribute_map
d_attrs = {}
for attr, map in attributes.items():
attr_type = map['type']
key = map['key']
working_data = data
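                # Walk the dotted (flattened) key path into the response dict,
                # honoring escaped '\.' segments, to locate the nested raw value.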
while '.' in key:
dict_keys = self.flatten.split(key)
if len(dict_keys) == 1:
key = dict_keys[0].replace('\\.', '.')
break
working_key = dict_keys[0].replace('\\.', '.')
working_data = working_data.get(working_key, data)
key = '.'.join(dict_keys[1:])
raw_value = working_data.get(key)
value = self.deserialize_data(raw_value, attr_type)
d_attrs[attr] = value
except (AttributeError, TypeError, KeyError) as err:
msg = "Unable to deserialize to object: " + class_name
raise_with_traceback(DeserializationError, msg, err)
else:
return self._instantiate_model(response, d_attrs)
def _classify_target(self, target, data):
"""Check to see whether the deserialization target object can
be classified into a subclass.
Once classification has been determined, initialize object.
:param str target: The target object type to deserialize to.
        :param str/dict data: The response data to deserialize.
"""
if target is None:
return None, None
if isinstance(target, basestring):
try:
target = self.dependencies[target]
except KeyError:
return target, target
try:
target = target._classify(data, self.dependencies)
except (TypeError, AttributeError):
pass # Target has no subclasses, so can't classify further.
return target, target.__class__.__name__
def _unpack_content(self, raw_data):
"""Extract data from the body of a REST response object.
:param raw_data: Data to be processed. This could be a
requests.Response object, in which case the json content will be
        returned.
"""
if raw_data and isinstance(raw_data, bytes):
data = raw_data.decode(
encoding=chardet.detect(raw_data)['encoding'])
else:
data = raw_data
if hasattr(raw_data, 'content'):
if not raw_data.content:
return None
if isinstance(raw_data.content, bytes):
encoding = chardet.detect(raw_data.content)["encoding"]
data = raw_data.content.decode(encoding=encoding)
else:
data = raw_data.content
try:
return json.loads(data)
except (ValueError, TypeError):
return data
return data
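# (Added example, not original code.) A requests-style response whose
# .content is b'{"name": "foo"}' is decoded with the detected encoding and
# parsed by json.loads into {'name': 'foo'}; content that is not valid JSON
# falls through the except clause and is returned as the decoded string.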
def _instantiate_model(self, response, attrs):
"""Instantiate a response model passing in deserialized args.
:param response: The response model class.
:param d_attrs: The deserialized response attributes.
"""
if callable(response):
subtype = response._get_subtype_map()
try:
readonly = [k for k, v in response._validation.items()
if v.get('readonly')]
const = [k for k, v in response._validation.items()
if v.get('constant')]
kwargs = {k: v for k, v in attrs.items()
if k not in subtype and k not in readonly + const}
response_obj = response(**kwargs)
for attr in readonly:
setattr(response_obj, attr, attrs.get(attr))
return response_obj
except TypeError as err:
msg = "Unable to deserialize {} into model {}. ".format(
kwargs, response)
raise DeserializationError(msg + str(err))
else:
try:
for attr, value in attrs.items():
setattr(response, attr, value)
return response
except Exception as exp:
msg = "Unable to populate response model. "
msg += "Type: {}, Error: {}".format(type(response), exp)
raise DeserializationError(msg)
def deserialize_data(self, data, data_type):
"""Process data for deserialization according to data type.
:param str data: The response string to be deserialized.
:param str data_type: The type to deserialize to.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
"""
if data is None:
return data
try:
if not data_type:
return data
if data_type in self.basic_types.values():
return self.deserialize_basic(data, data_type)
if data_type in self.deserialize_type:
data_val = self.deserialize_type[data_type](data)
return data_val
iter_type = data_type[0] + data_type[-1]
if iter_type in self.deserialize_type:
return self.deserialize_type[iter_type](data, data_type[1:-1])
obj_type = self.dependencies[data_type]
if issubclass(obj_type, Enum):
return self.deserialize_enum(data, obj_type)
except (ValueError, TypeError, AttributeError) as err:
msg = "Unable to deserialize response data."
msg += " Data: {}, {}".format(data, data_type)
raise_with_traceback(DeserializationError, msg, err)
else:
return self(obj_type, data)
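# (Added dispatch examples, not original code.) Assuming a bare
# Deserializer() with no model classes registered:
#     >>> d = Deserializer()
#     >>> d.deserialize_data('1', 'bool')
#     True
#     >>> d.deserialize_data(['1', '2'], '[int]')    # '[]' + element type 'int'
#     [1, 2]
#     >>> d.deserialize_data({'a': '3'}, '{int}')    # '{}' + value type 'int'
#     {'a': 3}
# Any other data_type name is looked up in self.dependencies as a model class.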
def deserialize_iter(self, attr, iter_type):
"""Deserialize an iterable.
:param list attr: Iterable to be deserialized.
:param str iter_type: The type of object in the iterable.
:rtype: list
"""
if not attr and not isinstance(attr, list):
return None
return [self.deserialize_data(a, iter_type) for a in attr]
def deserialize_dict(self, attr, dict_type):
"""Deserialize a dictionary.
:param dict/list attr: Dictionary to be deserialized. Also accepts
a list of key, value pairs.
:param str dict_type: The object type of the items in the dictionary.
:rtype: dict
"""
if isinstance(attr, list):
return {str(x['key']): self.deserialize_data(
x['value'], dict_type) for x in attr}
return {str(k): self.deserialize_data(
v, dict_type) for k, v in attr.items()}
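# (Added example, not original code.) Both accepted input shapes deserialize
# the same way, e.g. with dict_type 'int':
#     [{'key': 'a', 'value': '1'}]  ->  {'a': 1}
#     {'a': '1'}                    ->  {'a': 1}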
def deserialize_object(self, attr, **kwargs):
"""Deserialize a generic object.
This will be handled as a dictionary.
:param dict attr: Dictionary to be deserialized.
:rtype: dict
:raises: TypeError if non-builtin datatype encountered.
"""
if attr is None:
return None
if isinstance(attr, basestring):
return self.deserialize_basic(attr, 'str')
obj_type = type(attr)
if obj_type in self.basic_types:
return self.deserialize_basic(attr, self.basic_types[obj_type])
if obj_type == dict:
deserialized = {}
for key, value in attr.items():
try:
deserialized[str(key)] = self.deserialize_object(
value, **kwargs)
except ValueError:
deserialized[str(key)] = None
return deserialized
if obj_type == list:
deserialized = []
for obj in attr:
try:
deserialized.append(self.deserialize_object(
obj, **kwargs))
except ValueError:
pass
return deserialized
else:
error = "Cannot deserialize generic object with type: "
raise TypeError(error + str(obj_type))
def deserialize_basic(self, attr, data_type):
"""Deserialize baisc builtin data type from string.
Will attempt to convert to str, int, float and bool.
This function will also accept '1', '0', 'true' and 'false' as
valid bool values.
:param str attr: response string to be deserialized.
:param str data_type: deserialization data type.
:rtype: str, int, float or bool
:raises: TypeError if string format is not valid.
"""
if data_type == 'bool':
if attr in [True, False, 1, 0]:
return bool(attr)
elif isinstance(attr, basestring):
if attr.lower() in ['true', '1']:
return True
elif attr.lower() in ['false', '0']:
return False
raise TypeError("Invalid boolean value: {}".format(attr))
if data_type == 'str':
return self.deserialize_unicode(attr)
return eval(data_type)(attr)
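# (Added examples, not original code.)
#     >>> Deserializer().deserialize_basic('true', 'bool')
#     True
#     >>> Deserializer().deserialize_basic('0', 'bool')
#     False
#     >>> Deserializer().deserialize_basic('42', 'int')   # falls through to eval('int')('42')
#     42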
def deserialize_unicode(self, data):
"""Preserve unicode objects in Python 2, otherwise return data
as a string.
:param str data: response string to be deserialized.
:rtype: str or unicode
"""
try:
if isinstance(data, unicode):
return data
except NameError:
return str(data)
else:
return str(data)
def deserialize_enum(self, data, enum_obj):
"""Deserialize string into enum object.
:param str data: response string to be deserialized.
:param Enum enum_obj: Enum object to deserialize to.
:rtype: Enum
:raises: DeserializationError if string is not valid enum value.
"""
if isinstance(data, int):
# Workaround. We might consider remove it in the future.
# https://github.com/Azure/azure-rest-api-specs/issues/141
try:
return list(enum_obj.__members__.values())[data]
except IndexError:
error = "{!r} is not a valid index for enum {!r}"
raise DeserializationError(error.format(data, enum_obj))
try:
return enum_obj(str(data))
except ValueError:
for enum_value in enum_obj:
if enum_value.value.lower() == str(data).lower():
return enum_value
error = "{!r} is not valid value for enum {!r}"
raise DeserializationError(error.format(data, enum_obj))
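# (Added illustration with a hypothetical enum, not original code.) For an
# Enum Color with members RED = 'Red' and BLUE = 'Blue', the integer 0
# deserializes to Color.RED via the index workaround above, and the string
# 'blue' matches Color.BLUE through the case-insensitive fallback loop.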
@staticmethod
def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
return bytearray(b64decode(attr))
@staticmethod
def deserialize_base64(attr):
"""Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
padding = '=' * (3 - (len(attr) + 3) % 4)
attr = attr + padding
encoded = attr.replace('-', '+').replace('_', '/')
return b64decode(encoded)
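# (Added example, not original code; Python 3 repr shown.) The input may be
# URL-safe base64 with its padding stripped:
#     >>> Deserializer.deserialize_base64('aGVsbG8')   # padded back to 'aGVsbG8='
#     b'hello'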
@staticmethod
def deserialize_decimal(attr):
"""Deserialize string into Decimal object.
:param str attr: response string to be deserialized.
:rtype: Decimal
:raises: DeserializationError if string format invalid.
"""
try:
return decimal.Decimal(attr)
except decimal.DecimalException as err:
msg = "Invalid decimal {}".format(attr)
raise_with_traceback(DeserializationError, msg, err)
@staticmethod
def deserialize_long(attr):
"""Deserialize string into long (Py2) or int (Py3).
:param str attr: response string to be deserialized.
:rtype: long or int
:raises: ValueError if string format invalid.
"""
try:
return long(attr)
except NameError:
return int(attr)
@staticmethod
def deserialize_duration(attr):
"""Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
:rtype: TimeDelta
:raises: DeserializationError if string format invalid.
"""
try:
duration = isodate.parse_duration(attr)
except(ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize duration object."
raise_with_traceback(DeserializationError, msg, err)
else:
return duration
@staticmethod
def deserialize_date(attr):
"""Deserialize ISO-8601 formatted string into Date object.
:param str attr: response string to be deserialized.
:rtype: Date
:raises: DeserializationError if string format invalid.
"""
return isodate.parse_date(attr)
@staticmethod
def deserialize_rfc(attr):
"""Deserialize RFC-1123 formatted string into Datetime object.
:param str attr: response string to be deserialized.
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
try:
date_obj = datetime.datetime.strptime(
attr, "%a, %d %b %Y %H:%M:%S %Z")
if not date_obj.tzinfo:
date_obj = date_obj.replace(tzinfo=TZ_UTC)
except ValueError as err:
msg = "Cannot deserialize to rfc datetime object."
raise_with_traceback(DeserializationError, msg, err)
else:
return date_obj
@staticmethod
def deserialize_iso(attr):
"""Deserialize ISO-8601 formatted string into Datetime object.
:param str attr: response string to be deserialized.
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
try:
attr = attr.upper()
match = Deserializer.valid_date.match(attr)
if not match:
raise ValueError("Invalid datetime string: " + attr)
check_decimal = attr.split('.')
if len(check_decimal) > 1:
decimal = ""
for digit in check_decimal[1]:
if digit.isdigit():
decimal += digit
else:
break
if len(decimal) > 6:
attr = attr.replace(decimal, decimal[0:-1])
date_obj = isodate.parse_datetime(attr)
test_utc = date_obj.utctimetuple()
if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
raise OverflowError("Hit max or min date")
except(ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize datetime object."
raise_with_traceback(DeserializationError, msg, err)
else:
return date_obj
@staticmethod
def deserialize_unix(attr):
"""Serialize Datetime object into IntTime format.
This is represented as seconds.
:param int attr: Object to be serialized.
:rtype: Datetime
:raises: DeserializationError if format invalid
"""
try:
date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
except ValueError as err:
msg = "Cannot deserialize to unix datetime object."
raise_with_traceback(DeserializationError, msg, err)
else:
return date_obj
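# (Added example, not original code.) Deserializer.deserialize_unix(0) returns
# the Unix epoch, 1970-01-01 00:00:00 UTC, as a timezone-aware datetime.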
| 1 | 22,576 | @lmazuel - I've made a couple of minor changes to serialization.py, if you could do a quick review :) They should not be breaking, and I doubt are used by the existing clients so may not need a new release yet. Effectively it's a change to support default/constant date and datetime values by allowing strings to passed in instead of date or datetime objects. The strings would need to already be in the correct ISO formatting that the request will serialize to. Let me know if you think this is okay. | Azure-autorest | java |
@@ -553,8 +553,8 @@ namespace pwiz.SkylineTest
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"15N\" label_15N=\"true\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Heavy K\" aminoacid=\"K\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Aqua\" aminoacid=\"K, R\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
- AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss1\" aminoacid=\"T, S\" formula=\"HPO3\"><fragment_loss formula=\"HP3O4\"/></static_modification>", true, true, isotopeModificationType);
- AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss3\" aminoacid=\"T, S\" formula=\"HPO3\" explicit_decl=\"true\"><fragment_loss formula=\"HP3O4\"/><fragment_loss formula=\"H2O\"/><fragment_loss formula=\"NH3\"/></static_modification>", true, true, isotopeModificationType);
+ AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss1\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\"/></static_modification>", true, true, structuralModificationType);
+ AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss3\" aminoacid=\"T, S\" formula=\"HPO3\" explicit_decl=\"true\"><potential_loss formula=\"HP3O4\"/><potential_loss formula=\"H2O\"/><potential_loss formula=\"NH3\"/></static_modification>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\"><potential_loss formula=\"NH3\"/></static_modification>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"LossInclusion\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\" inclusion=\"Always\"/><potential_loss formula=\"HP2O3\" inclusion=\"Library\"/><potential_loss formula=\"HP1O2\" inclusion=\"Never\"/></static_modification>", true, true, structuralModificationType);
| 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Xml.Serialization;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using pwiz.Common.Chemistry;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.SkylineTestUtil;
using SequenceTerminus = pwiz.Skyline.Model.SequenceTerminus;
namespace pwiz.SkylineTest
{
/// <summary>
/// This is a test class for SrmSettingsTest and is intended
/// to contain all SrmSettingsTest Unit Tests
/// </summary>
[TestClass]
public class SrmSettingsTest : AbstractUnitTest
{
private const string XML_DIRECTIVE = "<?xml version=\"1.0\" encoding=\"utf-16\"?>\r\n";
/// <summary>
/// Simple test of serializing the default SrmSettings, reloading
/// and ensuring consistency.
/// </summary>
[TestMethod]
public void SettingsSerializeDefaultsTest()
{
AssertEx.Serializable(SrmSettingsList.GetDefault(), AssertEx.SettingsCloned);
}
/// <summary>
/// Test of deserializing current settings.
/// </summary>
[TestMethod]
public void SettingsSerializeCurrentTest()
{
AssertEx.Serializable(AssertEx.Deserialize<SrmSettings>(SETTINGS_CURRENT), 3, AssertEx.SettingsCloned);
}
private const string SETTINGS_CURRENT =
"<settings_summary name=\"Default\">\n" +
" <peptide_settings>\n" +
" <enzyme name=\"LysN promisc\" cut=\"KASR\" no_cut=\"\" sense=\"N\" />\n" +
" <digest_settings max_missed_cleavages=\"1\" exclude_ragged_ends=\"true\" />\n" +
" <peptide_prediction>\n" +
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
" </peptide_prediction>\n" +
" <peptide_filter start=\"0\" min_length=\"5\" max_length=\"30\" min_transtions=\"4\"\n" +
" auto_select=\"True\">\n" +
" <peptide_exclusions>\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
" </peptide_exclusions>\n" +
" </peptide_filter>\n" +
" <peptide_libraries />\n" +
" <peptide_modifications>\n" +
" <static_modifications>\n" +
" <static_modification name=\"Test2\" aminoacid=\"M\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
" </static_modifications>\n" +
" </peptide_modifications>\n" +
" </peptide_settings>\n" +
" <transition_settings>\n" +
" <transition_prediction precursor_mass_type=\"Average\" fragment_mass_type=\"Average\">\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
" <predict_declustering_potential name=\"Test1\" slope=\"0.5\" intercept=\"5\" />\n" +
" </transition_prediction>\n" +
" <transition_filter precursor_charges=\"2,3\" product_charges=\"1,2\"\n" +
" fragment_range_first=\"y3\" fragment_range_last=\"last y-ion - 1\"\n" +
" include_n_proline=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />\n" +
" <transition_libraries ion_match_tolerance=\"0.5\" ion_count=\"3\" pick_from=\"all\" />\n" +
" <transition_integration/>" +
" <transition_instrument min_mz=\"52\" max_mz=\"1503\" />\n" +
" </transition_settings>\n" +
"</settings_summary>";
/// <summary>
/// Test of deserializing v0.1 settings, by deserializing versions written
/// by v0.1 and the current code, and checking for equality.
/// </summary>
[TestMethod]
public void SettingsSerialize_0_1_Test()
{
// ReSharper disable InconsistentNaming
XmlSerializer ser_0_1 = new XmlSerializer(typeof(SrmSettingsList));
XmlSerializer serCurrent = new XmlSerializer(typeof(SrmSettings));
using (TextReader reader_0_1 = new StringReader(SETTINGS_LIST_0_1))
using (TextReader readerCurrent = new StringReader(SETTINGS_CURRENT))
{
SrmSettings settings_0_1 = ((SrmSettingsList) ser_0_1.Deserialize(reader_0_1))[0];
SrmSettings settingsCurrent = (SrmSettings) serCurrent.Deserialize(readerCurrent);
AssertEx.SettingsCloned(settings_0_1, settingsCurrent);
}
// ReSharper restore InconsistentNaming
}
private const string SETTINGS_LIST_0_1 =
"<SrmSettingsList>\n" +
" <ArrayOfSrmSettings xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n" +
" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">\n" +
" <SrmSettings name=\"Default\">\n" +
" <peptide_settings>\n" +
" <enzyme name=\"LysN promisc\" cut=\"KASR\" no_cut=\"\" sense=\"N\" />\n" +
" <digest_settings max_missed_cleavages=\"1\" exclude_ragged_ends=\"true\" />\n" +
" <peptide_filter start=\"0\" min_length=\"5\" max_length=\"30\" min_transtions=\"4\"\n" +
" auto_select=\"true\">\n" +
" <peptide_exclusions>\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
" </peptide_exclusions>\n" +
" </peptide_filter>\n" +
" <peptide_modifications>\n" +
" <static_modifications>\n" +
" <static_modification name=\"Test2\" aminoacid=\"M\"\n" +
" massdiff_monoisotopic=\"5\" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
" </static_modifications>\n" +
" </peptide_modifications>\n" +
" </peptide_settings>\n" +
" <transition_settings>\n" +
" <transition_prediction precursor_mass_type=\"Average\" fragment_mass_type=\"Average\">\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regressions>\n" +
" <regression_ce slope=\"0.0431\" intercept=\"4.7556\" charge=\"2\" />\n" +
" </regressions>\n" +
" </predict_collision_energy>\n" +
// Retention time moved from transition to prediction
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
" <predict_declustering_potential slope=\"0.5\" intercept=\"5\" name=\"Test1\" />\n" +
" </transition_prediction>\n" +
" <transition_filter precursor_charges=\"2,3\" product_charges=\"1,2\"\n" +
" fragment_range_first=\"y3\" fragment_range_last=\"last y-ion - 1\"\n" +
" include_n_prolene=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />\n" +
" <transition_instrument min_mz=\"52\" max_mz=\"1503\" />\n" +
" </transition_settings>\n" +
" </SrmSettings>\n" +
" </ArrayOfSrmSettings>\n" +
"</SrmSettingsList>";
/// <summary>
/// Test de/serialization of all the other types of lists stored
/// in user.config.
/// </summary>
[TestMethod]
public void SettingsSerializeListsTest()
{
AssertEx.Serialization<EnzymeList>(SETTINGS_ENZYME_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<StaticModList>(SETTINGS_STATIC_MOD_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<HeavyModList>(SETTINGS_HEAVY_MOD_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<PeptideExcludeList>(SETTINGS_EXCLUSIONS_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<CollisionEnergyList>(SETTINGS_CE_LIST, (t, c) => CheckSettingsList(t, c, true), false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<DeclusterPotentialList>(SETTINGS_DP_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<RetentionTimeList>(SETTINGS_RT_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
}
private const string SETTINGS_ENZYME_LIST =
"<EnzymeList>\n" +
" <enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"P\" sense=\"C\" />\n" +
" <enzyme name=\"Trypsin/P\" cut=\"KR\" no_cut=\"\" sense=\"C\" />\n" +
" <enzyme name=\"Chymotrypsin\" cut=\"FWYM\" no_cut=\"P\" sense=\"C\" />\n" +
" <enzyme name=\"AspN\" cut=\"D\" no_cut=\"\" sense=\"N\" />\n" +
" <enzyme name=\"Trypsin AspN\" cut_c=\"KR\" no_cut_c=\"P\" cut_n=\"D\" no_cut_n=\"\" />\n" +
"</EnzymeList>";
private const string SETTINGS_STATIC_MOD_LIST =
"<StaticModList>\n" +
" <static_modification name=\"Test1\" aminoacid=\"C\"\n" +
" formula=\"C2H3 - ON4\" />\n" +
" <static_modification name=\"Test2\" terminus=\"N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
"</StaticModList>";
private const string SETTINGS_HEAVY_MOD_LIST =
"<HeavyModList>\n" +
" <static_modification name=\"Test1\" aminoacid=\"C\"\n" +
" formula=\"C2H3 - ON4\" />\n" +
" <static_modification name=\"Test2\" terminus=\"N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
"</HeavyModList>";
private const string SETTINGS_EXCLUSIONS_LIST =
"<PeptideExcludeList>\n" +
" <exclusion name=\"Cys\" regex=\"[C]\" />\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"His\" regex=\"[H]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"RP/KP\" regex=\"[RK]P\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
"</PeptideExcludeList>";
private const string SETTINGS_CE_LIST =
"<CollisionEnergyList>\n" +
" <predict_collision_energy name=\"Thermo\">\n" +
" <regression_ce charge=\"2\" slope=\"0.034\" intercept=\"3.314\" />\n" +
" <regression_ce charge=\"3\" slope=\"0.044\" intercept=\"3.314\" />\n" +
" </predict_collision_energy>\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
"</CollisionEnergyList>";
private const string SETTINGS_DP_LIST =
"<DeclusterPotentialList>\n" +
" <predict_declustering_potential name=\"None\" slope=\"0\" intercept=\"0\" />\n" +
" <predict_declustering_potential name=\"ABI\" slope=\"0.0729\" intercept=\"31.117\" />\n" +
" <predict_declustering_potential name=\"Test1\" slope=\"0.5\" intercept=\"5\" />\n" +
"</DeclusterPotentialList>";
private const string SETTINGS_RT_LIST =
"<RetentionTimeList>\n" +
" <predict_retention_time name=\"None\" time_window=\"0\">\n" +
" <regression_rt slope=\"0\" intercept=\"0\" />\n" +
" </predict_retention_time>\n" +
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
"</RetentionTimeList>";
/// <summary>
/// Test XML deserialization where major parts are missing.
/// </summary>
[TestMethod]
public void SettingsSerializeStubTest()
{
XmlSerializer ser = new XmlSerializer(typeof(SrmSettings));
using (TextReader reader = new StringReader(XML_DIRECTIVE + string.Format(SETTINGS_STUBS, SrmSettingsList.DefaultName)))
{
var target = SrmSettingsList.GetDefault();
var copy = (SrmSettings) ser.Deserialize(reader);
Assert.AreSame(target.PeptideSettings.Enzyme, copy.PeptideSettings.Enzyme);
Assert.AreSame(target.PeptideSettings.DigestSettings, copy.PeptideSettings.DigestSettings);
AssertEx.Cloned(target.PeptideSettings.Prediction, copy.PeptideSettings.Prediction);
Assert.AreSame(target.PeptideSettings.Filter, copy.PeptideSettings.Filter);
Assert.AreSame(target.PeptideSettings.Libraries, copy.PeptideSettings.Libraries);
Assert.AreSame(target.PeptideSettings.Modifications, copy.PeptideSettings.Modifications);
AssertEx.Cloned(target.PeptideSettings, copy.PeptideSettings);
Assert.AreSame(target.TransitionSettings.Prediction, copy.TransitionSettings.Prediction);
Assert.AreSame(target.TransitionSettings.Filter, copy.TransitionSettings.Filter);
Assert.AreSame(target.TransitionSettings.Libraries, copy.TransitionSettings.Libraries);
Assert.AreSame(target.TransitionSettings.Instrument, copy.TransitionSettings.Instrument);
AssertEx.Cloned(target.TransitionSettings, copy.TransitionSettings);
AssertEx.Cloned(target, copy);
}
}
// This string should deserialize successfully to the default SRM settings.
private const string SETTINGS_STUBS =
"<settings_summary name=\"{0}\">\n" +
" <peptide_settings>\n" +
" <peptide_prediction/>\n" +
" </peptide_settings>\n" +
" <transition_settings/>\n" +
"</settings_summary>";
/// <summary>
/// Test error handling in XML deserialization of <see cref="Enzyme"/>.
/// </summary>
[TestMethod]
public void SerializeEnzymeTest()
{
// Valid first
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut=\"M\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (2)\" cut=\"M\" sense=\"N\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (3)\" cut=\"ACDEFGHIKLMNPQRSTVWY\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (4)\" cut_c=\"M\" cut_n=\"K\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (4)\" cut_c=\"M\" no_cut_c=\"N\" cut_n=\"K\" no_cut_n=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_c=\"M\" no_cut_c=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_n=\"M\" no_cut_n=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_n=\"M\" no_cut_n=\"P\" semi=\"True\"/>");
// Missing parameters
AssertEx.DeserializeError<Enzyme>("<enzyme/>");
// No name
AssertEx.DeserializeError<Enzyme>("<enzyme cut=\"KR\" no_cut=\"P\" sense=\"C\" />");
// No cleavage
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"\" no_cut=\"P\" sense=\"N\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"\" no_cut_c=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"\" no_cut_n=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"M\" no_cut_c=\"N\" cut_n=\"\" no_cut_n=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"\" no_cut_c=\"N\" cut_n=\"K\" no_cut_n=\"P\" />");
// Bad cleavage
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"X\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"MKRM\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"MKRM\" no_cut_c=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"MKRM\" no_cut_n=\"P\" />");
// Bad restrict
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"+\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"AMRGR\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"KR\" no_cut_c=\"+\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"KR\" no_cut_n=\"AMRGR\" sense=\"C\" />");
// Bad sense
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"P\" sense=\"X\" />");
}
/// <summary>
/// Test Enzyme digestion
/// </summary>
[TestMethod]
public void EnzymeDigestionTest()
{
const string sequence = "KKRFAHFAHPRFAHKPAHKAHMERMLSTKKKRSTTKRK";
var enzymeTrypsin = new Enzyme("Trypsin", "KR", "P");
DigestsTo(sequence, false, 12, enzymeTrypsin, "FAHFAHPR", "FAHKPAHK", "AHMER", "MLSTK", "STTK"); // NB enzyme.CountCleavagePoints gave 8 rather than 12 prior to Feb 2016
DigestsTo(sequence, true, 12, enzymeTrypsin, "FAHKPAHK", "AHMER");
var enzymeReverseTrypsin = new Enzyme("R-Trypsin", "KR", "P", SequenceTerminus.N);
DigestsTo(sequence, false, 12, enzymeReverseTrypsin, "RFAHFAHPRFAH", "KPAH", "KAHME", "RMLST", "RSTT");
DigestsTo(sequence, true, 12, enzymeReverseTrypsin, "KPAH", "KAHME");
var enzymeUnrestrictedTrypsin = new Enzyme("U-Trypsin", "KR", null);
DigestsTo(sequence, false, 13, enzymeUnrestrictedTrypsin, "FAHFAHPR", "FAHK", "PAHK", "AHMER", "MLSTK", "STTK");
DigestsTo(sequence, true, 13, enzymeUnrestrictedTrypsin, "FAHK", "PAHK", "AHMER");
var enzymeUnReverseTrypsin = new Enzyme("U-R-Trypsin", "KR", null, SequenceTerminus.N);
DigestsTo(sequence, false, 13, enzymeUnReverseTrypsin, "RFAHFAHP", "RFAH", "KPAH", "KAHME", "RMLST", "RSTT");
DigestsTo(sequence, true, 13, enzymeUnReverseTrypsin,"RFAH", "KPAH", "KAHME");
var enzymeBothTrypsinR = new Enzyme("B-TrypsinR", "R", "P", "K", "P");
DigestsTo(sequence, false, 12, enzymeBothTrypsinR, "KR", "FAHFAHPR", "FAH", "KPAH", "KAHMER", "MLST", "KR", "STT", "KR");
DigestsTo(sequence, true, 12, enzymeBothTrypsinR, "KR", "FAHFAHPR", "FAH", "KPAH", "KAHMER", "STT", "KR");
var enzymeBothTrypsinK = new Enzyme("B-TrypsinK", "K", "P", "R", "P");
DigestsTo(sequence, false, 8, enzymeBothTrypsinK, "RFAHFAHPRFAHKPAHK", "AHME", "RMLSTK", "RSTTK", "RK");
DigestsTo(sequence, true, 8, enzymeBothTrypsinK, "AHME", "RK");
var enzymeUnrestrictedBothTrypsin = new Enzyme("U-B-Trypsin", "K", null, "R", null);
DigestsTo(sequence, false, 10, enzymeUnrestrictedBothTrypsin, "RFAHFAHP", "RFAHK", "PAHK", "AHME", "RMLSTK", "RSTTK", "RK");
DigestsTo(sequence, true, 10, enzymeUnrestrictedBothTrypsin, "RFAHK", "PAHK", "AHME", "RK");
var enzymeTrypsinSemi = new Enzyme("Trypsin (semi)", "KR", "P", null, null, true);
DigestsTo(sequence, false, 12, null, 4, enzymeTrypsinSemi,
"FAHFAHPR",
"FAHFAHP",
"FAHFAH",
"FAHFA",
"FAHF",
"AHFAHPR",
"HFAHPR",
"FAHPR",
"AHPR",
"FAHKPAHK",
"FAHKPAH",
"FAHKPA",
"FAHKP",
"FAHK",
"AHKPAHK",
"HKPAHK",
"KPAHK",
"PAHK",
"AHMER",
"AHME",
"HMER",
"MLSTK",
"MLST",
"LSTK",
"STTK");
DigestsTo("ASKSUPAHLONGNONCLEAVINGSEQUENCERCPEPTIDE", false, 2, 8, 5, enzymeTrypsinSemi,
"SUPAHLON",
"SUPAHLO",
"SUPAHL",
"SUPAH",
"EQUENCER",
"QUENCER",
"UENCER",
"ENCER",
"CPEPTIDE",
"CPEPTID",
"CPEPTI",
"CPEPT",
"PEPTIDE",
"EPTIDE",
"PTIDE");
// Make sure Equals and GetHashCode are implemented to include the new semi bool
var trypCompare = new Enzyme("Trypsin", "KR", "P", null, null);
var trypSemiCompare = new Enzyme("Trypsin", "KR", "P", null, null, true);
Assert.AreNotEqual(trypCompare, trypSemiCompare);
Assert.AreNotEqual(trypCompare.GetHashCode(), trypSemiCompare.GetHashCode());
// And serialization is implemented to include new property
AssertEx.Serializable(enzymeTrypsinSemi, (e1, e2) =>
{
Assert.AreEqual(e1, e2);
Assert.AreNotSame(e1, e2);
});
}
private static void DigestsTo(string sequence, bool excludeRaggedEnds, int expectedCleavagePoints, Enzyme enzyme, params string[] pepSeqs)
{
DigestsTo(sequence, excludeRaggedEnds, expectedCleavagePoints, null, null, enzyme, pepSeqs);
}
private static void DigestsTo(string sequence, bool excludeRaggedEnds, int expectedCleavagePoints, int? maxPepLen, int? minPepLen, Enzyme enzyme, params string[] pepSeqs)
{
var fastaSeq = new FastaSequence("p", "d", new ProteinMetadata[0], sequence);
var digestSettings = new DigestSettings(0, excludeRaggedEnds);
var peptides = "Missed " + enzyme.CountCleavagePoints(sequence) + " " +
string.Join(" ", enzyme.Digest(fastaSeq, digestSettings, maxPepLen, minPepLen).Select(p => p.Target));
var expected = "Missed " + expectedCleavagePoints + " " + string.Join(" ", pepSeqs);
Assert.AreEqual(expected, peptides);
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="DigestSettings"/>.
/// </summary>
[TestMethod]
public void SerializeDigestTest()
{
// Valid first
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings max_missed_cleavages=\"0\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings max_missed_cleavages=\"9\" exclude_ragged_ends=\"false\" />");
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings/>");
// Errors
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"10\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"-1\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"0\" exclude_ragged_ends=\"yes\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="PeptidePrediction"/>.
/// </summary>
[TestMethod]
public void SerializePeptidePredictionTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"false\" />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"true\" />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"true\" measured_rt_window=\"2.0\"/>");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"false\" measured_rt_window=\"2.0\"/>");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"5.0\"/>");
// Errors (out of range)
AssertEx.DeserializeError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"0.01\"/>");
AssertEx.DeserializeError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"600.0\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="PeptideFilter"/>.
/// </summary>
[TestMethod]
public void SerializePeptideFilterTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter min_length=\"2\" max_length=\"200\" min_transtions=\"1\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"none\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"protein\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"gene\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"species\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"100\" min_length=\"2\" max_length=\"5\" auto_select=\"true\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\" auto_select=\"true\"><peptide_exclusions/></peptide_filter>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Valid\" regex=\"^[^C]$\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Valid\" regex=\"M\\[\" include=\"true\" match_mod_sequence=\"true\"/></peptide_exclusions></peptide_filter>");
// Missing parameters
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter/>");
// min_length range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"1\" max_length=\"30\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"500\" max_length=\"30\"/>");
// max_length range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"10\" max_length=\"8\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"4\" max_length=\"4\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"8\" max_length=\"500\"/>");
// start range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"-1\" min_length=\"8\" max_length=\"25\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"50000\" min_length=\"8\" max_length=\"25\"/>");
// bad exclusions
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Noex\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion regex=\"PX\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Invalid\" regex=\"!(M[)\" match_mod_sequence=\"true\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Invalid\" regex=\"M\\[\" include=\"T\" match_mod_sequence=\"T\"/></peptide_exclusions></peptide_filter>");
// bad peptide uniqueness mode
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"nonsense\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="PeptideModifications"/>.
/// </summary>
[TestMethod]
public void SerializePeptideModificationsTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptideModifications>("<peptide_modifications><static_modifications/></peptide_modifications>");
AssertEx.DeserializeNoError<PeptideModifications>("<peptide_modifications/>");
var mods = AssertEx.Deserialize<PeptideModifications>("<peptide_modifications internal_standard=\"none\"><static_modifications/><heavy_modifications/></peptide_modifications>");
Assert.AreEqual(0, mods.InternalStandardTypes.Count);
mods = AssertEx.Deserialize<PeptideModifications>("<peptide_modifications internal_standard=\"light\"></peptide_modifications>");
Assert.AreEqual(1, mods.InternalStandardTypes.Count);
Assert.AreEqual("light", mods.InternalStandardTypes[0].Name);
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="StaticMod"/>.
/// </summary>
[TestMethod]
public void SerializeStaticModTest()
{
const string structuralModificationType = "structural_modification_type";
const string isotopeModificationType = "isotope_modification_type";
// Valid first
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" terminus=\"C\" formula=\"C2H3ON15PS\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" terminus=\"N\" formula=\"-ON4\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" formula=\"C23 - O N P14\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Oxidation\" aminoacid=\"M, D\" formula=\"O\" variable=\"true\"/>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" formula=\"C23N\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"15N\" label_15N=\"true\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Heavy K\" aminoacid=\"K\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Aqua\" aminoacid=\"K, R\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss1\" aminoacid=\"T, S\" formula=\"HPO3\"><fragment_loss formula=\"HP3O4\"/></static_modification>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss3\" aminoacid=\"T, S\" formula=\"HPO3\" explicit_decl=\"true\"><fragment_loss formula=\"HP3O4\"/><fragment_loss formula=\"H2O\"/><fragment_loss formula=\"NH3\"/></static_modification>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\"><potential_loss formula=\"NH3\"/></static_modification>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"LossInclusion\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\" inclusion=\"Always\"/><potential_loss formula=\"HP2O3\" inclusion=\"Library\"/><potential_loss formula=\"HP1O2\" inclusion=\"Never\"/></static_modification>", true, true, structuralModificationType);
// Missing parameters
AssertEx.DeserializeError<StaticMod>("<static_modification />");
// Bad amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"X\" formula=\"C23N\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"KR\" formula=\"C23N\" />");
// Bad terminus
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" terminus=\"X\" formula=\"C23N\" />");
// Bad formula
AssertEx.DeserializeError<StaticMod, ArgumentException>("<static_modification name=\"Mod\" aminoacid=\"K\" formula=\"C23NHx2\" />");
// Terminal label without amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"15N\" terminus=\"C\" label_13C=\"true\"/>");
// Formula and labeled atoms
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"15N\" label_15N=\"true\" formula=\"C23N\" />");
// Missing formula and masses
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" formula=\"\" />");
// Both formula and masses
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" formula=\"C23N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />");
// Bad amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"A, B, C\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"DM\" />");
// Variable with no amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" variable=\"true\" />");
// Loss only failures
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\" variable=\"true\"><potential_loss formula=\"NH3\"/></static_modification>");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\" explicit_decl=\"true\"><potential_loss formula=\"NH3\"/></static_modification>");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"LossInclusion\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\" inclusion=\"Sometimes\"/></static_modification>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="FragmentLoss"/>.
/// </summary>
[TestMethod]
public void SerializeFragmentLossTest()
{
// Valid first
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss formula=\"H2O\"/>");
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss formula=\"HCO3\"/>");
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />");
// Negative formula
AssertEx.DeserializeError<FragmentLoss>("<potential_loss formula=\"-H2O\"/>");
// Too big formula
AssertEx.DeserializeError<FragmentLoss>("<potential_loss formula=\"N393\"/>");
// Bad formula
AssertEx.DeserializeError<FragmentLoss, ArgumentException>("<potential_loss formula=\"H3Mx5Cl5\"/>");
// Constant mass out of range
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"" + FragmentLoss.MIN_LOSS_MASS / 2 + "\"\n" +
" massdiff_average=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\"\n" +
" massdiff_average=\"" + FragmentLoss.MIN_LOSS_MASS / 2 + "\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"" + (FragmentLoss.MAX_LOSS_MASS + 1) + "\"\n" +
" massdiff_average=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\"\n" +
" massdiff_average=\"" + (FragmentLoss.MAX_LOSS_MASS + 1) + "\" />");
// Missing information
AssertEx.DeserializeError<FragmentLoss>("<potential_loss/>");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_average=\"1\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionPrediction"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionPredictionTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionPrediction>("<transition_prediction>" +
"<predict_collision_energy name=\"Pass\">" +
"<regressions><regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" /></regressions>" +
"</predict_collision_energy></transition_prediction>");
// Bad mass type
AssertEx.DeserializeError<TransitionPrediction>("<transition_prediction precursor_mass_type=\"Bad\">" +
"<predict_collision_energy name=\"Fail\">" +
"<regressions><regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" /></regressions>" +
"</predict_collision_energy></transition_prediction>");
// No collision energy regression (Allowed during 3.7.1 development)
AssertEx.DeserializeNoError<TransitionPrediction>("<transition_prediction/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="CollisionEnergyRegression"/>.
/// </summary>
[TestMethod]
public void SerializeCollisionEnergyTest()
{
// Valid first
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"</predict_collision_energy>");
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regression_ce charge=\"1\" /><regression_ce charge=\"2\" /><regression_ce charge=\"3\" /><regression_ce charge=\"4\" />" +
"</predict_collision_energy>");
// v0.1 format
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regressions><regression_ce /></regressions>" +
"</predict_collision_energy>");
// No regressions
AssertEx.DeserializeError<CollisionEnergyRegression>("<predict_collision_energy name=\"Fail\" />");
// Repeated charge
AssertEx.DeserializeError<CollisionEnergyRegression>("<predict_collision_energy name=\"Fail\">" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"</predict_collision_energy>");
}
[TestMethod]
public void SerializeCollisionEnergyListTest()
{
XmlSerializer ser = new XmlSerializer(typeof(CollisionEnergyList));
using (TextReader reader = new StringReader(
"<CollisionEnergyList>\n" +
" <predict_collision_energy name=\"Thermo\">\n" +
" <regression_ce charge=\"2\" slope=\"0.034\" intercept=\"3.314\" />\n" +
" <regression_ce charge=\"3\" slope=\"0.044\" intercept=\"3.314\" />\n" +
" </predict_collision_energy>\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
"</CollisionEnergyList>"))
{
var listCE = (CollisionEnergyList) ser.Deserialize(reader);
Assert.AreSame(CollisionEnergyList.NONE, listCE[0]);
Assert.AreEqual(listCE.GetDefaults(listCE.RevisionIndexCurrent).Count() + 2, listCE.Count);
Assert.AreEqual(listCE.RevisionIndexCurrent, listCE.RevisionIndex);
foreach (var regressionCE in listCE.GetDefaults(listCE.RevisionIndexCurrent))
{
CollisionEnergyRegression regressionTmp;
Assert.IsTrue(listCE.TryGetValue(regressionCE.GetKey(), out regressionTmp));
Assert.AreEqual(regressionCE, regressionTmp);
}
}
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="DeclusteringPotentialRegression"/>.
/// </summary>
[TestMethod]
public void SerializeDeclusteringPotentialTest()
{
// Valid first
AssertEx.DeserializeNoError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\"" +
" slope=\"0.1\" intercept=\"4.7\" />");
AssertEx.DeserializeNoError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\" />");
// No name
AssertEx.DeserializeError<DeclusteringPotentialRegression>("<predict_declustering_potential" +
" slope=\"0.1\" intercept=\"4.7\" />");
// Non-numeric parameter
AssertEx.DeserializeError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\"" +
" slope=\"X\" intercept=\"4.7\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionFilter"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionFilterTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" " +
"include_n_prolene=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"1,2,3,4,5,6,7,8,9\" product_charges=\"1,2,3,4,5\" " +
"fragment_range_first=\"(m/z > precursor) - 2\" fragment_range_last=\"start + 4\" " +
"include_n_prolene=\"false\" include_c_glu_asp=\"false\" auto_select=\"false\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"P,Y,Z\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"y,b,c,z,a,x,p\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
// v0.7 measured_ion examples
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\">" +
"<measured_ion name=\"N-terminal to Proline\" cut=\"P\" sense=\"N\"/>" +
"<measured_ion name=\"Reporter Test\" formula=\"C4H2O\" charges=\"1\"/>" +
"</transition_filter>");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + TransitionFilter.MAX_EXCLUSION_WINDOW + "\"/>");
// Bad charges
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"0\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"0\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2,2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"3\" product_charges=\"" + (Transition.MAX_PRODUCT_CHARGE + 1) + "\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"" + (TransitionGroup.MAX_PRECURSOR_CHARGE + 1) + "\" product_charges=\"2\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
// Bad ion type
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"precursor\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"d,w\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
// Bad fragments
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"b10\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last z-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" />");
// Out of range precursor m/z window
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + (TransitionFilter.MAX_EXCLUSION_WINDOW*2).ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + (TransitionFilter.MIN_EXCLUSION_WINDOW/2).ToString(CultureInfo.InvariantCulture) + "\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="MeasuredIon"/>.
/// </summary>
[TestMethod]
public void SerializeMeasuredIonTest()
{
// Valid first
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + MeasuredIon.MAX_MIN_FRAGMENT_LENGTH.ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"N-terminal many\"" +
" cut=\"ACPESTID\" no_cut=\"ACPESTID\" sense=\"N\" min_length=\"" + MeasuredIon.MIN_MIN_FRAGMENT_LENGTH.ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" sense=\"N\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"H4P2O5\" charges=\"1\"/>");
// Old style (as detected by use of "charges" instead of "charge"), mass is assumed to be M-H
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS-2*BioMassCalc.MassProton).ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
// Modern style, mass is assumed to be the actual ion mass (which will decrease by charge*massElectron)
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + (MeasuredIon.MIN_REPORTER_MASS).ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS).ToString(CultureInfo.InvariantCulture) + "\" charge=\"1\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name =\"Reporter Formula\" formula = \"H2O\" charges = \"1\" optional = \"true\"/>");
// No name
AssertEx.DeserializeError<MeasuredIon>("<measured_ion" +
" cut=\"P\" sense=\"N\"/>");
// No cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" sense=\"N\"/>");
// Invalid cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"b\" sense=\"N\"/>");
// Invalid no_cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" no_cut=\"b\" sense=\"N\"/>");
// Missing sense attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\"/>");
// Invalid sense attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" sense=\"x\"/>");
// Min length too short
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + (MeasuredIon.MIN_MIN_FRAGMENT_LENGTH - 1).ToString(CultureInfo.InvariantCulture) + "\"/>");
// Min length too long
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + (MeasuredIon.MAX_MIN_FRAGMENT_LENGTH + 1).ToString(CultureInfo.InvariantCulture) + "\"/>");
// Reporter with bad formulas
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"Hx3\" charges=\"1\"/>");
// Reporter with formulas producing out of range masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"H2\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"HP230O200\" charges=\"1\"/>");
// Reporter without formula and without both masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_average=\"" + MeasuredIon.MAX_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
// Reporter without formula and out of range masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + (MeasuredIon.MIN_REPORTER_MASS - 0.1).ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + MeasuredIon.MAX_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS + 0.1).ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
}
private const string LEGACY_LOW_ACCURACY = "Low Accuracy";
private const string LEGACY_HIGH_ACCURACY = "High Accuracy";
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionInstrument"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionInstrumentTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"1503\" />");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" />");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.4\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.001\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" dynamic_min=\"true\"/>");
// Backward compatibility with v0.7.1
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"None\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Single\"/>", false); // Use defaults
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Multiple\"/>", false); // Use defaults
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Single\" precursor_filter=\"0.11\" product_filter_type=\"" +
LEGACY_LOW_ACCURACY + "\" product_filter=\"1\"/>", false);
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_filter_type=\"" +
LEGACY_HIGH_ACCURACY + "\" product_filter=\"10\"/>", false);
// Ignore extra filter values when None specified for precursor filter type
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"None\" precursor_filter=\"0.11\" product_filter_type=\"" +
LEGACY_LOW_ACCURACY + "\" product_filter=\"1\"/>", false);
// Empty element
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument />");
// Out of range values
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"-1\" max_mz=\"1503\" />");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"100\" />");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0\"/>");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.65\"/>");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" dynamic_min=\"maybe\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionFullScan"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionFullScanTest()
{
string validLoRes = ToXml((TransitionFullScan.MIN_LO_RES + TransitionFullScan.MAX_LO_RES) / 2);
string validHiRes = ToXml((TransitionFullScan.MIN_HI_RES + TransitionFullScan.MAX_HI_RES) / 2);
string validHiResMz = ToXml((TransitionFullScan.MIN_RES_MZ + TransitionFullScan.MAX_RES_MZ) / 2);
string validPPM = ToXml((TransitionFullScan.MIN_CENTROID_PPM + TransitionFullScan.MAX_CENTROID_PPM) / 2);
// Valid first
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan />");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.qit +"\" " +
"precursor_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.tof + "\" " +
"precursor_res=\"" + validHiRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_filter=\"0.11\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes+ "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_left_filter=\"5\" precursor_right_filter=\"20\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + validHiResMz + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.centroided + "\" " +
"precursor_res=\"" + validPPM + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan product_mass_analyzer=\"" + FullScanMassAnalyzerType.centroided + "\" " +
"product_res=\"" + validPPM + "\"/>");
// Isotope enrichments
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.tof + "\" " +
"precursor_res=\"" + validHiRes + "\">" + VALID_ISOTOPE_ENRICHMENT_XML + "</transition_full_scan>");
// Errors
string overMaxMulti = ToXml(TransitionFullScan.MAX_PRECURSOR_MULTI_FILTER * 2);
string underMinMulti = ToXml(TransitionFullScan.MIN_PRECURSOR_MULTI_FILTER / 2);
string overMaxPPM = ToXml(TransitionFullScan.MAX_CENTROID_PPM * 2);
string underMinPPM = ToXml(TransitionFullScan.MIN_CENTROID_PPM / 2);
string underMinLoRes = ToXml(TransitionFullScan.MIN_LO_RES / 2);
string overMaxLoRes = ToXml(TransitionFullScan.MAX_LO_RES * 2);
string underMinHiRes = ToXml(TransitionFullScan.MIN_HI_RES / 2);
string overMaxHiRes = ToXml(TransitionFullScan.MAX_HI_RES * 2);
string underMinResMz = ToXml(TransitionFullScan.MIN_RES_MZ / 2);
string defaultResMz = ToXml(TransitionFullScan.DEFAULT_RES_MZ);
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"Unknown\" " +
"product_resolution=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
"Unknown" + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"" + overMaxMulti + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"" + underMinMulti + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_left_filter=\"5\" precursor_right_filter=\"fail\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + underMinLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + overMaxLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + underMinHiRes + "\" product_res_mz=\"" + defaultResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" product_res=\"" + overMaxHiRes + "\" product_res_mz=\"" + defaultResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + underMinResMz + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" precursor_res=\"" + underMinPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" precursor_res=\"" + overMaxPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" product_res=\"" + underMinPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" product_res=\"" + overMaxPPM + "\"/>");
// With new isolation scheme tag.
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_resoltion=""{1}"" acquisition_method=""{2}"">
<isolation_scheme name=""test"" precursor_filter=""{3}""/>
</transition_full_scan>",
FullScanMassAnalyzerType.qit, validLoRes, FullScanAcquisitionMethod.DIA, overMaxMulti));
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_resoltion=""{1}"" acquisition_method=""{2}"">
<isolation_scheme name=""test"" precursor_filter=""{3}""/>
</transition_full_scan>",
FullScanMassAnalyzerType.qit, validLoRes, FullScanAcquisitionMethod.DIA, underMinMulti));
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_res=""{1}"" product_res_mz=""{2}"" acquisition_method=""{3}"">
<isolation_scheme name=""test"" precursor_left_filter=""5"" precursor_right_filter=""fail""/>
</transition_full_scan>",
FullScanMassAnalyzerType.ft_icr, validHiRes, validHiResMz, FullScanAcquisitionMethod.DIA));
// Check backward compatibility reading old "Single" and "Multiple" filter types.
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Single\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"0.11\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_left_filter=\"5\" precursor_right_filter=\"20\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + validHiResMz + "\" " +
"precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" " +
"precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>"); // Use default res mz
// Isotope enrichments with low res
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.qit + "\" " +
"precursor_res=\"" + validLoRes + "\">" + VALID_ISOTOPE_ENRICHMENT_XML + "</transition_full_scan>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="IsotopeEnrichments"/>.
/// </summary>
[TestMethod]
public void SerializeIsotopeEnrichmentsTest()
{
// Valid first
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"C'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"N'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT / 2) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O'\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O&quot;\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT * 2) + "</atom_percent_enrichment>");
// Invalid
for (char c = 'A'; c <= 'Z'; c++)
{
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"" + c + "\">0.9</atom_percent_enrichment>");
}
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"N'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT+1) + "</atom_percent_enrichment>");
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O&quot;\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT-1) + "</atom_percent_enrichment>");
// Valid enrichments
AssertEx.DeserializeNoError<IsotopeEnrichments>(VALID_ISOTOPE_ENRICHMENT_XML);
// Invalid enrichments
AssertEx.DeserializeNoError<IsotopeEnrichments>("<isotope_enrichments name=\"Cambridge Isotope Labs\">" +
"<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"C'\">0.91</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"N'\">0.92</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O'\">0.93</atom_percent_enrichment>" +
"</isotope_enrichments>"); // Missing label atom O"
string expected = null;
var enrichments = AssertEx.RoundTrip(IsotopeEnrichmentsList.GetDefault(), ref expected);
foreach (var symbol in BioMassCalc.HeavySymbols)
{
string isotopeSymbol = symbol;
double expectedEnrichment = BioMassCalc.GetIsotopeEnrichmentDefault(isotopeSymbol);
// Make sure the distribution in the IsotopeAbundances object got set correctly
double heavyMass = BioMassCalc.GetHeavySymbolMass(isotopeSymbol);
Assert.IsTrue(enrichments.IsotopeAbundances.ContainsKey(isotopeSymbol));
MassDistribution massDistribution;
Assert.IsTrue(enrichments.IsotopeAbundances.TryGetValue(isotopeSymbol, out massDistribution));
foreach (var elementIsotopeMass in massDistribution.Keys)
{
// If the heavy mass is one of the element's stable isotopes, then it should match exactly
// If it's not a stable isotope, then it must be at least some number close to 1 Dalton away.
if (Math.Abs(elementIsotopeMass - heavyMass) < .9)
{
Assert.AreEqual(elementIsotopeMass, heavyMass);
}
}
// Make sure the enrichments are set correctly
int indexEnrichment = enrichments.Enrichments.IndexOf(item => Equals(item.IsotopeSymbol, isotopeSymbol));
Assert.AreNotEqual(-1, indexEnrichment);
Assert.AreEqual(expectedEnrichment, enrichments.Enrichments[indexEnrichment].AtomPercentEnrichment);
}
foreach (var symDist in BioMassCalc.DEFAULT_ABUNDANCES)
{
var distDefault = symDist.Value;
var distEnriched = enrichments.IsotopeAbundances[symDist.Key];
AssertEx.AreEqualDeep(distDefault.ToArray(), distEnriched.ToArray());
}
}
/// <summary>
/// Test serialization of ion mobility data
/// </summary>
[TestMethod]
public void SerializeIonMobilityTest()
{
// Check using drift time predictor without measured drift times
const string predictor = "<predict_drift_time name=\"test\" resolving_power=\"100\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/>" +
"<regression_dt charge=\"1\" slope=\"1\" intercept=\"0\"/></predict_drift_time>";
const string predictorNoRegression = "<predict_drift_time name=\"test\" resolving_power=\"100\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/></predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor);
var pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
var driftTimeMax = 5000;
var driftTime = 2000;
Assert.AreEqual(40, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor.Replace("100", "0"), Resources.DriftTimePredictor_Validate_Resolving_power_must_be_greater_than_0_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictorNoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
// Check using drift time predictor with only measured drift times, and no high energy drift offset
const string predictor1 = "<predict_drift_time name=\"test1\" resolving_power=\"100\"><measured_dt modified_sequence=\"JLMN\" charge=\"1\" drift_time=\"17.0\" /> </predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor1);
var pred1 = AssertEx.Deserialize<IonMobilityPredictor>(predictor1);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred1.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred1.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred1.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred1.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(17.0, pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).IonMobility.Mobility);
Assert.AreEqual(17.0, pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).GetHighEnergyDriftTimeMsec() ?? 0); // Apply the high energy offset
Assert.IsFalse(pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that charge state
Assert.IsFalse(pred1.GetMeasuredIonMobility(new LibKey("LMNJK", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that peptide
// Check using drift time predictor with only measured drift times, and a high energy scan drift time offset
const string predictor2 = "<predict_drift_time name=\"test2\" resolving_power=\"100\"><measured_dt modified_sequence=\"JLMN\" charge=\"1\" drift_time=\"17.0\" collisional_cross_section=\"0\" high_energy_drift_time_offset=\"-1.0\"/> </predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor2);
var pred2 = AssertEx.Deserialize<IonMobilityPredictor>(predictor2);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred2.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred2.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred2.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred2.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(17.0, pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).IonMobility.Mobility);
Assert.AreEqual(16.0, pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).GetHighEnergyDriftTimeMsec() ?? 0); // Apply the high energy offset
Assert.IsFalse(pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that charge state
Assert.IsFalse(pred2.GetMeasuredIonMobility(new LibKey("LMNJK", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that peptide
// Check using drift time predictor with only measured drift times, and a high energy scan drift time offset, and linear width
string predictor3 = "<predict_drift_time name=\"test\" peak_width_calc_type=\"resolving_power\" resolving_power=\"100\" width_at_dt_zero=\"20\" width_at_dt_max=\"500\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/>" +
"<regression_dt charge=\"1\" slope=\"1\" intercept=\"0\"/></predict_drift_time>";
string predictor3NoRegression = "<predict_drift_time name=\"test\" peak_width_calc_type=\"resolving_power\" resolving_power=\"100\" width_at_dt_zero=\"100\" width_at_dt_max=\"500\" > <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/></predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor3);
pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor3);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(40, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
var widthAtDt0 = 20;
var widthAtDtMax = 500;
Assert.AreEqual(widthAtDt0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(widthAtDtMax, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("100", "0"), Resources.DriftTimePredictor_Validate_Resolving_power_must_be_greater_than_0_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3NoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
predictor3 = predictor3.Replace("\"resolving_power\"", "\"linear_range\"");
predictor3NoRegression = predictor3NoRegression.Replace("\"resolving_power\"", "\"linear_range\"");
pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor3);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.linear_range, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(widthAtDt0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(widthAtDtMax, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("20", "-1"), Resources.DriftTimeWindowWidthCalculator_Validate_Peak_width_must_be_non_negative_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("500", "-1"), Resources.DriftTimeWindowWidthCalculator_Validate_Peak_width_must_be_non_negative_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3NoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
Assert.AreEqual(widthAtDt0 + (widthAtDtMax-widthAtDt0)*driftTime/driftTimeMax, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
}
private const string VALID_ISOTOPE_ENRICHMENT_XML =
"<isotope_enrichments name=\"Cambridge Isotope Labs\">" +
"<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"C'\">0.91</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"N'\">0.92</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O'\">0.93</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O"\">0.94</atom_percent_enrichment>" +
"</isotope_enrichments>";
private static string ToXml(double value)
{
return value.ToString(CultureInfo.InvariantCulture);
}
private static void CheckSettingsList<TItem>(SettingsList<TItem> target, SettingsList<TItem> copy)
where TItem : IKeyContainer<string>, IXmlSerializable
{
CheckSettingsList(target, copy, false);
}
private static void CheckSettingsList<TItem>(SettingsList<TItem> target, SettingsList<TItem> copy, bool firstSame)
where TItem : IKeyContainer<string>, IXmlSerializable
{
Assert.AreEqual(target.Count, copy.Count);
for (int i = 0; i < target.Count; i++)
{
if (firstSame && i == 0)
Assert.AreSame(target[i], copy[i]);
else
AssertEx.Cloned(target[i], copy[i]);
}
}
}
}
| 1 | 13,227 | Why change the name in the XML? | ProteoWizard-pwiz | .cs |
@@ -132,7 +132,11 @@ class User < ActiveRecord::Base
# = Callbacks =
# =============
- after_update :when_org_changes
+ before_update :clear_other_organisation, if: :org_id_changed?
+
+ after_update :delete_perms!, if: :org_id_changed?, unless: :can_change_org?
+
+ after_update :remove_token!, if: :org_id_changed?, unless: :can_change_org?
# =================
# = Class methods = | 1 | # == Schema Information
#
# Table name: users
#
# id :integer not null, primary key
# accept_terms :boolean
# active :boolean default(TRUE)
# api_token :string
# confirmation_sent_at :datetime
# confirmation_token :string
# confirmed_at :datetime
# current_sign_in_at :datetime
# current_sign_in_ip :string
# email :string(80) default(""), not null
# encrypted_password :string default("")
# firstname :string
# invitation_accepted_at :datetime
# invitation_created_at :datetime
# invitation_sent_at :datetime
# invitation_token :string
# invited_by_type :string
# last_sign_in_at :datetime
# last_sign_in_ip :string
# other_organisation :string
# recovery_email :string
# remember_created_at :datetime
# reset_password_sent_at :datetime
# reset_password_token :string
# sign_in_count :integer default(0)
# surname :string
# created_at :datetime not null
# updated_at :datetime not null
# invited_by_id :integer
# language_id :integer
# org_id :integer
#
# Indexes
#
# index_users_on_email (email) UNIQUE
# index_users_on_org_id (org_id)
#
# Foreign Keys
#
# fk_rails_... (language_id => languages.id)
# fk_rails_... (org_id => orgs.id)
#
class User < ActiveRecord::Base
include ConditionalUserMailer
include ValidationMessages
include ValidationValues
##
# Devise
# Include default devise modules. Others available are:
# :token_authenticatable, :confirmable,
# :lockable, :timeoutable and :omniauthable
devise :invitable, :database_authenticatable, :registerable, :recoverable,
:rememberable, :trackable, :validatable, :omniauthable,
:omniauth_providers => [:shibboleth, :orcid]
##
# User Notification Preferences
serialize :prefs, Hash
# ================
# = Associations =
# ================
has_and_belongs_to_many :perms, join_table: :users_perms
belongs_to :language
belongs_to :org
has_one :pref
has_many :answers
has_many :notes
has_many :exported_plans
has_many :roles, dependent: :destroy
has_many :plans, through: :roles
has_many :user_identifiers
has_many :identifier_schemes, through: :user_identifiers
has_and_belongs_to_many :notifications, dependent: :destroy,
join_table: 'notification_acknowledgements'
# ===============
# = Validations =
# ===============
validates :active, inclusion: { in: BOOLEAN_VALUES,
message: INCLUSION_MESSAGE }
# ==========
# = Scopes =
# ==========
default_scope { includes(:org, :perms) }
# Retrieves all of the org_admins for the specified org
scope :org_admins, -> (org_id) {
joins(:perms).where("users.org_id = ? AND perms.name IN (?) AND users.active = ?", org_id,
['grant_permissions', 'modify_templates', 'modify_guidance', 'change_org_details'], true)
}
scope :search, -> (term) {
search_pattern = "%#{term}%"
# MySQL does not support standard string concatenation with ||, and the concat_ws
# and concat functions do not exist for SQLite, so we have to use this
# conditional
if ActiveRecord::Base.connection.adapter_name == "Mysql2"
where("concat_ws(' ', firstname, surname) LIKE ? OR email LIKE ?", search_pattern, search_pattern)
else
where("firstname || ' ' || surname LIKE ? OR email LIKE ?", search_pattern, search_pattern)
end
}
# =============
# = Callbacks =
# =============
after_update :when_org_changes
# =================
# = Class methods =
# =================
##
# Load the user based on the scheme and id provided by the Omniauth call
def self.from_omniauth(auth)
joins(user_identifiers: :identifier_scheme)
.where(user_identifiers: { identifier: auth.uid },
identifier_schemes: { name: auth.provider.downcase }).first
end
# ===========================
# = Public instance methods =
# ===========================
# This method uses Devise's built-in handling for inactive users
#
# Returns Boolean
def active_for_authentication?
super && active?
end
# EVALUATE CLASS AND INSTANCE METHODS BELOW
#
# What do they do? do they do it efficiently, and do we need them?
# Determines the locale set for the user or the organisation he/she belongs to
#
# Returns String
# Returns nil
def get_locale
if !self.language.nil?
return self.language.abbreviation
elsif !self.org.nil?
return self.org.get_locale
else
return nil
end
end
# Gives either the name of the user, or the email if name unspecified
#
# use_email - Use the email if there is no firstname or surname (default: true)
#
# Returns String
def name(use_email = true)
if (firstname.blank? && surname.blank?) || use_email then
return email
else
name = "#{firstname} #{surname}"
return name.strip
end
end
# The user's identifier for the specified scheme name
#
# scheme - The identifier scheme name (e.g. ORCID)
#
# Returns UserIdentifier
def identifier_for(scheme)
user_identifiers.where(identifier_scheme: scheme).first
end
# Checks if the user is a super admin. If the user has any privilege which requires
# them to see the super admin page then they are a super admin.
#
# Returns Boolean
def can_super_admin?
return self.can_add_orgs? || self.can_grant_api_to_orgs? || self.can_change_org?
end
# Checks if the user is an organisation admin. If the user has any privilege which
# requires them to see the org-admin pages then they are an org admin.
#
# Returns Boolean
def can_org_admin?
return self.can_grant_permissions? || self.can_modify_guidance? ||
self.can_modify_templates? || self.can_modify_org_details?
end
# Can the User add new organisations?
#
# Returns Boolean
def can_add_orgs?
perms.include? Perm.add_orgs
end
# Can the User change their organisation affiliations?
#
# Returns Boolean
def can_change_org?
perms.include? Perm.change_affiliation
end
# Can the User grant their permissions to others?
#
# Returns Boolean
def can_grant_permissions?
perms.include? Perm.grant_permissions
end
# Can the User modify organisation templates?
#
# Returns Boolean
def can_modify_templates?
self.perms.include? Perm.modify_templates
end
# Can the User modify organisation guidance?
#
# Returns Boolean
def can_modify_guidance?
perms.include? Perm.modify_guidance
end
# Can the User use the API?
#
# Returns Boolean
def can_use_api?
perms.include? Perm.use_api
end
# Can the User modify their org's details?
#
# Returns Boolean
def can_modify_org_details?
perms.include? Perm.change_org_details
end
##
# Can the User grant the api to organisations?
#
# Returns Boolean
def can_grant_api_to_orgs?
perms.include? Perm.grant_api
end
# Removes the api_token from the user
#
# Returns nil
# Returns Boolean
def remove_token!
unless api_token.blank?
update_column(:api_token, "") unless new_record?
end
end
# Generates a new token for the user unless the user already has a token.
#
# Returns nil
# Returns Boolean
def keep_or_generate_token!
if api_token.nil? || api_token.empty?
self.api_token = loop do
random_token = SecureRandom.urlsafe_base64(nil, false)
break random_token unless User.exists?(api_token: random_token)
end
update_column(:api_token, api_token) unless new_record?
end
end
# The User's preferences for a given base key
#
# Returns Hash
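#
# Example (hypothetical keys): if the defaults for the given key are
#   { plans: { feedback_requested: true, new_comment: false } }
# and the user has saved only { plans: { feedback_requested: false } },
# the saved value is kept and the missing preference is filled from the
# defaults, returning { plans: { feedback_requested: false, new_comment: false } }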
def get_preferences(key)
defaults = Pref.default_settings[key.to_sym] || Pref.default_settings[key.to_s]
if pref.present?
existing = pref.settings[key.to_s].deep_symbolize_keys
# Check for new preferences
defaults.keys.each do |grp|
defaults[grp].keys.each do |pref, v|
# If the group isn't present in the saved values, add all of its preferences
existing[grp] = defaults[grp] if existing[grp].nil?
# If the preference isn't present in the saved values add the default
existing[grp][pref] = defaults[grp][pref] if existing[grp][pref].nil?
end
end
existing
else
defaults
end
end
# Override devise_invitable email title
def deliver_invitation(options = {})
super(options.merge(subject: _('A Data Management Plan in %{application_name} has been shared with you') % {application_name: Rails.configuration.branding[:application][:name]}))
end
# Case insensitive search over User model
#
# field - The name of the field being queried
# val - The String to search for, case insensitive. val is duck typed to check
# whether or not the downcase method exists.
#
# Returns ActiveRecord::Relation
# Raises ArgumentError
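#
# Example (illustrative):
#   User.where_case_insensitive(:email, "Admin@Example.ORG")
#   # matches users whose email is "admin@example.org" in any letter case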
def self.where_case_insensitive(field, val)
unless columns.map(&:name).include?(field.to_s)
raise ArgumentError, "Field #{field} is not present on users table"
end
User.where("LOWER(#{field}) = :value", value: val.to_s.downcase)
end
# Acknowledge a Notification
#
# notification - Notification to acknowledge
#
# Returns ActiveRecord::Associations::CollectionProxy
# Returns nil
def acknowledge(notification)
notifications << notification if notification.dismissable?
end
private
# ============================
# = Private instance methods =
# ============================
def when_org_changes
if org_id != org_id_was
unless can_change_org?
perms.delete_all
remove_token!
end
end
end
end
| 1 | 18,000 | This is much cleaner. makes it easier to tell what happens on a save. | DMPRoadmap-roadmap | rb |
@@ -212,6 +212,8 @@ func refineFiltersOperator(filters []datastore.ListFilter) ([]datastore.ListFilt
filter.Operator = "IN"
case datastore.OperatorNotIn:
filter.Operator = "NOT IN"
+ case datastore.OperatorContains:
+ // FIXME: Convert contains operator into one dedicated to MySQL
case datastore.OperatorNotEqual,
datastore.OperatorGreaterThan,
datastore.OperatorGreaterThanOrEqual, | 1 | // Copyright 2021 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"strings"
"github.com/pipe-cd/pipe/pkg/datastore"
)
func buildGetQuery(table string) string {
return fmt.Sprintf("SELECT Data FROM %s WHERE Id = UUID_TO_BIN(?,true)", table)
}
func buildUpdateQuery(table string) string {
return fmt.Sprintf("UPDATE %s SET Data = ? WHERE Id = UUID_TO_BIN(?,true)", table)
}
func buildPutQuery(table string) string {
return fmt.Sprintf("INSERT INTO %s (Id, Data) VALUE (UUID_TO_BIN(?,true), ?) ON DUPLICATE KEY UPDATE Data = ?", table)
}
func buildCreateQuery(table string) string {
return fmt.Sprintf("INSERT INTO %s (Id, Data) VALUE (UUID_TO_BIN(?,true), ?)", table)
}
func buildFindQuery(table string, ops datastore.ListOptions) (string, error) {
filters, err := refineFiltersOperator(refineFiltersField(ops.Filters))
if err != nil {
return "", err
}
orderByClause, err := buildOrderByClause(refineOrdersField(ops.Orders))
if err != nil {
return "", err
}
rawQuery := fmt.Sprintf(
"SELECT Data FROM %s %s %s %s %s",
table,
buildWhereClause(filters),
buildPaginationCondition(ops),
orderByClause,
buildLimitClause(ops.Limit),
)
return strings.Join(strings.Fields(rawQuery), " "), nil
}
func buildWhereClause(filters []datastore.ListFilter) string {
if len(filters) == 0 {
return ""
}
conds := make([]string, len(filters))
for i, filter := range filters {
switch filter.Operator {
case "IN", "NOT IN":
// Build a (?,...) placeholder string containing as many `?` as there are elements in filter.Value
valLength := reflect.ValueOf(filter.Value).Len()
conds[i] = fmt.Sprintf("%s %s (?%s)", filter.Field, filter.Operator, strings.Repeat(",?", valLength-1))
default:
conds[i] = fmt.Sprintf("%s %s ?", filter.Field, filter.Operator)
}
}
return fmt.Sprintf("WHERE %s", strings.Join(conds[:], " AND "))
}
func buildPaginationCondition(opts datastore.ListOptions) string {
// Skip on no cursor.
if len(opts.Cursor) == 0 {
return ""
}
// Build outer set condition. The outer set condition should be
// in format:
// X < Vx AND Y < Vy AND ...
// where X, Y, etc. are the ordering fields other than the Id field.
outerSetConds := make([]string, len(opts.Orders)-1)
for i, o := range opts.Orders {
if o.Field == "Id" {
continue
}
outerSetConds[i] = fmt.Sprintf("%s %s ?", o.Field, makeCompareOperatorForOuterSet(o.Direction))
}
// Build sub set condition. The sub set condition should be
// in format:
// X = Vx AND Y = Vy AND ... AND Id <= last_iterated_id
// with last_iterated_id from the given cursor.
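// For illustration (assuming ordering by UpdatedAt DESC, Id ASC and no filters),
// the whole pagination condition becomes:
//   WHERE UpdatedAt <= ? AND NOT (UpdatedAt = ? AND Id <= UUID_TO_BIN(?,true))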
subSetConds := make([]string, len(opts.Orders))
for i, o := range opts.Orders {
if o.Field == "Id" {
subSetConds[i] = fmt.Sprintf("%s %s UUID_TO_BIN(?,true)", o.Field, makeCompareOperatorForSubSet(o.Direction))
} else {
subSetConds[i] = fmt.Sprintf("%s = ?", o.Field)
}
}
// If there are no filters, the pagination condition is the only WHERE condition.
if len(opts.Filters) == 0 {
return fmt.Sprintf("WHERE %s AND NOT (%s)", strings.Join(outerSetConds, " AND "), strings.Join(subSetConds, " AND "))
}
return fmt.Sprintf("AND %s AND NOT (%s)", strings.Join(outerSetConds, " AND "), strings.Join(subSetConds, " AND "))
}
func makeCompareOperatorForOuterSet(direction datastore.OrderDirection) string {
if direction == datastore.Asc {
return ">="
}
return "<="
}
func makeCompareOperatorForSubSet(direction datastore.OrderDirection) string {
if direction == datastore.Asc {
return "<="
}
return ">="
}
func buildOrderByClause(orders []datastore.Order) (string, error) {
if len(orders) == 0 {
return "", nil
}
conds := make([]string, len(orders))
hasIDFieldInOrdering := false
for i, ord := range orders {
if ord.Field == "Id" {
hasIDFieldInOrdering = true
}
conds[i] = fmt.Sprintf("%s %s", ord.Field, toMySQLDirection(ord.Direction))
}
if !hasIDFieldInOrdering {
return "", fmt.Errorf("id field is required as ordering field")
}
return fmt.Sprintf("ORDER BY %s", strings.Join(conds[:], ", ")), nil
}
func buildLimitClause(limit int) string {
var clause string
if limit > 0 {
clause = fmt.Sprintf("LIMIT %d ", limit)
}
return clause
}
func toMySQLDirection(d datastore.OrderDirection) string {
switch d {
case datastore.Asc:
return "ASC"
case datastore.Desc:
return "DESC"
default:
return ""
}
}
func refineOrdersField(orders []datastore.Order) []datastore.Order {
out := make([]datastore.Order, len(orders))
for i, order := range orders {
switch order.Field {
case "SyncState.Status":
order.Field = "SyncState_Status"
default:
break
}
out[i] = order
}
return out
}
func refineFiltersField(filters []datastore.ListFilter) []datastore.ListFilter {
out := make([]datastore.ListFilter, len(filters))
for i, filter := range filters {
switch filter.Field {
case "SyncState.Status":
filter.Field = "SyncState_Status"
default:
break
}
out[i] = filter
}
return out
}
// refineFiltersOperator converts operators unified within this project into one dedicated to MySQL.
func refineFiltersOperator(filters []datastore.ListFilter) ([]datastore.ListFilter, error) {
out := make([]datastore.ListFilter, len(filters))
for i, filter := range filters {
switch filter.Operator {
case datastore.OperatorEqual:
filter.Operator = "="
case datastore.OperatorIn:
filter.Operator = "IN"
case datastore.OperatorNotIn:
filter.Operator = "NOT IN"
case datastore.OperatorNotEqual,
datastore.OperatorGreaterThan,
datastore.OperatorGreaterThanOrEqual,
datastore.OperatorLessThan,
datastore.OperatorLessThanOrEqual:
break
default:
return nil, fmt.Errorf("unsupported operator %s", filter.Operator)
}
out[i] = filter
}
return out, nil
}
// refineFiltersValue destructures all slice/array type values and makes a flat array of all element values.
func refineFiltersValue(filters []datastore.ListFilter) []interface{} {
var filtersVals []interface{}
for _, filter := range filters {
fv := reflect.ValueOf(filter.Value)
switch fv.Kind() {
case reflect.Slice, reflect.Array:
for j := 0; j < fv.Len(); j++ {
filtersVals = append(filtersVals, fv.Index(j).Interface())
}
default:
filtersVals = append(filtersVals, filter.Value)
}
}
return filtersVals
}
// makePaginationCursorValues builds an array of element values used in the pagination condition check.
func makePaginationCursorValues(opts datastore.ListOptions) ([]interface{}, error) {
// Skip pagination when the cursor is empty.
if len(opts.Cursor) == 0 {
return nil, nil
}
// Decode the last object of the previous page, stored as opts.Cursor, from base64 to a string.
data, err := base64.StdEncoding.DecodeString(opts.Cursor)
if err != nil {
return nil, err
}
// Unmarshal the cursor data string into map[string]interface{} format for further processing.
obj := make(map[string]interface{})
if err := json.Unmarshal(data, &obj); err != nil {
return nil, err
}
// The cursorVals contains values used for pagination condition.
// For each field except Id, the value is duplicated because it is used in both the outer set and the sub set conditions.
// The Id field value should be one, and it's the last value in this list.
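// For illustration, with ordering fields (UpdatedAt, Id) the result is
// [cursor.UpdatedAt, cursor.UpdatedAt, cursor.Id].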
cursorVals := make([]interface{}, 0, 2*len(opts.Orders)-1)
for _, o := range opts.Orders {
// Skip the Id field value here; it is added last.
if o.Field == "Id" {
continue
}
val, ok := obj[o.Field]
if !ok {
return nil, fmt.Errorf("cursor does not contain values that match to ordering field %s", o.Field)
}
cursorVals = append(cursorVals, val)
}
// Duplicate all values, preserving the order in which they were added.
cursorVals = append(cursorVals, cursorVals...)
// Add Id value at last.
id, ok := obj["Id"]
if !ok {
return nil, fmt.Errorf("cursor does not contain required value Id")
}
cursorVals = append(cursorVals, id)
return cursorVals, nil
}
| 1 | 16,921 | Could you add return error unsupported operator here | pipe-cd-pipe | go |
@@ -6,7 +6,8 @@
package javaslang.collection;
import javaslang.*;
-import javaslang.collection.Stream.*;
+import javaslang.collection.Stream.Cons;
+import javaslang.collection.Stream.Empty;
import javaslang.collection.StreamModule.*;
import javaslang.control.Option;
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.collection.Stream.*;
import javaslang.collection.StreamModule.*;
import javaslang.control.Option;
import java.io.*;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
/**
* An immutable {@code Stream} is a lazy sequence of elements which may be infinitely long.
* Its immutability makes it suitable for concurrent programming.
* <p>
* A {@code Stream} is composed of a {@code head} element and a lazily evaluated {@code tail} {@code Stream}.
* <p>
* There are two implementations of the {@code Stream} interface:
* <ul>
* <li>{@link Empty}, which represents the empty {@code Stream}.</li>
* <li>{@link Cons}, which represents a {@code Stream} containing one or more elements.</li>
* </ul>
* Methods to obtain a {@code Stream}:
* <pre>
* <code>
* // factory methods
* Stream.empty() // = Stream.of() = Nil.instance()
* Stream.of(x) // = new Cons<>(x, Nil.instance())
* Stream.of(Object...) // e.g. Stream.of(1, 2, 3)
* Stream.ofAll(Iterable) // e.g. Stream.ofAll(List.of(1, 2, 3)) = 1, 2, 3
* Stream.ofAll(<primitive array>) // e.g. Stream.ofAll(new int[] {1, 2, 3}) = 1, 2, 3
*
* // int sequences
* Stream.from(0) // = 0, 1, 2, 3, ...
* Stream.range(0, 3) // = 0, 1, 2
* Stream.rangeClosed(0, 3) // = 0, 1, 2, 3
*
* // generators
* Stream.cons(Object, Supplier) // e.g. Stream.cons(current, () -> next(current));
* Stream.continually(Supplier) // e.g. Stream.continually(Math::random);
* Stream.iterate(Object, Function)// e.g. Stream.iterate(1, i -> i * 2);
* </code>
* </pre>
*
* Factory method applications:
*
* <pre>
* <code>
* Stream<Integer> s1 = Stream.of(1);
* Stream<Integer> s2 = Stream.of(1, 2, 3);
* // = Stream.of(new Integer[] {1, 2, 3});
*
* Stream<int[]> s3 = Stream.ofAll(new int[] {1, 2, 3});
* Stream<List<Integer>> s4 = Stream.ofAll(List.of(1, 2, 3));
*
* Stream<Integer> s5 = Stream.ofAll(new int[] {1, 2, 3});
* Stream<Integer> s6 = Stream.ofAll(List.of(1, 2, 3));
*
* // cuckoo's egg
* Stream<Integer[]> s7 = Stream.<Integer[]> of(new Integer[] {1, 2, 3});
* </code>
* </pre>
*
* Example: Generating prime numbers
*
* <pre>
* <code>
* // = Stream(2L, 3L, 5L, 7L, ...)
* Stream.iterate(2L, PrimeNumbers::nextPrimeFrom)
*
* // helpers
*
* static long nextPrimeFrom(long num) {
* return Stream.from(num + 1).find(PrimeNumbers::isPrime).get();
* }
*
* static boolean isPrime(long num) {
* return !Stream.rangeClosed(2L, (long) Math.sqrt(num)).exists(d -> num % d == 0);
* }
* </code>
* </pre>
*
* See Okasaki, Chris: <em>Purely Functional Data Structures</em> (p. 34 ff.). Cambridge, 2003.
*
* @param <T> component type of this Stream
* @author Daniel Dietrich, Jörgen Andersson, Ruslan Sennov
* @since 1.1.0
*/
public interface Stream<T> extends Kind1<Stream<?>, T>, LinearSeq<T> {
long serialVersionUID = 1L;
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.Stream}.
*
* @param <T> Component type of the Stream.
* @return A javaslang.collection.Stream Collector.
*/
static <T> Collector<T, ArrayList<T>, Stream<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Stream<T>> finisher = Stream::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns an infinitely long Stream of {@code int} values starting from {@code from}.
* <p>
* The {@code Stream} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
*
* @param value a start int value
* @return a new Stream of int values starting from {@code from}
*/
static Stream<Integer> from(int value) {
return Stream.ofAll(Iterator.from(value));
}
/**
* Returns an infinitely long Stream of {@code int} values starting from {@code value} and spaced by {@code step}.
* <p>
* The {@code Stream} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
*
* @param value a start int value
* @param step the step by which to advance on each next value
* @return a new {@code Stream} of int values starting from {@code from}
*/
static Stream<Integer> from(int value, int step) {
return Stream.ofAll(Iterator.from(value, step));
}
/**
* Returns an infinitely long Stream of {@code long} values starting from {@code from}.
* <p>
* The {@code Stream} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
*
* @param value a start long value
* @return a new Stream of long values starting from {@code from}
*/
static Stream<Long> from(long value) {
return Stream.ofAll(Iterator.from(value));
}
/**
* Returns an infinitely long Stream of {@code long} values starting from {@code value} and spaced by {@code step}.
* <p>
* The {@code Stream} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
*
* @param value a start long value
* @param step the step by which to advance on each next value
* @return a new {@code Stream} of long values starting from {@code from}
*/
static Stream<Long> from(long value, long step) {
return Stream.ofAll(Iterator.from(value, step));
}
/**
* Generates a (theoretically) infinitely long Stream using a value Supplier.
*
* @param supplier A Supplier of Stream values
* @param <T> value type
* @return A new Stream
*/
static <T> Stream<T> continually(Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return Stream.ofAll(Iterator.continually(supplier));
}
/**
* Generates a (theoretically) infinitely long Stream using a function to calculate the next value
* based on the previous.
*
* @param seed The first value in the Stream
* @param f A function to calculate the next value based on the previous
* @param <T> value type
* @return A new Stream
*/
static <T> Stream<T> iterate(T seed, Function<? super T, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Stream.ofAll(Iterator.iterate(seed, f));
}
/**
* Constructs a Stream of a head element and a tail supplier.
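* <p>
* For example (illustrative): {@code Stream.cons(1, () -> Stream.cons(2, Stream::empty))} produces {@code Stream(1, 2)}.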
*
* @param head The head element of the Stream
* @param tailSupplier A supplier of the tail values. To end the stream, return {@link Stream#empty}.
* @param <T> value type
* @return A new Stream
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> cons(T head, Supplier<? extends Stream<? extends T>> tailSupplier) {
Objects.requireNonNull(tailSupplier, "tailSupplier is null");
return new ConsImpl<>(head, (Supplier<Stream<T>>) tailSupplier);
}
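// Illustrative sketch (hypothetical helper, not part of the original API): the head is given
// eagerly while the tail supplier is only evaluated on demand; the result is Stream(1, 2, 3).
static Stream<Integer> consUsageExample() {
return Stream.cons(1, () -> Stream.of(2, 3));
}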
/**
* Returns the single instance of Nil. Convenience method for {@code Nil.instance()}.
* <p>
* Note: this method intentionally returns type {@code Stream} and not {@code Nil}. This comes in handy when folding.
* If you explicitly need type {@code Nil} use {@linkplain Empty#instance()}.
*
* @param <T> Component type of Nil, determined by type inference in the particular context.
* @return The empty Stream.
*/
static <T> Stream<T> empty() {
return Empty.instance();
}
/**
* Narrows a widened {@code Stream<? extends T>} to {@code Stream<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param stream A {@code Stream}.
* @param <T> Component type of the {@code Stream}.
* @return the given {@code stream} instance as narrowed type {@code Stream<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> narrow(Stream<? extends T> stream) {
return (Stream<T>) stream;
}
/**
* Returns a singleton {@code Stream}, i.e. a {@code Stream} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new Stream instance containing the given element
*/
static <T> Stream<T> of(T element) {
return cons(element, Empty::instance);
}
/**
* Creates a Stream of the given elements.
*
* <pre><code> Stream.of(1, 2, 3, 4)
* = Nil.instance().prepend(4).prepend(3).prepend(2).prepend(1)
* = new Cons(1, new Cons(2, new Cons(3, new Cons(4, Nil.instance()))))</code></pre>
*
* @param <T> Component type of the Stream.
* @param elements Zero or more elements.
* @return A Stream containing the given elements in the same order.
*/
@SafeVarargs
static <T> Stream<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(new Iterator<T>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public T next() {
return elements[i++];
}
});
}
/**
* Returns a Stream containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the Stream
* @param n The number of elements in the Stream
* @param f The Function computing element values
* @return A Stream consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
static <T> Stream<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Stream.ofAll(Collections.tabulate(n, f));
}
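// Illustrative sketch (hypothetical helper, not part of the original API): tabulating squares,
// e.g. tabulateUsageExample(4) evaluates to Stream(0, 1, 4, 9).
static Stream<Integer> tabulateUsageExample(int n) {
return Stream.tabulate(n, i -> i * i);
}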
/**
* Returns a Stream containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Stream
* @param n The number of elements in the Stream
* @param s The Supplier computing element values
* @return A Stream of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
static <T> Stream<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return Stream.ofAll(Collections.fill(n, s));
}
/**
* Creates a Stream of the given elements.
*
* @param <T> Component type of the Stream.
* @param elements An Iterable of elements.
* @return A Stream containing the given elements in the same order.
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof Stream) {
return (Stream<T>) elements;
} else {
return StreamFactory.create(elements.iterator());
}
}
/**
* Creates a Stream based on the elements of a boolean array.
*
* @param array a boolean array
* @return A new Stream of Boolean values
*/
static Stream<Boolean> ofAll(boolean[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a byte array.
*
* @param array a byte array
* @return A new Stream of Byte values
*/
static Stream<Byte> ofAll(byte[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a char array.
*
* @param array a char array
* @return A new Stream of Character values
*/
static Stream<Character> ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a double array.
*
* @param array a double array
* @return A new Stream of Double values
*/
static Stream<Double> ofAll(double[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a float array.
*
* @param array a float array
* @return A new Stream of Float values
*/
static Stream<Float> ofAll(float[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of an int array.
*
* @param array an int array
* @return A new Stream of Integer values
*/
static Stream<Integer> ofAll(int[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a long array.
*
* @param array a long array
* @return A new Stream of Long values
*/
static Stream<Long> ofAll(long[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a short array.
*
* @param array a short array
* @return A new Stream of Short values
*/
static Stream<Short> ofAll(short[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
static Stream<Character> range(char from, char toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
static Stream<Character> rangeBy(char from, char toExclusive, int step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
static Stream<Double> rangeBy(double from, double toExclusive, double step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.range(0, 0) // = Stream()
* Stream.range(2, 0) // = Stream()
* Stream.range(-2, 2) // = Stream(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or {@code Nil} if {@code from >= toExclusive}
*/
static Stream<Integer> range(int from, int toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeBy(1, 3, 1) // = Stream(1, 2)
* Stream.rangeBy(1, 4, 2) // = Stream(1, 3)
* Stream.rangeBy(4, 1, -2) // = Stream(4, 2)
* Stream.rangeBy(4, 1, 2) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from >= toExclusive} and {@code step > 0} or<br>
* {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Integer> rangeBy(int from, int toExclusive, int step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.range(0L, 0L) // = Stream()
* Stream.range(2L, 0L) // = Stream()
* Stream.range(-2L, 2L) // = Stream(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or {@code Nil} if {@code from >= toExclusive}
*/
static Stream<Long> range(long from, long toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeBy(1L, 3L, 1L) // = Stream(1L, 2L)
* Stream.rangeBy(1L, 4L, 2L) // = Stream(1L, 3L)
* Stream.rangeBy(4L, 1L, -2L) // = Stream(4L, 2L)
* Stream.rangeBy(4L, 1L, 2L) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or {@code Nil} if<br>
* {@code from >= toExclusive} and {@code step > 0} or<br>
* {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Long> rangeBy(long from, long toExclusive, long step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
static Stream<Character> rangeClosed(char from, char toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
static Stream<Character> rangeClosedBy(char from, char toInclusive, int step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
static Stream<Double> rangeClosedBy(double from, double toInclusive, double step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosed(0, 0) // = Stream(0)
* Stream.rangeClosed(2, 0) // = Stream()
* Stream.rangeClosed(-2, 2) // = Stream(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or {@code Nil} if {@code from > toInclusive}
*/
static Stream<Integer> rangeClosed(int from, int toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosedBy(1, 3, 1) // = Stream(1, 2, 3)
* Stream.rangeClosedBy(1, 4, 2) // = Stream(1, 3)
* Stream.rangeClosedBy(4, 1, -2) // = Stream(4, 2)
* Stream.rangeClosedBy(4, 1, 2) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosed(0L, 0L) // = Stream(0L)
* Stream.rangeClosed(2L, 0L) // = Stream()
* Stream.rangeClosed(-2L, 2L) // = Stream(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or {@code Nil} if {@code from > toInclusive}
*/
static Stream<Long> rangeClosed(long from, long toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosedBy(1L, 3L, 1L) // = Stream(1L, 2L, 3L)
* Stream.rangeClosedBy(1L, 4L, 2L) // = Stream(1L, 3L)
* Stream.rangeClosedBy(4L, 1L, -2L) // = Stream(4L, 2L)
* Stream.rangeClosedBy(4L, 1L, 2L) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of long values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Long> rangeClosedBy(long from, long toInclusive, long step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Repeats an element infinitely often.
*
* @param t An element
* @param <T> Element type
* @return A new Stream containing infinite {@code t}'s.
*/
static <T> Stream<T> continually(T t) {
return Stream.ofAll(Iterator.continually(t));
}
@Override
default Stream<T> append(T element) {
return isEmpty() ? Stream.of(element) : new AppendElements<>(head(), Queue.of(element), this::tail);
}
@Override
default Stream<T> appendAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(isEmpty() ? elements : Iterator.concat(this, elements));
}
/**
* Appends itself to the end of stream with {@code mapper} function.
* <p>
* <strong>Example:</strong>
* <p>
* Well known Scala code for Fibonacci infinite sequence
* <pre>
* <code>
* val fibs:Stream[Int] = 0 #:: 1 #:: (fibs zip fibs.tail).map{ t => t._1 + t._2 }
* </code>
* </pre>
* can be transformed to
* <pre>
* <code>
* Stream.of(0, 1).appendSelf(self -> self.zip(self.tail()).map(t -> t._1 + t._2));
* </code>
* </pre>
*
* @param mapper a mapper
* @return a new Stream
*/
default Stream<T> appendSelf(Function<? super Stream<T>, ? extends Stream<T>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? this : new AppendSelf<>((Cons<T>) this, mapper).stream();
}
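// Illustrative sketch (hypothetical helper, not part of the original API) of the Fibonacci example
// from the Javadoc above: fibonacciUsageExample().take(8) evaluates to Stream(0, 1, 1, 2, 3, 5, 8, 13).
static Stream<Integer> fibonacciUsageExample() {
return Stream.of(0, 1).appendSelf(self -> self.zip(self.tail()).map(t -> t._1 + t._2));
}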
@Override
default Stream<Stream<T>> combinations() {
return Stream.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
@Override
default Stream<Stream<T>> combinations(int k) {
return Combinations.apply(this, Math.max(k, 0));
}
@Override
default Iterator<Stream<T>> crossProduct(int power) {
return Collections.crossProduct(Stream.empty(), this, power);
}
/**
* Repeats the elements of this Stream infinitely.
* <p>
* Example:
* <pre>
* <code>
* // = 1, 2, 3, 1, 2, 3, 1, 2, 3, ...
* Stream.of(1, 2, 3).cycle();
* </code>
* </pre>
*
* @return A new Stream containing the elements of this Stream cycled.
*/
default Stream<T> cycle() {
return isEmpty() ? this : appendSelf(Function.identity());
}
/**
* Repeats the elements of this Stream {@code count} times.
* <p>
* Example:
* <pre>
* <code>
* // = empty
* Stream.of(1, 2, 3).cycle(0);
*
* // = 1, 2, 3
* Stream.of(1, 2, 3).cycle(1);
*
* // = 1, 2, 3, 1, 2, 3, 1, 2, 3
* Stream.of(1, 2, 3).cycle(3);
* </code>
* </pre>
*
* @param count the number of cycles to be performed
* @return A new Stream containing the elements of this Stream cycled {@code count} times.
*/
default Stream<T> cycle(int count) {
if (count <= 0 || isEmpty()) {
return empty();
} else {
final Stream<T> self = this;
return Stream.ofAll(new Iterator<T>() {
Stream<T> stream = self;
int i = count - 1;
@Override
public boolean hasNext() {
return !stream.isEmpty() || i > 0;
}
@Override
public T next() {
if (stream.isEmpty()) {
i--;
stream = self;
}
final T result = stream.head();
stream = stream.tail();
return result;
}
});
}
}
@Override
default Stream<T> distinct() {
return distinctBy(Function.identity());
}
@Override
default Stream<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<T> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
default <U> Stream<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
default Stream<T> drop(long n) {
Stream<T> stream = this;
while (n-- > 0 && !stream.isEmpty()) {
stream = stream.tail();
}
return stream;
}
@Override
default Stream<T> dropRight(long n) {
if (n <= 0) {
return this;
} else {
return DropRight.apply(take(n).toList(), List.empty(), drop(n));
}
}
@Override
default Stream<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
default Stream<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
Stream<T> stream = this;
while (!stream.isEmpty() && predicate.test(stream.head())) {
stream = stream.tail();
}
return stream;
}
@Override
default Stream<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
Stream<T> stream = this;
while (!stream.isEmpty() && !predicate.test(stream.head())) {
stream = stream.tail();
}
final Stream<T> finalStream = stream;
return stream.isEmpty() ? Stream.empty()
: cons(stream.head(), () -> finalStream.tail().filter(predicate));
}
@Override
default <U> Stream<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? Empty.instance() : Stream.ofAll(new Iterator<U>() {
final Iterator<? extends T> inputs = Stream.this.iterator();
java.util.Iterator<? extends U> current = java.util.Collections.emptyIterator();
@Override
public boolean hasNext() {
boolean currentHasNext;
while (!(currentHasNext = current.hasNext()) && inputs.hasNext()) {
current = mapper.apply(inputs.next()).iterator();
}
return currentHasNext;
}
@Override
public U next() {
return current.next();
}
});
}
@Override
default T get(int index) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
Stream<T> stream = this;
for (int i = index - 1; i >= 0; i--) {
stream = stream.tail();
if (stream.isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on Stream of size " + (index - i));
}
}
return stream.head();
}
@Override
default <C> Map<C, Stream<T>> groupBy(Function<? super T, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return iterator().groupBy(classifier).map((c, it) -> Tuple.of(c, Stream.ofAll(it)));
}
@Override
default Iterator<Stream<T>> grouped(long size) {
return sliding(size, size);
}
@Override
default boolean hasDefiniteSize() {
return false;
}
@Override
default int indexOf(T element, int from) {
int index = 0;
for (Stream<T> stream = this; !stream.isEmpty(); stream = stream.tail(), index++) {
if (index >= from && Objects.equals(stream.head(), element)) {
return index;
}
}
return -1;
}
@Override
default Stream<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty stream");
} else {
final Stream<T> tail = tail();
if (tail.isEmpty()) {
return Empty.instance();
} else {
return cons(head(), tail::init);
}
}
}
@Override
default Option<Stream<T>> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
@Override
default Stream<T> insert(int index, T element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
} else if (index == 0) {
return cons(element, () -> this);
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on Nil");
} else {
return cons(head(), () -> tail().insert(index - 1, element));
}
}
@Override
default Stream<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
} else if (index == 0) {
return isEmpty() ? Stream.ofAll(elements) : Stream.ofAll(elements).appendAll(this);
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on Nil");
} else {
return cons(head(), () -> tail().insertAll(index - 1, elements));
}
}
@Override
default Stream<T> intersperse(T element) {
if (isEmpty()) {
return this;
} else {
return cons(head(), () -> {
final Stream<T> tail = tail();
return tail.isEmpty() ? tail : cons(element, () -> tail.intersperse(element));
});
}
}
@Override
default boolean isTraversableAgain() {
return true;
}
@Override
default int lastIndexOf(T element, int end) {
int result = -1, index = 0;
for (Stream<T> stream = this; index <= end && !stream.isEmpty(); stream = stream.tail(), index++) {
if (Objects.equals(stream.head(), element)) {
result = index;
}
}
return result;
}
@Override
default int length() {
return foldLeft(0, (n, ignored) -> n + 1);
}
@Override
default <U> Stream<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Empty.instance();
} else {
return cons(mapper.apply(head()), () -> tail().map(mapper));
}
}
@Override
default Stream<T> padTo(int length, T element) {
if (length <= 0) {
return this;
} else if (isEmpty()) {
return Stream.continually(element).take(length);
} else {
return cons(head(), () -> tail().padTo(length - 1, element));
}
}
@Override
default Stream<T> leftPadTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return Stream.continually(element).take(length - actualLength).appendAll(this);
}
}
@Override
default Stream<T> patch(int from, Iterable<? extends T> that, int replaced) {
from = from < 0 ? 0 : from;
replaced = replaced < 0 ? 0 : replaced;
Stream<T> result = take(from).appendAll(that);
from += replaced;
result = result.appendAll(drop(from));
return result;
}
@Override
default Tuple2<Stream<T>, Stream<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(filter(predicate), filter(predicate.negate()));
}
@Override
default Stream<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (isEmpty()) {
return this;
} else {
final T head = head();
action.accept(head);
return cons(head, () -> tail().peek(action));
}
}
@Override
default Stream<Stream<T>> permutations() {
if (isEmpty()) {
return Empty.instance();
} else {
final Stream<T> tail = tail();
if (tail.isEmpty()) {
return Stream.of(this);
} else {
final Stream<Stream<T>> zero = Empty.instance();
return distinct().foldLeft(zero, (xs, x) -> {
final Function<Stream<T>, Stream<T>> prepend = l -> l.prepend(x);
return xs.appendAll(remove(x).permutations().map(prepend));
});
}
}
}
@Override
default Stream<T> prepend(T element) {
return cons(element, () -> this);
}
@Override
default Stream<T> prependAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(elements).appendAll(this);
}
@Override
default Stream<T> remove(T element) {
if (isEmpty()) {
return this;
} else {
final T head = head();
return Objects.equals(head, element) ? tail() : cons(head, () -> tail().remove(element));
}
}
@Override
default Stream<T> removeFirst(Predicate<T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return this;
} else {
final T head = head();
return predicate.test(head) ? tail() : cons(head, () -> tail().removeFirst(predicate));
}
}
@Override
default Stream<T> removeLast(Predicate<T> predicate) {
return isEmpty() ? this : reverse().removeFirst(predicate).reverse();
}
@Override
default Stream<T> removeAt(int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("removeAt(" + index + ")");
} else if (index == 0) {
return tail();
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("removeAt() on Nil");
} else {
return cons(head(), () -> tail().removeAt(index - 1));
}
}
@Override
default Stream<T> removeAll(T element) {
return Collections.removeAll(this, element);
}
@Override
default Stream<T> removeAll(Iterable<? extends T> elements) {
return Collections.removeAll(this, elements);
}
@Override
default Stream<T> replace(T currentElement, T newElement) {
if (isEmpty()) {
return this;
} else {
final T head = head();
if (Objects.equals(head, currentElement)) {
return cons(newElement, this::tail);
} else {
return cons(head, () -> tail().replace(currentElement, newElement));
}
}
}
@Override
default Stream<T> replaceAll(T currentElement, T newElement) {
if (isEmpty()) {
return this;
} else {
final T head = head();
final T newHead = Objects.equals(head, currentElement) ? newElement : head;
return cons(newHead, () -> tail().replaceAll(currentElement, newElement));
}
}
@Override
default Stream<T> retainAll(Iterable<? extends T> elements) {
return Collections.retainAll(this, elements);
}
@Override
default Stream<T> reverse() {
return isEmpty() ? this : foldLeft(Stream.empty(), Stream::prepend);
}
@Override
default Stream<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
default <U> Stream<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
// lazily streams the elements of an iterator
return Stream.ofAll(iterator().scanLeft(zero, operation));
}
// not lazy!
@Override
default <U> Stream<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanRight(this, zero, operation, Stream.empty(), Stream::prepend, Function.identity());
}
@Override
default Stream<T> slice(long beginIndex, long endIndex) {
if (beginIndex >= endIndex || isEmpty()) {
return empty();
} else {
final long lowerBound = Math.max(beginIndex, 0);
if (lowerBound == 0) {
return cons(head(), () -> tail().slice(0, endIndex - 1));
} else {
return tail().slice(lowerBound - 1, endIndex - 1);
}
}
}
@Override
default Iterator<Stream<T>> sliding(long size) {
return sliding(size, 1);
}
@Override
default Iterator<Stream<T>> sliding(long size, long step) {
return iterator().sliding(size, step).map(Stream::ofAll);
}
@Override
default Stream<T> sorted() {
return isEmpty() ? this : toJavaStream().sorted().collect(Stream.collector());
}
@Override
default Stream<T> sorted(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(Stream.collector());
}
@Override
default <U extends Comparable<? super U>> Stream<T> sortBy(Function<? super T, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
default <U> Stream<T> sortBy(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
final Function<? super T, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
@Override
default Tuple2<Stream<T>, Stream<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate), dropWhile(predicate));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAt(long n) {
return Tuple.of(take(n), drop(n));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAt(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate.negate()), dropWhile(predicate.negate()));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAtInclusive(Predicate<? super T> predicate) {
final Tuple2<Stream<T>, Stream<T>> split = splitAt(predicate);
if (split._2.isEmpty()) {
return split;
} else {
return Tuple.of(split._1.append(split._2.head()), split._2.tail());
}
}
@Override
default Spliterator<T> spliterator() {
// the focus of the Stream API is on random-access collections of *known size*
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
default String stringPrefix() {
return "Stream";
}
@Override
default Stream<T> subSequence(int beginIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ")");
}
Stream<T> result = this;
for (int i = 0; i < beginIndex; i++, result = result.tail()) {
if (result.isEmpty()) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ") on Stream of size " + i);
}
}
return result;
}
@Override
default Stream<T> subSequence(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ", " + endIndex + ")");
}
if (beginIndex == endIndex) {
return Empty.instance();
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("subSequence of Nil");
} else if (beginIndex == 0) {
return cons(head(), () -> tail().subSequence(0, endIndex - 1));
} else {
return tail().subSequence(beginIndex - 1, endIndex - 1);
}
}
@Override
Stream<T> tail();
@Override
default Option<Stream<T>> tailOption() {
return isEmpty() ? Option.none() : Option.some(tail());
}
@Override
default Stream<T> take(long n) {
if (n < 1 || isEmpty()) {
return Empty.instance();
} else if (n == 1) {
return cons(head(), Stream::empty);
} else {
return cons(head(), () -> tail().take(n - 1));
}
}
@Override
default Stream<T> takeRight(long n) {
Stream<T> right = this;
Stream<T> remaining = drop(n);
while (!remaining.isEmpty()) {
right = right.tail();
remaining = remaining.tail();
}
return right;
}
@Override
default Stream<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
default Stream<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Empty.instance();
} else {
final T head = head();
if (predicate.test(head)) {
return cons(head, () -> tail().takeWhile(predicate));
} else {
return Empty.instance();
}
}
}
/**
* Transforms this {@code Stream}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Stream<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
default <U> Stream<U> unit(Iterable<? extends U> iterable) {
return Stream.ofAll(iterable);
}
@Override
default <T1, T2> Tuple2<Stream<T1>, Stream<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Stream<Tuple2<? extends T1, ? extends T2>> stream = map(unzipper);
final Stream<T1> stream1 = stream.map(t -> t._1);
final Stream<T2> stream2 = stream.map(t -> t._2);
return Tuple.of(stream1, stream2);
}
@Override
default <T1, T2, T3> Tuple3<Stream<T1>, Stream<T2>, Stream<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Stream<Tuple3<? extends T1, ? extends T2, ? extends T3>> stream = map(unzipper);
final Stream<T1> stream1 = stream.map(t -> t._1);
final Stream<T2> stream2 = stream.map(t -> t._2);
final Stream<T3> stream3 = stream.map(t -> t._3);
return Tuple.of(stream1, stream2, stream3);
}
@Override
default Stream<T> update(int index, T element) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("update(" + index + ", e) on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("update(" + index + ", e)");
}
Stream<T> preceding = Empty.instance();
Stream<T> tail = this;
for (int i = index; i > 0; i--, tail = tail.tail()) {
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("update at " + index);
}
preceding = preceding.prepend(tail.head());
}
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("update at " + index);
}
// skip the current head element because it is replaced
return preceding.reverse().appendAll(tail.tail().prepend(element));
}
@Override
default <U> Stream<Tuple2<T, U>> zip(Iterable<? extends U> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
return Stream.ofAll(iterator().zip(iterable));
}
@Override
default <U> Stream<Tuple2<T, U>> zipAll(Iterable<? extends U> iterable, T thisElem, U thatElem) {
Objects.requireNonNull(iterable, "iterable is null");
return Stream.ofAll(iterator().zipAll(iterable, thisElem, thatElem));
}
@Override
default Stream<Tuple2<T, Long>> zipWithIndex() {
return Stream.ofAll(iterator().zipWithIndex());
}
/**
* Extends (continues) this {@code Stream} with a constantly repeated value.
*
* @param next value with which the stream should be extended
* @return new {@code Stream} composed from this stream extended with a Stream of provided value
*/
default Stream<T> extend(T next) {
return Stream.ofAll(this.appendAll(Stream.continually(next)));
}
/**
* Extends (continues) this {@code Stream} with values provided by a {@code Supplier}
*
* @param nextSupplier a supplier which will provide values for extending a stream
* @return new {@code Stream} composed from this stream extended with values provided by the supplier
*/
default Stream<T> extend(Supplier<? extends T> nextSupplier) {
Objects.requireNonNull(nextSupplier, "nextSupplier is null");
return Stream.ofAll(appendAll(Stream.continually(nextSupplier)));
}
/**
* Extends (continues) this {@code Stream} with a Stream of values created by consecutively
* applying the provided {@code Function}, starting from the last element of the original Stream.
*
* @param nextFunction a function which calculates the next value based on the previous value
* @return new {@code Stream} composed from this stream extended with values calculated by the provided function
*/
default Stream<T> extend(Function<? super T, ? extends T> nextFunction) {
Objects.requireNonNull(nextFunction, "nextFunction is null");
if (isEmpty()) {
return this;
} else {
final Stream<T> that = this;
return Stream.ofAll(new AbstractIterator<T>() {
Stream<T> stream = that;
T last = null;
@Override
protected T getNext() {
if (stream.isEmpty()) {
stream = Stream.iterate(nextFunction.apply(last), nextFunction);
}
last = stream.head();
stream = stream.tail();
return last;
}
@Override
public boolean hasNext() {
return true;
}
});
}
}
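// Illustrative sketch (hypothetical helper, not part of the original API): a finite Stream is
// continued from its last element, so the result below starts 1, 2, 3, 4, 5, 6, ...
static Stream<Integer> extendUsageExample() {
return Stream.of(1, 2, 3).extend(i -> i + 1);
}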
/**
* The empty Stream.
* <p>
* This is a singleton, i.e. not Cloneable.
*
* @param <T> Component type of the Stream.
* @since 1.1.0
*/
final class Empty<T> implements Stream<T>, Serializable {
private static final long serialVersionUID = 1L;
private static final Empty<?> INSTANCE = new Empty<>();
// hidden
private Empty() {
}
/**
* Returns the singleton empty Stream instance.
*
* @param <T> Component type of the Stream
* @return The empty Stream
*/
@SuppressWarnings("unchecked")
public static <T> Empty<T> instance() {
return (Empty<T>) INSTANCE;
}
@Override
public T head() {
throw new NoSuchElementException("head of empty stream");
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public Iterator<T> iterator() {
return Iterator.empty();
}
@Override
public Stream<T> tail() {
throw new UnsupportedOperationException("tail of empty stream");
}
@Override
public boolean equals(Object o) {
return o == this;
}
@Override
public int hashCode() {
return 1;
}
@Override
public String toString() {
return stringPrefix() + "()";
}
/**
* Instance control for object serialization.
*
* @return The singleton instance of Nil.
* @see java.io.Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
/**
* Non-empty {@code Stream}, consisting of a {@code head} and a {@code tail}.
*
* @param <T> Component type of the Stream.
* @since 1.1.0
*/
abstract class Cons<T> implements Stream<T> {
private static final long serialVersionUID = 1L;
final T head;
final Lazy<Stream<T>> tail;
Cons(T head, Supplier<Stream<T>> tail) {
Objects.requireNonNull(tail, "tail is null");
this.head = head;
this.tail = Lazy.of(tail);
}
@Override
public T head() {
return head;
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public Iterator<T> iterator() {
return new StreamIterator<>(this);
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof Stream) {
Stream<?> stream1 = this;
Stream<?> stream2 = (Stream<?>) o;
while (!stream1.isEmpty() && !stream2.isEmpty()) {
final boolean isEqual = Objects.equals(stream1.head(), stream2.head());
if (!isEqual) {
return false;
}
stream1 = stream1.tail();
stream2 = stream2.tail();
}
return stream1.isEmpty() && stream2.isEmpty();
} else {
return false;
}
}
@Override
public int hashCode() {
return Collections.hash(this);
}
@Override
public String toString() {
final StringBuilder builder = new StringBuilder(stringPrefix()).append("(");
Stream<T> stream = this;
while (stream != null && !stream.isEmpty()) {
final Cons<T> cons = (Cons<T>) stream;
builder.append(cons.head);
if (cons.tail.isEvaluated()) {
stream = stream.tail();
if (!stream.isEmpty()) {
builder.append(", ");
}
} else {
builder.append(", ?");
stream = null;
}
}
return builder.append(")").toString();
}
}
}
interface StreamModule {
final class ConsImpl<T> extends Cons<T> implements Serializable {
private static final long serialVersionUID = 1L;
ConsImpl(T head, Supplier<Stream<T>> tail) {
super(head, tail);
}
@Override
public Stream<T> tail() {
return tail.get();
}
private Object writeReplace() {
return new SerializationProxy<>(this);
}
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
}
final class AppendElements<T> extends Cons<T> implements Serializable {
private static final long serialVersionUID = 1L;
private final Queue<T> queue;
AppendElements(T head, Queue<T> queue, Supplier<Stream<T>> tail) {
super(head, tail);
this.queue = queue;
}
@Override
public Stream<T> append(T element) {
return new AppendElements<>(head, queue.append(element), tail);
}
@Override
public Stream<T> appendAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return isEmpty() ? Stream.ofAll(queue) : new AppendElements<>(head, queue.appendAll(elements), tail);
}
@Override
public Stream<T> tail() {
Stream<T> t = tail.get();
if (t.isEmpty()) {
return Stream.ofAll(queue);
} else {
if (t instanceof ConsImpl) {
ConsImpl<T> c = (ConsImpl<T>) t;
return new AppendElements<>(c.head(), queue, c.tail);
} else {
AppendElements<T> a = (AppendElements<T>) t;
return new AppendElements<>(a.head(), a.queue.appendAll(queue), a.tail);
}
}
}
private Object writeReplace() {
return new SerializationProxy<>(this);
}
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
}
/**
* A serialization proxy which, in this context, is used to deserialize immutable, linked Streams with final
* instance fields.
*
* @param <T> The component type of the underlying stream.
*/
// DEV NOTE: The serialization proxy pattern is not compatible with non-final, i.e. extendable,
// classes. Also, it may not be compatible with circular object graphs.
final class SerializationProxy<T> implements Serializable {
private static final long serialVersionUID = 1L;
// the instance to be serialized/deserialized
private transient Cons<T> stream;
/**
* Constructor for the case of serialization.
* <p>
* The constructor of a SerializationProxy takes an argument that concisely represents the logical state of
* an instance of the enclosing class.
*
* @param stream a Cons
*/
SerializationProxy(Cons<T> stream) {
this.stream = stream;
}
/**
* Write an object to a serialization stream.
*
* @param s An object serialization stream.
* @throws java.io.IOException If an error occurs writing to the stream.
*/
private void writeObject(ObjectOutputStream s) throws IOException {
s.defaultWriteObject();
s.writeInt(stream.length());
for (Stream<T> l = stream; !l.isEmpty(); l = l.tail()) {
s.writeObject(l.head());
}
}
/**
* Read an object from a deserialization stream.
*
* @param s An object deserialization stream.
* @throws ClassNotFoundException If the object's class read from the stream cannot be found.
* @throws InvalidObjectException If the stream contains no stream elements.
* @throws IOException If an error occurs reading from the stream.
*/
private void readObject(ObjectInputStream s) throws ClassNotFoundException, IOException {
s.defaultReadObject();
final int size = s.readInt();
if (size <= 0) {
throw new InvalidObjectException("No elements");
}
Stream<T> temp = Empty.instance();
for (int i = 0; i < size; i++) {
@SuppressWarnings("unchecked")
final T element = (T) s.readObject();
temp = temp.append(element);
}
// DEV-NOTE: Cons is deserialized
stream = (Cons<T>) temp;
}
/**
* {@code readResolve} method for the serialization proxy pattern.
* <p>
* Returns a logically equivalent instance of the enclosing class. The presence of this method causes the
* serialization system to translate the serialization proxy back into an instance of the enclosing class
* upon deserialization.
*
* @return A deserialized instance of the enclosing class.
*/
private Object readResolve() {
return stream;
}
}
final class AppendSelf<T> {
private final Cons<T> self;
AppendSelf(Cons<T> self, Function<? super Stream<T>, ? extends Stream<T>> mapper) {
this.self = appendAll(self, mapper);
}
private Cons<T> appendAll(Cons<T> stream, Function<? super Stream<T>, ? extends Stream<T>> mapper) {
return (Cons<T>) Stream.cons(stream.head(), () -> {
final Stream<T> tail = stream.tail();
return tail.isEmpty() ? mapper.apply(self) : appendAll((Cons<T>) tail, mapper);
});
}
Cons<T> stream() {
return self;
}
}
interface Combinations {
static <T> Stream<Stream<T>> apply(Stream<T> elements, int k) {
if (k == 0) {
return Stream.of(Stream.empty());
} else {
return elements.zipWithIndex().flatMap(
t -> apply(elements.drop(t._2 + 1), (k - 1)).map((Stream<T> c) -> c.prepend(t._1))
);
}
}
}
interface DropRight {
// works with infinite streams by buffering elements
static <T> Stream<T> apply(List<T> front, List<T> rear, Stream<T> remaining) {
if (remaining.isEmpty()) {
return remaining;
} else if (front.isEmpty()) {
return apply(rear.reverse(), List.empty(), remaining);
} else {
return Stream.cons(front.head(),
() -> apply(front.tail(), rear.prepend(remaining.head()), remaining.tail()));
}
}
}
interface StreamFactory {
static <T> Stream<T> create(java.util.Iterator<? extends T> iterator) {
return iterator.hasNext() ? Stream.cons(iterator.next(), () -> create(iterator)) : Empty.instance();
}
}
final class StreamIterator<T> extends AbstractIterator<T> {
private Supplier<Stream<T>> current;
StreamIterator(Cons<T> stream) {
this.current = () -> stream;
}
@Override
public boolean hasNext() {
return !current.get().isEmpty();
}
@Override
public T getNext() {
final Stream<T> stream = current.get();
// DEV-NOTE: we make the stream even more lazy because the next head must not be evaluated on hasNext()
current = stream::tail;
return stream.head();
}
}
}
| 1 | 8,370 | (Mhh, we seem to use different formatters - we should unify them. I like the wildcards) | vavr-io-vavr | java |
@@ -54,8 +54,11 @@ func AdminDescribeTaskQueue(c *cli.Context) {
ctx, cancel := newContext(c)
defer cancel()
request := &workflowservice.DescribeTaskQueueRequest{
- Namespace: namespace,
- TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue},
+ Namespace: namespace,
+ TaskQueue: &taskqueuepb.TaskQueue{
+ Name: taskQueue,
+ Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
+ },
TaskQueueType: tlType,
IncludeTaskQueueStatus: true,
} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cli
import (
"fmt"
"os"
"strconv"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
enumspb "go.temporal.io/api/enums/v1"
taskqueuepb "go.temporal.io/api/taskqueue/v1"
"go.temporal.io/api/workflowservice/v1"
"go.temporal.io/server/common/persistence"
)
// AdminDescribeTaskQueue displays poller and status information of a task queue.
func AdminDescribeTaskQueue(c *cli.Context) {
frontendClient := cFactory.FrontendClient(c)
namespace := getRequiredGlobalOption(c, FlagNamespace)
taskQueue := getRequiredOption(c, FlagTaskQueue)
tlTypeInt, err := stringToEnum(c.String(FlagTaskQueueType), enumspb.TaskQueueType_value)
if err != nil {
ErrorAndExit("Failed to parse TaskQueue Type", err)
}
tlType := enumspb.TaskQueueType(tlTypeInt)
if tlType == enumspb.TASK_QUEUE_TYPE_UNSPECIFIED {
ErrorAndExit("TaskQueue type Unspecified is currently not supported", nil)
}
ctx, cancel := newContext(c)
defer cancel()
request := &workflowservice.DescribeTaskQueueRequest{
Namespace: namespace,
TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue},
TaskQueueType: tlType,
IncludeTaskQueueStatus: true,
}
response, err := frontendClient.DescribeTaskQueue(ctx, request)
if err != nil {
ErrorAndExit("Operation DescribeTaskQueue failed.", err)
}
taskQueueStatus := response.GetTaskQueueStatus()
if taskQueueStatus == nil {
ErrorAndExit(colorMagenta("No taskqueue status information."), nil)
}
printTaskQueueStatus(taskQueueStatus)
fmt.Printf("\n")
pollers := response.Pollers
if len(pollers) == 0 {
ErrorAndExit(colorMagenta("No poller for taskqueue: "+taskQueue), nil)
}
printPollerInfo(pollers, tlType)
}
func printTaskQueueStatus(taskQueueStatus *taskqueuepb.TaskQueueStatus) {
taskIDBlock := taskQueueStatus.GetTaskIdBlock()
table := tablewriter.NewWriter(os.Stdout)
table.SetBorder(false)
table.SetColumnSeparator("|")
table.SetHeader([]string{"Read Level", "Ack Level", "Backlog", "Lease Start TaskId", "Lease End TaskId"})
table.SetHeaderLine(false)
table.SetHeaderColor(tableHeaderBlue, tableHeaderBlue, tableHeaderBlue, tableHeaderBlue, tableHeaderBlue)
table.Append([]string{strconv.FormatInt(taskQueueStatus.GetReadLevel(), 10),
strconv.FormatInt(taskQueueStatus.GetAckLevel(), 10),
strconv.FormatInt(taskQueueStatus.GetBacklogCountHint(), 10),
strconv.FormatInt(taskIDBlock.GetStartId(), 10),
strconv.FormatInt(taskIDBlock.GetEndId(), 10)})
table.Render()
}
func printPollerInfo(pollers []*taskqueuepb.PollerInfo, taskQueueType enumspb.TaskQueueType) {
table := tablewriter.NewWriter(os.Stdout)
table.SetBorder(false)
table.SetColumnSeparator("|")
if taskQueueType == enumspb.TASK_QUEUE_TYPE_ACTIVITY {
table.SetHeader([]string{"Activity Poller Identity", "Last Access Time"})
} else {
table.SetHeader([]string{"Workflow Poller Identity", "Last Access Time"})
}
table.SetHeaderLine(false)
table.SetHeaderColor(tableHeaderBlue, tableHeaderBlue)
for _, poller := range pollers {
table.Append([]string{poller.GetIdentity(), convertTime(poller.GetLastAccessTime(), false)})
}
table.Render()
}
// AdminListTaskQueueTasks displays task information
func AdminListTaskQueueTasks(c *cli.Context) {
namespace := getRequiredOption(c, FlagNamespaceID)
tlName := getRequiredOption(c, FlagTaskQueue)
tlTypeInt, err := stringToEnum(c.String(FlagTaskQueueType), enumspb.TaskQueueType_value)
if err != nil {
ErrorAndExit("Failed to parse TaskQueue Type", err)
}
tlType := enumspb.TaskQueueType(tlTypeInt)
if tlType == enumspb.TASK_QUEUE_TYPE_UNSPECIFIED {
ErrorAndExit("TaskQueue type Unspecified is currently not supported", nil)
}
minReadLvl := getRequiredInt64Option(c, FlagMinReadLevel)
maxReadLvl := getRequiredInt64Option(c, FlagMaxReadLevel)
workflowID := c.String(FlagWorkflowID)
runID := c.String(FlagRunID)
pFactory := CreatePersistenceFactory(c)
taskManager, err := pFactory.NewTaskManager()
if err != nil {
ErrorAndExit("Failed to initialize task manager", err)
}
req := &persistence.GetTasksRequest{NamespaceID: namespace, TaskQueue: tlName, TaskType: tlType, ReadLevel: minReadLvl, MaxReadLevel: &maxReadLvl}
paginationFunc := func(paginationToken []byte) ([]interface{}, []byte, error) {
response, err := taskManager.GetTasks(req)
if err != nil {
return nil, nil, err
}
tasks := response.Tasks
if workflowID != "" {
filteredTasks := tasks[:0]
for _, task := range tasks {
if task.Data.WorkflowId != workflowID {
continue
}
if runID != "" && task.Data.RunId != runID {
continue
}
filteredTasks = append(filteredTasks, task)
}
tasks = filteredTasks
}
var items []interface{}
for _, task := range tasks {
items = append(items, task)
}
return items, nil, nil
}
paginate(c, paginationFunc)
}
| 1 | 9,869 | Should `kind` be a command line parameter also? | temporalio-temporal | go |
@@ -43,7 +43,7 @@ namespace Nethermind.JsonRpc.Data
Gas = transaction.GasLimit;
Input = Data = transaction.Data;
Type = transaction.Type;
- AccessList = TryGetAccessListItems();
+ AccessList = transaction.AccessList is null ? null : AccessListItemForRpc.FromAccessList(transaction.AccessList);
Signature? signature = transaction.Signature;
if (signature != null) | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.Collections.Generic;
using System.Linq;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Eip2930;
using Nethermind.Core.Extensions;
using Nethermind.Int256;
using Newtonsoft.Json;
namespace Nethermind.JsonRpc.Data
{
public class TransactionForRpc
{
public TransactionForRpc(Transaction transaction) : this(null, null, null, transaction) { }
public TransactionForRpc(Keccak? blockHash, long? blockNumber, int? txIndex, Transaction transaction)
{
Hash = transaction.Hash;
Nonce = transaction.Nonce;
BlockHash = blockHash;
BlockNumber = blockNumber;
TransactionIndex = txIndex;
From = transaction.SenderAddress;
To = transaction.To;
Value = transaction.Value;
GasPrice = transaction.GasPrice;
Gas = transaction.GasLimit;
Input = Data = transaction.Data;
Type = transaction.Type;
AccessList = TryGetAccessListItems();
Signature? signature = transaction.Signature;
if (signature != null)
{
R = new UInt256(signature.R, true);
S = new UInt256(signature.S, true);
V = (UInt256?)signature.V;
}
AccessListItemForRpc[]? TryGetAccessListItems()
{
int? accessListLength = transaction.AccessList?.Data.Count;
if (accessListLength == null)
{
return null;
}
AccessListItemForRpc[] accessList = new AccessListItemForRpc[(int)accessListLength];
for (int i = 0; i < accessListLength; i++)
{
Address address = transaction.AccessList.Data.Keys.ElementAt(i);
IEnumerable<UInt256>? keys = TryGetHashSet(transaction.AccessList.Data.Values.ElementAt(i));
accessList[i] = new AccessListItemForRpc(address, keys);
}
return accessList;
IEnumerable<UInt256>? TryGetHashSet(IReadOnlySet<UInt256> argValue)
=> argValue != null ? new HashSet<UInt256>(argValue) : null;
}
}
// ReSharper disable once UnusedMember.Global
public TransactionForRpc()
{
}
public Keccak? Hash { get; set; }
public UInt256? Nonce { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Include)]
public Keccak? BlockHash { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Include)]
public long? BlockNumber { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Include)]
public long? TransactionIndex { get; set; }
public Address? From { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Include)]
public Address? To { get; set; }
public UInt256? Value { get; set; }
public UInt256? GasPrice { get; set; }
public long? Gas { get; set; }
public byte[]? Data { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Include)]
public byte[]? Input { get; set; }
public TxType Type { get; set; }
public AccessListItemForRpc[]? AccessList { get; set; }
public UInt256? V { get; set; }
public UInt256? S { get; set; }
public UInt256? R { get; set; }
public Transaction ToTransactionWithDefaults(ulong? chainId = null)
{
Transaction tx = new();
tx.GasLimit = Gas ?? 90000;
tx.GasPrice = GasPrice ?? 20.GWei();
tx.Nonce = (ulong)(Nonce ?? 0); // here pick the last nonce?
tx.To = To;
tx.SenderAddress = From;
tx.Value = Value ?? 0;
tx.Data = Data ?? Input;
tx.Type = Type;
tx.AccessList = TryGetAccessList();
tx.ChainId = chainId;
return tx;
}
public Transaction ToTransaction(ulong? chainId = null)
{
Transaction tx = new();
tx.GasLimit = Gas ?? 0;
tx.GasPrice = GasPrice ?? 0;
tx.Nonce = (ulong)(Nonce ?? 0); // here pick the last nonce?
tx.To = To;
tx.SenderAddress = From;
tx.Value = Value ?? 0;
tx.Data = Data ?? Input;
tx.Type = Type;
tx.AccessList = TryGetAccessList();
tx.ChainId = chainId;
return tx;
}
private AccessList? TryGetAccessList()
{
if (Type != TxType.AccessList || AccessList == null)
{
return null;
}
AccessListBuilder accessListBuilder = new();
for (int i = 0; i < AccessList.Length; i++)
{
accessListBuilder.AddAddress(AccessList[i].Address);
for (int j = 0; j < AccessList[i].StorageKeys.Length; j++)
{
accessListBuilder.AddStorage(AccessList[i].StorageKeys[j]);
}
}
return accessListBuilder.ToAccessList();
}
}
}
| 1 | 25,212 | can we have it inside FromAccessList? | NethermindEth-nethermind | .cs |
@@ -64,6 +64,19 @@ var rawRequestPaths = map[string]bool{
"/v2/teal/compile": true,
}
+// unauthorizedRequestError is generated when we receive 401 error from the server. This error includes the inner error
+// as well as the likely parameters that caused the issue.
+type unauthorizedRequestError struct {
+ errorString string
+ apiToken string
+ url string
+}
+
+// Error format an error string for the unauthorizedRequestError error.
+func (e unauthorizedRequestError) Error() string {
+ return fmt.Sprintf("Unauthorized request to `%s` when using token `%s` : %s", e.url, e.apiToken, e.errorString)
+}
+
// RestClient manages the REST interface for a calling user.
type RestClient struct {
serverURL url.URL | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package client
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/google/go-querystring/query"
generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
privateV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
"github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
)
const (
authHeader = "X-Algo-API-Token"
healthCheckEndpoint = "/health"
maxRawResponseBytes = 50e6
)
// APIVersion is used to define which server side API version would be used when making http requests to the server
type APIVersion string
const (
// APIVersionV1 suggests that the RestClient would use v1 calls whenever it's available for the given request.
APIVersionV1 APIVersion = "v1"
// APIVersionV2 suggests that the RestClient would use v2 calls whenever it's available for the given request.
APIVersionV2 APIVersion = "v2"
)
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
"/v1/transactions": true,
"/v2/teal/dryrun": true,
"/v2/teal/compile": true,
}
// RestClient manages the REST interface for a calling user.
type RestClient struct {
serverURL url.URL
apiToken string
versionAffinity APIVersion
}
// MakeRestClient is the factory for constructing a RestClient for a given endpoint
func MakeRestClient(url url.URL, apiToken string) RestClient {
return RestClient{
serverURL: url,
apiToken: apiToken,
versionAffinity: APIVersionV1,
}
}
// SetAPIVersionAffinity sets the client affinity to use a specific version of the API
func (client *RestClient) SetAPIVersionAffinity(affinity APIVersion) (previousAffinity APIVersion) {
previousAffinity = client.versionAffinity
client.versionAffinity = affinity
return
}
// extractError checks if the response signifies an error (for now, any StatusCode other than 200 or 201).
// If so, it returns the error.
// Otherwise, it returns nil.
func extractError(resp *http.Response) error {
if resp.StatusCode == 200 || resp.StatusCode == 201 {
return nil
}
errorBuf, _ := ioutil.ReadAll(resp.Body) // ignore returned error
return fmt.Errorf("HTTP %v: %s", resp.Status, errorBuf)
}
// stripTransaction gets a transaction of the form "tx-XXXXXXXX" and truncates the "tx-" part, if it starts with "tx-"
func stripTransaction(tx string) string {
if strings.HasPrefix(tx, "tx-") {
return strings.SplitAfter(tx, "-")[1]
}
return tx
}
// submitForm is a helper used for submitting (ex.) GETs and POSTs to the server
func (client RestClient) submitForm(response interface{}, path string, request interface{}, requestMethod string, encodeJSON bool, decodeJSON bool) error {
var err error
queryURL := client.serverURL
queryURL.Path = path
var req *http.Request
var body io.Reader
if request != nil {
if rawRequestPaths[path] {
reqBytes, ok := request.([]byte)
if !ok {
return fmt.Errorf("couldn't decode raw request as bytes")
}
body = bytes.NewBuffer(reqBytes)
} else {
v, err := query.Values(request)
if err != nil {
return err
}
queryURL.RawQuery = v.Encode()
if encodeJSON {
jsonValue, _ := json.Marshal(request)
body = bytes.NewBuffer(jsonValue)
}
}
}
req, err = http.NewRequest(requestMethod, queryURL.String(), body)
if err != nil {
return err
}
// If we add another endpoint that does not require auth, we should add a
// requiresAuth argument to submitForm rather than checking here
if path != healthCheckEndpoint {
req.Header.Set(authHeader, client.apiToken)
}
httpClient := &http.Client{}
resp, err := httpClient.Do(req)
if err != nil {
return err
}
// Ensure response isn't too large
resp.Body = http.MaxBytesReader(nil, resp.Body, maxRawResponseBytes)
defer resp.Body.Close()
err = extractError(resp)
if err != nil {
return err
}
if decodeJSON {
dec := json.NewDecoder(resp.Body)
return dec.Decode(&response)
}
// Response must implement RawResponse
raw, ok := response.(v1.RawResponse)
if !ok {
return fmt.Errorf("can only decode raw response into type implementing v1.RawResponse")
}
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
raw.SetBytes(bodyBytes)
return nil
}
// get performs a GET request to the specific path against the server
func (client RestClient) get(response interface{}, path string, request interface{}) error {
return client.submitForm(response, path, request, "GET", false /* encodeJSON */, true /* decodeJSON */)
}
// getRaw behaves identically to get but doesn't json decode the response, and
// the response must implement the v1.RawResponse interface
func (client RestClient) getRaw(response v1.RawResponse, path string, request interface{}) error {
return client.submitForm(response, path, request, "GET", false /* encodeJSON */, false /* decodeJSON */)
}
// post sends a POST request to the given path with the given request object.
// No query parameters will be sent if request is nil.
// response must be a pointer to an object as post writes the response there.
func (client RestClient) post(response interface{}, path string, request interface{}) error {
return client.submitForm(response, path, request, "POST", true /* encodeJSON */, true /* decodeJSON */)
}
// Status retrieves the StatusResponse from the running node
// the StatusResponse includes data like the consensus version and current round
// Not supported
func (client RestClient) Status() (response generatedV2.NodeStatusResponse, err error) {
switch client.versionAffinity {
case APIVersionV2:
err = client.get(&response, "/v2/status", nil)
default:
var nodeStatus v1.NodeStatus
err = client.get(&nodeStatus, "/v1/status", nil)
if err == nil {
response = fillNodeStatusResponse(nodeStatus)
}
}
return
}
// HealthCheck does a health check on the potentially running node,
// returning an error if the API is down
func (client RestClient) HealthCheck() error {
return client.get(nil, "/health", nil)
}
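// fillNodeStatusResponse converts a v1 NodeStatus into the generated v2 NodeStatusResponse shape.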
func fillNodeStatusResponse(nodeStatus v1.NodeStatus) generatedV2.NodeStatusResponse {
return generatedV2.NodeStatusResponse{
LastRound: nodeStatus.LastRound,
LastVersion: nodeStatus.LastVersion,
NextVersion: nodeStatus.NextVersion,
NextVersionRound: nodeStatus.NextVersionRound,
NextVersionSupported: nodeStatus.NextVersionSupported,
TimeSinceLastRound: uint64(nodeStatus.TimeSinceLastRound),
CatchupTime: uint64(nodeStatus.CatchupTime),
StoppedAtUnsupportedRound: nodeStatus.StoppedAtUnsupportedRound,
}
}
// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block
// blocks on the node end
// Not supported
func (client RestClient) StatusAfterBlock(blockNum uint64) (response generatedV2.NodeStatusResponse, err error) {
switch client.versionAffinity {
case APIVersionV2:
err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil)
default:
var nodeStatus v1.NodeStatus
err = client.get(&nodeStatus, fmt.Sprintf("/v1/status/wait-for-block-after/%d", blockNum), nil)
if err == nil {
response = fillNodeStatusResponse(nodeStatus)
}
}
return
}
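// pendingTransactionsParams holds the query parameters for the pending transactions endpoint.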
type pendingTransactionsParams struct {
Max uint64 `url:"max"`
}
// GetPendingTransactions asks algod for a snapshot of current pending txns on the node, bounded by maxTxns.
// If maxTxns = 0, fetches as many transactions as possible.
func (client RestClient) GetPendingTransactions(maxTxns uint64) (response v1.PendingTransactions, err error) {
err = client.get(&response, fmt.Sprintf("/v1/transactions/pending"), pendingTransactionsParams{maxTxns})
return
}
// Versions retrieves the VersionResponse from the running node
// the VersionResponse includes data like version number and genesis ID
func (client RestClient) Versions() (response common.Version, err error) {
err = client.get(&response, "/versions", nil)
return
}
// LedgerSupply gets the supply details for the specified node's Ledger
func (client RestClient) LedgerSupply() (response v1.Supply, err error) {
err = client.get(&response, "/v1/ledger/supply", nil)
return
}
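// The structs below describe the query parameters of the corresponding endpoints;
// their fields are serialized via the `url` tags by go-querystring.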
type transactionsByAddrParams struct {
FirstRound uint64 `url:"firstRound"`
LastRound uint64 `url:"lastRound"`
Max uint64 `url:"max"`
}
type assetsParams struct {
AssetIdx uint64 `url:"assetIdx"`
Max uint64 `url:"max"`
}
type appsParams struct {
AppIdx uint64 `url:"appIdx"`
Max uint64 `url:"max"`
}
type rawblockParams struct {
Raw uint64 `url:"raw"`
}
type rawAccountParams struct {
Format string `url:"format"`
}
// TransactionsByAddr returns all transactions for a PK [addr] in the [first,
// last] rounds range.
func (client RestClient) TransactionsByAddr(addr string, first, last, max uint64) (response v1.TransactionList, err error) {
err = client.get(&response, fmt.Sprintf("/v1/account/%s/transactions", addr), transactionsByAddrParams{first, last, max})
return
}
// AssetInformation gets the AssetInformationResponse associated with the passed asset index
func (client RestClient) AssetInformation(index uint64) (response v1.AssetParams, err error) {
err = client.get(&response, fmt.Sprintf("/v1/asset/%d", index), nil)
return
}
// Assets gets up to max assets with maximum asset index assetIdx
func (client RestClient) Assets(assetIdx, max uint64) (response v1.AssetList, err error) {
err = client.get(&response, "/v1/assets", assetsParams{assetIdx, max})
return
}
// AssetInformationV2 gets the AssetInformationResponse associated with the passed asset index
func (client RestClient) AssetInformationV2(index uint64) (response generatedV2.Asset, err error) {
err = client.get(&response, fmt.Sprintf("/v2/assets/%d", index), nil)
return
}
// ApplicationInformation gets the ApplicationInformationResponse associated
// with the passed application index
func (client RestClient) ApplicationInformation(index uint64) (response generatedV2.Application, err error) {
err = client.get(&response, fmt.Sprintf("/v2/applications/%d", index), nil)
return
}
// AccountInformation also gets the AccountInformationResponse associated with the passed address
func (client RestClient) AccountInformation(address string) (response v1.Account, err error) {
err = client.get(&response, fmt.Sprintf("/v1/account/%s", address), nil)
return
}
// AccountInformationV2 gets the AccountData associated with the passed address
func (client RestClient) AccountInformationV2(address string) (response generatedV2.Account, err error) {
err = client.get(&response, fmt.Sprintf("/v2/accounts/%s", address), nil)
return
}
// Blob represents arbitrary blob of data satisfying v1.RawResponse interface
type Blob []byte
// SetBytes fulfills the RawResponse interface on Blob
func (blob *Blob) SetBytes(b []byte) {
*blob = b
}
// RawAccountInformationV2 gets the raw AccountData associated with the passed address
func (client RestClient) RawAccountInformationV2(address string) (response []byte, err error) {
var blob Blob
err = client.getRaw(&blob, fmt.Sprintf("/v2/accounts/%s", address), rawAccountParams{Format: "msgpack"})
response = blob
return
}
// TransactionInformation gets information about a specific transaction involving a specific account
func (client RestClient) TransactionInformation(accountAddress, transactionID string) (response v1.Transaction, err error) {
transactionID = stripTransaction(transactionID)
err = client.get(&response, fmt.Sprintf("/v1/account/%s/transaction/%s", accountAddress, transactionID), nil)
return
}
// PendingTransactionInformation gets information about a recently issued
// transaction. There are several cases when this might succeed:
//
// - transaction committed (CommittedRound > 0)
// - transaction still in the pool (CommittedRound = 0, PoolError = "")
// - transaction removed from pool due to error (CommittedRound = 0, PoolError != "")
//
// Or the transaction may have happened sufficiently long ago that the
// node no longer remembers it, and this will return an error.
func (client RestClient) PendingTransactionInformation(transactionID string) (response v1.Transaction, err error) {
transactionID = stripTransaction(transactionID)
err = client.get(&response, fmt.Sprintf("/v1/transactions/pending/%s", transactionID), nil)
return
}
// SuggestedFee gets the recommended transaction fee from the node
func (client RestClient) SuggestedFee() (response v1.TransactionFee, err error) {
err = client.get(&response, "/v1/transactions/fee", nil)
return
}
// SuggestedParams gets the suggested transaction parameters
func (client RestClient) SuggestedParams() (response v1.TransactionParams, err error) {
err = client.get(&response, "/v1/transactions/params", nil)
return
}
// SendRawTransaction gets a SignedTxn and broadcasts it to the network
func (client RestClient) SendRawTransaction(txn transactions.SignedTxn) (response v1.TransactionID, err error) {
err = client.post(&response, "/v1/transactions", protocol.Encode(&txn))
return
}
// SendRawTransactionGroup gets a SignedTxn group and broadcasts it to the network
func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTxn) error {
// response is not terribly useful: it's the txid of the first transaction,
// which can be computed by the client anyway.
var enc []byte
for _, tx := range txgroup {
enc = append(enc, protocol.Encode(&tx)...)
}
var response v1.TransactionID
return client.post(&response, "/v1/transactions", enc)
}
// Block gets the block info for the given round
func (client RestClient) Block(round uint64) (response v1.Block, err error) {
err = client.get(&response, fmt.Sprintf("/v1/block/%d", round), nil)
return
}
// RawBlock gets the encoded, raw msgpack block for the given round
func (client RestClient) RawBlock(round uint64) (response v1.RawBlock, err error) {
err = client.getRaw(&response, fmt.Sprintf("/v1/block/%d", round), rawblockParams{1})
return
}
// Shutdown requests the node to shut itself down
func (client RestClient) Shutdown() (err error) {
response := 1
err = client.post(&response, "/v2/shutdown", nil)
return
}
// AbortCatchup aborts the currently running catchup
func (client RestClient) AbortCatchup(catchpointLabel string) (response privateV2.CatchpointAbortResponse, err error) {
err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "DELETE", false, true)
return
}
// Catchup starts catching up to the given catchpoint label
func (client RestClient) Catchup(catchpointLabel string) (response privateV2.CatchpointStartResponse, err error) {
err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "POST", false, true)
return
}
// GetGoRoutines gets a dump of the goroutines from pprof
// Not supported
func (client RestClient) GetGoRoutines(ctx context.Context) (goRoutines string, err error) {
// issue a "/debug/pprof/goroutine?debug=1" request
query := make(map[string]string)
query["debug"] = "1"
goRoutines, err = client.doGetWithQuery(ctx, "/debug/pprof/goroutine", query)
return
}
// Compile compiles the given program and returns the compiled program
func (client RestClient) Compile(program []byte) (compiledProgram []byte, programHash crypto.Digest, err error) {
var compileResponse generatedV2.CompileResponse
err = client.submitForm(&compileResponse, "/v2/teal/compile", program, "POST", false, true)
if err != nil {
return nil, crypto.Digest{}, err
}
compiledProgram, err = base64.StdEncoding.DecodeString(compileResponse.Result)
if err != nil {
return nil, crypto.Digest{}, err
}
var progAddr basics.Address
progAddr, err = basics.UnmarshalChecksumAddress(compileResponse.Hash)
if err != nil {
return nil, crypto.Digest{}, err
}
programHash = crypto.Digest(progAddr)
return
}
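// doGetWithQuery issues an authenticated GET to the given path with the supplied
// query arguments and returns the raw response body as a string.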
func (client RestClient) doGetWithQuery(ctx context.Context, path string, queryArgs map[string]string) (result string, err error) {
queryURL := client.serverURL
queryURL.Path = path
req, err := http.NewRequest("GET", queryURL.String(), nil)
if err != nil {
return
}
q := req.URL.Query()
for k, v := range queryArgs {
q.Add(k, v)
}
req.URL.RawQuery = q.Encode()
req.Header.Set(authHeader, client.apiToken)
httpClient := http.Client{}
resp, err := httpClient.Do(req.WithContext(ctx))
if err != nil {
return
}
defer resp.Body.Close()
err = extractError(resp)
if err != nil {
return
}
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
result = string(bytes)
return
}
// RawDryrun gets the raw DryrunResponse for the passed dryrun request data
func (client RestClient) RawDryrun(data []byte) (response []byte, err error) {
var blob Blob
err = client.submitForm(&blob, "/v2/teal/dryrun", data, "POST", false /* encodeJSON */, false /* decodeJSON */)
response = blob
return
}
| 1 | 40,645 | Why not embed filterASCII in this function? | algorand-go-algorand | go |
@@ -1872,7 +1872,11 @@ public class ExecutorManager extends EventHandler implements
// process flow with current snapshot of activeExecutors
selectExecutorAndDispatchFlow(reference, exflow, new HashSet<Executor>(activeExecutors));
}
- currentContinuousFlowProcessed++;
+
+ // do not count failed flow processing (flows still in queue)
+ if(queuedFlows.getFlow(exflow.getExecutionId()) == null) {
+ currentContinuousFlowProcessed++;
+ }
}
}
| 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.io.File;
import java.io.IOException;
import java.lang.Thread.State;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import azkaban.alert.Alerter;
import azkaban.event.Event;
import azkaban.event.Event.Type;
import azkaban.event.EventHandler;
import azkaban.executor.selector.ExecutorComparator;
import azkaban.executor.selector.ExecutorFilter;
import azkaban.executor.selector.ExecutorSelector;
import azkaban.project.Project;
import azkaban.project.ProjectWhitelist;
import azkaban.scheduler.ScheduleStatisticManager;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import azkaban.utils.Pair;
import azkaban.utils.Props;
/**
* Executor manager used to manage the client side job.
*
*/
public class ExecutorManager extends EventHandler implements
ExecutorManagerAdapter {
static final String AZKABAN_EXECUTOR_SELECTOR_FILTERS =
"azkaban.executorselector.filters";
static final String AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
static final String AZKABAN_QUEUEPROCESSING_ENABLED =
"azkaban.queueprocessing.enabled";
static final String AZKABAN_USE_MULTIPLE_EXECUTORS =
"azkaban.use.multiple.executors";
private static final String AZKABAN_WEBSERVER_QUEUE_SIZE =
"azkaban.webserver.queue.size";
private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
private static final String AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
private static final String AZKABAN_MAX_DISPATCHING_ERRORS_PERMITTED =
"azkaban.maxDispatchingErrors";
private static Logger logger = Logger.getLogger(ExecutorManager.class);
private ExecutorLoader executorLoader;
private CleanerThread cleanerThread;
private ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> runningFlows =
new ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>>();
private ConcurrentHashMap<Integer, ExecutableFlow> recentlyFinished =
new ConcurrentHashMap<Integer, ExecutableFlow>();
QueuedExecutions queuedFlows;
final private Set<Executor> activeExecutors = new HashSet<Executor>();
private QueueProcessorThread queueProcessor;
private ExecutingManagerUpdaterThread executingManager;
// 12 weeks
private static final long DEFAULT_EXECUTION_LOGS_RETENTION_MS = 3 * 4 * 7
* 24 * 60 * 60 * 1000l;
private long lastCleanerThreadCheckTime = -1;
private long lastThreadCheckTime = -1;
private String updaterStage = "not started";
private Map<String, Alerter> alerters;
File cacheDir;
private final Props azkProps;
private List<String> filterList;
private Map<String, Integer> comparatorWeightsMap;
private long lastSuccessfulExecutorInfoRefresh;
private ExecutorService executorInforRefresherService;
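/* Creates the manager, loads executors and any running/queued executions from the
 * datastore, and starts the updater, cleaner and (in multi-executor mode) queue
 * processor threads. */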
public ExecutorManager(Props azkProps, ExecutorLoader loader,
Map<String, Alerter> alerters) throws ExecutorManagerException {
this.alerters = alerters;
this.azkProps = azkProps;
this.executorLoader = loader;
this.setupExecutors();
this.loadRunningFlows();
queuedFlows =
new QueuedExecutions(azkProps.getLong(AZKABAN_WEBSERVER_QUEUE_SIZE, 100000));
this.loadQueuedFlows();
cacheDir = new File(azkProps.getString("cache.directory", "cache"));
executingManager = new ExecutingManagerUpdaterThread();
executingManager.start();
if(isMultiExecutorMode()) {
setupMultiExecutorMode();
}
long executionLogsRetentionMs =
azkProps.getLong("execution.logs.retention.ms",
DEFAULT_EXECUTION_LOGS_RETENTION_MS);
cleanerThread = new CleanerThread(executionLogsRetentionMs);
cleanerThread.start();
}
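/* Initializes multi-executor dispatching: selector filters and comparator weights
 * from azkaban.properties, the ExecutorInfo refresher pool and the queue processor thread. */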
private void setupMultiExecutorMode() {
// initialize hard filters for executor selector from azkaban.properties
String filters = azkProps.getString(AZKABAN_EXECUTOR_SELECTOR_FILTERS, "");
if (filters != null) {
filterList = Arrays.asList(StringUtils.split(filters, ","));
}
// initialize comparator feature weights for executor selector from
// azkaban.properties
Map<String, String> compListStrings =
azkProps.getMapByPrefix(AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX);
if (compListStrings != null) {
comparatorWeightsMap = new TreeMap<String, Integer>();
for (Map.Entry<String, String> entry : compListStrings.entrySet()) {
comparatorWeightsMap.put(entry.getKey(), Integer.valueOf(entry.getValue()));
}
}
executorInforRefresherService =
Executors.newFixedThreadPool(azkProps.getInt(
AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS, 5));
// configure queue processor
queueProcessor =
new QueueProcessorThread(azkProps.getBoolean(
AZKABAN_QUEUEPROCESSING_ENABLED, true), azkProps.getLong(
AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_MS, 50000), azkProps.getInt(
AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW, 5), azkProps.getInt(
AZKABAN_MAX_DISPATCHING_ERRORS_PERMITTED, activeExecutors.size()));
queueProcessor.start();
}
/**
*
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#setupExecutors()
*/
@Override
public void setupExecutors() throws ExecutorManagerException {
Set<Executor> newExecutors = new HashSet<Executor>();
if (isMultiExecutorMode()) {
logger.info("Initializing multi executors from database");
newExecutors.addAll(executorLoader.fetchActiveExecutors());
} else if (azkProps.containsKey("executor.port")) {
// Add local executor, if specified as per properties
String executorHost = azkProps.getString("executor.host", "localhost");
int executorPort = azkProps.getInt("executor.port");
logger.info(String.format("Initializing local executor %s:%d",
executorHost, executorPort));
Executor executor =
executorLoader.fetchExecutor(executorHost, executorPort);
if (executor == null) {
executor = executorLoader.addExecutor(executorHost, executorPort);
} else if (!executor.isActive()) {
executor.setActive(true);
executorLoader.updateExecutor(executor);
}
newExecutors.add(new Executor(executor.getId(), executorHost,
executorPort, true));
}
if (newExecutors.isEmpty()) {
logger.error("No active executor found");
throw new ExecutorManagerException("No active executor found");
} else if(newExecutors.size() > 1 && !isMultiExecutorMode()) {
logger.error("Multiple local executors specified");
throw new ExecutorManagerException("Multiple local executors specified");
} else {
// clear all active executors, only if we have at least one new active
// executors
activeExecutors.clear();
activeExecutors.addAll(newExecutors);
}
}
private boolean isMultiExecutorMode() {
return azkProps.getBoolean(AZKABAN_USE_MULTIPLE_EXECUTORS, false);
}
/**
* Refresh Executor stats for all the active executors in this executorManager
*/
private void refreshExecutors() {
synchronized (activeExecutors) {
List<Pair<Executor, Future<String>>> futures =
new ArrayList<Pair<Executor, Future<String>>>();
for (final Executor executor : activeExecutors) {
// execute each executorInfo refresh task to fetch
Future<String> fetchExecutionInfo =
executorInforRefresherService.submit(new Callable<String>() {
@Override
public String call() throws Exception {
return callExecutorForJsonString(executor.getHost(),
executor.getPort(), "/serverstastics", null);
}
});
futures.add(new Pair<Executor, Future<String>>(executor,
fetchExecutionInfo));
}
boolean wasSuccess = true;
for (Pair<Executor, Future<String>> refreshPair : futures) {
Executor executor = refreshPair.getFirst();
executor.setExecutorInfo(null); // invalidate cached ExecutorInfo
try {
// max 5 secs
String jsonString = refreshPair.getSecond().get(5, TimeUnit.SECONDS);
executor.setExecutorInfo(ExecutorInfo.fromJSONString(jsonString));
logger.info(String.format(
"Successfully refreshed executor: %s with executor info : %s",
executor, jsonString));
} catch (TimeoutException e) {
wasSuccess = false;
logger.error("Timed out while waiting for ExecutorInfo refresh"
+ executor, e);
} catch (Exception e) {
wasSuccess = false;
logger.error("Failed to update ExecutorInfo for executor : "
+ executor, e);
}
}
// update is successful for all executors
if (wasSuccess) {
lastSuccessfulExecutorInfoRefresh = System.currentTimeMillis();
}
}
}
/**
* Throws exception if running in local mode
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#disableQueueProcessorThread()
*/
@Override
public void disableQueueProcessorThread() throws ExecutorManagerException {
if (isMultiExecutorMode()) {
queueProcessor.setActive(false);
} else {
throw new ExecutorManagerException(
"Cannot disable QueueProcessor in local mode");
}
}
/**
* Throws exception if running in local mode
* {@inheritDoc}
* @see azkaban.executor.ExecutorManagerAdapter#enableQueueProcessorThread()
*/
@Override
public void enableQueueProcessorThread() throws ExecutorManagerException {
if (isMultiExecutorMode()) {
queueProcessor.setActive(true);
} else {
throw new ExecutorManagerException(
"Cannot enable QueueProcessor in local mode");
}
}
public State getQueueProcessorThreadState() {
if (isMultiExecutorMode())
return queueProcessor.getState();
else
return State.NEW; // not started in local mode
}
/**
* Returns the state of the QueueProcessor: false means no flow is being
* dispatched, true means flows are being dispatched as expected
*
* @return
*/
public boolean isQueueProcessorThreadActive() {
if (isMultiExecutorMode())
return queueProcessor.isActive();
else
return false;
}
/**
* Return last Successful ExecutorInfo Refresh for all active executors
*
* @return
*/
public long getLastSuccessfulExecutorInfoRefresh() {
return this.lastSuccessfulExecutorInfoRefresh;
}
/**
* Get currently supported Comparators available to use via azkaban.properties
*
* @return
*/
public Set<String> getAvailableExecutorComparatorNames() {
return ExecutorComparator.getAvailableComparatorNames();
}
/**
* Get currently supported filters available to use via azkaban.properties
*
* @return
*/
public Set<String> getAvailableExecutorFilterNames() {
return ExecutorFilter.getAvailableFilterNames();
}
@Override
public State getExecutorManagerThreadState() {
return executingManager.getState();
}
public String getExecutorThreadStage() {
return updaterStage;
}
@Override
public boolean isExecutorManagerThreadActive() {
return executingManager.isAlive();
}
@Override
public long getLastExecutorManagerThreadCheckTime() {
return lastThreadCheckTime;
}
public long getLastCleanerThreadCheckTime() {
return this.lastCleanerThreadCheckTime;
}
@Override
public Collection<Executor> getAllActiveExecutors() {
return Collections.unmodifiableCollection(activeExecutors);
}
/**
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#fetchExecutor(int)
*/
@Override
public Executor fetchExecutor(int executorId) throws ExecutorManagerException {
for (Executor executor : activeExecutors) {
if (executor.getId() == executorId) {
return executor;
}
}
return executorLoader.fetchExecutor(executorId);
}
@Override
public Set<String> getPrimaryServerHosts() {
// Only one for now. More probably later.
HashSet<String> ports = new HashSet<String>();
for (Executor executor : activeExecutors) {
ports.add(executor.getHost() + ":" + executor.getPort());
}
return ports;
}
@Override
public Set<String> getAllActiveExecutorServerHosts() {
// Includes non primary server/hosts
HashSet<String> ports = new HashSet<String>();
for (Executor executor : activeExecutors) {
ports.add(executor.getHost() + ":" + executor.getPort());
}
// include executors which were initially active and still have flows running
for (Pair<ExecutionReference, ExecutableFlow> running : runningFlows
.values()) {
ExecutionReference ref = running.getFirst();
ports.add(ref.getHost() + ":" + ref.getPort());
}
return ports;
}
private void loadRunningFlows() throws ExecutorManagerException {
runningFlows.putAll(executorLoader.fetchActiveFlows());
// Finalize all flows which were running on an executor which is now
// inactive
for (Pair<ExecutionReference, ExecutableFlow> pair : runningFlows.values()) {
if (!activeExecutors.contains(pair.getFirst().getExecutor())) {
finalizeFlows(pair.getSecond());
}
}
}
/*
* load queued flows, i.e. with active_execution_reference and not assigned to
* any executor
*/
private void loadQueuedFlows() throws ExecutorManagerException {
List<Pair<ExecutionReference, ExecutableFlow>> retrievedExecutions =
executorLoader.fetchQueuedFlows();
if (retrievedExecutions != null) {
for (Pair<ExecutionReference, ExecutableFlow> pair : retrievedExecutions) {
queuedFlows.enqueue(pair.getSecond(), pair.getFirst());
}
}
}
/**
* Gets a list of all the active (running flows and non-dispatched flows)
* executions for a given project and flow {@inheritDoc}. Results should
* be sorted as we assume this while setting up pipelined execution Id.
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int,
* java.lang.String)
*/
@Override
public List<Integer> getRunningFlows(int projectId, String flowId) {
List<Integer> executionIds = new ArrayList<Integer>();
executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
queuedFlows.getAllEntries()));
executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
runningFlows.values()));
Collections.sort(executionIds);
return executionIds;
}
/* Helper method for getRunningFlows */
private List<Integer> getRunningFlowsHelper(int projectId, String flowId,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
List<Integer> executionIds = new ArrayList<Integer>();
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getFlowId().equals(flowId)
&& ref.getSecond().getProjectId() == projectId) {
executionIds.add(ref.getFirst().getExecId());
}
}
return executionIds;
}
/**
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getActiveFlowsWithExecutor()
*/
@Override
public List<Pair<ExecutableFlow, Executor>> getActiveFlowsWithExecutor()
throws IOException {
List<Pair<ExecutableFlow, Executor>> flows =
new ArrayList<Pair<ExecutableFlow, Executor>>();
getActiveFlowsWithExecutorHelper(flows, queuedFlows.getAllEntries());
getActiveFlowsWithExecutorHelper(flows, runningFlows.values());
return flows;
}
/* Helper method for getActiveFlowsWithExecutor */
private void getActiveFlowsWithExecutorHelper(
List<Pair<ExecutableFlow, Executor>> flows,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(new Pair<ExecutableFlow, Executor>(ref.getSecond(), ref
.getFirst().getExecutor()));
}
}
/**
* Checks whether the given flow has any active (running, non-dispatched)
* executions {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#isFlowRunning(int,
* java.lang.String)
*/
@Override
public boolean isFlowRunning(int projectId, String flowId) {
boolean isRunning = false;
isRunning =
isRunning
|| isFlowRunningHelper(projectId, flowId, queuedFlows.getAllEntries());
isRunning =
isRunning
|| isFlowRunningHelper(projectId, flowId, runningFlows.values());
return isRunning;
}
/* Search a running flow in a collection */
private boolean isFlowRunningHelper(int projectId, String flowId,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getProjectId() == projectId
&& ref.getSecond().getFlowId().equals(flowId)) {
return true;
}
}
return false;
}
/**
* Fetch ExecutableFlow from an active (running, non-dispatched) or from
* database {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getExecutableFlow(int)
*/
@Override
public ExecutableFlow getExecutableFlow(int execId)
throws ExecutorManagerException {
if (runningFlows.containsKey(execId)) {
return runningFlows.get(execId).getSecond();
} else if (queuedFlows.hasExecution(execId)) {
return queuedFlows.getFlow(execId);
} else {
return executorLoader.fetchExecutableFlow(execId);
}
}
/**
* Get all active (running, non-dispatched) flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
@Override
public List<ExecutableFlow> getRunningFlows() {
ArrayList<ExecutableFlow> flows = new ArrayList<ExecutableFlow>();
getActiveFlowHelper(flows, queuedFlows.getAllEntries());
getActiveFlowHelper(flows, runningFlows.values());
return flows;
}
/*
* Helper method to get all running flows from a Pair<ExecutionReference,
* ExecutableFlow> collection
*/
private void getActiveFlowHelper(ArrayList<ExecutableFlow> flows,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(ref.getSecond());
}
}
/**
* Get execution Ids of all active (running, non-dispatched) flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
public String getRunningFlowIds() {
List<Integer> allIds = new ArrayList<Integer>();
getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries());
getRunningFlowsIdsHelper(allIds, runningFlows.values());
Collections.sort(allIds);
return allIds.toString();
}
/**
* Get execution Ids of all non-dispatched flows
*
* {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows()
*/
public String getQueuedFlowIds() {
List<Integer> allIds = new ArrayList<Integer>();
getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries());
Collections.sort(allIds);
return allIds.toString();
}
/* Helper method to get flow ids of all running flows */
private void getRunningFlowsIdsHelper(List<Integer> allIds,
Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (Pair<ExecutionReference, ExecutableFlow> ref : collection) {
allIds.add(ref.getSecond().getExecutionId());
}
}
public List<ExecutableFlow> getRecentlyFinishedFlows() {
return new ArrayList<ExecutableFlow>(recentlyFinished.values());
}
@Override
public List<ExecutableFlow> getExecutableFlows(Project project,
String flowId, int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(project.getId(), flowId, skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(int skip, int size)
throws ExecutorManagerException {
List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(String flowIdContains,
int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null,
0, -1, -1, skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(String projContain,
String flowContain, String userContain, int status, long begin, long end,
int skip, int size) throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(projContain, flowContain, userContain,
status, begin, end, skip, size);
return flows;
}
@Override
public List<ExecutableJobInfo> getExecutableJobs(Project project,
String jobId, int skip, int size) throws ExecutorManagerException {
List<ExecutableJobInfo> nodes =
executorLoader.fetchJobHistory(project.getId(), jobId, skip, size);
return nodes;
}
@Override
public int getNumberOfJobExecutions(Project project, String jobId)
throws ExecutorManagerException {
return executorLoader.fetchNumExecutableNodes(project.getId(), jobId);
}
@Override
public int getNumberOfExecutions(Project project, String flowId)
throws ExecutorManagerException {
return executorLoader.fetchNumExecutableFlows(project.getId(), flowId);
}
@Override
public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset,
int length) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "flow");
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, offsetParam, lengthParam);
return LogData.createLogDataFromObject(result);
} else {
LogData value =
executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset,
length);
return value;
}
}
@Override
public LogData getExecutionJobLog(ExecutableFlow exFlow, String jobId,
int offset, int length, int attempt) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "job");
Pair<String, String> jobIdParam =
new Pair<String, String>("jobId", jobId);
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return LogData.createLogDataFromObject(result);
} else {
LogData value =
executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt,
offset, length);
return value;
}
}
@Override
public List<Object> getExecutionJobStats(ExecutableFlow exFlow, String jobId,
int attempt) throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
return executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId,
attempt);
}
Pair<String, String> jobIdParam = new Pair<String, String>("jobId", jobId);
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION,
jobIdParam, attemptParam);
@SuppressWarnings("unchecked")
List<Object> jobStats = (List<Object>) result.get("attachments");
return jobStats;
}
@Override
public JobMetaData getExecutionJobMetaData(ExecutableFlow exFlow,
String jobId, int offset, int length, int attempt)
throws ExecutorManagerException {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair != null) {
Pair<String, String> typeParam = new Pair<String, String>("type", "job");
Pair<String, String> jobIdParam =
new Pair<String, String>("jobId", jobId);
Pair<String, String> offsetParam =
new Pair<String, String>("offset", String.valueOf(offset));
Pair<String, String> lengthParam =
new Pair<String, String>("length", String.valueOf(length));
Pair<String, String> attemptParam =
new Pair<String, String>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked")
Map<String, Object> result =
callExecutorServer(pair.getFirst(), ConnectorParams.METADATA_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return JobMetaData.createJobMetaDataFromObject(result);
} else {
return null;
}
}
/**
* If the flow was dispatched to an executor, cancel it by calling the executor; else if
* the flow is still in the queue, remove it from the queue and finalize it {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#cancelFlow(azkaban.executor.ExecutableFlow,
* java.lang.String)
*/
@Override
public void cancelFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
if (runningFlows.containsKey(exFlow.getExecutionId())) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
callExecutorServer(pair.getFirst(), ConnectorParams.CANCEL_ACTION,
userId);
} else if (queuedFlows.hasExecution(exFlow.getExecutionId())) {
queuedFlows.dequeue(exFlow.getExecutionId());
finalizeFlows(exFlow);
} else {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
}
}
@Override
public void resumeFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
callExecutorServer(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId);
}
}
@Override
public void pauseFlow(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
callExecutorServer(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId);
}
}
@Override
public void pauseExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_PAUSE_JOBS, userId,
jobIds);
}
@Override
public void resumeExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RESUME_JOBS, userId,
jobIds);
}
@Override
public void retryFailures(ExecutableFlow exFlow, String userId)
throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId);
}
@Override
public void retryExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_JOBS, userId,
jobIds);
}
@Override
public void disableExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_DISABLE_JOBS, userId,
jobIds);
}
@Override
public void enableExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_ENABLE_JOBS, userId,
jobIds);
}
@Override
public void cancelExecutingJobs(ExecutableFlow exFlow, String userId,
String... jobIds) throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_CANCEL_JOBS, userId,
jobIds);
}
@SuppressWarnings("unchecked")
private Map<String, Object> modifyExecutingJobs(ExecutableFlow exFlow,
String command, String userId, String... jobIds)
throws ExecutorManagerException {
synchronized (exFlow) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
Map<String, Object> response = null;
if (jobIds != null && jobIds.length > 0) {
for (String jobId : jobIds) {
if (!jobId.isEmpty()) {
ExecutableNode node = exFlow.getExecutableNode(jobId);
if (node == null) {
throw new ExecutorManagerException("Job " + jobId
+ " doesn't exist in execution " + exFlow.getExecutionId()
+ ".");
}
}
}
String ids = StringUtils.join(jobIds, ',');
response =
callExecutorServer(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<String, String>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command),
new Pair<String, String>(ConnectorParams.MODIFY_JOBS_LIST, ids));
} else {
response =
callExecutorServer(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<String, String>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command));
}
return response;
}
}
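/* Recursively marks the given jobs (including jobs inside nested flows) as DISABLED on the executable flow. */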
private void applyDisabledJobs(List<Object> disabledJobs,
ExecutableFlowBase exflow) {
for (Object disabled : disabledJobs) {
if (disabled instanceof String) {
String nodeName = (String) disabled;
ExecutableNode node = exflow.getExecutableNode(nodeName);
if (node != null) {
node.setStatus(Status.DISABLED);
}
} else if (disabled instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> nestedDisabled = (Map<String, Object>) disabled;
String nodeName = (String) nestedDisabled.get("id");
@SuppressWarnings("unchecked")
List<Object> subDisabledJobs =
(List<Object>) nestedDisabled.get("children");
if (nodeName == null || subDisabledJobs == null) {
return;
}
ExecutableNode node = exflow.getExecutableNode(nodeName);
if (node != null && node instanceof ExecutableFlowBase) {
applyDisabledJobs(subDisabledJobs, (ExecutableFlowBase) node);
}
}
}
}
@Override
public String submitExecutableFlow(ExecutableFlow exflow, String userId)
throws ExecutorManagerException {
synchronized (exflow) {
String flowId = exflow.getFlowId();
logger.info("Submitting execution flow " + flowId + " by " + userId);
String message = "";
if (queuedFlows.isFull()) {
message =
String
.format(
"Failed to submit %s for project %s. Azkaban has overrun its webserver queue capacity",
flowId, exflow.getProjectName());
logger.error(message);
} else {
int projectId = exflow.getProjectId();
exflow.setSubmitUser(userId);
exflow.setSubmitTime(System.currentTimeMillis());
List<Integer> running = getRunningFlows(projectId, flowId);
ExecutionOptions options = exflow.getExecutionOptions();
if (options == null) {
options = new ExecutionOptions();
}
if (options.getDisabledJobs() != null) {
applyDisabledJobs(options.getDisabledJobs(), exflow);
}
if (!running.isEmpty()) {
if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) {
Collections.sort(running);
Integer runningExecId = running.get(running.size() - 1);
options.setPipelineExecutionId(runningExecId);
message =
"Flow " + flowId + " is already running with exec id "
+ runningExecId + ". Pipelining level "
+ options.getPipelineLevel() + ". \n";
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_SKIP)) {
throw new ExecutorManagerException("Flow " + flowId
+ " is already running. Skipping execution.",
ExecutorManagerException.Reason.SkippedExecution);
} else {
// The settings is to run anyways.
message =
"Flow " + flowId + " is already running with exec id "
+ StringUtils.join(running, ",")
+ ". Will execute concurrently. \n";
}
}
boolean memoryCheck =
!ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(),
ProjectWhitelist.WhitelistType.MemoryCheck);
options.setMemoryCheck(memoryCheck);
// The exflow id is set by the loader. So it's unavailable until after
// this call.
executorLoader.uploadExecutableFlow(exflow);
// We create an active flow reference in the datastore. If the upload
// fails, we remove the reference.
ExecutionReference reference =
new ExecutionReference(exflow.getExecutionId());
if (isMultiExecutorMode()) {
// Take MultiExecutor route
executorLoader.addActiveExecutableReference(reference);
queuedFlows.enqueue(exflow, reference);
} else {
// assign the only local executor we have
Executor chosenExecutor = activeExecutors.iterator().next();
executorLoader.addActiveExecutableReference(reference);
try {
dispatch(reference, exflow, chosenExecutor);
} catch (ExecutorManagerException e) {
executorLoader.removeActiveExecutableReference(reference
.getExecId());
throw e;
}
}
message +=
"Execution submitted successfully with exec id "
+ exflow.getExecutionId();
}
return message;
}
}
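/* Asks the executor loader to remove execution log entries using the given time threshold (epoch millis). */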
private void cleanOldExecutionLogs(long millis) {
try {
int count = executorLoader.removeExecutionLogsByTime(millis);
logger.info("Cleaned up " + count + " log entries.");
} catch (ExecutorManagerException e) {
e.printStackTrace();
}
}
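/* The overloads below forward an action to an executor's /executor endpoint and return the parsed JSON response. */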
private Map<String, Object> callExecutorServer(ExecutableFlow exflow,
Executor executor, String action) throws ExecutorManagerException {
try {
return callExecutorServer(executor.getHost(), executor.getPort(), action,
exflow.getExecutionId(), null, (Pair<String, String>[]) null);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, String user) throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), user, (Pair<String, String>[]) null);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, Pair<String, String>... params)
throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), null, params);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(ExecutionReference ref,
String action, String user, Pair<String, String>... params)
throws ExecutorManagerException {
try {
return callExecutorServer(ref.getHost(), ref.getPort(), action,
ref.getExecId(), user, params);
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
private Map<String, Object> callExecutorServer(String host, int port,
String action, Integer executionId, String user,
Pair<String, String>... params) throws IOException {
List<Pair<String, String>> paramList = new ArrayList<Pair<String,String>>();
// params may be null
if(params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList
.add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action));
paramList.add(new Pair<String, String>(ConnectorParams.EXECID_PARAM, String
.valueOf(executionId)));
paramList.add(new Pair<String, String>(ConnectorParams.USER_PARAM, user));
Map<String, Object> jsonResponse =
callExecutorForJsonObject(host, port, "/executor", paramList);
return jsonResponse;
}
/*
* Helper method used by ExecutorManager to call executor and return json
* object map
*/
private Map<String, Object> callExecutorForJsonObject(String host, int port,
String path, List<Pair<String, String>> paramList) throws IOException {
String responseString =
callExecutorForJsonString(host, port, path, paramList);
@SuppressWarnings("unchecked")
Map<String, Object> jsonResponse =
(Map<String, Object>) JSONUtils.parseJSONFromString(responseString);
String error = (String) jsonResponse.get(ConnectorParams.RESPONSE_ERROR);
if (error != null) {
throw new IOException(error);
}
return jsonResponse;
}
/*
* Helper method used by ExecutorManager to call executor and return raw json
* string
*/
private String callExecutorForJsonString(String host, int port, String path,
List<Pair<String, String>> paramList) throws IOException {
if (paramList == null) {
paramList = new ArrayList<Pair<String, String>>();
}
ExecutorApiClient apiclient = ExecutorApiClient.getInstance();
@SuppressWarnings("unchecked")
URI uri =
ExecutorApiClient.buildUri(host, port, path, true,
paramList.toArray(new Pair[0]));
return apiclient.httpGet(uri, null);
}
/**
* Manage servlet call for stats servlet in Azkaban execution server
* {@inheritDoc}
*
* @throws ExecutorManagerException
*
* @see azkaban.executor.ExecutorManagerAdapter#callExecutorStats(java.lang.String,
* azkaban.utils.Pair[])
*/
@Override
public Map<String, Object> callExecutorStats(int executorId, String action,
Pair<String, String>... params) throws IOException, ExecutorManagerException {
Executor executor = fetchExecutor(executorId);
List<Pair<String, String>> paramList =
new ArrayList<Pair<String, String>>();
// params may be null
if (params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList
.add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action));
return callExecutorForJsonObject(executor.getHost(), executor.getPort(),
"/stats", paramList);
}
@Override
public Map<String, Object> callExecutorJMX(String hostPort, String action,
String mBean) throws IOException {
List<Pair<String, String>> paramList =
new ArrayList<Pair<String, String>>();
paramList.add(new Pair<String, String>(action, ""));
if(mBean != null) {
paramList.add(new Pair<String, String>(ConnectorParams.JMX_MBEAN, mBean));
}
String[] hostPortSplit = hostPort.split(":");
return callExecutorForJsonObject(hostPortSplit[0],
Integer.valueOf(hostPortSplit[1]), "/jmx", paramList);
}
@Override
public void shutdown() {
if (isMultiExecutorMode()) {
queueProcessor.shutdown();
}
executingManager.shutdown();
}
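/* Background thread that periodically polls each active executor for updates on its
 * running flows, finalizes finished or unresponsive executions and fires flow events. */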
private class ExecutingManagerUpdaterThread extends Thread {
private boolean shutdown = false;
public ExecutingManagerUpdaterThread() {
this.setName("ExecutorManagerUpdaterThread");
}
// 10 mins recently finished threshold.
private long recentlyFinishedLifetimeMs = 600000;
private int waitTimeIdleMs = 2000;
private int waitTimeMs = 500;
// When we have an http error, for that flow, we'll check every 10 secs, 6
// times (1 min) before we evict.
private int numErrors = 6;
private long errorThreshold = 10000;
private void shutdown() {
shutdown = true;
}
@SuppressWarnings("unchecked")
public void run() {
while (!shutdown) {
try {
lastThreadCheckTime = System.currentTimeMillis();
updaterStage = "Starting update all flows.";
Map<Executor, List<ExecutableFlow>> exFlowMap =
getFlowToExecutorMap();
ArrayList<ExecutableFlow> finishedFlows =
new ArrayList<ExecutableFlow>();
ArrayList<ExecutableFlow> finalizeFlows =
new ArrayList<ExecutableFlow>();
if (exFlowMap.size() > 0) {
for (Map.Entry<Executor, List<ExecutableFlow>> entry : exFlowMap
.entrySet()) {
List<Long> updateTimesList = new ArrayList<Long>();
List<Integer> executionIdsList = new ArrayList<Integer>();
Executor executor = entry.getKey();
updaterStage =
"Starting update flows on " + executor.getHost() + ":"
+ executor.getPort();
// We pack the parameters of the same host together before we
// query.
fillUpdateTimeAndExecId(entry.getValue(), executionIdsList,
updateTimesList);
Pair<String, String> updateTimes =
new Pair<String, String>(
ConnectorParams.UPDATE_TIME_LIST_PARAM,
JSONUtils.toJSON(updateTimesList));
Pair<String, String> executionIds =
new Pair<String, String>(ConnectorParams.EXEC_ID_LIST_PARAM,
JSONUtils.toJSON(executionIdsList));
Map<String, Object> results = null;
try {
results =
callExecutorServer(executor.getHost(),
executor.getPort(), ConnectorParams.UPDATE_ACTION,
null, null, executionIds, updateTimes);
} catch (IOException e) {
logger.error(e);
for (ExecutableFlow flow : entry.getValue()) {
Pair<ExecutionReference, ExecutableFlow> pair =
runningFlows.get(flow.getExecutionId());
updaterStage =
"Failed to get update. Doing some clean up for flow "
+ flow.getExecutionId();
if (pair != null) {
ExecutionReference ref = pair.getFirst();
int numErrors = ref.getNumErrors();
if (ref.getNumErrors() < this.numErrors) {
ref.setNextCheckTime(System.currentTimeMillis()
+ errorThreshold);
ref.setNumErrors(++numErrors);
} else {
logger.error("Evicting flow " + flow.getExecutionId()
+ ". The executor is unresponsive.");
// TODO should send out an unresponsive email here.
finalizeFlows.add(pair.getSecond());
}
}
}
}
// We got results
if (results != null) {
List<Map<String, Object>> executionUpdates =
(List<Map<String, Object>>) results
.get(ConnectorParams.RESPONSE_UPDATED_FLOWS);
for (Map<String, Object> updateMap : executionUpdates) {
try {
ExecutableFlow flow = updateExecution(updateMap);
updaterStage = "Updated flow " + flow.getExecutionId();
if (isFinished(flow)) {
finishedFlows.add(flow);
finalizeFlows.add(flow);
}
} catch (ExecutorManagerException e) {
ExecutableFlow flow = e.getExecutableFlow();
logger.error(e);
if (flow != null) {
logger.error("Finalizing flow " + flow.getExecutionId());
finalizeFlows.add(flow);
}
}
}
}
}
updaterStage = "Evicting old recently finished flows.";
evictOldRecentlyFinished(recentlyFinishedLifetimeMs);
// Add new finished
for (ExecutableFlow flow : finishedFlows) {
if (flow.getScheduleId() >= 0
&& flow.getStatus() == Status.SUCCEEDED) {
ScheduleStatisticManager.invalidateCache(flow.getScheduleId(),
cacheDir);
}
fireEventListeners(Event.create(flow, Type.FLOW_FINISHED));
recentlyFinished.put(flow.getExecutionId(), flow);
}
updaterStage =
"Finalizing " + finalizeFlows.size() + " error flows.";
// Kill error flows
for (ExecutableFlow flow : finalizeFlows) {
finalizeFlows(flow);
}
}
updaterStage = "Updated all active flows. Waiting for next round.";
synchronized (this) {
try {
if (runningFlows.size() > 0) {
this.wait(waitTimeMs);
} else {
this.wait(waitTimeIdleMs);
}
} catch (InterruptedException e) {
}
}
} catch (Exception e) {
logger.error(e);
}
}
}
}
private void finalizeFlows(ExecutableFlow flow) {
int execId = flow.getExecutionId();
updaterStage = "finalizing flow " + execId;
// First we check if the execution in the datastore is complete
try {
ExecutableFlow dsFlow;
if (isFinished(flow)) {
dsFlow = flow;
} else {
updaterStage = "finalizing flow " + execId + " loading from db";
dsFlow = executorLoader.fetchExecutableFlow(execId);
// If it's marked finished, we're good. If not, we fail everything and
// then mark it finished.
if (!isFinished(dsFlow)) {
updaterStage = "finalizing flow " + execId + " failing the flow";
failEverything(dsFlow);
executorLoader.updateExecutableFlow(dsFlow);
}
}
updaterStage = "finalizing flow " + execId + " deleting active reference";
// Delete the executing reference.
if (flow.getEndTime() == -1) {
flow.setEndTime(System.currentTimeMillis());
executorLoader.updateExecutableFlow(dsFlow);
}
executorLoader.removeActiveExecutableReference(execId);
updaterStage = "finalizing flow " + execId + " cleaning from memory";
runningFlows.remove(execId);
fireEventListeners(Event.create(dsFlow, Type.FLOW_FINISHED));
recentlyFinished.put(execId, dsFlow);
} catch (ExecutorManagerException e) {
logger.error(e);
}
    // TODO append to the flow log that we force killed this flow because the
    // target no longer had the reference.
updaterStage = "finalizing flow " + execId + " alerting and emailing";
ExecutionOptions options = flow.getExecutionOptions();
// But we can definitely email them.
Alerter mailAlerter = alerters.get("email");
if (flow.getStatus() == Status.FAILED || flow.getStatus() == Status.KILLED) {
if (options.getFailureEmails() != null
&& !options.getFailureEmails().isEmpty()) {
try {
mailAlerter
.alertOnError(
flow,
"Executor no longer seems to be running this execution. Most likely due to executor bounce.");
} catch (Exception e) {
logger.error(e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter
.alertOnError(
flow,
"Executor no longer seems to be running this execution. Most likely due to executor bounce.");
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
} else {
if (options.getSuccessEmails() != null
&& !options.getSuccessEmails().isEmpty()) {
try {
mailAlerter.alertOnSuccess(flow);
} catch (Exception e) {
logger.error(e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter.alertOnSuccess(flow);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
}
}
private void failEverything(ExecutableFlow exFlow) {
long time = System.currentTimeMillis();
for (ExecutableNode node : exFlow.getExecutableNodes()) {
switch (node.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
case SKIPPED:
case DISABLED:
continue;
// case UNKNOWN:
case READY:
node.setStatus(Status.KILLED);
break;
default:
node.setStatus(Status.FAILED);
break;
}
if (node.getStartTime() == -1) {
node.setStartTime(time);
}
if (node.getEndTime() == -1) {
node.setEndTime(time);
}
}
if (exFlow.getEndTime() == -1) {
exFlow.setEndTime(time);
}
exFlow.setStatus(Status.FAILED);
}
private void evictOldRecentlyFinished(long ageMs) {
ArrayList<Integer> recentlyFinishedKeys =
new ArrayList<Integer>(recentlyFinished.keySet());
long oldAgeThreshold = System.currentTimeMillis() - ageMs;
for (Integer key : recentlyFinishedKeys) {
ExecutableFlow flow = recentlyFinished.get(key);
if (flow.getEndTime() < oldAgeThreshold) {
// Evict
recentlyFinished.remove(key);
}
}
}
private ExecutableFlow updateExecution(Map<String, Object> updateData)
throws ExecutorManagerException {
Integer execId =
(Integer) updateData.get(ConnectorParams.UPDATE_MAP_EXEC_ID);
if (execId == null) {
throw new ExecutorManagerException(
"Response is malformed. Need exec id to update.");
}
Pair<ExecutionReference, ExecutableFlow> refPair =
this.runningFlows.get(execId);
if (refPair == null) {
throw new ExecutorManagerException(
"No running flow found with the execution id. Removing " + execId);
}
ExecutionReference ref = refPair.getFirst();
ExecutableFlow flow = refPair.getSecond();
if (updateData.containsKey("error")) {
// The flow should be finished here.
throw new ExecutorManagerException((String) updateData.get("error"), flow);
}
// Reset errors.
ref.setNextCheckTime(0);
ref.setNumErrors(0);
Status oldStatus = flow.getStatus();
flow.applyUpdateObject(updateData);
Status newStatus = flow.getStatus();
ExecutionOptions options = flow.getExecutionOptions();
if (oldStatus != newStatus && newStatus.equals(Status.FAILED_FINISHING)) {
// We want to see if we should give an email status on first failure.
if (options.getNotifyOnFirstFailure()) {
Alerter mailAlerter = alerters.get("email");
try {
mailAlerter.alertOnFirstError(flow);
} catch (Exception e) {
e.printStackTrace();
logger.error("Failed to send first error email." + e.getMessage());
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
String alertType = options.getFlowParameters().get("alert.type");
Alerter alerter = alerters.get(alertType);
if (alerter != null) {
try {
alerter.alertOnFirstError(flow);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.error("Failed to alert by " + alertType);
}
} else {
logger.error("Alerter type " + alertType
+ " doesn't exist. Failed to alert.");
}
}
}
return flow;
}
public boolean isFinished(ExecutableFlow flow) {
switch (flow.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
return true;
default:
return false;
}
}
private void fillUpdateTimeAndExecId(List<ExecutableFlow> flows,
List<Integer> executionIds, List<Long> updateTimes) {
for (ExecutableFlow flow : flows) {
executionIds.add(flow.getExecutionId());
updateTimes.add(flow.getUpdateTime());
}
}
/* Group Executable flow by Executors to reduce number of REST calls */
private Map<Executor, List<ExecutableFlow>> getFlowToExecutorMap() {
HashMap<Executor, List<ExecutableFlow>> exFlowMap =
new HashMap<Executor, List<ExecutableFlow>>();
for (Pair<ExecutionReference, ExecutableFlow> runningFlow : runningFlows
.values()) {
ExecutionReference ref = runningFlow.getFirst();
ExecutableFlow flow = runningFlow.getSecond();
Executor executor = ref.getExecutor();
// We can set the next check time to prevent the checking of certain
// flows.
if (ref.getNextCheckTime() >= System.currentTimeMillis()) {
continue;
}
List<ExecutableFlow> flows = exFlowMap.get(executor);
if (flows == null) {
flows = new ArrayList<ExecutableFlow>();
exFlowMap.put(executor, flows);
}
flows.add(flow);
}
return exFlowMap;
}
@Override
public int getExecutableFlows(int projectId, String flowId, int from,
int length, List<ExecutableFlow> outputList)
throws ExecutorManagerException {
List<ExecutableFlow> flows =
executorLoader.fetchFlowHistory(projectId, flowId, from, length);
outputList.addAll(flows);
return executorLoader.fetchNumExecutableFlows(projectId, flowId);
}
@Override
public List<ExecutableFlow> getExecutableFlows(int projectId, String flowId,
int from, int length, Status status) throws ExecutorManagerException {
return executorLoader.fetchFlowHistory(projectId, flowId, from, length,
status);
}
/*
* cleaner thread to clean up execution_logs, etc in DB. Runs every day.
*/
private class CleanerThread extends Thread {
// log file retention is 1 month.
// check every day
private static final long CLEANER_THREAD_WAIT_INTERVAL_MS =
24 * 60 * 60 * 1000;
private final long executionLogsRetentionMs;
private boolean shutdown = false;
private long lastLogCleanTime = -1;
public CleanerThread(long executionLogsRetentionMs) {
this.executionLogsRetentionMs = executionLogsRetentionMs;
this.setName("AzkabanWebServer-Cleaner-Thread");
}
@SuppressWarnings("unused")
public void shutdown() {
shutdown = true;
this.interrupt();
}
public void run() {
while (!shutdown) {
synchronized (this) {
try {
lastCleanerThreadCheckTime = System.currentTimeMillis();
// Cleanup old stuff.
long currentTime = System.currentTimeMillis();
if (currentTime - CLEANER_THREAD_WAIT_INTERVAL_MS > lastLogCleanTime) {
cleanExecutionLogs();
lastLogCleanTime = currentTime;
}
wait(CLEANER_THREAD_WAIT_INTERVAL_MS);
} catch (InterruptedException e) {
logger.info("Interrupted. Probably to shut down.");
}
}
}
}
private void cleanExecutionLogs() {
logger.info("Cleaning old logs from execution_logs");
long cutoff = DateTime.now().getMillis() - executionLogsRetentionMs;
logger.info("Cleaning old log files before "
+ new DateTime(cutoff).toString());
cleanOldExecutionLogs(DateTime.now().getMillis()
- executionLogsRetentionMs);
}
}
/**
   * Calls executor to dispatch the flow, updates the DB to assign the executor
   * and updates the in-memory state of the executableFlow
*/
private void dispatch(ExecutionReference reference, ExecutableFlow exflow,
Executor choosenExecutor) throws ExecutorManagerException {
exflow.setUpdateTime(System.currentTimeMillis());
executorLoader.assignExecutor(choosenExecutor.getId(),
exflow.getExecutionId());
try {
callExecutorServer(exflow, choosenExecutor,
ConnectorParams.EXECUTE_ACTION);
} catch (ExecutorManagerException ex) {
logger.error("Rolling back executor assignment for execution id:"
+ exflow.getExecutionId(), ex);
executorLoader.unassignExecutor(exflow.getExecutionId());
throw new ExecutorManagerException(ex);
}
reference.setExecutor(choosenExecutor);
// move from flow to running flows
runningFlows.put(exflow.getExecutionId(),
new Pair<ExecutionReference, ExecutableFlow>(reference, exflow));
logger.info(String.format(
"Successfully dispatched exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
}
/*
* This thread is responsible for processing queued flows using dispatcher and
* making rest api calls to executor server
*/
private class QueueProcessorThread extends Thread {
private static final long QUEUE_PROCESSOR_WAIT_IN_MS = 1000;
private final int maxDispatchingErrors;
private final long activeExecutorRefreshWindowInMilisec;
private final int activeExecutorRefreshWindowInFlows;
private volatile boolean shutdown = false;
private volatile boolean isActive = true;
public QueueProcessorThread(boolean isActive,
long activeExecutorRefreshWindowInTime,
int activeExecutorRefreshWindowInFlows,
int maxDispatchingErrors) {
setActive(isActive);
this.maxDispatchingErrors = maxDispatchingErrors;
this.activeExecutorRefreshWindowInFlows =
activeExecutorRefreshWindowInFlows;
this.activeExecutorRefreshWindowInMilisec =
activeExecutorRefreshWindowInTime;
this.setName("AzkabanWebServer-QueueProcessor-Thread");
}
public void setActive(boolean isActive) {
this.isActive = isActive;
logger.info("QueueProcessorThread active turned " + this.isActive);
}
public boolean isActive() {
return isActive;
}
public void shutdown() {
shutdown = true;
this.interrupt();
}
public void run() {
// Loops till QueueProcessorThread is shutdown
while (!shutdown) {
synchronized (this) {
try {
          // start processing the queue if active, otherwise wait for some time
if (isActive) {
processQueuedFlows(activeExecutorRefreshWindowInMilisec,
activeExecutorRefreshWindowInFlows);
}
wait(QUEUE_PROCESSOR_WAIT_IN_MS);
} catch (Exception e) {
logger.error(
"QueueProcessorThread Interrupted. Probably to shut down.", e);
}
}
}
}
/* Method responsible for processing the non-dispatched flows */
private void processQueuedFlows(long activeExecutorsRefreshWindow,
int maxContinuousFlowProcessed) throws InterruptedException,
ExecutorManagerException {
long lastExecutorRefreshTime = 0;
Pair<ExecutionReference, ExecutableFlow> runningCandidate;
int currentContinuousFlowProcessed = 0;
while (isActive() && (runningCandidate = queuedFlows.fetchHead()) != null) {
ExecutionReference reference = runningCandidate.getFirst();
ExecutableFlow exflow = runningCandidate.getSecond();
long currentTime = System.currentTimeMillis();
        // if we have dispatched more than maxContinuousFlowProcessed or
        // it has been more than activeExecutorsRefreshWindow milliseconds since we
        // refreshed
if (currentTime - lastExecutorRefreshTime > activeExecutorsRefreshWindow
|| currentContinuousFlowProcessed >= maxContinuousFlowProcessed) {
// Refresh executorInfo for all activeExecutors
refreshExecutors();
lastExecutorRefreshTime = currentTime;
currentContinuousFlowProcessed = 0;
}
/**
* <pre>
* TODO: Work around till we improve Filters to have a notion of GlobalSystemState.
* Currently we try each queued flow once to infer a global busy state
* Possible improvements:-
* 1. Move system level filters in refreshExecutors and sleep if we have all executors busy after refresh
* 2. Implement GlobalSystemState in selector or in a third place to manage system filters. Basically
* taking out all the filters which do not depend on the flow but are still being part of Selector.
* Assumptions:-
* 1. no one else except QueueProcessor is updating ExecutableFlow update time
* 2. re-attempting a flow (which has been tried before) is considered as all executors are busy
* </pre>
*/
if(exflow.getUpdateTime() > lastExecutorRefreshTime) {
// put back in the queue
queuedFlows.enqueue(exflow, reference);
long sleepInterval =
activeExecutorsRefreshWindow
- (currentTime - lastExecutorRefreshTime);
// wait till next executor refresh
sleep(sleepInterval);
} else {
exflow.setUpdateTime(currentTime);
// process flow with current snapshot of activeExecutors
selectExecutorAndDispatchFlow(reference, exflow, new HashSet<Executor>(activeExecutors));
}
currentContinuousFlowProcessed++;
}
}
/* process flow with a snapshot of available Executors */
private void selectExecutorAndDispatchFlow(ExecutionReference reference,
ExecutableFlow exflow, Set<Executor> availableExecutors)
throws ExecutorManagerException {
synchronized (exflow) {
Executor selectedExecutor = selectExecutor(exflow, availableExecutors);
if (selectedExecutor != null) {
try {
dispatch(reference, exflow, selectedExecutor);
} catch (ExecutorManagerException e) {
logger.warn(String.format(
"Executor %s responded with exception for exec: %d",
selectedExecutor, exflow.getExecutionId()), e);
handleDispatchExceptionCase(reference, exflow, selectedExecutor,
availableExecutors);
}
} else {
handleNoExecutorSelectedCase(reference, exflow);
}
}
}
  /* Helper method to fetch the overriding Executor if the user has specified a valid one, otherwise return null */
private Executor getUserSpecifiedExecutor(ExecutionOptions options,
int executionId) {
Executor executor = null;
if (options != null
&& options.getFlowParameters() != null
&& options.getFlowParameters().containsKey(
ExecutionOptions.USE_EXECUTOR)) {
try {
int executorId =
Integer.valueOf(options.getFlowParameters().get(
ExecutionOptions.USE_EXECUTOR));
executor = fetchExecutor(executorId);
if (executor == null) {
logger
.warn(String
.format(
"User specified executor id: %d for execution id: %d is not active, Looking up db.",
executorId, executionId));
executor = executorLoader.fetchExecutor(executorId);
if (executor == null) {
logger
.warn(String
.format(
"User specified executor id: %d for execution id: %d is missing from db. Defaulting to availableExecutors",
executorId, executionId));
}
}
} catch (ExecutorManagerException ex) {
logger.error("Failed to fetch user specified executor for exec_id = "
+ executionId, ex);
}
}
return executor;
}
/* Choose Executor for exflow among the available executors */
private Executor selectExecutor(ExecutableFlow exflow,
Set<Executor> availableExecutors) {
Executor choosenExecutor =
getUserSpecifiedExecutor(exflow.getExecutionOptions(),
exflow.getExecutionId());
// If no executor was specified by admin
if (choosenExecutor == null) {
logger.info("Using dispatcher for execution id :"
+ exflow.getExecutionId());
ExecutorSelector selector = new ExecutorSelector(filterList, comparatorWeightsMap);
choosenExecutor = selector.getBest(availableExecutors, exflow);
}
return choosenExecutor;
}
private void handleDispatchExceptionCase(ExecutionReference reference,
ExecutableFlow exflow, Executor lastSelectedExecutor,
Set<Executor> remainingExecutors) throws ExecutorManagerException {
logger
.info(String
.format(
"Reached handleDispatchExceptionCase stage for exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
reference.setNumErrors(reference.getNumErrors() + 1);
if (reference.getNumErrors() > this.maxDispatchingErrors
|| remainingExecutors.size() <= 1) {
logger.error("Failed to process queued flow");
finalizeFlows(exflow);
} else {
remainingExecutors.remove(lastSelectedExecutor);
// try other executors except chosenExecutor
selectExecutorAndDispatchFlow(reference, exflow, remainingExecutors);
}
}
private void handleNoExecutorSelectedCase(ExecutionReference reference,
ExecutableFlow exflow) throws ExecutorManagerException {
logger
.info(String
.format(
"Reached handleNoExecutorSelectedCase stage for exec %d with error count %d",
exflow.getExecutionId(), reference.getNumErrors()));
// TODO: handle scenario where a high priority flow failing to get
// schedule can starve all others
queuedFlows.enqueue(exflow, reference);
}
}
} | 1 | 10,872 | I am curious, shouldn't "currentContinuousFlowProcessed++;" be added right after line 1873? Otherwise we will count 1 extra when an exflow wakes up from the sleep section, even though it hasn't been assigned | azkaban-azkaban | java
@@ -162,6 +162,7 @@ def _get_configtypes():
configtypes._Numeric] and
issubclass(e, configtypes.BaseType))
yield from inspect.getmembers(configtypes, predicate)
+ # pylint: enable=protected-access
def _get_setting_types_quickref(): | 1 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Generate asciidoc source for qutebrowser based on docstrings."""
import os
import sys
import shutil
import os.path
import inspect
import subprocess
import tempfile
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
# We import qutebrowser.app so all @cmdutils-register decorators are run.
import qutebrowser.app
from scripts import asciidoc2html, utils
from qutebrowser import qutebrowser, commands
from qutebrowser.commands import cmdutils, argparser
from qutebrowser.config import configdata, configtypes
from qutebrowser.utils import docutils, usertypes
FILE_HEADER = """
// DO NOT EDIT THIS FILE DIRECTLY!
// It is autogenerated by running:
// $ python3 scripts/dev/src2asciidoc.py
""".lstrip()
class UsageFormatter(argparse.HelpFormatter):
"""Patched HelpFormatter to include some asciidoc markup in the usage.
This does some horrible things, but the alternative would be to reimplement
argparse.HelpFormatter while copying 99% of the code :-/
"""
def _format_usage(self, usage, actions, groups, _prefix):
"""Override _format_usage to not add the 'usage:' prefix."""
return super()._format_usage(usage, actions, groups, '')
def _get_default_metavar_for_optional(self, action):
"""Do name transforming when getting metavar."""
return argparser.arg_name(action.dest.upper())
def _get_default_metavar_for_positional(self, action):
"""Do name transforming when getting metavar."""
return argparser.arg_name(action.dest)
def _metavar_formatter(self, action, default_metavar):
"""Override _metavar_formatter to add asciidoc markup to metavars.
Most code here is copied from Python 3.4's argparse.py.
"""
if action.metavar is not None:
result = "'{}'".format(action.metavar)
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = ('{' + ','.join('*{}*'.format(e) for e in choice_strs) +
'}')
else:
result = "'{}'".format(default_metavar)
def fmt(tuple_size):
"""Format the result according to the tuple size."""
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return fmt
def _format_actions_usage(self, actions, groups):
"""Override _format_actions_usage to add asciidoc markup to flags.
Because argparse.py's _format_actions_usage is very complex, we first
monkey-patch the option strings to include the asciidoc markup, then
run the original method, then undo the patching.
"""
old_option_strings = {}
for action in actions:
old_option_strings[action] = action.option_strings[:]
action.option_strings = ['*{}*'.format(s)
for s in action.option_strings]
ret = super()._format_actions_usage(actions, groups)
for action in actions:
action.option_strings = old_option_strings[action]
return ret
def _open_file(name, mode='w'):
"""Open a file with a preset newline/encoding mode."""
return open(name, mode, newline='\n', encoding='utf-8')
def _get_cmd_syntax(_name, cmd):
"""Get the command syntax for a command.
We monkey-patch the parser's formatter_class here to use our UsageFormatter
which adds some asciidoc markup.
"""
old_fmt_class = cmd.parser.formatter_class
cmd.parser.formatter_class = UsageFormatter
usage = cmd.parser.format_usage().rstrip()
cmd.parser.formatter_class = old_fmt_class
return usage
def _get_command_quickref(cmds):
"""Generate the command quick reference."""
out = []
out.append('[options="header",width="75%",cols="25%,75%"]')
out.append('|==============')
out.append('|Command|Description')
for name, cmd in cmds:
desc = inspect.getdoc(cmd.handler).splitlines()[0]
out.append('|<<{},{}>>|{}'.format(name, name, desc))
out.append('|==============')
return '\n'.join(out)
def _get_setting_quickref():
"""Generate the settings quick reference."""
out = []
out.append('')
out.append('[options="header",width="75%",cols="25%,75%"]')
out.append('|==============')
out.append('|Setting|Description')
for opt in sorted(configdata.DATA.values()):
desc = opt.description.splitlines()[0]
out.append('|<<{},{}>>|{}'.format(opt.name, opt.name, desc))
out.append('|==============')
return '\n'.join(out)
def _get_configtypes():
"""Get configtypes classes to document."""
predicate = lambda e: (inspect.isclass(e) and
e not in [configtypes.BaseType,
configtypes.MappingType,
# pylint: disable=protected-access
configtypes._Numeric] and
issubclass(e, configtypes.BaseType))
yield from inspect.getmembers(configtypes, predicate)
def _get_setting_types_quickref():
"""Generate the setting types quick reference."""
out = []
out.append('[[types]]')
out.append('[options="header",width="75%",cols="25%,75%"]')
out.append('|==============')
out.append('|Type|Description')
for name, typ in _get_configtypes():
parser = docutils.DocstringParser(typ)
desc = parser.short_desc
if parser.long_desc:
desc += '\n\n' + parser.long_desc
out.append('|{}|{}'.format(name, desc))
out.append('|==============')
return '\n'.join(out)
def _get_command_doc(name, cmd):
"""Generate the documentation for a command."""
output = ['[[{}]]'.format(name)]
output += ['=== {}'.format(name)]
syntax = _get_cmd_syntax(name, cmd)
if syntax != name:
output.append('Syntax: +:{}+'.format(syntax))
output.append("")
parser = docutils.DocstringParser(cmd.handler)
output.append(parser.short_desc)
if parser.long_desc:
output.append("")
output.append(parser.long_desc)
output += list(_get_command_doc_args(cmd, parser))
output += list(_get_command_doc_count(cmd, parser))
output += list(_get_command_doc_notes(cmd))
output.append("")
output.append("")
return '\n'.join(output)
def _get_command_doc_args(cmd, parser):
"""Get docs for the arguments of a command.
Args:
cmd: The Command to get the docs for.
parser: The DocstringParser to use.
Yield:
Strings which should be added to the docs.
"""
if cmd.pos_args:
yield ""
yield "==== positional arguments"
for arg, name in cmd.pos_args:
try:
yield "* +'{}'+: {}".format(name, parser.arg_descs[arg])
except KeyError as e:
raise KeyError("No description for arg {} of command "
"'{}'!".format(e, cmd.name)) from e
if cmd.opt_args:
yield ""
yield "==== optional arguments"
for arg, (long_flag, short_flag) in cmd.opt_args.items():
try:
yield '* +*{}*+, +*{}*+: {}'.format(short_flag, long_flag,
parser.arg_descs[arg])
except KeyError as e:
raise KeyError("No description for arg {} of command "
"'{}'!".format(e, cmd.name)) from e
def _get_command_doc_count(cmd, parser):
"""Get docs for the count of a command.
Args:
cmd: The Command to get the docs for.
parser: The DocstringParser to use.
Yield:
Strings which should be added to the docs.
"""
for param in inspect.signature(cmd.handler).parameters.values():
if cmd.get_arg_info(param).count:
yield ""
yield "==== count"
try:
yield parser.arg_descs[param.name]
except KeyError as e:
raise KeyError("No description for count arg {!r} of command "
"{!r}!".format(param.name, cmd.name)) from e
def _get_command_doc_notes(cmd):
"""Get docs for the notes of a command.
Args:
cmd: The Command to get the docs for.
Yield:
Strings which should be added to the docs.
"""
if (cmd.maxsplit is not None or cmd.no_cmd_split or
cmd.no_replace_variables and cmd.name != "spawn"):
yield ""
yield "==== note"
if cmd.maxsplit is not None:
yield ("* This command does not split arguments after the last "
"argument and handles quotes literally.")
if cmd.no_cmd_split:
yield ("* With this command, +;;+ is interpreted literally "
"instead of splitting off a second command.")
if cmd.no_replace_variables and cmd.name != "spawn":
yield r"* This command does not replace variables like +\{url\}+."
def _get_action_metavar(action, nargs=1):
"""Get the metavar to display for an argparse action.
Args:
action: The argparse action to get the metavar for.
nargs: The nargs setting for the related argument.
"""
if action.metavar is not None:
if isinstance(action.metavar, str):
elems = [action.metavar] * nargs
else:
elems = action.metavar
return ' '.join("'{}'".format(e) for e in elems)
elif action.choices is not None:
choices = ','.join(str(e) for e in action.choices)
return "'{{{}}}'".format(choices)
else:
return "'{}'".format(action.dest.upper())
def _format_action_args(action):
"""Get an argument string based on an argparse action."""
if action.nargs is None:
return _get_action_metavar(action)
elif action.nargs == '?':
return '[{}]'.format(_get_action_metavar(action))
elif action.nargs == '*':
return '[{mv} [{mv} ...]]'.format(mv=_get_action_metavar(action))
elif action.nargs == '+':
return '{mv} [{mv} ...]'.format(mv=_get_action_metavar(action))
elif action.nargs == '...':
return '...'
else:
return _get_action_metavar(action, nargs=action.nargs)
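# A rough illustration (hypothetical action with dest='url', no metavar and no
# choices) of what _format_action_args() returns for the different nargs values:
#   nargs=None  -> "'URL'"
#   nargs='?'   -> "['URL']"
#   nargs='*'   -> "['URL' ['URL' ...]]"
#   nargs='+'   -> "'URL' ['URL' ...]"
#   nargs='...' -> "..."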
def _format_action(action):
"""Get an invocation string/help from an argparse action."""
if action.help == argparse.SUPPRESS:
return None
if not action.option_strings:
invocation = '*{}*::'.format(_get_action_metavar(action))
else:
parts = []
if action.nargs == 0:
# Doesn't take a value, so the syntax is -s, --long
parts += ['*{}*'.format(s) for s in action.option_strings]
else:
# Takes a value, so the syntax is -s ARGS or --long ARGS.
args_string = _format_action_args(action)
for opt in action.option_strings:
parts.append('*{}* {}'.format(opt, args_string))
invocation = ', '.join(parts) + '::'
return '{}\n {}\n'.format(invocation, action.help)
def generate_commands(filename):
"""Generate the complete commands section."""
with _open_file(filename) as f:
f.write(FILE_HEADER)
f.write("= Commands\n\n")
f.write(commands.__doc__)
normal_cmds = []
hidden_cmds = []
debug_cmds = []
for name, cmd in cmdutils.cmd_dict.items():
if cmd.deprecated:
continue
if cmd.hide:
hidden_cmds.append((name, cmd))
elif cmd.debug:
debug_cmds.append((name, cmd))
else:
normal_cmds.append((name, cmd))
normal_cmds.sort()
hidden_cmds.sort()
debug_cmds.sort()
f.write("\n")
f.write("== Normal commands\n")
f.write(".Quick reference\n")
f.write(_get_command_quickref(normal_cmds) + '\n')
for name, cmd in normal_cmds:
f.write(_get_command_doc(name, cmd))
f.write("\n")
f.write("== Hidden commands\n")
f.write(".Quick reference\n")
f.write(_get_command_quickref(hidden_cmds) + '\n')
for name, cmd in hidden_cmds:
f.write(_get_command_doc(name, cmd))
f.write("\n")
f.write("== Debugging commands\n")
f.write("These commands are mainly intended for debugging. They are "
"hidden if qutebrowser was started without the "
"`--debug`-flag.\n")
f.write("\n")
f.write(".Quick reference\n")
f.write(_get_command_quickref(debug_cmds) + '\n')
for name, cmd in debug_cmds:
f.write(_get_command_doc(name, cmd))
def _generate_setting_backend_info(f, opt):
""""Generate backend information for the given option."""
all_backends = [usertypes.Backend.QtWebKit, usertypes.Backend.QtWebEngine]
if opt.raw_backends is not None:
for name, conditional in sorted(opt.raw_backends.items()):
if conditional is True:
pass
elif conditional is False:
f.write("\nOn {}, this setting is unavailable.\n".format(name))
else:
f.write("\nOn {}, this setting requires {} or newer.\n"
.format(name, conditional))
elif opt.backends == all_backends:
pass
elif opt.backends == [usertypes.Backend.QtWebKit]:
f.write("\nThis setting is only available with the QtWebKit "
"backend.\n")
elif opt.backends == [usertypes.Backend.QtWebEngine]:
f.write("\nThis setting is only available with the QtWebEngine "
"backend.\n")
else:
raise ValueError("Invalid value {!r} for opt.backends"
.format(opt.backends))
def _generate_setting_option(f, opt):
"""Generate documentation for a single section."""
f.write("\n")
f.write('[[{}]]'.format(opt.name) + "\n")
f.write("=== {}".format(opt.name) + "\n")
f.write(opt.description + "\n")
f.write("\n")
typ = opt.typ.get_name().replace(',', ',')
f.write('Type: <<types,{typ}>>\n'.format(typ=typ))
f.write("\n")
valid_values = opt.typ.get_valid_values()
if valid_values is not None and valid_values.generate_docs:
f.write("Valid values:\n")
f.write("\n")
for val in valid_values:
try:
desc = valid_values.descriptions[val]
f.write(" * +{}+: {}".format(val, desc) + "\n")
except KeyError:
f.write(" * +{}+".format(val) + "\n")
f.write("\n")
f.write("Default: {}\n".format(opt.typ.to_doc(opt.default)))
_generate_setting_backend_info(f, opt)
def generate_settings(filename):
"""Generate the complete settings section."""
configdata.init()
with _open_file(filename) as f:
f.write(FILE_HEADER)
f.write("= Setting reference\n\n")
f.write("== All settings\n")
f.write(_get_setting_quickref() + "\n")
for opt in sorted(configdata.DATA.values()):
_generate_setting_option(f, opt)
f.write("\n== Setting types\n")
f.write(_get_setting_types_quickref() + "\n")
def _format_block(filename, what, data):
"""Format a block in a file.
The block is delimited by markers like these:
// QUTE_*_START
...
// QUTE_*_END
The * part is the part which should be given as 'what'.
Args:
filename: The file to change.
what: What to change (authors, options, etc.)
        data: A list of strings which is the new data.
"""
what = what.upper()
oshandle, tmpname = tempfile.mkstemp()
try:
with _open_file(filename, mode='r') as infile, \
_open_file(oshandle, mode='w') as temp:
found_start = False
found_end = False
for line in infile:
if line.strip() == '// QUTE_{}_START'.format(what):
temp.write(line)
temp.write(''.join(data))
found_start = True
elif line.strip() == '// QUTE_{}_END'.format(what.upper()):
temp.write(line)
found_end = True
elif (not found_start) or found_end:
temp.write(line)
if not found_start:
raise Exception("Marker '// QUTE_{}_START' not found in "
"'{}'!".format(what, filename))
elif not found_end:
raise Exception("Marker '// QUTE_{}_END' not found in "
"'{}'!".format(what, filename))
except:
os.remove(tmpname)
raise
else:
os.remove(filename)
shutil.move(tmpname, filename)
def regenerate_manpage(filename):
"""Update manpage OPTIONS using an argparse parser."""
# pylint: disable=protected-access
parser = qutebrowser.get_argparser()
groups = []
# positionals, optionals and user-defined groups
for group in parser._action_groups:
groupdata = []
groupdata.append('=== {}'.format(group.title))
if group.description is not None:
groupdata.append(group.description)
for action in group._group_actions:
action_data = _format_action(action)
if action_data is not None:
groupdata.append(action_data)
groups.append('\n'.join(groupdata))
options = '\n'.join(groups)
# epilog
if parser.epilog is not None:
options += parser.epilog
_format_block(filename, 'options', options)
def regenerate_cheatsheet():
"""Generate cheatsheet PNGs based on the SVG."""
files = [
('doc/img/cheatsheet-small.png', 300, 185),
('doc/img/cheatsheet-big.png', 3342, 2060),
]
for filename, x, y in files:
subprocess.check_call(['inkscape', '-e', filename, '-b', 'white',
'-w', str(x), '-h', str(y),
'misc/cheatsheet.svg'])
def main():
"""Regenerate all documentation."""
utils.change_cwd()
print("Generating manpage...")
regenerate_manpage('doc/qutebrowser.1.asciidoc')
print("Generating settings help...")
generate_settings('doc/help/settings.asciidoc')
print("Generating command help...")
generate_commands('doc/help/commands.asciidoc')
if '--cheatsheet' in sys.argv:
print("Regenerating cheatsheet .pngs")
regenerate_cheatsheet()
if '--html' in sys.argv:
asciidoc2html.main()
if __name__ == '__main__':
main()
| 1 | 19,419 | You can probably move this up after the `._Numeric` line. | qutebrowser-qutebrowser | py |
@@ -203,6 +203,7 @@ class SGEJobTask(luigi.Task):
description="don't tarball (and extract) the luigi project files")
def __init__(self, *args, **kwargs):
+ super(SGEJobTask, self).__init__(*args, **kwargs)
if self.job_name:
# use explicitly provided job name
pass | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
SunGrid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
import luigi.hadoop
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
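# A minimal, hypothetical illustration of the parser above. Given typical
# `qstat` output such as:
#
#   job-ID  prior    name  user  state submit/start at  queue  slots
#   -----------------------------------------------------------------
#   42      0.55500  jobA  me    r     ...
#
# _parse_qstat_state(out, 42) returns 'r', while an empty output or an
# unknown job id yields 'u'.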
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
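# For illustration only (hypothetical values): with pe='orte', n_cpu=2 and
# job_name='TestJobTask', the template above renders to something like
#   echo <cmd> | qsub -o ":/home/tmp/job.out" -e ":/home/tmp/job.err" -V -r y -pe orte 2 -N TestJobTask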
class SGEJobTask(luigi.Task):
"""Base class for executing a job on SunGrid Engine
Override ``work()`` (rather than ``run()``) with your job code.
Parameters:
- n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
value is passed as ``qsub -pe {pe} {n_cpu}``
- parallel_env: SGE parallel environment name. The default is "orte",
the parallel environment installed with MIT StarCluster. If you
are using a different cluster environment, check with your
sysadmin for the right pe to use. This value is passed as {pe}
to the qsub command above.
- shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
Task classes and dependencies are pickled to a temporary folder on
this drive. The default is ``/home``, the NFS share location setup
by StarCluster
- job_name_format: String that can be passed in to customize the job name
string passed to qsub; e.g. "Task123_{task_family}_{n_cpu}...".
- job_name: Exact job name to pass to qsub.
- run_locally: Run locally instead of on the cluster.
- poll_time: the length of time to wait in order to poll qstat
- dont_remove_tmp_dir: Instead of deleting the temporary directory, keep it.
- no_tarball: Don't create a tarball of the luigi project directory. Can be
useful to reduce I/O requirements when the luigi directory is accessible
from cluster nodes already.
"""
n_cpu = luigi.IntParameter(default=2, significant=False)
shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
parallel_env = luigi.Parameter(default='orte', significant=False)
job_name_format = luigi.Parameter(
significant=False, default=None, description="A string that can be "
"formatted with class variables to name the job with qsub.")
job_name = luigi.Parameter(
significant=False, default=None,
description="Explicit job name given via qsub.")
run_locally = luigi.BoolParameter(
significant=False,
description="run locally instead of on the cluster")
poll_time = luigi.IntParameter(
significant=False, default=POLL_TIME,
description="specify the wait time to poll qstat for the job status")
dont_remove_tmp_dir = luigi.BoolParameter(
significant=False,
description="don't delete the temporary directory used (for debugging)")
no_tarball = luigi.BoolParameter(
significant=False,
description="don't tarball (and extract) the luigi project files")
def __init__(self, *args, **kwargs):
if self.job_name:
# use explicitly provided job name
pass
elif self.job_name_format:
# define the job name with the provided format
self.job_name = self.job_name_format.format(
task_family=self.task_family, **self.__dict__)
else:
# default to the task family
self.job_name = self.task_family
def _fetch_task_failures(self):
if not os.path.exists(self.errfile):
logger.info('No error file')
return []
with open(self.errfile, "r") as f:
errors = f.readlines()
if errors == []:
return errors
if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
errors.pop(0)
return errors
def _init_local(self):
# Set up temp folder in shared directory (trim to max filename length)
base_tmp_dir = self.shared_tmp_dir
random_id = '%016x' % random.getrandbits(64)
folder_name = self.task_id + '-' + random_id
self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
max_filename_length = os.fstatvfs(0).f_namemax
self.tmp_dir = self.tmp_dir[:max_filename_length]
logger.info("Tmp dir: %s", self.tmp_dir)
os.makedirs(self.tmp_dir)
# Dump the code to be run into a pickle file
logging.debug("Dumping pickled class")
self._dump(self.tmp_dir)
if not self.no_tarball:
# Make sure that all the class's dependencies are tarred and available
# This is not necessary if luigi is importable from the cluster node
logging.debug("Tarballing dependencies")
# Grab luigi and the module containing the code to be run
packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
luigi.hadoop.create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
def run(self):
if self.run_locally:
self.work()
else:
self._init_local()
self._run_job()
# The procedure:
# - Pickle the class
# - Tarball the dependencies
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner class untars the dependencies
# - Runner function hits the button on the class's work() method
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass
def _dump(self, out_dir=''):
"""Dump instance to file."""
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
open(self.job_file, "w").write(d)
else:
pickle.dump(self, open(self.job_file, "w"))
def _run_job(self):
# Build a qsub argument that will run sge_runner.py on the directory we've specified
runner_path = sge_runner.__file__
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
job_str = 'python {0} "{1}" "{2}"'.format(
runner_path, self.tmp_dir, os.getcwd()) # enclose tmp_dir in quotes to protect from special escape chars
if self.no_tarball:
job_str += ' "--no-tarball"'
# Build qsub submit command
self.outfile = os.path.join(self.tmp_dir, 'job.out')
self.errfile = os.path.join(self.tmp_dir, 'job.err')
submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
self.errfile, self.parallel_env, self.n_cpu)
logger.debug('qsub command: \n' + submit_cmd)
# Submit the job and grab job ID
output = subprocess.check_output(submit_cmd, shell=True)
self.job_id = _parse_qsub_job_id(output)
logger.debug("Submitted job to qsub with response:\n" + output)
self._track_job()
# Now delete the temporaries, if they're there.
if (self.tmp_dir and os.path.exists(self.tmp_dir) and not self.dont_remove_tmp_dir):
logger.info('Removing temporary directory %s' % self.tmp_dir)
subprocess.call(["rm", "-rf", self.tmp_dir])
def _track_job(self):
while True:
# Sleep for a little bit
time.sleep(self.poll_time)
# See what the job's up to
# ASSUMPTION
qstat_out = subprocess.check_output(['qstat'])
sge_status = _parse_qstat_state(qstat_out, self.job_id)
if sge_status == 'r':
logger.info('Job is running...')
elif sge_status == 'qw':
logger.info('Job is pending...')
elif 'E' in sge_status:
logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
break
elif sge_status == 't' or sge_status == 'u':
# Then the job could either be failed or done.
errors = self._fetch_task_failures()
if not errors:
logger.info('Job is done')
else:
logger.error('Job has FAILED:\n' + '\n'.join(errors))
break
else:
logger.info('Job status is UNKNOWN!')
logger.info('Status is : %s' % sge_status)
raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
"""A local version of SGEJobTask, for easier debugging.
This version skips the ``qsub`` steps and simply runs ``work()``
on the local node, so you don't need to be on an SGE cluster to
use your Task in a test workflow.
"""
def run(self):
self.work()
 | 1 | 15,656 | @Tarrasch this is another change you need to pay attention to; it seems `SGEJobTask` is not calling `super.__init__`, which breaks the test case somehow. | spotify-luigi | py
@@ -0,0 +1,14 @@
+class CreateCompletions < ActiveRecord::Migration
+ def change
+ create_table :completions do |t|
+ t.string :trail_object_id
+ t.string :trail_name
+ t.belongs_to :user
+
+ t.timestamps
+ end
+
+ add_index :completions, :user_id
+ add_index :completions, :trail_object_id
+ end
+end | 1 | 1 | 7,556 | Do we need indices for this table? | thoughtbot-upcase | rb |
|
@@ -447,6 +447,19 @@ static int process_data(struct flb_http_client *c)
return FLB_HTTP_MORE;
}
+#if defined FLB_HAVE_TESTS_OSSFUZZ
+int fuzz_process_data(struct flb_http_client *c);
+int fuzz_process_data(struct flb_http_client *c) {
+ return process_data(c);
+}
+
+int fuzz_check_connection(struct flb_http_client *c);
+int fuzz_check_connection(struct flb_http_client *c) {
+ return check_connection(c);
+}
+
+#endif
+
static int proxy_parse(const char *proxy, struct flb_http_client *c)
{
int len; | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This is a very simple HTTP Client interface which aims to provide an
 * easy way to issue HTTP requests and handle responses from the input/output
* plugins.
*
 * Its scope is:
*
* - Use upstream connections.
* - Support 'retry' in case the HTTP server timeouts a connection.
* - Get return Status, Headers and Body content if found.
* - If Upstream supports keepalive, adjust headers
*/
#define _GNU_SOURCE
#include <string.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_kv.h>
#include <fluent-bit/flb_log.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_http_client_debug.h>
#include <fluent-bit/flb_utils.h>
#include <mbedtls/base64.h>
/*
* Removes the port from the host header
*/
int flb_http_strip_port_from_host(struct flb_http_client *c)
{
struct mk_list *head;
struct flb_kv *kv;
char *out_host;
struct flb_upstream *u = c->u_conn->u;
if (!c->host) {
out_host = u->tcp_host;
} else {
out_host = (char *) c->host;
}
mk_list_foreach(head, &c->headers) {
kv = mk_list_entry(head, struct flb_kv, _head);
if (strcasecmp("Host", kv->key) == 0) {
flb_sds_destroy(kv->val);
kv->val = NULL;
kv->val = flb_sds_create(out_host);
if (!kv->val) {
flb_errno();
return -1;
}
return 0;
}
}
return -1;
}
int flb_http_allow_duplicated_headers(struct flb_http_client *c, int allow)
{
if (allow != FLB_TRUE && allow != FLB_FALSE) {
return -1;
}
c->allow_dup_headers = allow;
return 0;
}
/* check if there is enough space in the client header buffer */
static int header_available(struct flb_http_client *c, int bytes)
{
int available;
available = c->header_size - c->header_len;
if (available < bytes) {
return -1;
}
return 0;
}
/* Try to find a header value in the buffer */
static int header_lookup(struct flb_http_client *c,
const char *header, int header_len,
const char **out_val, int *out_len)
{
char *p;
char *crlf;
char *end;
/* Lookup the beginning of the header */
p = strcasestr(c->resp.data, header);
end = strstr(c->resp.data, "\r\n\r\n");
if (!p) {
if (end) {
/* The headers are complete but the header is not there */
return FLB_HTTP_NOT_FOUND;
}
/* We need more data */
return FLB_HTTP_MORE;
}
/* Exclude matches in the body */
if (end && p > end) {
return FLB_HTTP_NOT_FOUND;
}
/* Lookup CRLF (end of line \r\n) */
crlf = strstr(p, "\r\n");
if (!crlf) {
return FLB_HTTP_MORE;
}
p += header_len;
*out_val = p;
*out_len = (crlf - p);
return FLB_HTTP_OK;
}
/* HTTP/1.1: Check if we have a Chunked Transfer Encoding */
static int check_chunked_encoding(struct flb_http_client *c)
{
int ret;
int len;
const char *header = NULL;
ret = header_lookup(c, "Transfer-Encoding: ", 19,
&header, &len);
if (ret == FLB_HTTP_NOT_FOUND) {
/* If the header is missing, this is fine */
c->resp.chunked_encoding = FLB_FALSE;
return FLB_HTTP_OK;
}
else if (ret == FLB_HTTP_MORE) {
return FLB_HTTP_MORE;
}
if (strncasecmp(header, "chunked", len) == 0) {
c->resp.chunked_encoding = FLB_TRUE;
}
return FLB_HTTP_OK;
}
/* Check response for a 'Content-Length' header */
static int check_content_length(struct flb_http_client *c)
{
int ret;
int len;
const char *header;
char tmp[256];
if (c->resp.status == 204) {
c->resp.content_length = -1;
return FLB_HTTP_OK;
}
ret = header_lookup(c, "Content-Length: ", 16,
&header, &len);
if (ret == FLB_HTTP_MORE) {
return FLB_HTTP_MORE;
}
else if (ret == FLB_HTTP_NOT_FOUND) {
return FLB_HTTP_NOT_FOUND;
}
if (len > sizeof(tmp) - 1) {
/* Value too long */
return FLB_HTTP_ERROR;
}
/* Copy to temporary buffer */
memcpy(tmp, header, len);
tmp[len] = '\0';
c->resp.content_length = atoi(tmp);
return FLB_HTTP_OK;
}
/* Check response for a 'Connection' header */
static int check_connection(struct flb_http_client *c)
{
int ret;
int len;
const char *header;
char *buf;
ret = header_lookup(c, "Connection: ", 12,
&header, &len);
if (ret == FLB_HTTP_NOT_FOUND) {
return FLB_HTTP_NOT_FOUND;
}
buf = flb_malloc(len + 1);
if (!buf) {
flb_errno();
return -1;
}
memcpy(buf, header, len);
buf[len] = '\0';
if (strncasecmp(buf, "close", 5) == 0) {
c->resp.connection_close = FLB_TRUE;
}
else if (strcasestr(buf, "keep-alive")) {
c->resp.connection_close = FLB_FALSE;
}
flb_free(buf);
return FLB_HTTP_OK;
}
static inline void consume_bytes(char *buf, int bytes, int length)
{
memmove(buf, buf + bytes, length - bytes);
}
static int process_chunked_data(struct flb_http_client *c)
{
long len;
long drop;
long val;
char *p;
char tmp[32];
struct flb_http_response *r = &c->resp;
chunk_start:
p = strstr(r->chunk_processed_end, "\r\n");
if (!p) {
return FLB_HTTP_MORE;
}
/* Hexa string length */
len = (p - r->chunk_processed_end);
if ((len > sizeof(tmp) - 1) || len == 0) {
return FLB_HTTP_ERROR;
}
p += 2;
/* Copy hexa string to temporary buffer */
memcpy(tmp, r->chunk_processed_end, len);
tmp[len] = '\0';
/* Convert hexa string to decimal */
errno = 0;
val = strtol(tmp, NULL, 16);
if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
|| (errno != 0 && val == 0)) {
flb_errno();
return FLB_HTTP_ERROR;
}
/*
* 'val' contains the expected number of bytes, check current lengths
* and do buffer adjustments.
*
* we do val + 2 because the chunk always ends with \r\n
*/
val += 2;
/* Number of bytes after the Chunk header */
len = r->data_len - (p - r->data);
if (len < val) {
return FLB_HTTP_MORE;
}
/* From the current chunk we expect it ends with \r\n */
if (p[val -2] != '\r' || p[val - 1] != '\n') {
return FLB_HTTP_ERROR;
}
/*
* At this point we are just fine, the chunk is valid, next steps:
*
* 1. check possible last chunk
* 2. drop chunk header from the buffer
* 3. remove chunk ending \r\n
*/
/* 1. Validate ending chunk */
if (val - 2 == 0) {
/*
* For an ending chunk we expect:
*
* 0\r\n
* \r\n
*
* so at least we need 5 bytes in the buffer
*/
len = r->data_len - (r->chunk_processed_end - r->data);
if (len < 5) {
return FLB_HTTP_MORE;
}
if (r->chunk_processed_end[3] != '\r' ||
r->chunk_processed_end[4] != '\n') {
return FLB_HTTP_ERROR;
}
}
/* 2. Drop chunk header */
drop = (p - r->chunk_processed_end);
len = r->data_len - (r->chunk_processed_end - r->data);
consume_bytes(r->chunk_processed_end, drop, len);
r->data_len -= drop;
r->data[r->data_len] = '\0';
/* 3. Remove chunk ending \r\n */
drop = 2;
r->chunk_processed_end += labs(val - 2);
len = r->data_len - (r->chunk_processed_end - r->data);
consume_bytes(r->chunk_processed_end, drop, len);
r->data_len -= drop;
/* Always append a NULL byte */
r->data[r->data_len] = '\0';
/* Is this the last chunk ? */
if ((val - 2 == 0)) {
/* Update payload size */
r->payload_size = r->data_len - (r->headers_end - r->data);
return FLB_HTTP_OK;
}
/* If we have some remaining bytes, start over */
len = r->data_len - (r->chunk_processed_end - r->data);
if (len > 0) {
goto chunk_start;
}
return FLB_HTTP_MORE;
}
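/*
 * Illustrative (hypothetical) chunked payload handled by the routine above:
 *
 *   4\r\n
 *   Wiki\r\n
 *   0\r\n
 *   \r\n
 *
 * Each hexadecimal size line and each trailing \r\n is consumed in place,
 * and the zero-length chunk marks the end of the payload.
 */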
static int process_data(struct flb_http_client *c)
{
int ret;
char code[4];
char *tmp;
if (c->resp.data_len < 15) {
/* we need more data */
return FLB_HTTP_MORE;
}
/* HTTP response status */
if (c->resp.status <= 0) {
memcpy(code, c->resp.data + 9, 3);
code[3] = '\0';
c->resp.status = atoi(code);
}
/* Try to lookup content length */
if (c->resp.content_length == -1 && c->resp.chunked_encoding == FLB_FALSE) {
ret = check_content_length(c);
if (ret == FLB_HTTP_ERROR) {
return FLB_HTTP_ERROR;
}
}
/* Chunked encoding for HTTP/1.1 (no content length of course) */
if ((c->flags & FLB_HTTP_11) && c->resp.content_length == -1) {
if (c->resp.chunked_encoding == FLB_FALSE) {
ret = check_chunked_encoding(c);
if (ret == FLB_HTTP_ERROR) {
return FLB_HTTP_ERROR;
}
}
}
if (!c->resp.headers_end) {
tmp = strstr(c->resp.data, "\r\n\r\n");
if (tmp) {
c->resp.headers_end = tmp + 4;
if (c->resp.chunked_encoding == FLB_TRUE) {
c->resp.chunk_processed_end = c->resp.headers_end;
}
/* Mark the payload */
if ((tmp - c->resp.data + 4) < c->resp.data_len) {
c->resp.payload = tmp += 4;
c->resp.payload_size = (c->resp.data_len - (tmp - c->resp.data));
}
}
else {
return FLB_HTTP_MORE;
}
}
/* Re-check if an ending exists, if so process payload if required */
if (c->resp.headers_end) {
/* Mark the payload */
if (!c->resp.payload &&
c->resp.headers_end - c->resp.data < c->resp.data_len) {
c->resp.payload = c->resp.headers_end;
c->resp.payload_size = (c->resp.data_len - (c->resp.headers_end - c->resp.data));
}
if (c->resp.content_length >= 0) {
c->resp.payload_size = c->resp.data_len;
c->resp.payload_size -= (c->resp.headers_end - c->resp.data);
if (c->resp.payload_size >= c->resp.content_length) {
return FLB_HTTP_OK;
}
}
else if (c->resp.chunked_encoding == FLB_TRUE) {
ret = process_chunked_data(c);
if (ret == FLB_HTTP_ERROR) {
return FLB_HTTP_ERROR;
}
else if (ret == FLB_HTTP_OK) {
return FLB_HTTP_OK;
}
}
else {
return FLB_HTTP_OK;
}
}
else if (c->resp.headers_end && c->resp.content_length <= 0) {
return FLB_HTTP_OK;
}
return FLB_HTTP_MORE;
}
static int proxy_parse(const char *proxy, struct flb_http_client *c)
{
int len;
int port;
int off = 0;
const char *s;
const char *e;
const char *host;
len = strlen(proxy);
if (len < 7) {
return -1;
}
/* Protocol lookup */
if (strncmp(proxy, "http://", 7) == 0) {
port = 80;
off = 7;
c->proxy.type = FLB_HTTP_PROXY_HTTP;
}
else if (strncmp(proxy, "https://", 8) == 0) {
port = 443;
off = 8;
c->proxy.type = FLB_HTTP_PROXY_HTTPS;
}
else {
return -1;
}
/* Separate host/ip from port if any */
s = proxy + off;
if (*s == '[') {
/* IPv6 address (RFC 3986) */
e = strchr(++s, ']');
if (!e) {
return -1;
}
host = strndup(s, e - s);
s = e + 1;
} else {
e = s;
while (!(*e == '\0' || *e == ':' || *e == '/')) {
++e;
}
if (e == s) {
return -1;
}
host = strndup(s, e - s);
s = e;
}
if (*s == ':') {
port = atoi(++s);
}
flb_trace("[http_client] proxy type=%i host=%s port=%i",
c->proxy.type, host, port);
c->proxy.host = host;
c->proxy.port = port;
return 0;
}
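/*
 * Editor's note (illustrative, not part of the original source): given the
 * hypothetical input "http://proxy.example.com:3128", the parser above sets
 * proxy.type = FLB_HTTP_PROXY_HTTP, proxy.host = "proxy.example.com" and
 * proxy.port = 3128; when no explicit port is present it falls back to 80
 * for http:// and 443 for https://.
 */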
static int add_host_and_content_length(struct flb_http_client *c)
{
int len;
flb_sds_t tmp;
flb_sds_t host;
char *out_host;
int out_port;
size_t size;
struct flb_upstream *u = c->u_conn->u;
if (!c->host) {
if (u->proxied_host) {
out_host = u->proxied_host;
}
else {
out_host = u->tcp_host;
}
}
else {
out_host = (char *) c->host;
}
len = strlen(out_host);
host = flb_sds_create_size(len + 32);
if (!host) {
flb_error("[http_client] cannot create temporal buffer");
return -1;
}
if (c->port == 0) {
if (u->proxied_port != 0 ) {
out_port = u->proxied_port;
}
else {
out_port = u->tcp_port;
}
}
else {
out_port = c->port;
}
if (c->flags & FLB_IO_TLS && out_port == 443) {
tmp = flb_sds_copy(host, out_host, strlen(out_host));
}
else {
tmp = flb_sds_printf(&host, "%s:%i", out_host, out_port);
}
if (!tmp) {
flb_sds_destroy(host);
flb_error("[http_client] cannot compose temporary host header");
return -1;
}
host = tmp;
tmp = NULL;
flb_http_add_header(c, "Host", 4, host, flb_sds_len(host));
flb_sds_destroy(host);
/* Content-Length */
if (c->body_len >= 0) {
size = 32;
tmp = flb_malloc(size);
if (!tmp) {
flb_errno();
return -1;
}
len = snprintf(tmp, size - 1, "%i", c->body_len);
flb_http_add_header(c, "Content-Length", 14, tmp, len);
flb_free(tmp);
}
return 0;
}
struct flb_http_client *flb_http_client(struct flb_upstream_conn *u_conn,
int method, const char *uri,
const char *body, size_t body_len,
const char *host, int port,
const char *proxy, int flags)
{
int ret;
char *p;
char *buf = NULL;
char *str_method = NULL;
char *fmt_plain = \
"%s %s HTTP/1.%i\r\n";
char *fmt_proxy = \
"%s http://%s:%i%s HTTP/1.%i\r\n"
"Proxy-Connection: KeepAlive\r\n";
// TODO: IPv6 should have the format of [ip]:port
char *fmt_connect = \
"%s %s:%i HTTP/1.%i\r\n"
"Proxy-Connection: KeepAlive\r\n";
struct flb_http_client *c;
switch (method) {
case FLB_HTTP_GET:
str_method = "GET";
break;
case FLB_HTTP_POST:
str_method = "POST";
break;
case FLB_HTTP_PUT:
str_method = "PUT";
break;
case FLB_HTTP_HEAD:
str_method = "HEAD";
break;
case FLB_HTTP_CONNECT:
str_method = "CONNECT";
break;
};
buf = flb_calloc(1, FLB_HTTP_BUF_SIZE);
if (!buf) {
flb_errno();
return NULL;
}
/* FIXME: handler for HTTPS proxy */
if (proxy) {
flb_debug("[http_client] using http_proxy %s for header", proxy);
ret = snprintf(buf, FLB_HTTP_BUF_SIZE,
fmt_proxy,
str_method,
host,
port,
uri,
flags & FLB_HTTP_10 ? 0 : 1);
}
else if (method == FLB_HTTP_CONNECT) {
flb_debug("[http_client] using HTTP CONNECT for proxy: proxy host %s, proxy port %i", host, port);
ret = snprintf(buf, FLB_HTTP_BUF_SIZE,
fmt_connect,
str_method,
host,
port,
flags & FLB_HTTP_10 ? 0 : 1);
}
else {
flb_debug("[http_client] not using http_proxy for header");
ret = snprintf(buf, FLB_HTTP_BUF_SIZE,
fmt_plain,
str_method,
uri,
flags & FLB_HTTP_10 ? 0 : 1);
}
if (ret == -1) {
flb_errno();
flb_free(buf);
return NULL;
}
c = flb_calloc(1, sizeof(struct flb_http_client));
if (!c) {
flb_free(buf);
return NULL;
}
c->u_conn = u_conn;
c->method = method;
c->uri = uri;
c->host = host;
c->port = port;
c->header_buf = buf;
c->header_size = FLB_HTTP_BUF_SIZE;
c->header_len = ret;
c->flags = flags;
c->allow_dup_headers = FLB_TRUE;
mk_list_init(&c->headers);
/* Check if we have a query string */
p = strchr(uri, '?');
if (p) {
p++;
c->query_string = p;
}
/* Is Upstream connection using keepalive mode ? */
if (u_conn->u->flags & FLB_IO_TCP_KA) {
c->flags |= FLB_HTTP_KA;
}
/* Response */
c->resp.content_length = -1;
c->resp.connection_close = -1;
if ((flags & FLB_HTTP_10) == 0) {
c->flags |= FLB_HTTP_11;
}
if (body && body_len > 0) {
c->body_buf = body;
c->body_len = body_len;
}
add_host_and_content_length(c);
/* Check proxy data */
if (proxy) {
flb_debug("[http_client] Using http_proxy: %s", proxy);
ret = proxy_parse(proxy, c);
if (ret != 0) {
flb_debug("[http_client] Something wrong with the http_proxy parsing");
flb_http_client_destroy(c);
return NULL;
}
}
/* 'Read' buffer size */
c->resp.data = flb_malloc(FLB_HTTP_DATA_SIZE_MAX);
if (!c->resp.data) {
flb_errno();
flb_http_client_destroy(c);
return NULL;
}
c->resp.data_len = 0;
c->resp.data_size = FLB_HTTP_DATA_SIZE_MAX;
c->resp.data_size_max = FLB_HTTP_DATA_SIZE_MAX;
return c;
}
/*
 * By default the HTTP client has a fixed buffer to read a response for a
 * simple request. But in certain situations the caller might expect a
 * larger response that exceeds the buffer limit.
 *
 * This function allows setting a maximum buffer size for the client
 * response where:
 *
 * 1. size = 0: no limit, read as much as possible.
 * 2. size = N: specific limit, upon reaching the limit discard data (default: 4KB)
*/
int flb_http_buffer_size(struct flb_http_client *c, size_t size)
{
if (size < c->resp.data_size_max && size != 0) {
flb_error("[http] requested buffer size %lu (bytes) needs to be greater than "
"minimum size allowed %lu (bytes)",
size, c->resp.data_size_max);
return -1;
}
c->resp.data_size_max = size;
return 0;
}
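/*
 * Editor's note (illustrative usage sketch, not part of the original source):
 * a caller expecting a response larger than the default 4KB buffer could do,
 * for example:
 *
 *   flb_http_buffer_size(c, 0);      <- 0 = no limit, grow as needed
 *   ret = flb_http_do(c, &b_sent);
 *
 * The variable names above are hypothetical; only the two functions are taken
 * from this file.
 */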
size_t flb_http_buffer_available(struct flb_http_client *c)
{
return (c->resp.data_size - c->resp.data_len);
}
/*
* Increase the read buffer size based on the limits set by default or manually
* through the flb_http_buffer_size() function.
*
* The parameter 'size' is the amount of extra memory requested.
*/
int flb_http_buffer_increase(struct flb_http_client *c, size_t size,
size_t *out_size)
{
int off_payload = 0;
int off_headers_end = 0;
int off_chunk_processed_end = 0;
char *tmp;
size_t new_size;
size_t allocated;
*out_size = 0;
new_size = c->resp.data_size + size;
/* Limit exceeded, adjust */
if (c->resp.data_size_max != 0) {
if (new_size > c->resp.data_size_max) {
new_size = c->resp.data_size_max;
if (new_size <= c->resp.data_size) {
/* Can't expand the buffer any further. */
return -1;
}
}
}
if (c->resp.headers_end) {
off_headers_end = c->resp.headers_end - c->resp.data;
}
if (c->resp.chunk_processed_end) {
off_chunk_processed_end = c->resp.chunk_processed_end - c->resp.data;
}
/*
* The payload is a reference to a position of 'data' buffer,
* we need to adjust the pointer after a memory buffer size change.
*/
if (c->resp.payload_size > 0) {
off_payload = c->resp.payload - c->resp.data;
}
tmp = flb_realloc(c->resp.data, new_size);
if (!tmp) {
flb_errno();
return -1;
}
else {
allocated = new_size - c->resp.data_size;
c->resp.data = tmp;
c->resp.data_size = new_size;
if (off_headers_end > 0) {
c->resp.headers_end = c->resp.data + off_headers_end;
}
if (off_chunk_processed_end > 0) {
c->resp.chunk_processed_end = c->resp.data + off_chunk_processed_end;
}
if (off_payload > 0) {
c->resp.payload = c->resp.data + off_payload;
}
}
*out_size = allocated;
return 0;
}
/* Append a custom HTTP header to the request */
int flb_http_add_header(struct flb_http_client *c,
const char *key, size_t key_len,
const char *val, size_t val_len)
{
struct flb_kv *kv;
struct mk_list *tmp;
struct mk_list *head;
if (key_len < 1 || val_len < 1) {
return -1;
}
/* Check any previous header to avoid duplicates */
if (c->allow_dup_headers == FLB_FALSE) {
mk_list_foreach_safe(head, tmp, &c->headers) {
kv = mk_list_entry(head, struct flb_kv, _head);
if (flb_sds_casecmp(kv->key, key, key_len) == 0) {
/* the header already exists, remove it */
flb_kv_item_destroy(kv);
break;
}
}
}
/* register the new header in the temporary kv list */
kv = flb_kv_item_create_len(&c->headers,
(char *) key, key_len, (char *) val, val_len);
if (!kv) {
return -1;
}
return 0;
}
static int http_header_push(struct flb_http_client *c, struct flb_kv *header)
{
char *tmp;
const char *key;
const char *val;
size_t key_len;
size_t val_len;
size_t required;
size_t new_size;
key = header->key;
key_len = flb_sds_len(header->key);
val = header->val;
val_len = flb_sds_len(header->val);
/*
* The new header will need enough space in the buffer:
*
* key : length of the key
* separator: ': ' (2 bytes)
* val : length of the key value
* CRLF : '\r\n' (2 bytes)
*/
required = key_len + 2 + val_len + 2;
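/*
 * e.g. (editor's illustration, hypothetical header): for
 * "Content-Type: application/json" this is 12 + 2 + 16 + 2 = 32 bytes.
 */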
if (header_available(c, required) != 0) {
if (required < 512) {
new_size = c->header_size + 512;
}
else {
new_size = c->header_size + required;
}
tmp = flb_realloc(c->header_buf, new_size);
if (!tmp) {
flb_errno();
return -1;
}
c->header_buf = tmp;
c->header_size = new_size;
}
/* append the header key */
memcpy(c->header_buf + c->header_len,
key, key_len);
c->header_len += key_len;
/* append the separator */
c->header_buf[c->header_len++] = ':';
c->header_buf[c->header_len++] = ' ';
/* append the header value */
memcpy(c->header_buf + c->header_len,
val, val_len);
c->header_len += val_len;
/* Append the ending header CRLF */
c->header_buf[c->header_len++] = '\r';
c->header_buf[c->header_len++] = '\n';
return 0;
}
static int http_headers_compose(struct flb_http_client *c)
{
int ret;
struct mk_list *head;
struct flb_kv *header;
/* Push header list to one buffer */
mk_list_foreach(head, &c->headers) {
header = mk_list_entry(head, struct flb_kv, _head);
ret = http_header_push(c, header);
if (ret != 0) {
flb_error("[http_client] cannot compose request headers");
return -1;
}
}
return 0;
}
static void http_headers_destroy(struct flb_http_client *c)
{
flb_kv_release(&c->headers);
}
int flb_http_set_keepalive(struct flb_http_client *c)
{
/* check if 'keepalive' mode is enabled in the Upstream connection */
if (c->u_conn->u->net.keepalive == FLB_FALSE) {
return -1;
}
/* append header */
return flb_http_add_header(c,
FLB_HTTP_HEADER_CONNECTION,
sizeof(FLB_HTTP_HEADER_CONNECTION) - 1,
FLB_HTTP_HEADER_KA,
sizeof(FLB_HTTP_HEADER_KA) - 1);
}
/* Adds a header specifying that the payload is compressed with gzip */
int flb_http_set_content_encoding_gzip(struct flb_http_client *c)
{
int ret;
ret = flb_http_add_header(c,
FLB_HTTP_HEADER_CONTENT_ENCODING,
sizeof(FLB_HTTP_HEADER_CONTENT_ENCODING) - 1,
"gzip", 4);
return ret;
}
int flb_http_set_callback_context(struct flb_http_client *c,
struct flb_callback *cb_ctx)
{
c->cb_ctx = cb_ctx;
return 0;
}
int flb_http_add_auth_header(struct flb_http_client *c,
const char *user, const char *passwd, const char *header) {
int ret;
int len_u;
int len_p;
int len_h;
int len_out;
char tmp[1024];
char *p;
size_t b64_len;
/*
* We allow a max of 255 bytes for user and password (255 each), meaning
* we need at least:
*
* 'Basic base64(user : passwd)' => ~688 bytes
*
*/
len_u = strlen(user);
if (passwd) {
len_p = strlen(passwd);
}
else {
len_p = 0;
}
p = flb_malloc(len_u + len_p + 2);
if (!p) {
flb_errno();
return -1;
}
memcpy(p, user, len_u);
p[len_u] = ':';
len_out = len_u + 1;
if (passwd) {
memcpy(p + len_out, passwd, len_p);
len_out += len_p;
}
p[len_out] = '\0';
memcpy(tmp, "Basic ", 6);
ret = mbedtls_base64_encode((unsigned char *) tmp + 6, sizeof(tmp) - 7, &b64_len,
(unsigned char *) p, len_out);
if (ret != 0) {
flb_free(p);
return -1;
}
flb_free(p);
b64_len += 6;
len_h = strlen(header);
ret = flb_http_add_header(c,
header,
len_h,
tmp, b64_len);
return ret;
}
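/*
 * Editor's note (illustrative, not part of the original source): for the
 * hypothetical credentials user="user", passwd="pass" the function above
 * produces the header value "Basic dXNlcjpwYXNz", i.e. "Basic " followed by
 * base64("user:pass").
 */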
int flb_http_basic_auth(struct flb_http_client *c,
const char *user, const char *passwd)
{
return flb_http_add_auth_header(c, user, passwd, FLB_HTTP_HEADER_AUTH);
}
int flb_http_proxy_auth(struct flb_http_client *c,
const char *user, const char *passwd)
{
return flb_http_add_auth_header(c, user, passwd, FLB_HTTP_HEADER_PROXY_AUTH);
}
int flb_http_do(struct flb_http_client *c, size_t *bytes)
{
int ret;
int r_bytes;
int crlf = 2;
int new_size;
ssize_t available;
size_t out_size;
size_t bytes_header = 0;
size_t bytes_body = 0;
char *tmp;
/* Append pending headers */
ret = http_headers_compose(c);
if (ret == -1) {
return -1;
}
/* check enough space for the ending CRLF */
if (header_available(c, crlf) != 0) {
new_size = c->header_size + 2;
tmp = flb_realloc(c->header_buf, new_size);
if (!tmp) {
return -1;
}
c->header_buf = tmp;
c->header_len = new_size;
}
/* Append the ending header CRLF */
c->header_buf[c->header_len++] = '\r';
c->header_buf[c->header_len++] = '\n';
#ifdef FLB_HAVE_HTTP_CLIENT_DEBUG
/* debug: request_headers callback */
flb_http_client_debug_cb(c, "_debug.http.request_headers");
/* debug: request_payload callback */
if (c->body_len > 0) {
flb_http_client_debug_cb(c, "_debug.http.request_payload");
}
#endif
flb_debug("[http_client] header=%s", c->header_buf);
/* Write the header */
ret = flb_io_net_write(c->u_conn,
c->header_buf, c->header_len,
&bytes_header);
if (ret == -1) {
flb_errno();
return -1;
}
if (c->body_len > 0) {
ret = flb_io_net_write(c->u_conn,
c->body_buf, c->body_len,
&bytes_body);
if (ret == -1) {
flb_errno();
return -1;
}
}
/* number of sent bytes */
*bytes = (bytes_header + bytes_body);
/* Read the server response, we need at least 19 bytes */
c->resp.data_len = 0;
while (1) {
available = flb_http_buffer_available(c) - 1;
if (available <= 1) {
/*
* If there is no more space available on our buffer, try to
* increase it.
*/
ret = flb_http_buffer_increase(c, FLB_HTTP_DATA_CHUNK,
&out_size);
if (ret == -1) {
/*
* We could not allocate more space, let the caller handle
* this.
*/
flb_upstream_conn_recycle(c->u_conn, FLB_FALSE);
return 0;
}
available = flb_http_buffer_available(c) - 1;
}
r_bytes = flb_io_net_read(c->u_conn,
c->resp.data + c->resp.data_len,
available);
if (r_bytes <= 0) {
if (c->flags & FLB_HTTP_10) {
break;
}
}
/* Always append a NULL byte */
if (r_bytes >= 0) {
c->resp.data_len += r_bytes;
c->resp.data[c->resp.data_len] = '\0';
ret = process_data(c);
if (ret == FLB_HTTP_ERROR) {
return -1;
}
else if (ret == FLB_HTTP_OK) {
break;
}
else if (ret == FLB_HTTP_MORE) {
continue;
}
}
else {
flb_error("[http_client] broken connection to %s:%i ?",
c->u_conn->u->tcp_host, c->u_conn->u->tcp_port);
return -1;
}
}
/* Check 'Connection' response header */
ret = check_connection(c);
if (ret == FLB_HTTP_OK) {
/*
* If the server replied that the connection will be closed
* and our Upstream connection is in keepalive mode, we must
* inactivate the connection.
*/
if (c->resp.connection_close == FLB_TRUE) {
/* Do not recycle the connection (no more keepalive) */
flb_upstream_conn_recycle(c->u_conn, FLB_FALSE);
flb_debug("[http_client] server %s:%i will close connection #%i",
c->u_conn->u->tcp_host, c->u_conn->u->tcp_port,
c->u_conn->fd);
}
}
#ifdef FLB_HAVE_HTTP_CLIENT_DEBUG
flb_http_client_debug_cb(c, "_debug.http.response_headers");
if (c->resp.payload_size > 0) {
flb_http_client_debug_cb(c, "_debug.http.response_payload");
}
#endif
return 0;
}
/*
* flb_http_client_proxy_connect opens a tunnel to a proxy server via
* http `CONNECT` method. This is needed for https traffic through a
* http proxy.
* More: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT
*/
int flb_http_client_proxy_connect(struct flb_upstream_conn *u_conn)
{
struct flb_upstream *u = u_conn->u;
struct flb_http_client *c;
size_t b_sent;
int ret = -1;
/* Don't pass proxy when using FLB_HTTP_CONNECT */
flb_debug("[upstream] establishing http tunneling to proxy: host %s port %d", u->tcp_host, u->tcp_port);
c = flb_http_client(u_conn, FLB_HTTP_CONNECT, "", NULL,
0, u->proxied_host, u->proxied_port, NULL, 0);
/* Setup proxy's username and password */
if (u->proxy_username && u->proxy_password) {
flb_debug("[upstream] proxy uses username %s password %s", u->proxy_username, u->proxy_password);
flb_http_proxy_auth(c, u->proxy_username, u->proxy_password);
}
flb_http_buffer_size(c, 4192);
flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
/* Send HTTP request */
ret = flb_http_do(c, &b_sent);
/* Validate HTTP response */
if (ret != 0) {
flb_error("[upstream] error in flb_establish_proxy: %d", ret);
ret = -1;
}
else {
/* The request was issued successfully, validate the 'error' field */
flb_debug("[upstream] proxy returned %d", c->resp.status);
if (c->resp.status == 200) {
ret = 0;
}
else {
flb_error("flb_establish_proxy error: %s", c->resp.payload);
ret = -1;
}
}
/* Cleanup */
flb_http_client_destroy(c);
return ret;
}
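/*
 * Editor's note (illustrative, not part of the original source): the tunnel
 * request built above results in a start line such as the hypothetical
 *
 *   CONNECT backend.example.com:443 HTTP/1.1
 *
 * sent to the proxy at u->tcp_host:u->tcp_port; a 200 response means the
 * tunnel is established and TLS traffic can then flow through it.
 */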
void flb_http_client_destroy(struct flb_http_client *c)
{
http_headers_destroy(c);
flb_free(c->resp.data);
flb_free(c->header_buf);
flb_free((void *)c->proxy.host);
flb_free(c);
}
| 1 | 13,252 | what about making this function static inline to avoid the extra declaration ? | fluent-fluent-bit | c |
@@ -21,6 +21,7 @@ import (
type protocol = string
type TableIDType uint8
+type GroupIDType = uint32
const LastTableID TableIDType = 0xff
| 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openflow
import (
"net"
"time"
)
type protocol = string
type TableIDType uint8
const LastTableID TableIDType = 0xff
type MissActionType uint32
type Range [2]uint32
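// Editor's note (illustrative, not part of the original source): "type A = B"
// (as used for protocol above) declares a type alias, so A and B are the very
// same type, while "type A B" (as used for TableIDType) declares a distinct
// named type that requires an explicit conversion, e.g. the hypothetical
//
//	var p protocol = "ip"               // ok: protocol is just another name for string
//	var n uint8 = 5
//	var t TableIDType = TableIDType(n)  // distinct named type: conversion needed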
const (
ProtocolIP protocol = "ip"
ProtocolARP protocol = "arp"
ProtocolTCP protocol = "tcp"
ProtocolUDP protocol = "udp"
ProtocolSCTP protocol = "sctp"
ProtocolICMP protocol = "icmp"
)
const (
TableMissActionDrop MissActionType = iota
TableMissActionNormal
TableMissActionNext
TableMissActionNone
)
const (
NxmFieldSrcMAC = "NXM_OF_ETH_SRC"
NxmFieldDstMAC = "NXM_OF_ETH_DST"
NxmFieldARPSha = "NXM_NX_ARP_SHA"
NxmFieldARPTha = "NXM_NX_ARP_THA"
NxmFieldARPSpa = "NXM_OF_ARP_SPA"
NxmFieldARPTpa = "NXM_OF_ARP_TPA"
NxmFieldCtLabel = "NXM_NX_CT_LABEL"
NxmFieldCtMark = "NXM_NX_CT_MARK"
NxmFieldARPOp = "NXM_OF_ARP_OP"
NxmFieldReg = "NXM_NX_REG"
)
// Bridge defines operations on an openflow bridge.
type Bridge interface {
CreateTable(id, next TableIDType, missAction MissActionType) Table
DeleteTable(id TableIDType) bool
DumpTableStatus() []TableStatus
// DumpFlows queries the Openflow entries from OFSwitch. The filter of the query is Openflow cookieID; the result is
// a map from flow cookieID to FlowStates.
DumpFlows(cookieID, cookieMask uint64) map[uint64]*FlowStates
// DeleteFlowsByCookie removes Openflow entries from OFSwitch. The removed Openflow entries use the specific CookieID.
DeleteFlowsByCookie(cookieID, cookieMask uint64) error
// AddFlowsInBundle syncs multiple Openflow entries in a single transaction. This operation could add new flows in
// "addFlows", modify flows in "modFlows", and remove flows in "delFlows" in the same bundle.
AddFlowsInBundle(addflows []Flow, modFlows []Flow, delFlows []Flow) error
// Connect initiates connection to the OFSwitch. It will block until the connection is established. connectCh is used to
// send notification whenever the switch is connected or reconnected.
Connect(maxRetrySec int, connectCh chan struct{}) error
// Disconnect stops connection to the OFSwitch.
Disconnect() error
// IsConnected returns the OFSwitch's connection status. The result is true if the OFSwitch is connected.
IsConnected() bool
}
// TableStatus represents the status of a specific flow table. The status is useful for debugging.
type TableStatus struct {
ID uint `json:"id"`
FlowCount uint `json:"flowCount"`
UpdateTime time.Time `json:"updateTime"`
}
type Table interface {
GetID() TableIDType
BuildFlow(priority uint16) FlowBuilder
GetMissAction() MissActionType
Status() TableStatus
GetNext() TableIDType
}
type Flow interface {
Add() error
Modify() error
Delete() error
MatchString() string
// CopyToBuilder returns a new FlowBuilder that copies the matches of the Flow, but does not copy the actions.
CopyToBuilder() FlowBuilder
// Reset ensures that the ofFlow object is "correct" and that the Add /
// Modify / Delete methods can be called on this object. This method
	// should be called if a reconnection event happened.
Reset()
}
type Action interface {
LoadARPOperation(value uint16) FlowBuilder
LoadRegRange(regID int, value uint32, to Range) FlowBuilder
LoadRange(name string, addr uint64, to Range) FlowBuilder
Move(from, to string) FlowBuilder
MoveRange(fromName, toName string, from, to Range) FlowBuilder
Resubmit(port uint16, table TableIDType) FlowBuilder
ResubmitToTable(table TableIDType) FlowBuilder
CT(commit bool, tableID TableIDType, zone int) CTAction
Drop() FlowBuilder
Output(port int) FlowBuilder
OutputFieldRange(from string, rng Range) FlowBuilder
OutputRegRange(regID int, rng Range) FlowBuilder
OutputInPort() FlowBuilder
SetDstMAC(addr net.HardwareAddr) FlowBuilder
SetSrcMAC(addr net.HardwareAddr) FlowBuilder
SetARPSha(addr net.HardwareAddr) FlowBuilder
SetARPTha(addr net.HardwareAddr) FlowBuilder
SetARPSpa(addr net.IP) FlowBuilder
SetARPTpa(addr net.IP) FlowBuilder
SetSrcIP(addr net.IP) FlowBuilder
SetDstIP(addr net.IP) FlowBuilder
SetTunnelDst(addr net.IP) FlowBuilder
DecTTL() FlowBuilder
Normal() FlowBuilder
Conjunction(conjID uint32, clauseID uint8, nClause uint8) FlowBuilder
}
type FlowBuilder interface {
MatchProtocol(name protocol) FlowBuilder
MatchReg(regID int, data uint32) FlowBuilder
MatchRegRange(regID int, data uint32, rng Range) FlowBuilder
MatchInPort(inPort uint32) FlowBuilder
MatchDstIP(ip net.IP) FlowBuilder
MatchDstIPNet(ipNet net.IPNet) FlowBuilder
MatchSrcIP(ip net.IP) FlowBuilder
MatchSrcIPNet(ipNet net.IPNet) FlowBuilder
MatchDstMAC(mac net.HardwareAddr) FlowBuilder
MatchSrcMAC(mac net.HardwareAddr) FlowBuilder
MatchARPSha(mac net.HardwareAddr) FlowBuilder
MatchARPTha(mac net.HardwareAddr) FlowBuilder
MatchARPSpa(ip net.IP) FlowBuilder
MatchARPTpa(ip net.IP) FlowBuilder
MatchARPOp(op uint16) FlowBuilder
MatchCTStateNew(isSet bool) FlowBuilder
MatchCTStateRel(isSet bool) FlowBuilder
MatchCTStateRpl(isSet bool) FlowBuilder
MatchCTStateEst(isSet bool) FlowBuilder
MatchCTStateTrk(isSet bool) FlowBuilder
MatchCTStateInv(isSet bool) FlowBuilder
MatchCTMark(value uint32) FlowBuilder
MatchConjID(value uint32) FlowBuilder
MatchTCPDstPort(port uint16) FlowBuilder
MatchUDPDstPort(port uint16) FlowBuilder
MatchSCTPDstPort(port uint16) FlowBuilder
Cookie(cookieID uint64) FlowBuilder
Action() Action
Done() Flow
}
type CTAction interface {
LoadToMark(value uint32) CTAction
LoadToLabelRange(value uint64, rng *Range) CTAction
MoveToLabel(fromName string, fromRng, labelRng *Range) CTAction
// NAT action is used if the packet is not committed into the conntrack zone, and is required to leverage the
// original NAT configurations.
NAT() CTAction
	// The SNAT action is used to translate the source IP to a specific address or an address in a pool when committing the
// packet into the conntrack zone. If a single IP is used as the target address, StartIP and EndIP in the range
// should be the same. portRange could be nil.
SNAT(ipRange *IPRange, portRange *PortRange) CTAction
	// The DNAT action is used to translate the destination IP to a specific address or an address in a pool when committing
// the packet into the conntrack zone. If a single IP is used as the target address, StartIP and EndIP in the range
// should be the same. portRange could be nil.
DNAT(ipRange *IPRange, portRange *PortRange) CTAction
CTDone() FlowBuilder
}
type ctBase struct {
commit bool
force bool
ctTable uint8
ctZone uint16
}
type IPRange struct {
StartIP net.IP
EndIP net.IP
}
type PortRange struct {
StartPort uint16
EndPort uint16
}
| 1 | 14,735 | I forget the difference between type definition with "=" and without "=" again, but can we unify the style? I believe the difference doesn't apply to TableIDType and GroupIDType whatever it is? | antrea-io-antrea | go |
@@ -49,7 +49,7 @@ return_code parseArguments(int argc,
"Number of threads to use")(
"core,k",
boost::program_options::value<double>(&contractor_config.core_factor)->default_value(1.0),
- "Percentage of the graph (in vertices) to contract [0..1]")(
+ "Percentage of the graph (in vertices) to contract [0..1]. Will always be 1.0")(
"segment-speed-file",
boost::program_options::value<std::vector<std::string>>(
&contractor_config.updater_config.segment_speed_lookup_paths) | 1 | #include "storage/io.hpp"
#include "osrm/contractor.hpp"
#include "osrm/contractor_config.hpp"
#include "osrm/exception.hpp"
#include "util/log.hpp"
#include "util/timezones.hpp"
#include "util/version.hpp"
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <boost/program_options/errors.hpp>
#include <tbb/task_scheduler_init.h>
#include <cstdlib>
#include <exception>
#include <new>
#include <ostream>
#include "util/meminfo.hpp"
using namespace osrm;
enum class return_code : unsigned
{
ok,
fail,
exit
};
return_code parseArguments(int argc,
char *argv[],
std::string &verbosity,
contractor::ContractorConfig &contractor_config)
{
// declare a group of options that will be allowed only on command line
boost::program_options::options_description generic_options("Options");
generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")(
"verbosity,l",
boost::program_options::value<std::string>(&verbosity)->default_value("INFO"),
std::string("Log verbosity level: " + util::LogPolicy::GetLevels()).c_str());
// declare a group of options that will be allowed on command line
boost::program_options::options_description config_options("Configuration");
config_options.add_options()(
"threads,t",
boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use")(
"core,k",
boost::program_options::value<double>(&contractor_config.core_factor)->default_value(1.0),
"Percentage of the graph (in vertices) to contract [0..1]")(
"segment-speed-file",
boost::program_options::value<std::vector<std::string>>(
&contractor_config.updater_config.segment_speed_lookup_paths)
->composing(),
"Lookup files containing nodeA, nodeB, speed data to adjust edge weights")(
"turn-penalty-file",
boost::program_options::value<std::vector<std::string>>(
&contractor_config.updater_config.turn_penalty_lookup_paths)
->composing(),
"Lookup files containing from_, to_, via_nodes, and turn penalties to adjust turn weights")(
"level-cache,o",
boost::program_options::value<bool>(&contractor_config.use_cached_priority)
->default_value(false),
"Use .level file to retain the contaction level for each node from the last run.")(
"edge-weight-updates-over-factor",
boost::program_options::value<double>(
&contractor_config.updater_config.log_edge_updates_factor)
->default_value(0.0),
"Use with `--segment-speed-file`. Provide an `x` factor, by which Extractor will log edge "
"weights updated by more than this factor")(
"parse-conditionals-from-now",
boost::program_options::value<std::time_t>(&contractor_config.updater_config.valid_now)
->default_value(0),
"Optional for conditional turn restriction parsing, provide a UTC time stamp from "
"which "
"to evaluate the validity of conditional turn restrictions")(
"time-zone-file",
boost::program_options::value<std::string>(&contractor_config.updater_config.tz_file_path),
"Required for conditional turn restriction parsing, provide a geojson file containing "
"time zone boundaries");
// hidden options, will be allowed on command line, but will not be shown to the user
boost::program_options::options_description hidden_options("Hidden options");
hidden_options.add_options()(
"input,i",
boost::program_options::value<boost::filesystem::path>(&contractor_config.base_path),
"Input file in .osm, .osm.bz2 or .osm.pbf format");
// positional option
boost::program_options::positional_options_description positional_options;
positional_options.add("input", 1);
// combine above options for parsing
boost::program_options::options_description cmdline_options;
cmdline_options.add(generic_options).add(config_options).add(hidden_options);
const auto *executable = argv[0];
boost::program_options::options_description visible_options(
"Usage: " + boost::filesystem::path(executable).filename().string() +
" <input.osrm> [options]");
visible_options.add(generic_options).add(config_options);
// parse command line options
boost::program_options::variables_map option_variables;
try
{
boost::program_options::store(boost::program_options::command_line_parser(argc, argv)
.options(cmdline_options)
.positional(positional_options)
.run(),
option_variables);
}
catch (const boost::program_options::error &e)
{
util::Log(logERROR) << e.what();
return return_code::fail;
}
if (option_variables.count("version"))
{
std::cout << OSRM_VERSION << std::endl;
return return_code::exit;
}
if (option_variables.count("help"))
{
std::cout << visible_options;
return return_code::exit;
}
boost::program_options::notify(option_variables);
if (!option_variables.count("input"))
{
std::cout << visible_options;
return return_code::fail;
}
return return_code::ok;
}
int main(int argc, char *argv[]) try
{
util::LogPolicy::GetInstance().Unmute();
std::string verbosity;
contractor::ContractorConfig contractor_config;
const return_code result = parseArguments(argc, argv, verbosity, contractor_config);
if (return_code::fail == result)
{
return EXIT_FAILURE;
}
if (return_code::exit == result)
{
return EXIT_SUCCESS;
}
util::LogPolicy::GetInstance().SetLevel(verbosity);
contractor_config.UseDefaultOutputNames(contractor_config.base_path);
if (1 > contractor_config.requested_num_threads)
{
util::Log(logERROR) << "Number of threads must be 1 or larger";
return EXIT_FAILURE;
}
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
if (recommended_num_threads != contractor_config.requested_num_threads)
{
util::Log(logWARNING) << "The recommended number of threads is " << recommended_num_threads
<< "! This setting may have performance side-effects.";
}
if (!contractor_config.IsValid())
{
return EXIT_FAILURE;
}
util::Log() << "Input file: " << contractor_config.base_path.string() << ".osrm";
util::Log() << "Threads: " << contractor_config.requested_num_threads;
tbb::task_scheduler_init init(contractor_config.requested_num_threads);
osrm::contract(contractor_config);
util::DumpSTXXLStats();
util::DumpMemoryStats();
return EXIT_SUCCESS;
}
catch (const osrm::RuntimeError &e)
{
util::DumpSTXXLStats();
util::DumpMemoryStats();
util::Log(logERROR) << e.what();
return e.GetCode();
}
catch (const std::bad_alloc &e)
{
util::DumpSTXXLStats();
util::DumpMemoryStats();
util::Log(logERROR) << e.what();
util::Log(logERROR) << "Please provide more memory or consider using a larger swapfile";
return EXIT_FAILURE;
}
#ifdef _WIN32
catch (const std::exception &e)
{
util::Log(logERROR) << "[exception] " << e.what();
return EXIT_FAILURE;
}
#endif
| 1 | 22,948 | would be `DEPRECATED Percentage of the graph (in vertices) to contract [0..1]` better? | Project-OSRM-osrm-backend | cpp |
@@ -318,4 +318,11 @@ public interface DriverCommand {
// Mobile API
String GET_NETWORK_CONNECTION = "getNetworkConnection";
String SET_NETWORK_CONNECTION = "setNetworkConnection";
+
+ // Cast Media Router API
+ String GET_CAST_SINKS = "getCastSinks";
+ String SET_CAST_SINK_TO_USE = "selectCastSink";
+ String START_CAST_TAB_MIRRORING = "startCastTabMirroring";
+ String GET_CAST_ISSUE_MESSAGE = "getCastIssueMessage";
+ String STOP_CASTING = "stopCasting";
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.Cookie;
import org.openqa.selenium.Dimension;
import org.openqa.selenium.Point;
import org.openqa.selenium.WindowType;
import org.openqa.selenium.interactions.Sequence;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* An empty interface defining constants for the standard commands defined in the WebDriver JSON
* wire protocol.
*
* @author [email protected] (Jason Leyba)
*/
public interface DriverCommand {
String GET_ALL_SESSIONS = "getAllSessions";
String GET_CAPABILITIES = "getCapabilities";
String NEW_SESSION = "newSession";
static CommandPayload NEW_SESSION(Capabilities capabilities) {
return new CommandPayload(NEW_SESSION, ImmutableMap.of("desiredCapabilities", capabilities));
}
String STATUS = "status";
String CLOSE = "close";
String QUIT = "quit";
String GET = "get";
static CommandPayload GET(String url) {
return new CommandPayload(GET, ImmutableMap.of("url", url));
}
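// Editor's note (illustrative, not part of the original source): callers can
// build the wire payload for a navigation with the factory above, e.g. the
// hypothetical
//   CommandPayload payload = DriverCommand.GET("https://example.com");
// which pairs the "get" command name with a {"url": ...} parameter map.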
String GO_BACK = "goBack";
String GO_FORWARD = "goForward";
String REFRESH = "refresh";
String ADD_COOKIE = "addCookie";
static CommandPayload ADD_COOKIE(Cookie cookie) {
return new CommandPayload(ADD_COOKIE, ImmutableMap.of("cookie", cookie));
}
String GET_ALL_COOKIES = "getCookies";
String GET_COOKIE = "getCookie";
String DELETE_COOKIE = "deleteCookie";
static CommandPayload DELETE_COOKIE(String name) {
return new CommandPayload(DELETE_COOKIE, ImmutableMap.of("name", name));
}
String DELETE_ALL_COOKIES = "deleteAllCookies";
String FIND_ELEMENT = "findElement";
static CommandPayload FIND_ELEMENT(String strategy, String value) {
return new CommandPayload(FIND_ELEMENT, ImmutableMap.of("using", strategy, "value", value));
}
String FIND_ELEMENTS = "findElements";
static CommandPayload FIND_ELEMENTS(String strategy, String value) {
return new CommandPayload(FIND_ELEMENTS, ImmutableMap.of("using", strategy, "value", value));
}
String FIND_CHILD_ELEMENT = "findChildElement";
static CommandPayload FIND_CHILD_ELEMENT(String id, String strategy, String value) {
return new CommandPayload(FIND_CHILD_ELEMENT,
ImmutableMap.of("id", id, "using", strategy, "value", value));
}
String FIND_CHILD_ELEMENTS = "findChildElements";
static CommandPayload FIND_CHILD_ELEMENTS(String id, String strategy, String value) {
return new CommandPayload(FIND_CHILD_ELEMENTS,
ImmutableMap.of("id", id, "using", strategy, "value", value));
}
String CLEAR_ELEMENT = "clearElement";
static CommandPayload CLEAR_ELEMENT(String id) {
return new CommandPayload(CLEAR_ELEMENT, ImmutableMap.of("id", id));
}
String CLICK_ELEMENT = "clickElement";
static CommandPayload CLICK_ELEMENT(String id) {
return new CommandPayload(CLICK_ELEMENT, ImmutableMap.of("id", id));
}
String SEND_KEYS_TO_ELEMENT = "sendKeysToElement";
static CommandPayload SEND_KEYS_TO_ELEMENT(String id, CharSequence[] keysToSend) {
return new CommandPayload(SEND_KEYS_TO_ELEMENT, ImmutableMap.of("id", id, "value", keysToSend));
}
String SEND_KEYS_TO_ACTIVE_ELEMENT = "sendKeysToActiveElement";
String SUBMIT_ELEMENT = "submitElement";
static CommandPayload SUBMIT_ELEMENT(String id) {
return new CommandPayload(SUBMIT_ELEMENT, ImmutableMap.of("id", id));
}
String UPLOAD_FILE = "uploadFile";
static CommandPayload UPLOAD_FILE(String file) {
return new CommandPayload(UPLOAD_FILE, ImmutableMap.of("file", file));
}
String GET_CURRENT_WINDOW_HANDLE = "getCurrentWindowHandle";
String GET_WINDOW_HANDLES = "getWindowHandles";
String GET_CURRENT_CONTEXT_HANDLE = "getCurrentContextHandle";
String GET_CONTEXT_HANDLES = "getContextHandles";
String SWITCH_TO_WINDOW = "switchToWindow";
static CommandPayload SWITCH_TO_WINDOW(String windowHandleOrName) {
return new CommandPayload(SWITCH_TO_WINDOW, ImmutableMap.of("handle", windowHandleOrName));
}
String SWITCH_TO_NEW_WINDOW = "newWindow";
static CommandPayload SWITCH_TO_NEW_WINDOW(WindowType typeHint) {
return new CommandPayload(SWITCH_TO_NEW_WINDOW, ImmutableMap.of("type", typeHint.toString()));
}
String SWITCH_TO_CONTEXT = "switchToContext";
String SWITCH_TO_FRAME = "switchToFrame";
static CommandPayload SWITCH_TO_FRAME(Object frame) {
return new CommandPayload(SWITCH_TO_FRAME, Collections.singletonMap("id", frame));
}
String SWITCH_TO_PARENT_FRAME = "switchToParentFrame";
String GET_ACTIVE_ELEMENT = "getActiveElement";
String GET_CURRENT_URL = "getCurrentUrl";
String GET_PAGE_SOURCE = "getPageSource";
String GET_TITLE = "getTitle";
String EXECUTE_SCRIPT = "executeScript";
static CommandPayload EXECUTE_SCRIPT(String script, List<Object> args) {
return new CommandPayload(EXECUTE_SCRIPT, ImmutableMap.of("script", script, "args", args));
}
String EXECUTE_ASYNC_SCRIPT = "executeAsyncScript";
static CommandPayload EXECUTE_ASYNC_SCRIPT(String script, List<Object> args) {
return new CommandPayload(EXECUTE_ASYNC_SCRIPT, ImmutableMap.of("script", script, "args", args));
}
String GET_ELEMENT_TEXT = "getElementText";
static CommandPayload GET_ELEMENT_TEXT(String id) {
return new CommandPayload(GET_ELEMENT_TEXT, ImmutableMap.of("id", id));
}
String GET_ELEMENT_TAG_NAME = "getElementTagName";
static CommandPayload GET_ELEMENT_TAG_NAME(String id) {
return new CommandPayload(GET_ELEMENT_TAG_NAME, ImmutableMap.of("id", id));
}
String IS_ELEMENT_SELECTED = "isElementSelected";
static CommandPayload IS_ELEMENT_SELECTED(String id) {
return new CommandPayload(IS_ELEMENT_SELECTED, ImmutableMap.of("id", id));
}
String IS_ELEMENT_ENABLED = "isElementEnabled";
static CommandPayload IS_ELEMENT_ENABLED(String id) {
return new CommandPayload(IS_ELEMENT_ENABLED, ImmutableMap.of("id", id));
}
String IS_ELEMENT_DISPLAYED = "isElementDisplayed";
static CommandPayload IS_ELEMENT_DISPLAYED(String id) {
return new CommandPayload(IS_ELEMENT_DISPLAYED, ImmutableMap.of("id", id));
}
String GET_ELEMENT_RECT = "getElementRect";
static CommandPayload GET_ELEMENT_RECT(String id) {
return new CommandPayload(GET_ELEMENT_RECT, ImmutableMap.of("id", id));
}
String GET_ELEMENT_LOCATION = "getElementLocation";
static CommandPayload GET_ELEMENT_LOCATION(String id) {
return new CommandPayload(GET_ELEMENT_LOCATION, ImmutableMap.of("id", id));
}
String GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW = "getElementLocationOnceScrolledIntoView";
static CommandPayload GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW(String id) {
return new CommandPayload(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW, ImmutableMap.of("id", id));
}
String GET_ELEMENT_SIZE = "getElementSize";
static CommandPayload GET_ELEMENT_SIZE(String id) {
return new CommandPayload(GET_ELEMENT_SIZE, ImmutableMap.of("id", id));
}
String GET_ELEMENT_ATTRIBUTE = "getElementAttribute";
static CommandPayload GET_ELEMENT_ATTRIBUTE(String id, String name) {
return new CommandPayload(GET_ELEMENT_ATTRIBUTE, ImmutableMap.of("id", id, "name", name));
}
String GET_ELEMENT_PROPERTY = "getElementProperty";
String GET_ELEMENT_VALUE_OF_CSS_PROPERTY = "getElementValueOfCssProperty";
static CommandPayload GET_ELEMENT_VALUE_OF_CSS_PROPERTY(String id, String name) {
return new CommandPayload(GET_ELEMENT_VALUE_OF_CSS_PROPERTY, ImmutableMap.of("id", id, "propertyName", name));
}
String ELEMENT_EQUALS = "elementEquals";
String SCREENSHOT = "screenshot";
String ELEMENT_SCREENSHOT = "elementScreenshot";
static CommandPayload ELEMENT_SCREENSHOT(String id) {
return new CommandPayload(ELEMENT_SCREENSHOT, ImmutableMap.of("id", id));
}
String ACCEPT_ALERT = "acceptAlert";
String DISMISS_ALERT = "dismissAlert";
String GET_ALERT_TEXT = "getAlertText";
String SET_ALERT_VALUE = "setAlertValue";
static CommandPayload SET_ALERT_VALUE(String keysToSend) {
return new CommandPayload(SET_ALERT_VALUE, ImmutableMap.of("text", keysToSend));
}
String SET_ALERT_CREDENTIALS = "setAlertCredentials";
String SET_TIMEOUT = "setTimeout";
static CommandPayload SET_IMPLICIT_WAIT_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("implicit", TimeUnit.MILLISECONDS.convert(time, unit)));
}
static CommandPayload SET_SCRIPT_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("script", TimeUnit.MILLISECONDS.convert(time, unit)));
}
static CommandPayload SET_PAGE_LOAD_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("pageLoad", TimeUnit.MILLISECONDS.convert(time, unit)));
}
String IMPLICITLY_WAIT = "implicitlyWait";
String SET_SCRIPT_TIMEOUT = "setScriptTimeout";
String GET_LOCATION = "getLocation";
String SET_LOCATION = "setLocation";
String GET_APP_CACHE = "getAppCache";
String GET_APP_CACHE_STATUS = "getStatus";
String CLEAR_APP_CACHE = "clearAppCache";
String IS_BROWSER_ONLINE = "isBrowserOnline";
String SET_BROWSER_ONLINE = "setBrowserOnline";
String GET_LOCAL_STORAGE_ITEM = "getLocalStorageItem";
String GET_LOCAL_STORAGE_KEYS = "getLocalStorageKeys";
String SET_LOCAL_STORAGE_ITEM = "setLocalStorageItem";
String REMOVE_LOCAL_STORAGE_ITEM = "removeLocalStorageItem";
String CLEAR_LOCAL_STORAGE = "clearLocalStorage";
String GET_LOCAL_STORAGE_SIZE = "getLocalStorageSize";
String GET_SESSION_STORAGE_ITEM = "getSessionStorageItem";
String GET_SESSION_STORAGE_KEYS = "getSessionStorageKey";
String SET_SESSION_STORAGE_ITEM = "setSessionStorageItem";
String REMOVE_SESSION_STORAGE_ITEM = "removeSessionStorageItem";
String CLEAR_SESSION_STORAGE = "clearSessionStorage";
String GET_SESSION_STORAGE_SIZE = "getSessionStorageSize";
String SET_SCREEN_ORIENTATION = "setScreenOrientation";
String GET_SCREEN_ORIENTATION = "getScreenOrientation";
String SET_SCREEN_ROTATION = "setScreenRotation";
String GET_SCREEN_ROTATION = "getScreenRotation";
// W3C Actions APIs
String ACTIONS = "actions";
static CommandPayload ACTIONS(Collection<Sequence> actions) {
return new CommandPayload(ACTIONS, ImmutableMap.of("actions", actions));
}
String CLEAR_ACTIONS_STATE = "clearActionState";
// These belong to the Advanced user interactions - an element is
// optional for these commands.
String CLICK = "mouseClick";
String DOUBLE_CLICK = "mouseDoubleClick";
String MOUSE_DOWN = "mouseButtonDown";
String MOUSE_UP = "mouseButtonUp";
String MOVE_TO = "mouseMoveTo";
// Those allow interactions with the Input Methods installed on
// the system.
String IME_GET_AVAILABLE_ENGINES = "imeGetAvailableEngines";
String IME_GET_ACTIVE_ENGINE = "imeGetActiveEngine";
String IME_IS_ACTIVATED = "imeIsActivated";
String IME_DEACTIVATE = "imeDeactivate";
String IME_ACTIVATE_ENGINE = "imeActivateEngine";
static CommandPayload IME_ACTIVATE_ENGINE(String engine) {
return new CommandPayload(SET_ALERT_VALUE, ImmutableMap.of("engine", engine));
}
// These belong to the Advanced Touch API
String TOUCH_SINGLE_TAP = "touchSingleTap";
String TOUCH_DOWN = "touchDown";
String TOUCH_UP = "touchUp";
String TOUCH_MOVE = "touchMove";
String TOUCH_SCROLL = "touchScroll";
String TOUCH_DOUBLE_TAP = "touchDoubleTap";
String TOUCH_LONG_PRESS = "touchLongPress";
String TOUCH_FLICK = "touchFlick";
// Window API
String SET_CURRENT_WINDOW_POSITION = "setWindowPosition";
static CommandPayload SET_CURRENT_WINDOW_POSITION(Point targetPosition) {
return new CommandPayload(
SET_CURRENT_WINDOW_POSITION, ImmutableMap.of("x", targetPosition.x, "y", targetPosition.y));
}
String GET_CURRENT_WINDOW_POSITION = "getWindowPosition";
static CommandPayload GET_CURRENT_WINDOW_POSITION() {
return new CommandPayload(
GET_CURRENT_WINDOW_POSITION, ImmutableMap.of("windowHandle", "current"));
}
// W3C compatible Window API
String SET_CURRENT_WINDOW_SIZE = "setCurrentWindowSize";
static CommandPayload SET_CURRENT_WINDOW_SIZE(Dimension targetSize) {
return new CommandPayload(
SET_CURRENT_WINDOW_SIZE, ImmutableMap.of("width", targetSize.width, "height", targetSize.height));
}
String GET_CURRENT_WINDOW_SIZE = "getCurrentWindowSize";
String MAXIMIZE_CURRENT_WINDOW = "maximizeCurrentWindow";
String FULLSCREEN_CURRENT_WINDOW = "fullscreenCurrentWindow";
// Logging API
String GET_AVAILABLE_LOG_TYPES = "getAvailableLogTypes";
String GET_LOG = "getLog";
String GET_SESSION_LOGS = "getSessionLogs";
// Mobile API
String GET_NETWORK_CONNECTION = "getNetworkConnection";
String SET_NETWORK_CONNECTION = "setNetworkConnection";
}
| 1 | 16,690 | These command names are specific to Chromium-based browsers. Please move to `ChromiumDriverCommand` | SeleniumHQ-selenium | py |
@@ -108,7 +108,7 @@ namespace OpenTelemetry.Exporter.Zipkin
}
string serviceName = null;
- Dictionary<string, object> tags = null;
+ Dictionary<string, object> tags = new Dictionary<string, object>();
foreach (var label in resource.Attributes)
{
string key = label.Key; | 1 | // <copyright file="ZipkinExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Net;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Net.Sockets;
#if NET452
using Newtonsoft.Json;
#else
using System.Text.Json;
#endif
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry.Exporter.Zipkin.Implementation;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Exporter.Zipkin
{
/// <summary>
/// Zipkin exporter.
/// </summary>
public class ZipkinExporter : BaseExporter<Activity>
{
private readonly ZipkinExporterOptions options;
#if !NET452
private readonly int maxPayloadSizeInBytes;
#endif
private readonly HttpClient httpClient;
/// <summary>
/// Initializes a new instance of the <see cref="ZipkinExporter"/> class.
/// </summary>
/// <param name="options">Configuration options.</param>
/// <param name="client">Http client to use to upload telemetry.</param>
public ZipkinExporter(ZipkinExporterOptions options, HttpClient client = null)
{
this.options = options ?? throw new ArgumentNullException(nameof(options));
#if !NET452
this.maxPayloadSizeInBytes = (!options.MaxPayloadSizeInBytes.HasValue || options.MaxPayloadSizeInBytes <= 0) ? ZipkinExporterOptions.DefaultMaxPayloadSizeInBytes : options.MaxPayloadSizeInBytes.Value;
#endif
this.httpClient = client ?? new HttpClient();
}
internal ZipkinEndpoint LocalEndpoint { get; private set; }
/// <inheritdoc/>
public override ExportResult Export(in Batch<Activity> batch)
{
if (this.LocalEndpoint == null)
{
this.SetLocalEndpointFromResource(this.ParentProvider.GetResource());
}
// Prevent Zipkin's HTTP operations from being instrumented.
using var scope = SuppressInstrumentationScope.Begin();
try
{
var requestUri = this.options.Endpoint;
using var request = new HttpRequestMessage(HttpMethod.Post, requestUri)
{
Content = new JsonContent(this, batch),
};
using var response = this.httpClient.SendAsync(request, CancellationToken.None).GetAwaiter().GetResult();
response.EnsureSuccessStatusCode();
return ExportResult.Success;
}
catch (Exception ex)
{
ZipkinExporterEventSource.Log.FailedExport(ex);
return ExportResult.Failure;
}
}
internal void SetLocalEndpointFromResource(Resource resource)
{
var hostName = ResolveHostName();
string ipv4 = null;
string ipv6 = null;
if (!string.IsNullOrEmpty(hostName))
{
ipv4 = ResolveHostAddress(hostName, AddressFamily.InterNetwork);
ipv6 = ResolveHostAddress(hostName, AddressFamily.InterNetworkV6);
}
string serviceName = null;
Dictionary<string, object> tags = null;
foreach (var label in resource.Attributes)
{
string key = label.Key;
switch (key)
{
case ResourceSemanticConventions.AttributeServiceName:
serviceName = label.Value as string;
continue;
}
if (tags == null)
{
tags = new Dictionary<string, object>();
}
tags[key] = label.Value;
}
if (string.IsNullOrEmpty(serviceName))
{
serviceName = this.options.ServiceName;
}
this.LocalEndpoint = new ZipkinEndpoint(
serviceName,
ipv4,
ipv6,
port: null,
tags);
}
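// Editor's note (illustrative, not part of the original source): once the
// patch at the top of this entry pre-allocates the dictionary, the lazy
// "if (tags == null) { tags = new Dictionary<string, object>(); }" guard in
// the loop above is never taken, so the loop body could shrink to the
// hypothetical single line:
//
//     tags[key] = label.Value;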
private static string ResolveHostAddress(string hostName, AddressFamily family)
{
string result = null;
try
{
var results = Dns.GetHostAddresses(hostName);
if (results != null && results.Length > 0)
{
foreach (var addr in results)
{
if (addr.AddressFamily.Equals(family))
{
var sanitizedAddress = new IPAddress(addr.GetAddressBytes()); // Construct address sans ScopeID
result = sanitizedAddress.ToString();
break;
}
}
}
}
catch (Exception)
{
// Ignore
}
return result;
}
private static string ResolveHostName()
{
string result = null;
try
{
result = Dns.GetHostName();
if (!string.IsNullOrEmpty(result))
{
var response = Dns.GetHostEntry(result);
if (response != null)
{
return response.HostName;
}
}
}
catch (Exception)
{
// Ignore
}
return result;
}
private class JsonContent : HttpContent
{
private static readonly MediaTypeHeaderValue JsonHeader = new MediaTypeHeaderValue("application/json")
{
CharSet = "utf-8",
};
private readonly ZipkinExporter exporter;
private readonly Batch<Activity> batch;
#if NET452
private JsonWriter writer;
#else
private Utf8JsonWriter writer;
#endif
public JsonContent(ZipkinExporter exporter, in Batch<Activity> batch)
{
this.exporter = exporter;
this.batch = batch;
this.Headers.ContentType = JsonHeader;
}
protected override Task SerializeToStreamAsync(Stream stream, TransportContext context)
{
#if NET452
StreamWriter streamWriter = new StreamWriter(stream);
this.writer = new JsonTextWriter(streamWriter);
#else
if (this.writer == null)
{
this.writer = new Utf8JsonWriter(stream);
}
else
{
this.writer.Reset(stream);
}
#endif
this.writer.WriteStartArray();
foreach (var activity in this.batch)
{
var zipkinSpan = activity.ToZipkinSpan(this.exporter.LocalEndpoint, this.exporter.options.UseShortTraceIds);
zipkinSpan.Write(this.writer);
zipkinSpan.Return();
#if !NET452
if (this.writer.BytesPending >= this.exporter.maxPayloadSizeInBytes)
{
this.writer.Flush();
}
#endif
}
this.writer.WriteEndArray();
this.writer.Flush();
#if NET452
return Task.FromResult(true);
#else
return Task.CompletedTask;
#endif
}
protected override bool TryComputeLength(out long length)
{
// We can't know the length of the content being pushed to the output stream.
length = -1;
return false;
}
}
}
}
| 1 | 18,686 | if we won't use, should we remove this? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -230,7 +230,10 @@ namespace MvvmCross.Platforms.Android.Core
protected virtual IDictionary<string, string> ViewNamespaceAbbreviations => new Dictionary<string, string>
{
- { "Mvx", "MvvmCross.Platforms.Android.Views" }
+ { "Mvx", "mvvmcross.platforms.android.views"
+ },
+ { "Mvx", "mvvmcross.platforms.android.binding.views"
+ }
};
protected virtual IEnumerable<string> ViewNamespaces => new List<string> | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using System.Reflection;
using Android.Content;
using Android.Views;
using MvvmCross.Converters;
using MvvmCross.Exceptions;
using MvvmCross.IoC;
using MvvmCross.Binding;
using MvvmCross.Binding.Binders;
using MvvmCross.Binding.BindingContext;
using MvvmCross.Binding.Bindings.Target.Construction;
using MvvmCross.Core;
using MvvmCross.Platforms.Android.Binding;
using MvvmCross.Platforms.Android.Binding.Binders.ViewTypeResolvers;
using MvvmCross.Platforms.Android.Binding.Views;
using MvvmCross.Platforms.Android.Presenters;
using MvvmCross.Platforms.Android.Views;
using MvvmCross.ViewModels;
using MvvmCross.Views;
using MvvmCross.Presenters;
using System.Linq;
namespace MvvmCross.Platforms.Android.Core
{
public abstract class MvxAndroidSetup
: MvxSetup, IMvxAndroidGlobals, IMvxAndroidSetup
{
private Context _applicationContext;
private IMvxAndroidViewPresenter _presenter;
public void PlatformInitialize(Context applicationContext)
{
_applicationContext = applicationContext;
}
public virtual Assembly ExecutableAssembly => ViewAssemblies?.FirstOrDefault() ?? GetType().Assembly;
public Context ApplicationContext => _applicationContext;
protected override void InitializePlatformServices()
{
InitializeLifetimeMonitor();
InitializeAndroidCurrentTopActivity();
RegisterPresenter();
Mvx.IoCProvider.RegisterSingleton<IMvxAndroidGlobals>(this);
var intentResultRouter = new MvxIntentResultSink();
Mvx.IoCProvider.RegisterSingleton<IMvxIntentResultSink>(intentResultRouter);
Mvx.IoCProvider.RegisterSingleton<IMvxIntentResultSource>(intentResultRouter);
var viewModelTemporaryCache = new MvxSingleViewModelCache();
Mvx.IoCProvider.RegisterSingleton<IMvxSingleViewModelCache>(viewModelTemporaryCache);
var viewModelMultiTemporaryCache = new MvxMultipleViewModelCache();
Mvx.IoCProvider.RegisterSingleton<IMvxMultipleViewModelCache>(viewModelMultiTemporaryCache);
base.InitializePlatformServices();
}
protected virtual void InitializeAndroidCurrentTopActivity()
{
var currentTopActivity = CreateAndroidCurrentTopActivity();
Mvx.IoCProvider.RegisterSingleton<IMvxAndroidCurrentTopActivity>(currentTopActivity);
}
protected virtual IMvxAndroidCurrentTopActivity CreateAndroidCurrentTopActivity()
{
var mvxApplication = MvxAndroidApplication.Instance;
if (mvxApplication != null)
{
var activityLifecycleCallbacksManager = new MvxApplicationCallbacksCurrentTopActivity();
mvxApplication.RegisterActivityLifecycleCallbacks(activityLifecycleCallbacksManager);
return activityLifecycleCallbacksManager;
}
else
{
return new MvxLifecycleMonitorCurrentTopActivity(Mvx.IoCProvider.GetSingleton<IMvxAndroidActivityLifetimeListener>());
}
}
protected virtual void InitializeLifetimeMonitor()
{
var lifetimeMonitor = CreateLifetimeMonitor();
Mvx.IoCProvider.RegisterSingleton<IMvxAndroidActivityLifetimeListener>(lifetimeMonitor);
Mvx.IoCProvider.RegisterSingleton<IMvxLifetime>(lifetimeMonitor);
}
protected virtual MvxAndroidLifetimeMonitor CreateLifetimeMonitor()
{
return new MvxAndroidLifetimeMonitor();
}
protected virtual void InitializeSavedStateConverter()
{
var converter = CreateSavedStateConverter();
Mvx.IoCProvider.RegisterSingleton(converter);
}
protected virtual IMvxSavedStateConverter CreateSavedStateConverter()
{
return new MvxSavedStateConverter();
}
protected sealed override IMvxViewsContainer CreateViewsContainer()
{
var container = CreateViewsContainer(_applicationContext);
Mvx.IoCProvider.RegisterSingleton<IMvxAndroidViewModelRequestTranslator>(container);
Mvx.IoCProvider.RegisterSingleton<IMvxAndroidViewModelLoader>(container);
var viewsContainer = container as MvxViewsContainer;
if (viewsContainer == null)
throw new MvxException("CreateViewsContainer must return an MvxViewsContainer");
return viewsContainer;
}
protected IMvxAndroidViewPresenter Presenter
{
get
{
_presenter = _presenter ?? CreateViewPresenter();
return _presenter;
}
}
protected virtual IMvxAndroidViewPresenter CreateViewPresenter()
{
return new MvxAndroidViewPresenter(AndroidViewAssemblies);
}
protected override IMvxViewDispatcher CreateViewDispatcher()
{
return new MvxAndroidViewDispatcher(Presenter);
}
protected virtual void RegisterPresenter()
{
var presenter = Presenter;
Mvx.IoCProvider.RegisterSingleton(presenter);
Mvx.IoCProvider.RegisterSingleton<IMvxViewPresenter>(presenter);
}
protected override void InitializeLastChance()
{
InitializeSavedStateConverter();
InitializeBindingBuilder();
base.InitializeLastChance();
}
protected virtual IMvxAndroidViewsContainer CreateViewsContainer(Context applicationContext)
{
return new MvxAndroidViewsContainer(applicationContext);
}
protected virtual void InitializeBindingBuilder()
{
var bindingBuilder = CreateBindingBuilder();
RegisterBindingBuilderCallbacks();
bindingBuilder.DoRegistration();
}
protected virtual void RegisterBindingBuilderCallbacks()
{
Mvx.IoCProvider.CallbackWhenRegistered<IMvxValueConverterRegistry>(FillValueConverters);
Mvx.IoCProvider.CallbackWhenRegistered<IMvxTargetBindingFactoryRegistry>(FillTargetFactories);
Mvx.IoCProvider.CallbackWhenRegistered<IMvxBindingNameRegistry>(FillBindingNames);
Mvx.IoCProvider.CallbackWhenRegistered<IMvxTypeCache<View>>(FillViewTypes);
Mvx.IoCProvider.CallbackWhenRegistered<IMvxAxmlNameViewTypeResolver>(FillAxmlViewTypeResolver);
Mvx.IoCProvider.CallbackWhenRegistered<IMvxNamespaceListViewTypeResolver>(FillNamespaceListViewTypeResolver);
}
protected virtual MvxBindingBuilder CreateBindingBuilder()
{
var bindingBuilder = new MvxAndroidBindingBuilder();
return bindingBuilder;
}
protected virtual void FillViewTypes(IMvxTypeCache<View> cache)
{
foreach (var assembly in AndroidViewAssemblies)
{
cache.AddAssembly(assembly);
}
}
protected virtual void FillBindingNames(IMvxBindingNameRegistry registry)
{
// this base class does nothing
}
protected virtual void FillAxmlViewTypeResolver(IMvxAxmlNameViewTypeResolver viewTypeResolver)
{
foreach (var kvp in ViewNamespaceAbbreviations)
{
viewTypeResolver.ViewNamespaceAbbreviations[kvp.Key] = kvp.Value;
}
}
protected virtual void FillNamespaceListViewTypeResolver(IMvxNamespaceListViewTypeResolver viewTypeResolver)
{
foreach (var viewNamespace in ViewNamespaces)
{
viewTypeResolver.Add(viewNamespace);
}
}
protected virtual void FillValueConverters(IMvxValueConverterRegistry registry)
{
registry.Fill(ValueConverterAssemblies);
registry.Fill(ValueConverterHolders);
}
protected virtual IEnumerable<Type> ValueConverterHolders => new List<Type>();
protected virtual IEnumerable<Assembly> ValueConverterAssemblies
{
get
{
var toReturn = new List<Assembly>();
toReturn.AddRange(GetViewModelAssemblies());
toReturn.AddRange(GetViewAssemblies());
return toReturn;
}
}
protected virtual IDictionary<string, string> ViewNamespaceAbbreviations => new Dictionary<string, string>
{
{ "Mvx", "MvvmCross.Platforms.Android.Views" }
};
protected virtual IEnumerable<string> ViewNamespaces => new List<string>
{
"Android.Views",
"Android.Widget",
"Android.Webkit",
"MvvmCross.Platforms.Android.Views",
};
protected virtual IEnumerable<Assembly> AndroidViewAssemblies => new List<Assembly>()
{
typeof(View).Assembly,
typeof(MvxDatePicker).Assembly,
GetType().Assembly,
};
protected virtual void FillTargetFactories(IMvxTargetBindingFactoryRegistry registry)
{
// nothing to do in this base class
}
protected override IMvxNameMapping CreateViewToViewModelNaming()
{
return new MvxPostfixAwareViewToViewModelNameMapping("View", "Activity", "Fragment");
}
}
public class MvxAndroidSetup<TApplication> : MvxAndroidSetup
where TApplication : class, IMvxApplication, new()
{
protected override IMvxApplication CreateApp() => Mvx.IoCProvider.IoCConstruct<TApplication>();
public override IEnumerable<Assembly> GetViewModelAssemblies()
{
return new[] { typeof(TApplication).GetTypeInfo().Assembly };
}
}
}
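The review comment attached to this row points out that two entries with the same key ("Mvx") cannot coexist in the `ViewNamespaceAbbreviations` dictionary. A small stand-alone C# sketch of that failure mode — the second namespace string is invented for illustration, not taken from MvvmCross:

```csharp
using System;
using System.Collections.Generic;

class DuplicateKeyDemo
{
    static void Main()
    {
        // Collection-initializer syntax calls Dictionary.Add(), so a repeated
        // key throws ArgumentException at runtime.
        try
        {
            var abbreviations = new Dictionary<string, string>
            {
                { "Mvx", "MvvmCross.Platforms.Android.Views" },
                { "Mvx", "Some.Other.Namespace" } // duplicate key -> throws
            };
            Console.WriteLine(abbreviations.Count); // never reached
        }
        catch (ArgumentException ex)
        {
            Console.WriteLine(ex.Message);
        }

        // Index-initializer syntax assigns instead of adding, so the last
        // value silently wins; either way only one mapping per key survives.
        var lastWins = new Dictionary<string, string>
        {
            ["Mvx"] = "MvvmCross.Platforms.Android.Views",
            ["Mvx"] = "Some.Other.Namespace"
        };
        Console.WriteLine(lastWins["Mvx"]); // prints "Some.Other.Namespace"
    }
}
```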
| 1 | 14,958 | Having 2 items with the same key (Mvx) won't work in a dictionary | MvvmCross-MvvmCross | .cs |
@@ -0,0 +1,17 @@
+class CouponsController < ApplicationController
+ def show
+ if coupon.valid?
+ session[:coupon] = coupon.code
+ else
+ flash[:notice] = "The coupon code you supplied is not valid."
+ end
+
+ redirect_to root_path
+ end
+
+ private
+
+ def coupon
+ @coupon ||= Coupon.new(params[:id])
+ end
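The review question for this row is about where the visitor lands after submitting a code. If the intended flow is to continue to sign-up when the coupon is valid, one hedged variation could branch the redirect; `sign_up_path` is an assumed route helper, not something taken from the actual application:

```ruby
# Hypothetical variation of the controller in the hunk above.
class CouponsController < ApplicationController
  def show
    if coupon.valid?
      session[:coupon] = coupon.code
      # Continue straight to sign-up instead of the landing page.
      redirect_to sign_up_path
    else
      flash[:notice] = "The coupon code you supplied is not valid."
      redirect_to root_path
    end
  end

  private

  def coupon
    @coupon ||= Coupon.new(params[:id])
  end
end
```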
+end | 1 | 1 | 13,797 | @cpytel how does the flow work right now? I expect to go to the sign up as customer page after putting in my code, but it goes to the longer landing page? | thoughtbot-upcase | rb |
|
@@ -75,10 +75,10 @@ void show_message(
//
switch (priority) {
case MSG_INTERNAL_ERROR:
- snprintf(event_msg, sizeof(event_msg), "[error] %s", message);
+ snprintf(event_msg, sizeof(event_msg), "[error] %.512s", message);
break;
case MSG_SCHEDULER_ALERT:
- snprintf(event_msg, sizeof(event_msg), "%s: %s",
+ snprintf(event_msg, sizeof(event_msg), "%.64s: %.512s",
_("Message from server"), message
);
break; | 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
#ifdef _WIN32
#include "boinc_win.h"
#define snprintf _snprintf
#else
#include "config.h"
#include <cstdarg>
#include <cstring>
#include <deque>
#endif
#include "str_util.h"
using std::deque;
#include "diagnostics.h"
#include "log_flags.h"
#include "str_replace.h"
#include "client_types.h"
#include "client_state.h"
#include "cs_notice.h"
#include "main.h"
#include "client_msgs.h"
MESSAGE_DESCS message_descs;
#ifdef SIM
extern void show_message(
PROJ_AM *p, char* msg, int priority, bool is_html, const char* link
);
#else
// Show a message:
// 1) As a MESSAGE_DESC (for GUI event log)
// 2) As a NOTICE, if high priority (for GUI notices)
// 3) write to log file (stdoutdae.txt)
//
void show_message(
PROJ_AM *p, char* msg, int priority, bool is_html, const char* link
) {
const char* x;
char message[1024], event_msg[1024], evt_message[2048];
double t = dtime();
char* time_string = time_to_string(t);
// Cycle the log files if needed
//
diagnostics_cycle_logs();
strlcpy(message, msg, sizeof(message));
// trim trailing \n's
//
while (strlen(message) && message[strlen(message)-1] == '\n') {
message[strlen(message)-1] = 0;
}
// add a message
//
switch (priority) {
case MSG_INTERNAL_ERROR:
snprintf(event_msg, sizeof(event_msg), "[error] %s", message);
break;
case MSG_SCHEDULER_ALERT:
snprintf(event_msg, sizeof(event_msg), "%s: %s",
_("Message from server"), message
);
break;
default:
strlcpy(event_msg, message, sizeof(event_msg));
}
message_descs.insert(p, priority, (int)t, event_msg);
// add a notice
//
switch (priority) {
case MSG_USER_ALERT:
case MSG_SCHEDULER_ALERT:
char buf[1024];
if (is_html) {
safe_strcpy(buf, message);
} else {
xml_escape(message, buf, sizeof(message));
}
NOTICE n;
n.description = buf;
if (link) {
safe_strcpy(n.link, link);
}
if (p) {
safe_strcpy(n.project_name, p->get_project_name());
}
n.create_time = n.arrival_time = t;
safe_strcpy(n.category, (priority==MSG_USER_ALERT)?"client":"scheduler");
notices.append(n);
}
strip_translation(message);
if (p) {
x = p->get_project_name();
} else {
x = "---";
}
// Construct message to be logged/displayed
snprintf(evt_message, sizeof(evt_message), "%s [%s] %s\n", time_string, x, message);
// print message to the console
printf("%s", evt_message);
#ifdef _WIN32
// MSVCRT doesn't support line buffered streams
fflush(stdout);
#endif
// print message to the debugger view port
diagnostics_trace_to_debugger(evt_message);
}
#endif
// Takes a printf style formatted string, inserts the proper values,
// and passes it to show_message
//
void msg_printf(PROJ_AM *p, int priority, const char *fmt, ...) {
char buf[8192]; // output can be much longer than format
va_list ap;
if (fmt == NULL) return;
va_start(ap, fmt);
vsnprintf(buf, sizeof(buf), fmt, ap);
buf[sizeof(buf)-1] = 0;
va_end(ap);
show_message(p, buf, priority, true, 0);
}
void msg_printf_notice(PROJ_AM *p, bool is_html, const char* link, const char *fmt, ...) {
char buf[8192]; // output can be much longer than format
va_list ap;
if (fmt == NULL) return;
va_start(ap, fmt);
vsnprintf(buf, sizeof(buf), fmt, ap);
buf[sizeof(buf)-1] = 0;
va_end(ap);
show_message(p, buf, MSG_USER_ALERT, is_html, link);
}
// handle new message:
// add to cache, and delete old messages if cache too big.
// If high priority, create a notice.
//
void MESSAGE_DESCS::insert(PROJ_AM* p, int priority, int now, char* message) {
MESSAGE_DESC* mdp = new MESSAGE_DESC;
static int seqno = 1;
if (p) {
strlcpy(
mdp->project_name, p->get_project_name(), sizeof(mdp->project_name)
);
} else {
safe_strcpy(mdp->project_name, "");
}
mdp->priority = (priority==MSG_SCHEDULER_ALERT)?MSG_USER_ALERT:priority;
mdp->timestamp = now;
mdp->seqno = seqno++;
mdp->message = message;
while (msgs.size() > MAX_SAVED_MESSAGES) {
delete msgs.back();
msgs.pop_back();
}
msgs.push_front(mdp);
}
void MESSAGE_DESCS::write(int seqno, MIOFILE& fout, bool translatable) {
int i, j;
unsigned int k;
MESSAGE_DESC* mdp;
char buf[1024];
    // messages are stored in decreasing seqno,
// i.e. newer ones are at the head of the vector.
// compute j = index of first message to return
//
j = (int)msgs.size()-1;
for (k=0; k<msgs.size(); k++) {
mdp = msgs[k];
if (mdp->seqno <= seqno) {
j = k-1;
break;
}
}
fout.printf("<msgs>\n");
for (i=j; i>=0; i--) {
mdp = msgs[i];
safe_strcpy(buf, mdp->message.c_str());
if (!translatable) {
strip_translation(buf);
}
fout.printf(
"<msg>\n"
" <project>%s</project>\n"
" <pri>%d</pri>\n"
" <seqno>%d</seqno>\n"
" <body><![CDATA[\n%s\n]]></body>\n"
" <time>%d</time>\n",
mdp->project_name,
mdp->priority,
mdp->seqno,
buf,
mdp->timestamp
);
fout.printf("</msg>\n");
}
fout.printf("</msgs>\n");
}
int MESSAGE_DESCS::highest_seqno() {
if (msgs.size()) return msgs[0]->seqno;
return 0;
}
void MESSAGE_DESCS::cleanup() {
for (unsigned int i=0; i<msgs.size(); i++) {
delete msgs[i];
}
msgs.clear();
}
string app_list_string(PROJECT* p) {
string app_list;
for (unsigned int i=0; i<gstate.apps.size(); i++) {
APP* app = gstate.apps[i];
if (app->project != p) continue;
if (!app_list.empty()) {
app_list += ", ";
}
app_list += "'";
app_list += app->name;
app_list += "'";
}
if (app_list.empty()) {
app_list = "None";
}
return app_list;
}
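The review note for this hunk floats widening `event_msg` and using the `%.*s` form so the precision can follow the buffer size. A stand-alone C illustration of how `%.*s` takes its precision at run time — the precision argument must be an `int`, hence the cast; the buffer sizes simply mirror the ones above and this is not BOINC code:

```c
#include <stdio.h>
#include <string.h>

int main(void) {
    char message[1024];
    char event_msg[1024];

    /* Fill `message` with 1023 'x' characters. */
    memset(message, 'x', sizeof(message) - 1);
    message[sizeof(message) - 1] = '\0';

    /* Fixed precision: never reads more than 512 bytes of `message`. */
    snprintf(event_msg, sizeof(event_msg), "[error] %.512s", message);
    printf("fixed:   %zu chars\n", strlen(event_msg));   /* 520 */

    /* Run-time precision via %.*s: the extra argument caps how much of
       `message` is read; it must be an int, hence the cast. */
    snprintf(event_msg, sizeof(event_msg), "[error] %.*s",
             (int)sizeof(message), message);
    printf("dynamic: %zu chars\n", strlen(event_msg));   /* 1023, capped by event_msg itself */

    return 0;
}
```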
| 1 | 13,723 | `event_msg` has the same size (1024) as a `message`. Maybe should be increased to 2048 instead? Then this `"[error] %.512s", message` could be changed to this: `"[error] %.*s", sizeof(message), message` | BOINC-boinc | php |
@@ -35,6 +35,7 @@ package org.apache.iceberg;
* changes.
*/
public interface ReplacePartitions extends SnapshotUpdate<ReplacePartitions> {
+
/**
* Add a {@link DataFile} to the table.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
/**
* Not recommended: API for overwriting files in a table by partition.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
* <p>
* This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
* all files in partitions with new data with the new additions. This operation is used to implement
* dynamic partition replacement.
* <p>
* When committing, these changes will be applied to the latest table snapshot. Commit conflicts
* will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
* This has no requirements for the latest snapshot and will not fail based on other snapshot
* changes.
*/
public interface ReplacePartitions extends SnapshotUpdate<ReplacePartitions> {
/**
* Add a {@link DataFile} to the table.
*
* @param file a data file
* @return this for method chaining
*/
ReplacePartitions addFile(DataFile file);
/**
* Validate that no partitions will be replaced and the operation is append-only.
*
* @return this for method chaining
*/
ReplacePartitions validateAppendOnly();
}
| 1 | 40,194 | Nit: unnecessary whitespace change. | apache-iceberg | java |
@@ -176,7 +176,7 @@ func (b *PinnedMap) Iter(f MapIter) error {
args := cmd[1:]
printCommand(prog, args...)
- output, err := exec.Command(prog, args...).CombinedOutput()
+ output, err := exec.Command(prog, args...).Output()
if err != nil {
return errors.Errorf("failed to dump in map (%s): %s\n%s", b.versionedFilename(), err, output)
} | 1 | // Copyright (c) 2019-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bpf
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"golang.org/x/sys/unix"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type MapIter func(k, v []byte)
type Map interface {
GetName() string
// EnsureExists opens the map, creating and pinning it if needed.
EnsureExists() error
// MapFD gets the file descriptor of the map, only valid after calling EnsureExists().
MapFD() MapFD
// Path returns the path that the map is (to be) pinned to.
Path() string
Iter(MapIter) error
Update(k, v []byte) error
Get(k []byte) ([]byte, error)
Delete(k []byte) error
}
type MapParameters struct {
Filename string
Type string
KeySize int
ValueSize int
MaxEntries int
Name string
Flags int
Version int
}
func versionedStr(ver int, str string) string {
if ver <= 1 {
return str
}
return fmt.Sprintf("%s%d", str, ver)
}
func (mp *MapParameters) versionedName() string {
return versionedStr(mp.Version, mp.Name)
}
func (mp *MapParameters) versionedFilename() string {
return versionedStr(mp.Version, mp.Filename)
}
type MapContext struct {
RepinningEnabled bool
}
func (c *MapContext) NewPinnedMap(params MapParameters) Map {
if len(params.versionedName()) >= unix.BPF_OBJ_NAME_LEN {
logrus.WithField("name", params.Name).Panic("Bug: BPF map name too long")
}
m := &PinnedMap{
context: c,
MapParameters: params,
perCPU: strings.Contains(params.Type, "percpu"),
}
return m
}
type PinnedMap struct {
context *MapContext
MapParameters
fdLoaded bool
fd MapFD
perCPU bool
}
func (b *PinnedMap) GetName() string {
return b.versionedName()
}
func (b *PinnedMap) MapFD() MapFD {
if !b.fdLoaded {
logrus.Panic("MapFD() called without first calling EnsureExists()")
}
return b.fd
}
func (b *PinnedMap) Path() string {
return b.versionedFilename()
}
func (b *PinnedMap) Close() error {
err := b.fd.Close()
b.fdLoaded = false
b.fd = 0
return err
}
func (b *PinnedMap) RepinningEnabled() bool {
if b.context == nil {
return false
}
return b.context.RepinningEnabled
}
// DumpMapCmd returns the command that can be used to dump a map or an error
func DumpMapCmd(m Map) ([]string, error) {
if pm, ok := m.(*PinnedMap); ok {
return []string{
"bpftool",
"--json",
"--pretty",
"map",
"dump",
"pinned",
pm.versionedFilename(),
}, nil
}
return nil, errors.Errorf("unrecognized map type %T", m)
}
// IterMapCmdOutput iterates over the output of a command obtained by DumpMapCmd
func IterMapCmdOutput(output []byte, f MapIter) error {
var mp []mapEntry
err := json.Unmarshal(output, &mp)
if err != nil {
return errors.Errorf("cannot parse json output: %v\n%s", err, output)
}
for _, me := range mp {
k, err := hexStringsToBytes(me.Key)
if err != nil {
return errors.Errorf("failed parsing entry %s key: %e", me, err)
}
v, err := hexStringsToBytes(me.Value)
if err != nil {
return errors.Errorf("failed parsing entry %s val: %e", me, err)
}
f(k, v)
}
return nil
}
func (b *PinnedMap) Iter(f MapIter) error {
cmd, err := DumpMapCmd(b)
if err != nil {
return err
}
prog := cmd[0]
args := cmd[1:]
printCommand(prog, args...)
output, err := exec.Command(prog, args...).CombinedOutput()
if err != nil {
return errors.Errorf("failed to dump in map (%s): %s\n%s", b.versionedFilename(), err, output)
}
if err := IterMapCmdOutput(output, f); err != nil {
return errors.WithMessagef(err, "map %s", b.versionedFilename())
}
return nil
}
func (b *PinnedMap) Update(k, v []byte) error {
if b.perCPU {
// Per-CPU maps need a buffer of value-size * num-CPUs.
logrus.Panic("Per-CPU operations not implemented")
}
return UpdateMapEntry(b.fd, k, v)
}
func (b *PinnedMap) Get(k []byte) ([]byte, error) {
if b.perCPU {
// Per-CPU maps need a buffer of value-size * num-CPUs.
logrus.Panic("Per-CPU operations not implemented")
}
return GetMapEntry(b.fd, k, b.ValueSize)
}
func appendBytes(strings []string, bytes []byte) []string {
for _, b := range bytes {
strings = append(strings, strconv.FormatInt(int64(b), 10))
}
return strings
}
func (b *PinnedMap) Delete(k []byte) error {
logrus.WithField("key", k).Debug("Deleting map entry")
args := make([]string, 0, 10+len(k))
args = append(args, "map", "delete",
"pinned", b.versionedFilename(),
"key")
args = appendBytes(args, k)
cmd := exec.Command("bpftool", args...)
out, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(string(out), "delete failed: No such file or directory") {
logrus.WithField("k", k).Debug("Item didn't exist.")
return os.ErrNotExist
}
logrus.WithField("out", string(out)).Error("Failed to run bpftool")
}
return err
}
func (b *PinnedMap) EnsureExists() error {
if b.fdLoaded {
return nil
}
_, err := MaybeMountBPFfs()
if err != nil {
logrus.WithError(err).Error("Failed to mount bpffs")
return err
}
// FIXME hard-coded dir
err = os.MkdirAll("/sys/fs/bpf/tc/globals", 0700)
if err != nil {
logrus.WithError(err).Error("Failed create dir")
return err
}
_, err = os.Stat(b.versionedFilename())
if err != nil {
if !os.IsNotExist(err) {
return err
}
logrus.Debug("Map file didn't exist")
if b.context.RepinningEnabled {
logrus.WithField("name", b.Name).Info("Looking for map by name (to repin it)")
err = RepinMap(b.versionedName(), b.versionedFilename())
if err != nil && !os.IsNotExist(err) {
return err
}
}
}
if err == nil {
logrus.Debug("Map file already exists, trying to open it")
b.fd, err = GetMapFDByPin(b.versionedFilename())
if err == nil {
b.fdLoaded = true
logrus.WithField("fd", b.fd).WithField("name", b.versionedFilename()).
Info("Loaded map file descriptor.")
}
return err
}
logrus.Debug("Map didn't exist, creating it")
cmd := exec.Command("bpftool", "map", "create", b.versionedFilename(),
"type", b.Type,
"key", fmt.Sprint(b.KeySize),
"value", fmt.Sprint(b.ValueSize),
"entries", fmt.Sprint(b.MaxEntries),
"name", b.versionedName(),
"flags", fmt.Sprint(b.Flags),
)
out, err := cmd.CombinedOutput()
if err != nil {
logrus.WithField("out", string(out)).Error("Failed to run bpftool")
return err
}
b.fd, err = GetMapFDByPin(b.versionedFilename())
if err == nil {
b.fdLoaded = true
logrus.WithField("fd", b.fd).WithField("name", b.versionedFilename()).
Info("Loaded map file descriptor.")
}
return err
}
type bpftoolMapMeta struct {
ID int `json:"id"`
Name string `json:"name"`
}
func RepinMap(name string, filename string) error {
cmd := exec.Command("bpftool", "map", "list", "-j")
out, err := cmd.Output()
if err != nil {
return errors.Wrap(err, "bpftool map list failed")
}
logrus.WithField("maps", string(out)).Debug("Got map metadata.")
var maps []bpftoolMapMeta
err = json.Unmarshal(out, &maps)
if err != nil {
return errors.Wrap(err, "bpftool returned bad JSON")
}
for _, m := range maps {
if m.Name == name {
// Found the map, try to repin it.
cmd := exec.Command("bpftool", "map", "pin", "id", fmt.Sprint(m.ID), filename)
return errors.Wrap(cmd.Run(), "bpftool failed to repin map")
}
}
return os.ErrNotExist
}
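The review comment for this row explains the switch from `CombinedOutput()` to `Output()`: stderr noise must not be mixed into the JSON that gets parsed. A self-contained Go sketch (generic command, not the felix code) showing that stderr is still reachable through `*exec.ExitError` when `Output()` fails:

```go
package main

import (
	"fmt"
	"os/exec"
)

// runAndDump returns stdout only, so callers can parse it as JSON without
// stderr noise; on failure, stderr is folded into the error message.
func runAndDump(prog string, args ...string) ([]byte, error) {
	out, err := exec.Command(prog, args...).Output()
	if err != nil {
		var stderr []byte
		if exitErr, ok := err.(*exec.ExitError); ok {
			stderr = exitErr.Stderr // populated by Output() when no Stderr writer is set
		}
		return nil, fmt.Errorf("%s failed: %v\nstderr: %s", prog, err, stderr)
	}
	return out, nil
}

func main() {
	if _, err := runAndDump("ls", "/nonexistent-path"); err != nil {
		fmt.Println(err)
	}
}
```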
| 1 | 17,841 | Hit a flake here where I think there may have been some output to stderr that got mixed in with the output from Stdout. Hence switching to `Output()`, which does also capture stderr as `err.Stderr` | projectcalico-felix | c |
@@ -119,7 +119,15 @@ void image_data_reader::set_input_params(const int width, const int height, cons
bool image_data_reader::fetch_label(CPUMat& Y, int data_id, int mb_idx) {
const label_t label = m_image_list[data_id].second;
- Y.Set(label, mb_idx, 1);
+ if (label >= 0 && label < m_num_labels) {
+ Y.Set(label, mb_idx, 1);
+ }
+ else {
+ LBANN_ERROR(
+ "\"",this->get_type(),"\" data reader ",
+ "expects data with ",m_num_labels," labels, ",
+ "but data sample ",data_id," has a label of ",label);
+ }
return true;
}
| 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// data_reader_image .hpp .cpp - generic data reader class for image dataset
////////////////////////////////////////////////////////////////////////////////
#include "lbann/data_readers/data_reader_image.hpp"
#include "lbann/utils/image.hpp"
#include "lbann/utils/timer.hpp"
#include "lbann/data_store/data_store_conduit.hpp"
#include "lbann/utils/file_utils.hpp"
#include "lbann/utils/threads/thread_utils.hpp"
#include "lbann/utils/lbann_library.hpp"
#include <fstream>
namespace lbann {
image_data_reader::image_data_reader(bool shuffle)
: generic_data_reader(shuffle) {
set_defaults();
}
image_data_reader::image_data_reader(const image_data_reader& rhs)
: generic_data_reader(rhs)
{
copy_members(rhs);
}
image_data_reader& image_data_reader::operator=(const image_data_reader& rhs) {
generic_data_reader::operator=(rhs);
m_image_dir = rhs.m_image_dir;
m_image_list = rhs.m_image_list;
m_image_width = rhs.m_image_width;
m_image_height = rhs.m_image_height;
m_image_num_channels = rhs.m_image_num_channels;
m_image_linearized_size = rhs.m_image_linearized_size;
m_num_labels = rhs.m_num_labels;
return (*this);
}
void image_data_reader::copy_members(const image_data_reader &rhs) {
if(rhs.m_data_store != nullptr) {
m_data_store = new data_store_conduit(rhs.get_data_store());
m_data_store->set_data_reader_ptr(this);
}
m_image_dir = rhs.m_image_dir;
m_image_list = rhs.m_image_list;
m_image_width = rhs.m_image_width;
m_image_height = rhs.m_image_height;
m_image_num_channels = rhs.m_image_num_channels;
m_image_linearized_size = rhs.m_image_linearized_size;
m_num_labels = rhs.m_num_labels;
//m_thread_cv_buffer = rhs.m_thread_cv_buffer
}
void image_data_reader::set_linearized_image_size() {
m_image_linearized_size = m_image_width * m_image_height * m_image_num_channels;
}
void image_data_reader::set_defaults() {
m_image_width = 256;
m_image_height = 256;
m_image_num_channels = 3;
set_linearized_image_size();
m_num_labels = 1000;
}
void image_data_reader::set_input_params(const int width, const int height, const int num_ch, const int num_labels) {
if ((width > 0) && (height > 0)) { // set and valid
m_image_width = width;
m_image_height = height;
} else if (!((width == 0) && (height == 0))) { // set but not valid
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid input image sizes";
throw lbann_exception(err.str());
}
if (num_ch > 0) {
m_image_num_channels = num_ch;
} else if (num_ch < 0) {
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid number of channels of input images";
throw lbann_exception(err.str());
}
set_linearized_image_size();
if (num_labels > 0) {
m_num_labels = num_labels;
} else if (num_labels < 0) {
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid number of labels";
throw lbann_exception(err.str());
}
}
bool image_data_reader::fetch_label(CPUMat& Y, int data_id, int mb_idx) {
const label_t label = m_image_list[data_id].second;
Y.Set(label, mb_idx, 1);
return true;
}
void image_data_reader::load() {
options *opts = options::get();
const std::string imageListFile = get_data_filename();
// load image list
m_image_list.clear();
FILE *fplist = fopen(imageListFile.c_str(), "rt");
if (!fplist) {
LBANN_ERROR("failed to open: " + imageListFile + " for reading");
}
while (!feof(fplist)) {
char imagepath[512];
label_t imagelabel;
if (fscanf(fplist, "%s%d", imagepath, &imagelabel) <= 1) {
break;
}
m_image_list.emplace_back(imagepath, imagelabel);
}
fclose(fplist);
// TODO: this will probably need to change after sample_list class
// is modified
// reset indices
m_shuffled_indices.clear();
m_shuffled_indices.resize(m_image_list.size());
std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0);
resize_shuffled_indices();
opts->set_option("node_sizes_vary", 1);
instantiate_data_store();
select_subset_of_data();
}
void read_raw_data(const std::string &filename, std::vector<char> &data) {
data.clear();
std::ifstream in(filename.c_str());
if (!in) {
LBANN_ERROR("failed to open " + filename + " for reading");
}
in.seekg(0, in.end);
int num_bytes = in.tellg();
in.seekg(0, in.beg);
data.resize(num_bytes);
in.read((char*)data.data(), num_bytes);
in.close();
}
void image_data_reader::do_preload_data_store() {
options *opts = options::get();
int rank = m_comm->get_rank_in_trainer();
bool threaded = ! options::get()->get_bool("data_store_no_thread");
if (threaded) {
if (is_master()) {
std::cout << "mode: data_store_thread\n";
}
std::shared_ptr<thread_pool> io_thread_pool = construct_io_thread_pool(m_comm, opts);
int num_threads = static_cast<int>(io_thread_pool->get_num_threads());
std::vector<std::unordered_set<int>> data_ids(num_threads);
int j = 0;
for (size_t data_id=0; data_id<m_shuffled_indices.size(); data_id++) {
int index = m_shuffled_indices[data_id];
if (m_data_store->get_index_owner(index) != rank) {
continue;
}
data_ids[j++].insert(index);
if (j == num_threads) {
j = 0;
}
}
for (int t = 0; t < num_threads; t++) {
if(t == io_thread_pool->get_local_thread_id()) {
continue;
} else {
io_thread_pool->submit_job_to_work_group(std::bind(&image_data_reader::load_conduit_nodes_from_file, this, data_ids[t]));
}
}
load_conduit_nodes_from_file(data_ids[io_thread_pool->get_local_thread_id()]);
io_thread_pool->finish_work_group();
}
else {
conduit::Node node;
if (is_master()) {
std::cout << "mode: NOT data_store_thread\n";
}
for (size_t data_id=0; data_id<m_shuffled_indices.size(); data_id++) {
int index = m_shuffled_indices[data_id];
if (m_data_store->get_index_owner(index) != rank) {
continue;
}
load_conduit_node_from_file(index, node);
m_data_store->set_preloaded_conduit_node(index, node);
}
}
}
void image_data_reader::setup(int num_io_threads, observer_ptr<thread_pool> io_thread_pool) {
generic_data_reader::setup(num_io_threads, io_thread_pool);
m_transform_pipeline.set_expected_out_dims(
{static_cast<size_t>(m_image_num_channels),
static_cast<size_t>(m_image_height),
static_cast<size_t>(m_image_width)});
}
std::vector<image_data_reader::sample_t> image_data_reader::get_image_list_of_current_mb() const {
std::vector<sample_t> ret;
ret.reserve(m_mini_batch_size);
return ret;
}
bool image_data_reader::load_conduit_nodes_from_file(const std::unordered_set<int> &data_ids) {
conduit::Node node;
for (auto t : data_ids) {
load_conduit_node_from_file(t, node);
m_data_store->set_preloaded_conduit_node(t, node);
}
return true;
}
void image_data_reader::load_conduit_node_from_file(int data_id, conduit::Node &node) {
node.reset();
const std::string filename = get_file_dir() + m_image_list[data_id].first;
int label = m_image_list[data_id].second;
std::vector<char> data;
read_raw_data(filename, data);
node[LBANN_DATA_ID_STR(data_id) + "/label"].set(label);
node[LBANN_DATA_ID_STR(data_id) + "/buffer"].set(data);
node[LBANN_DATA_ID_STR(data_id) + "/buffer_size"] = data.size();
}
} // namespace lbann
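The review comment on this hunk suggests writing the bounds as `static_cast<label_t>(0)` and `static_cast<label_t>(m_num_labels)` rather than bare literals, in case `label_t` and the label-count type differ in width or signedness. A minimal stand-alone sketch of that style; the `label_t` alias and the function are simplified stand-ins, not the LBANN definitions:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

using label_t = std::int64_t;  // stand-in for the real LBANN typedef

// Keep the whole comparison in label_t so no implicit conversion sneaks in.
bool check_label(label_t label, int num_labels) {
    if (label >= static_cast<label_t>(0) &&
        label <  static_cast<label_t>(num_labels)) {
        return true;
    }
    throw std::out_of_range("label out of range");
}

int main() {
    std::cout << check_label(3, 10) << '\n';  // prints 1
    try {
        check_label(-1, 10);
    } catch (const std::out_of_range& e) {
        std::cout << e.what() << '\n';
    }
    return 0;
}
```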
| 1 | 15,275 | I would use static_cast<label_t>(0) and static_cast<label_T>(m_num_labels) just in case. | LLNL-lbann | cpp |
@@ -406,4 +406,18 @@ public class ZMSUtils {
return authorityFilter;
}
+
+ public static String lowerDomainInResource(String resource) {
+ if (resource == null) {
+ return null;
+ }
+
+ int delimiterIndex = resource.indexOf(":");
+ if (delimiterIndex == -1) {
+ return resource;
+ }
+
+ String lowerCasedDomain = resource.substring(0, delimiterIndex).toLowerCase();
+ return lowerCasedDomain + resource.substring(delimiterIndex);
+ }
} | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms.utils;
import java.util.ArrayList;
import java.util.List;
import com.yahoo.athenz.auth.Authority;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.yahoo.athenz.auth.AuthorityConsts;
import com.yahoo.athenz.auth.Principal;
import com.yahoo.athenz.common.server.log.AuditLogMsgBuilder;
import com.yahoo.athenz.common.server.log.AuditLogger;
import com.yahoo.athenz.common.server.util.ServletRequestUtil;
import com.yahoo.athenz.common.server.util.StringUtils;
import com.yahoo.athenz.zms.Assertion;
import com.yahoo.athenz.zms.AssertionEffect;
import com.yahoo.athenz.zms.Policy;
import com.yahoo.athenz.zms.ResourceContext;
import com.yahoo.athenz.zms.ResourceError;
import com.yahoo.athenz.zms.ResourceException;
import com.yahoo.athenz.zms.Role;
import com.yahoo.athenz.zms.RoleMember;
import com.yahoo.athenz.zms.RsrcCtxWrapper;
import com.yahoo.athenz.zms.ZMSConsts;
import com.yahoo.athenz.zms.ZMSImpl;
public class ZMSUtils {
private static final Logger LOG = LoggerFactory.getLogger(ZMSUtils.class);
public static void addAssertion(Policy policy, String resource, String action, String role,
AssertionEffect effect) {
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
assertions = new ArrayList<>();
policy.setAssertions(assertions);
}
Assertion assertion = new Assertion()
.setAction(action)
.setResource(resource)
.setRole(role);
if (effect != AssertionEffect.ALLOW) {
assertion.setEffect(effect);
}
assertions.add(assertion);
}
public static Role makeAdminRole(String domainName, List<String> adminUsers) {
List<RoleMember> roleMembers = new ArrayList<>();
for (String admin: adminUsers) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(admin);
roleMember.setActive(true);
roleMember.setApproved(true);
roleMembers.add(roleMember);
}
return new Role()
.setName(roleResourceName(domainName, ZMSConsts.ADMIN_ROLE_NAME))
.setRoleMembers(roleMembers);
}
public static Policy makeAdminPolicy(String domainName, Role adminsRole) {
Policy policy = new Policy()
.setName(policyResourceName(domainName, ZMSConsts.ADMIN_POLICY_NAME));
addAssertion(policy, domainName + ":*", "*", adminsRole.getName(), AssertionEffect.ALLOW);
return policy;
}
private static String generateResourceName(String domainName, String resName, String resType) {
if (resType.isEmpty()) {
return domainName + "." + resName;
} else {
return domainName + ":" + resType + "." + resName;
}
}
public static String roleResourceName(String domainName, String roleName) {
return generateResourceName(domainName, roleName, ZMSConsts.OBJECT_ROLE);
}
public static String policyResourceName(String domainName, String policyName) {
return generateResourceName(domainName, policyName, ZMSConsts.OBJECT_POLICY);
}
public static String serviceResourceName(String domainName, String serviceName) {
return generateResourceName(domainName, serviceName, "");
}
public static String entityResourceName(String domainName, String serviceName) {
return generateResourceName(domainName, serviceName, "");
}
public static String removeDomainPrefix(String objectName, String domainName, String objectPrefix) {
String valPrefix = domainName + ":" + objectPrefix;
if (objectName.startsWith(valPrefix)) {
objectName = objectName.substring(valPrefix.length());
}
return objectName;
}
public static String removeDomainPrefixForService(String serviceName, String domainName) {
final String valPrefix = domainName + ".";
if (serviceName.startsWith(valPrefix)) {
serviceName = serviceName.substring(valPrefix.length());
}
return serviceName;
}
public static String getTenantResourceGroupRolePrefix(String provSvcName, String tenantDomain, String resourceGroup) {
StringBuilder rolePrefix = new StringBuilder(256);
rolePrefix.append(provSvcName).append(".tenant.").append(tenantDomain).append('.');
if (resourceGroup != null) {
rolePrefix.append("res_group.").append(resourceGroup).append('.');
}
return rolePrefix.toString();
}
public static String getProviderResourceGroupRolePrefix(String provSvcDomain, String provSvcName, String resourceGroup) {
StringBuilder rolePrefix = new StringBuilder(256);
rolePrefix.append(provSvcDomain).append('.').append(provSvcName).append('.');
if (resourceGroup != null) {
rolePrefix.append("res_group.").append(resourceGroup).append('.');
}
return rolePrefix.toString();
}
public static String getTrustedResourceGroupRolePrefix(String provSvcDomain, String provSvcName,
String tenantDomain, String resourceGroup) {
StringBuilder trustedRole = new StringBuilder(256);
trustedRole.append(provSvcDomain).append(AuthorityConsts.ROLE_SEP).append(provSvcName)
.append(".tenant.").append(tenantDomain).append('.');
if (resourceGroup != null) {
trustedRole.append("res_group.").append(resourceGroup).append('.');
}
return trustedRole.toString();
}
public static boolean assumeRoleResourceMatch(String roleName, Assertion assertion) {
if (!ZMSConsts.ACTION_ASSUME_ROLE.equalsIgnoreCase(assertion.getAction())) {
return false;
}
String rezPattern = StringUtils.patternFromGlob(assertion.getResource());
return roleName.matches(rezPattern);
}
public static void removeMembers(List<RoleMember> originalRoleMembers, List<RoleMember> removeRoleMembers) {
if (removeRoleMembers == null || originalRoleMembers == null) {
return;
}
for (RoleMember removeMember : removeRoleMembers) {
String removeName = removeMember.getMemberName();
for (int j = 0; j < originalRoleMembers.size(); j++) {
if (removeName.equalsIgnoreCase(originalRoleMembers.get(j).getMemberName())) {
originalRoleMembers.remove(j);
break;
}
}
}
}
public static List<String> convertRoleMembersToMembers(List<RoleMember> members) {
List<String> memberList = new ArrayList<>();
if (members == null) {
return memberList;
}
for (RoleMember member: members) {
// only add active members to membername list. Active flag is optional for default value
if (member.getActive() != Boolean.FALSE) {
memberList.add(member.getMemberName());
}
}
return memberList;
}
public static List<RoleMember> convertMembersToRoleMembers(List<String> members) {
List<RoleMember> roleMemberList = new ArrayList<>();
if (members == null) {
return roleMemberList;
}
for (String member: members) {
roleMemberList.add(new RoleMember().setMemberName(member));
}
return roleMemberList;
}
/**
* Setup a new AuditLogMsgBuilder object with common values.
**/
public static AuditLogMsgBuilder getAuditLogMsgBuilder(ResourceContext ctx,
AuditLogger auditLogger, String domainName, String auditRef, String caller,
String method) {
AuditLogMsgBuilder msgBldr = auditLogger.getMsgBuilder();
// get the where - which means where this server is running
msgBldr.where(ZMSImpl.serverHostName);
msgBldr.whatDomain(domainName).why(auditRef).whatApi(caller).whatMethod(method);
// get the 'who' and set it
if (ctx != null) {
Principal princ = ((RsrcCtxWrapper) ctx).principal();
if (princ != null) {
String fullName = princ.getFullName();
String unsignedCreds = princ.getUnsignedCredentials();
if (unsignedCreds == null) {
StringBuilder sb = new StringBuilder();
sb.append("who-name=").append(princ.getName());
sb.append(",who-domain=").append(princ.getDomain());
sb.append(",who-fullname=").append(fullName);
List<String> roles = princ.getRoles();
if (roles != null && roles.size() > 0) {
sb.append(",who-roles=").append(roles.toString());
}
unsignedCreds = sb.toString();
}
msgBldr.who(unsignedCreds);
msgBldr.whoFullName(fullName);
}
// get the client IP
msgBldr.clientIp(ServletRequestUtil.getRemoteAddress(ctx.request()));
}
return msgBldr;
}
public static RuntimeException error(int code, String msg, String caller) {
LOG.error("Error: {} code: {} message: {}", caller, code, msg);
// emit our metrics if configured. the method will automatically
// return from the caller if caller is null
emitMonmetricError(code, caller);
return new ResourceException(code, new ResourceError().code(code).message(msg));
}
public static RuntimeException requestError(String msg, String caller) {
return error(ResourceException.BAD_REQUEST, msg, caller);
}
public static RuntimeException unauthorizedError(String msg, String caller) {
return error(ResourceException.UNAUTHORIZED, msg, caller);
}
public static RuntimeException forbiddenError(String msg, String caller) {
return error(ResourceException.FORBIDDEN, msg, caller);
}
public static RuntimeException notFoundError(String msg, String caller) {
return error(ResourceException.NOT_FOUND, msg, caller);
}
public static RuntimeException internalServerError(String msg, String caller) {
return error(ResourceException.INTERNAL_SERVER_ERROR, msg, caller);
}
public static RuntimeException quotaLimitError(String msg, String caller) {
return error(ResourceException.TOO_MANY_REQUESTS, msg, caller);
}
public static boolean emitMonmetricError(int errorCode, String caller) {
if (errorCode < 1) {
return false;
}
if (caller == null || caller.isEmpty()) {
return false;
}
if (ZMSImpl.metric == null) {
return false;
}
// Set 3 scoreboard error metrics:
// (1) cumulative "ERROR" (of all zms request and error types)
// (2) cumulative granular zms request and error type (eg- "getdomainlist_error_400")
// (3) cumulative error type (of all zms requests) (eg- "error_404")
String errCode = Integer.toString(errorCode);
ZMSImpl.metric.increment("ERROR");
ZMSImpl.metric.increment(caller.toLowerCase() + "_error_" + errCode);
ZMSImpl.metric.increment("error_" + errCode);
return true;
}
public static void threadSleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ignored) {
}
}
public static boolean parseBoolean(final String value, boolean defaultValue) {
boolean boolVal = defaultValue;
if (value != null && !value.isEmpty()) {
boolVal = Boolean.parseBoolean(value.trim());
}
return boolVal;
}
public static boolean isUserDomainPrincipal(final String memberName, final String userDomainPrefix,
final List<String> addlUserCheckDomainPrefixList) {
if (memberName.startsWith(userDomainPrefix)) {
return true;
}
if (addlUserCheckDomainPrefixList != null) {
for (String prefix : addlUserCheckDomainPrefixList) {
if (memberName.startsWith(prefix)) {
return true;
}
}
}
return false;
}
public static String extractObjectName(String domainName, String fullName, String objType) {
// generate prefix to compare with
final String prefix = domainName + objType;
if (!fullName.startsWith(prefix)) {
return null;
}
return fullName.substring(prefix.length());
}
public static String extractRoleName(String domainName, String fullRoleName) {
return extractObjectName(domainName, fullRoleName, AuthorityConsts.ROLE_SEP);
}
public static String extractPolicyName(String domainName, String fullPolicyName) {
return extractObjectName(domainName, fullPolicyName, ":policy.");
}
public static String extractServiceName(String domainName, String fullServiceName) {
return extractObjectName(domainName, fullServiceName, ".");
}
public static boolean isUserAuthorityFilterValid(Authority userAuthority, final String filterList, final String memberName) {
// in most cases we're going to have a single filter configured
// so we'll optimize for that case and not create an array
if (filterList.indexOf(',') == -1) {
if (!userAuthority.isAttributeSet(memberName, filterList)) {
LOG.error("Principal {} does not satisfy user authority {} filter", memberName, filterList);
return false;
}
return true;
} else {
final String[] filterItems = filterList.split(",");
for (String filterItem : filterItems) {
if (!userAuthority.isAttributeSet(memberName, filterItem)) {
LOG.error("Principal {} does not satisfy user authority {} filter", memberName, filterItem);
return false;
}
}
return true;
}
}
public static String combineUserAuthorityFilters(final String roleUserAuthorityFilter, final String domainUserAuthorityFilter) {
String authorityFilter = null;
if (roleUserAuthorityFilter != null && !roleUserAuthorityFilter.isEmpty()) {
authorityFilter = roleUserAuthorityFilter;
}
if (domainUserAuthorityFilter != null && !domainUserAuthorityFilter.isEmpty()) {
if (authorityFilter == null) {
authorityFilter = domainUserAuthorityFilter;
} else {
// no need for extra work to remove duplicates
authorityFilter += "," + domainUserAuthorityFilter;
}
}
return authorityFilter;
}
}
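For the `lowerDomainInResource` helper added in the hunk above, a small usage sketch makes the behaviour concrete: only the part before the first ':' is lower-cased, everything after it keeps its case. The example strings are invented; the method body is copied so the snippet compiles on its own:

```java
public class LowerDomainDemo {

    // Same logic as the helper in the hunk above, duplicated here so the
    // example is self-contained.
    static String lowerDomainInResource(String resource) {
        if (resource == null) {
            return null;
        }
        int delimiterIndex = resource.indexOf(":");
        if (delimiterIndex == -1) {
            return resource;
        }
        return resource.substring(0, delimiterIndex).toLowerCase()
                + resource.substring(delimiterIndex);
    }

    public static void main(String[] args) {
        System.out.println(lowerDomainInResource("Sports:Role.Admin"));   // sports:Role.Admin
        System.out.println(lowerDomainInResource("NoDelimiterResource")); // unchanged
        System.out.println(lowerDomainInResource(null));                  // null
    }
}
```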
| 1 | 5,356 | Even if we want to keep in original case - domain will be lower-cased. | AthenZ-athenz | java |
@@ -0,0 +1,9 @@
+class RenameTopicBodyHtmlToTrailMap < ActiveRecord::Migration
+ def up
+ rename_column :topics, :body_html, :trail_map
+ end
+
+ def down
+ rename_column :topics, :trail_map, :body_html
+ end
+end | 1 | 1 | 6,482 | Based on discussion with Chad it feels like trail_map_json could be a good column name. | thoughtbot-upcase | rb |
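If the column name floated in the review comment for this row (`trail_map_json`) were adopted, the migration might look like the following — purely a hypothetical variant for comparison, not code from the project:

```ruby
class RenameTopicBodyHtmlToTrailMapJson < ActiveRecord::Migration
  def up
    rename_column :topics, :body_html, :trail_map_json
  end

  def down
    rename_column :topics, :trail_map_json, :body_html
  end
end
```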
|
@@ -98,9 +98,9 @@ class Users extends Controller
'comment' => $permission->comment,
'type' => 'balloon-selector',
'options' => [
- 1 => 'Allow',
- 0 => 'Inherit',
- -1 => 'Deny',
+ 1 => Lang::get('backend::lang.user.allow'),
+ 0 => Lang::get('backend::lang.user.inherit'),
+ -1 => Lang::get('backend::lang.user.deny'),
],
'attributes' => [
'data-trigger' => "input[name='User[permissions][superuser]']", | 1 | <?php namespace Backend\Controllers;
use Lang;
use Backend;
use Redirect;
use BackendMenu;
use BackendAuth;
use Backend\Classes\Controller;
use System\Classes\SettingsManager;
/**
* Backend user controller
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*
*/
class Users extends Controller
{
public $implement = [
'Backend.Behaviors.FormController',
'Backend.Behaviors.ListController'
];
public $formConfig = 'config_form.yaml';
public $listConfig = 'config_list.yaml';
public $requiredPermissions = ['backend.manage_users'];
public $bodyClass = 'compact-container';
public function __construct()
{
parent::__construct();
if ($this->action == 'myaccount')
$this->requiredPermissions = null;
BackendMenu::setContext('October.System', 'system', 'users');
SettingsManager::setContext('October.System', 'administrators');
}
/**
* Update controller
*/
public function update($recordId, $context = null)
{
// Users cannot edit themselves, only use My Settings
if ($context != 'myaccount' && $recordId == $this->user->id)
return Redirect::to(Backend::url('backend/users/myaccount'));
return $this->getClassExtension('Backend.Behaviors.FormController')->update($recordId, $context);
}
/**
* My Settings controller
*/
public function myaccount()
{
SettingsManager::setContext('October.Backend', 'myaccount');
$this->pageTitle = Lang::get('backend::lang.myaccount.menu_label');
return $this->update($this->user->id, 'myaccount');
}
/**
* Proxy update onSave event
*/
public function myaccount_onSave()
{
$result = $this->getClassExtension('Backend.Behaviors.FormController')->update_onSave($this->user->id, 'myaccount');
/*
* If the password or login name has been updated, reauthenticate the user
*/
$loginChanged = $this->user->login != post('User[login]');
$passwordChanged = strlen(post('User[password]'));
if ($loginChanged || $passwordChanged)
BackendAuth::login($this->user->reload(), true);
return $result;
}
/**
* Add available permission fields to the User form.
*/
protected function formExtendFields($host)
{
if ($host->getContext() == 'myaccount')
return;
$permissionFields = [];
foreach (BackendAuth::listPermissions() as $permission) {
$fieldName = 'permissions['.$permission->code.']';
$fieldConfig = [
'label' => $permission->label,
'comment' => $permission->comment,
'type' => 'balloon-selector',
'options' => [
1 => 'Allow',
0 => 'Inherit',
-1 => 'Deny',
],
'attributes' => [
'data-trigger' => "input[name='User[permissions][superuser]']",
'data-trigger-type' => 'disable',
'data-trigger-condition' => 'checked',
],
'span' => 'auto',
];
if (isset($permission->tab))
$fieldConfig['tab'] = $permission->tab;
$permissionFields[$fieldName] = $fieldConfig;
}
$host->addTabFields($permissionFields);
}
} | 1 | 10,466 | This array should be logic-less, just the language string (without `Lang::get()`) should appear. Then `trans()` is [or should be] used when the balloon selector renders the values. | octobercms-october | php |
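The review comment for this row asks for the options array to stay logic-less: store only the language keys and leave translation to the widget at render time. A hedged sketch of that idea — whether the balloon-selector widget runs the values through `trans()` automatically is an assumption to verify against October's form widgets:

```php
<?php
// Hypothetical, logic-less version of the options map from the hunk above:
// raw language keys only, no Lang::get() at definition time.
$options = [
    1  => 'backend::lang.user.allow',
    0  => 'backend::lang.user.inherit',
    -1 => 'backend::lang.user.deny',
];

foreach ($options as $value => $langKey) {
    // Placeholder for what the rendering side would do; in October/Laravel
    // that would be trans($langKey) rather than printing the key itself.
    echo $value . ' => ' . $langKey . PHP_EOL;
}
```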
@@ -253,7 +253,7 @@ module Beaker
else
task = 'defaultgroup:ensure_default_group'
end
- on dashboard, "/opt/puppet/bin/rake -sf /opt/puppet/share/puppet-dashboard/Rakefile #{task} RAILS_ENV=production"
+ on dashboard, "BUNDLE_GEMFILE=/opt/puppet/share/puppet-dashboard/Gemfile /opt/puppet/bin/bundle/exec /opt/puppet/bin/rake -sf /opt/puppet/share/puppet-dashboard/Rakefile #{task} RAILS_ENV=production"
      # Now that all hosts are in the dashboard, run puppet one more
# time to configure mcollective | 1 | require 'pathname'
module Beaker
module DSL
#
# This module contains methods to help cloning, extracting git info,
# ordering of Puppet packages, and installing ruby projects that
# contain an `install.rb` script.
module InstallUtils
# The default install path
SourcePath = "/opt/puppet-git-repos"
# A regex to know if the uri passed is pointing to a git repo
GitURI = %r{^(git|https?|file)://|^git@}
# Github's ssh signature for cloning via ssh
GitHubSig = 'github.com,207.97.227.239 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=='
# @param [String] uri A uri in the format of <git uri>#<revision>
# the `git://`, `http://`, `https://`, and ssh
# (if cloning as the remote git user) protocols
# are valid for <git uri>
#
# @example Usage
# project = extract_repo_info_from '[email protected]:puppetlabs/SuperSecretSauce#what_is_justin_doing'
#
# puts project[:name]
# #=> 'SuperSecretSauce'
#
# puts project[:rev]
# #=> 'what_is_justin_doing'
#
# @return [Hash{Symbol=>String}] Returns a hash containing the project
# name, repository path, and revision
# (defaults to HEAD)
#
# @api dsl
def extract_repo_info_from uri
project = {}
repo, rev = uri.split('#', 2)
project[:name] = Pathname.new(repo).basename('.git').to_s
project[:path] = repo
project[:rev] = rev || 'HEAD'
return project
end
# Takes an array of package info hashes (like that returned from
# {#extract_repo_info_from}) and sorts the `puppet`, `facter`, `hiera`
# packages so that puppet's dependencies will be installed first.
#
# @!visibility private
def order_packages packages_array
puppet = packages_array.select {|e| e[:name] == 'puppet' }
puppet_depends_on = packages_array.select do |e|
e[:name] == 'hiera' or e[:name] == 'facter'
end
depends_on_puppet = (packages_array - puppet) - puppet_depends_on
[puppet_depends_on, puppet, depends_on_puppet].flatten
end
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
# @param [String] path The path on the remote [host] to the repository
# @param [Hash{Symbol=>String}] repository A hash representing repo
# info like that emitted by
# {#extract_repo_info_from}
#
# @example Getting multiple project versions
# versions = [puppet_repo, facter_repo, hiera_repo].inject({}) do |vers, repo_info|
# vers.merge(find_git_repo_versions(host, '/opt/git-puppet-repos', repo_info) )
# end
# @return [Hash] Executes git describe on [host] and returns a Hash
# with the key of [repository[:name]] and value of
# the output from git describe.
#
# @note This requires the helper methods:
# * {Beaker::DSL::Structure#step}
# * {Beaker::DSL::Helpers#on}
#
# @api dsl
def find_git_repo_versions host, path, repository
version = {}
step "Grab version for #{repository[:name]}" do
on host, "cd #{path}/#{repository[:name]} && " +
"git describe || true" do
version[repository[:name]] = stdout.chomp
end
end
version
end
#
# @see #find_git_repo_versions
def install_from_git host, path, repository
name = repository[:name]
repo = repository[:path]
rev = repository[:rev]
target = "#{path}/#{name}"
step "Clone #{repo} if needed" do
on host, "test -d #{path} || mkdir -p #{path}"
on host, "test -d #{target} || git clone #{repo} #{target}"
end
step "Update #{name} and check out revision #{rev}" do
commands = ["cd #{target}",
"remote rm origin",
"remote add origin #{repo}",
"fetch origin",
"clean -fdx",
"checkout -f #{rev}"]
on host, commands.join(" && git ")
end
step "Install #{name} on the system" do
# The solaris ruby IPS package has bindir set to /usr/ruby/1.8/bin.
# However, this is not the path to which we want to deliver our
# binaries. So if we are using solaris, we have to pass the bin and
# sbin directories to the install.rb
install_opts = ''
install_opts = '--bindir=/usr/bin --sbindir=/usr/sbin' if
host['platform'].include? 'solaris'
on host, "cd #{target} && " +
"if [ -f install.rb ]; then " +
"ruby ./install.rb #{install_opts}; " +
"else true; fi"
end
end
def do_install hosts, version, path, pre_30, options = {}
#convenience methods for installation
########################################################
def installer_cmd(host, version, installer)
if host['platform'] =~ /windows/
"cd #{host['working_dir']} && msiexec.exe /qn /i puppet-enterprise-#{version}.msi"
else
"cd #{host['working_dir']}/#{host['dist']} && ./#{installer}"
end
end
def link_exists?(link)
require "net/http"
require "open-uri"
url = URI.parse(link)
Net::HTTP.start(url.host, url.port) do |http|
return http.head(url.request_uri).code == "200"
end
end
def fetch_puppet(hosts, version, path)
local = File.directory?(path)
hosts.each do |host|
filename = ""
extension = ""
if host['platform'] =~ /windows/
filename = "puppet-enterprise-#{version}"
extension = ".msi"
else
filename = "#{host['dist']}"
extension = ""
if local
extension = File.exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
else
extension = link_exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
end
end
if local
if not File.exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
scp_to host, "#{path}/#{filename}#{extension}", "#{host['working_dir']}/#{filename}#{extension}"
else
if not link_exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
on host, "cd #{host['working_dir']}; curl #{path}/#{filename}#{extension} -o #{filename}#{extension}"
end
if extension =~ /gz/
on host, "cd #{host['working_dir']}; gunzip #{filename}#{extension}"
end
if extension =~ /tar/
on host, "cd #{host['working_dir']}; tar -xvf #{filename}.tar"
end
end
end
########################################################
#start installation steps here
options[:installer] = 'puppet-enterprise-installer' unless options[:installer]
options[:type] = :install unless options[:type]
hostcert='uname | grep -i sunos > /dev/null && hostname || hostname -s'
master_certname = on(master, hostcert).stdout.strip
answers = Beaker::Answers.answers(version, hosts, master_certname, options)
special_nodes = [master, database, dashboard].uniq
real_agents = agents - special_nodes
# Set PE distribution for all the hosts, create working dir
use_all_tar = ENV['PE_USE_ALL_TAR'] == 'true'
hosts.each do |host|
platform = use_all_tar ? 'all' : host['platform']
host['dist'] = "puppet-enterprise-#{version}-#{platform}"
host['working_dir'] = "/tmp/" + Time.new.strftime("%Y-%m-%d_%H.%M.%S") #unique working dirs make me happy
on host, "mkdir #{host['working_dir']}"
end
fetch_puppet(hosts, version, path)
hosts.each do |host|
# Database host was added in 3.0. Skip it if installing an older version
next if host == database and host != master and host != dashboard and pre_30
if host['platform'] =~ /windows/
on host, "#{installer_cmd(host, version, options[:installer])} PUPPET_MASTER_SERVER=#{master} PUPPET_AGENT_CERTNAME=#{host}"
else
create_remote_file host, "#{host['working_dir']}/answers", Beaker::Answers.answer_string(host, answers)
on host, "#{installer_cmd(host, version, options[:installer])} -a #{host['working_dir']}/answers"
end
end
# If we're installing a version less than 3.0, ignore the database host
install_hosts = hosts.dup
install_hosts.delete(database) if pre_30 and database != master and database != dashboard
# On each agent, we ensure the certificate is signed then shut down the agent
install_hosts.each do |host|
sign_certificate(host)
stop_agent(host)
end
# Wait for PuppetDB to be totally up and running
sleep_until_puppetdb_started(database) unless pre_30
# Run the agent once to ensure everything is in the dashboard
install_hosts.each do |host|
on host, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
# Workaround for PE-1105 when deploying 3.0.0
# The installer did not respect our database host answers in 3.0.0,
# and would cause puppetdb to be bounced by the agent run. By sleeping
# again here, we ensure that if that bounce happens during an upgrade
# test we won't fail early in the install process.
if version == '3.0.0' and host == database
sleep_until_puppetdb_started(database)
end
end
install_hosts.each do |host|
wait_for_host_in_dashboard(host)
end
if pre_30
task = 'nodegroup:add_all_nodes group=default'
else
task = 'defaultgroup:ensure_default_group'
end
on dashboard, "/opt/puppet/bin/rake -sf /opt/puppet/share/puppet-dashboard/Rakefile #{task} RAILS_ENV=production"
      # Now that all hosts are in the dashboard, run puppet one more
# time to configure mcollective
on install_hosts, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
end
#is version a < version b
#3.0.0-160-gac44cfb is greater than 3.0.0, and 2.8.2
def version_is_less a, b
a = a.split('-')[0].split('.')
b = b.split('-')[0].split('.')
(0...a.length).each do |i|
if i < b.length
if a[i] < b[i]
return true
elsif a[i] > b[i]
return false
end
else
return false
end
end
return false
end
def install_pe version, path
pre_30 = version_is_less(version, '3.0')
step "Install #{version} PE on #{path}"
do_install hosts, version, path, pre_30
end
def upgrade_pe version, path, from
pre_30 = version_is_less(version, '3.0')
if pre_30
do_install(hosts, version, path, pre_30, :type => :upgrade, :installer => 'puppet-enterprise-upgrader', :from => from)
else
do_install(hosts, version, path, pre_30, :type => :upgrade, :from => from)
end
end
end
end
end
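The review comment for this row worries that the bundler-wrapped rake call is also reached for pre-3.0 installs, which did not ship bundler. A hedged sketch of splitting the command per PE generation — the paths mirror the hunk above, `bundle exec` (rather than the hunk's `bundle/exec`) is my reading of the intent, and none of this has been tested against PE:

```ruby
# Hypothetical helper, not from beaker: choose the dashboard rake invocation
# depending on whether this is a pre-3.0 install.
def dashboard_rake_command(pre_30)
  rakefile = '/opt/puppet/share/puppet-dashboard/Rakefile'
  if pre_30
    task = 'nodegroup:add_all_nodes group=default'
    # Pre-3.0 dashboards had no bundler, so call rake directly.
    "/opt/puppet/bin/rake -sf #{rakefile} #{task} RAILS_ENV=production"
  else
    task = 'defaultgroup:ensure_default_group'
    "BUNDLE_GEMFILE=/opt/puppet/share/puppet-dashboard/Gemfile " \
      "/opt/puppet/bin/bundle exec /opt/puppet/bin/rake -sf #{rakefile} #{task} RAILS_ENV=production"
  end
end

puts dashboard_rake_command(true)
puts dashboard_rake_command(false)
```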
| 1 | 4,609 | This looks like it runs both pre 3.0 rake tasks and 3 rake tasks, and since pre 3.0 we didn't use bundler, my guess is it will fail then. | voxpupuli-beaker | rb |
@@ -450,7 +450,14 @@ func (m *Manager) Set(container *configs.Config) error {
}
func getUnitName(c *configs.Cgroup) string {
- return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
+ allowed := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:-_.\\"
+ sanitizeFunc := func(r rune) rune {
+ if strings.ContainsRune(allowed, r) {
+ return r
+ }
+ return '.'
+ }
+ return fmt.Sprintf("%s-%s.scope", strings.Map(sanitizeFunc, c.Parent), strings.Map(sanitizeFunc, c.Name))
}
// Atm we can't use the systemd device support because of two missing things: | 1 | // +build linux
package systemd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
systemdDbus "github.com/coreos/go-systemd/dbus"
systemdUtil "github.com/coreos/go-systemd/util"
"github.com/godbus/dbus"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
)
type Manager struct {
mu sync.Mutex
Cgroups *configs.Cgroup
Paths map[string]string
}
type subsystem interface {
// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
GetStats(path string, stats *cgroups.Stats) error
// Set the cgroup represented by cgroup.
Set(path string, cgroup *configs.Cgroup) error
}
var subsystems = map[string]subsystem{
"devices": &fs.DevicesGroup{},
"memory": &fs.MemoryGroup{},
"cpu": &fs.CpuGroup{},
"cpuset": &fs.CpusetGroup{},
"cpuacct": &fs.CpuacctGroup{},
"blkio": &fs.BlkioGroup{},
"hugetlb": &fs.HugetlbGroup{},
"perf_event": &fs.PerfEventGroup{},
"freezer": &fs.FreezerGroup{},
"net_prio": &fs.NetPrioGroup{},
"net_cls": &fs.NetClsGroup{},
"name=systemd": &fs.NameGroup{},
}
const (
testScopeWait = 4
)
var (
connLock sync.Mutex
theConn *systemdDbus.Conn
hasStartTransientUnit bool
hasTransientDefaultDependencies bool
)
func newProp(name string, units interface{}) systemdDbus.Property {
return systemdDbus.Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
func UseSystemd() bool {
if !systemdUtil.IsRunningSystemd() {
return false
}
connLock.Lock()
defer connLock.Unlock()
if theConn == nil {
var err error
theConn, err = systemdDbus.New()
if err != nil {
return false
}
// Assume we have StartTransientUnit
hasStartTransientUnit = true
// But if we get UnknownMethod error we don't
if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
hasStartTransientUnit = false
return hasStartTransientUnit
}
}
}
// Ensure the scope name we use doesn't exist. Use the Pid to
// avoid collisions between multiple libcontainer users on a
// single host.
scope := fmt.Sprintf("libcontainer-%d-systemd-test-default-dependencies.scope", os.Getpid())
testScopeExists := true
for i := 0; i <= testScopeWait; i++ {
if _, err := theConn.StopUnit(scope, "replace", nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
testScopeExists = false
break
}
}
}
time.Sleep(time.Millisecond)
}
// Bail out if we can't kill this scope without testing for DefaultDependencies
if testScopeExists {
return hasStartTransientUnit
}
// Assume StartTransientUnit on a scope allows DefaultDependencies
hasTransientDefaultDependencies = true
ddf := newProp("DefaultDependencies", false)
if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{ddf}, nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
hasTransientDefaultDependencies = false
}
}
}
// Not critical because of the stop unit logic above.
theConn.StopUnit(scope, "replace", nil)
}
return hasStartTransientUnit
}
func getIfaceForUnit(unitName string) string {
if strings.HasSuffix(unitName, ".scope") {
return "Scope"
}
if strings.HasSuffix(unitName, ".service") {
return "Service"
}
return "Unit"
}
func (m *Manager) Apply(pid int) error {
var (
c = m.Cgroups
unitName = getUnitName(c)
slice = "system.slice"
properties []systemdDbus.Property
)
if c.Slice != "" {
slice = c.Slice
}
properties = append(properties,
systemdDbus.PropSlice(slice),
systemdDbus.PropDescription("docker container "+c.Name),
newProp("PIDs", []uint32{uint32(pid)}),
)
// Always enable accounting, this gets us the same behaviour as the fs implementation,
// plus the kernel has some problems with joining the memory cgroup at a later time.
properties = append(properties,
newProp("MemoryAccounting", true),
newProp("CPUAccounting", true),
newProp("BlockIOAccounting", true))
if hasTransientDefaultDependencies {
properties = append(properties,
newProp("DefaultDependencies", false))
}
if c.Memory != 0 {
properties = append(properties,
newProp("MemoryLimit", uint64(c.Memory)))
}
if c.CpuShares != 0 {
properties = append(properties,
newProp("CPUShares", uint64(c.CpuShares)))
}
if c.BlkioWeight != 0 {
properties = append(properties,
newProp("BlockIOWeight", uint64(c.BlkioWeight)))
}
// We need to set kernel memory before processes join cgroup because
// kmem.limit_in_bytes can only be set when the cgroup is empty.
// And swap memory limit needs to be set after memory limit, only
// memory limit is handled by systemd, so it's kind of ugly here.
if c.KernelMemory > 0 {
if err := setKernelMemory(c); err != nil {
return err
}
}
if _, err := theConn.StartTransientUnit(unitName, "replace", properties, nil); err != nil {
return err
}
if err := joinDevices(c, pid); err != nil {
return err
}
// TODO: CpuQuota and CpuPeriod not available in systemd
// we need to manually join the cpu.cfs_quota_us and cpu.cfs_period_us
if err := joinCpu(c, pid); err != nil {
return err
}
// TODO: MemoryReservation and MemorySwap not available in systemd
if err := joinMemory(c, pid); err != nil {
return err
}
// we need to manually join the freezer, net_cls, net_prio and cpuset cgroup in systemd
// because it does not currently support it via the dbus api.
if err := joinFreezer(c, pid); err != nil {
return err
}
if err := joinNetPrio(c, pid); err != nil {
return err
}
if err := joinNetCls(c, pid); err != nil {
return err
}
if err := joinCpuset(c, pid); err != nil {
return err
}
if err := joinHugetlb(c, pid); err != nil {
return err
}
if err := joinPerfEvent(c, pid); err != nil {
return err
}
// FIXME: Systemd does have `BlockIODeviceWeight` property, but we got problem
// using that (at least on systemd 208, see https://github.com/opencontainers/runc/libcontainer/pull/354),
// so use fs work around for now.
if err := joinBlkio(c, pid); err != nil {
return err
}
paths := make(map[string]string)
for sysname := range subsystems {
subsystemPath, err := getSubsystemPath(m.Cgroups, sysname)
if err != nil {
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
if cgroups.IsNotFound(err) {
continue
}
return err
}
paths[sysname] = subsystemPath
}
m.Paths = paths
if paths["cpu"] != "" {
if err := fs.CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
return err
}
}
return nil
}
func (m *Manager) Destroy() error {
m.mu.Lock()
defer m.mu.Unlock()
theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil)
if err := cgroups.RemovePaths(m.Paths); err != nil {
return err
}
m.Paths = make(map[string]string)
return nil
}
func (m *Manager) GetPaths() map[string]string {
m.mu.Lock()
paths := m.Paths
m.mu.Unlock()
return paths
}
func writeFile(dir, file, data string) error {
	// Normally dir should not be empty; one case is that the cgroup subsystem
	// is not mounted, in which case we get an empty dir, and we want it to fail here.
if dir == "" {
return fmt.Errorf("no such directory for %s.", file)
}
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}
func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
path, err := getSubsystemPath(c, subsystem)
if err != nil {
return "", err
}
if err := os.MkdirAll(path, 0755); err != nil {
return "", err
}
if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil {
return "", err
}
return path, nil
}
func joinCpu(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "cpu")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if c.CpuQuota != 0 {
if err = writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(c.CpuQuota, 10)); err != nil {
return err
}
}
if c.CpuPeriod != 0 {
if err = writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(c.CpuPeriod, 10)); err != nil {
return err
}
}
if c.CpuRtPeriod != 0 {
if err = writeFile(path, "cpu.rt_period_us", strconv.FormatInt(c.CpuRtPeriod, 10)); err != nil {
return err
}
}
if c.CpuRtRuntime != 0 {
if err = writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(c.CpuRtRuntime, 10)); err != nil {
return err
}
}
return nil
}
func joinFreezer(c *configs.Cgroup, pid int) error {
path, err := join(c, "freezer", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
freezer := subsystems["freezer"]
return freezer.Set(path, c)
}
func joinNetPrio(c *configs.Cgroup, pid int) error {
path, err := join(c, "net_prio", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
netPrio := subsystems["net_prio"]
return netPrio.Set(path, c)
}
func joinNetCls(c *configs.Cgroup, pid int) error {
path, err := join(c, "net_cls", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
netcls := subsystems["net_cls"]
return netcls.Set(path, c)
}
func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
return "", err
}
initPath, err := cgroups.GetInitCgroupDir(subsystem)
if err != nil {
return "", err
}
slice := "system.slice"
if c.Slice != "" {
slice = c.Slice
}
return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
}
func (m *Manager) Freeze(state configs.FreezerState) error {
path, err := getSubsystemPath(m.Cgroups, "freezer")
if err != nil {
return err
}
prevState := m.Cgroups.Freezer
m.Cgroups.Freezer = state
freezer := subsystems["freezer"]
err = freezer.Set(path, m.Cgroups)
if err != nil {
m.Cgroups.Freezer = prevState
return err
}
return nil
}
func (m *Manager) GetPids() ([]int, error) {
path, err := getSubsystemPath(m.Cgroups, "devices")
if err != nil {
return nil, err
}
return cgroups.GetPids(path)
}
func (m *Manager) GetStats() (*cgroups.Stats, error) {
m.mu.Lock()
defer m.mu.Unlock()
stats := cgroups.NewStats()
for name, path := range m.Paths {
sys, ok := subsystems[name]
if !ok || !cgroups.PathExists(path) {
continue
}
if err := sys.GetStats(path, stats); err != nil {
return nil, err
}
}
return stats, nil
}
func (m *Manager) Set(container *configs.Config) error {
for name, path := range m.Paths {
sys, ok := subsystems[name]
if !ok || !cgroups.PathExists(path) {
continue
}
if err := sys.Set(path, container.Cgroups); err != nil {
return err
}
}
return nil
}
func getUnitName(c *configs.Cgroup) string {
return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
}
// Atm we can't use the systemd device support because of two missing things:
// * Support for wildcards to allow mknod on any device
// * Support for wildcards to allow /dev/pts support
//
// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
// in wide use. When both these are available we will be able to switch, but need to keep the old
// implementation for backwards compat.
//
// Note: we can't use systemd to set up the initial limits, and then change the cgroup
// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
// This happens at least for v208 when any sibling unit is started.
func joinDevices(c *configs.Cgroup, pid int) error {
path, err := join(c, "devices", pid)
// Even if it's `not found` error, we'll return err because devices cgroup
// is hard requirement for container security.
if err != nil {
return err
}
devices := subsystems["devices"]
return devices.Set(path, c)
}
func setKernelMemory(c *configs.Cgroup) error {
path, err := getSubsystemPath(c, "memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if c.KernelMemory > 0 {
err = writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(c.KernelMemory, 10))
if err != nil {
return err
}
}
return nil
}
func joinMemory(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
// -1 disables memoryswap
if c.MemorySwap > 0 {
err = writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.MemorySwap, 10))
if err != nil {
return err
}
}
if c.MemoryReservation > 0 {
err = writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(c.MemoryReservation, 10))
if err != nil {
return err
}
}
if c.OomKillDisable {
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
return err
}
}
if c.MemorySwappiness >= 0 && c.MemorySwappiness <= 100 {
err = writeFile(path, "memory.swappiness", strconv.FormatInt(c.MemorySwappiness, 10))
if err != nil {
return err
}
} else if c.MemorySwappiness == -1 {
return nil
} else {
return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", c.MemorySwappiness)
}
return nil
}
// systemd does not atm set up the cpuset controller, so we must manually
// join it. Additionally that is a very finicky controller where each
// level must have a full setup as the default for a new directory is "no cpus"
func joinCpuset(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "cpuset")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
s := &fs.CpusetGroup{}
return s.ApplyDir(path, c, pid)
}
// `BlockIODeviceWeight` property of systemd does not work properly, and systemd
// expects device path instead of major minor numbers, which is also confusing
// for users. So we use fs work around for now.
func joinBlkio(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "blkio")
if err != nil {
return err
}
// systemd doesn't directly support this in the dbus properties
if c.BlkioLeafWeight != 0 {
if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(c.BlkioLeafWeight), 10)); err != nil {
return err
}
}
for _, wd := range c.BlkioWeightDevice {
if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
return err
}
if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
return err
}
}
for _, td := range c.BlkioThrottleReadBpsDevice {
if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range c.BlkioThrottleWriteBpsDevice {
if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range c.BlkioThrottleReadIOPSDevice {
if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
return err
}
}
for _, td := range c.BlkioThrottleWriteIOPSDevice {
if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
return err
}
}
return nil
}
func joinHugetlb(c *configs.Cgroup, pid int) error {
path, err := join(c, "hugetlb", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
hugetlb := subsystems["hugetlb"]
return hugetlb.Set(path, c)
}
func joinPerfEvent(c *configs.Cgroup, pid int) error {
path, err := join(c, "perf_event", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
perfEvent := subsystems["perf_event"]
return perfEvent.Set(path, c)
}
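
Editor's note on the getUnitName change in the diff at the top of this entry: the snippet below is a minimal, self-contained sketch of the same strings.Map sanitization idea. The helper name sanitizeUnitPart and the sample inputs are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// sanitizeUnitPart mirrors the approach in the diff: every rune outside the
// allowed set is replaced with '.', so arbitrary container names become valid
// systemd unit-name components.
func sanitizeUnitPart(s string) string {
	const allowed = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:-_.\\"
	return strings.Map(func(r rune) rune {
		if strings.ContainsRune(allowed, r) {
			return r
		}
		return '.'
	}, s)
}

func main() {
	parent, name := "system", "my container/with spaces"
	// Prints "system-my.container.with.spaces.scope".
	fmt.Printf("%s-%s.scope\n", sanitizeUnitPart(parent), sanitizeUnitPart(name))
}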
| 1 | 8,524 | I'm not sure the replacement is a good idea, and as I said in #336 , this "parent-name.scope" is not a good idea in the first place, specially when we support assigning a slice as the parent, so we should change this, WDYT? | opencontainers-runc | go |
@@ -627,7 +627,7 @@ func (dao *blockDAO) putBlock(blk *block.Block) error {
log.L().Error("failed to serialize receipits for block", zap.Uint64("height", blkHeight))
}
}
- if err = kv.Commit(batchForBlock); err != nil {
+ if err = kv.WriteBatch(batchForBlock); err != nil {
return err
}
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockdao
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/cache"
"github.com/iotexproject/iotex-core/pkg/compress"
"github.com/iotexproject/iotex-core/pkg/enc"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
)
const (
blockNS = "blk"
blockHashHeightMappingNS = "h2h"
blockHeaderNS = "bhr"
blockBodyNS = "bbd"
blockFooterNS = "bfr"
receiptsNS = "rpt"
)
// these NS belong to old DB before migrating to separate index
// they are left here only for record
// do NOT use them in the future to avoid potential conflict
const (
blockActionBlockMappingNS = "a2b"
blockAddressActionMappingNS = "a2a"
blockAddressActionCountMappingNS = "a2c"
blockActionReceiptMappingNS = "a2r"
numActionsNS = "nac"
transferAmountNS = "tfa"
)
var (
topHeightKey = []byte("th")
topHashKey = []byte("ts")
hashPrefix = []byte("ha.")
heightPrefix = []byte("he.")
heightToFileBucket = []byte("h2f")
)
var (
cacheMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_blockdao_cache",
Help: "IoTeX blockdao cache counter.",
},
[]string{"result"},
)
patternLen = len("00000000.db")
suffixLen = len(".db")
// ErrNotOpened indicates db is not opened
ErrNotOpened = errors.New("DB is not opened")
)
type (
// BlockDAO represents the block data access object
BlockDAO interface {
Start(ctx context.Context) error
Stop(ctx context.Context) error
GetBlockHash(uint64) (hash.Hash256, error)
GetBlockHeight(hash.Hash256) (uint64, error)
GetBlock(hash.Hash256) (*block.Block, error)
GetBlockByHeight(uint64) (*block.Block, error)
GetTipHeight() (uint64, error)
GetTipHash() (hash.Hash256, error)
Header(hash.Hash256) (*block.Header, error)
Body(hash.Hash256) (*block.Body, error)
Footer(hash.Hash256) (*block.Footer, error)
GetActionByActionHash(hash.Hash256, uint64) (action.SealedEnvelope, error)
GetReceiptByActionHash(hash.Hash256, uint64) (*action.Receipt, error)
GetReceipts(uint64) ([]*action.Receipt, error)
PutBlock(*block.Block) error
Commit() error
DeleteBlockToTarget(uint64) error
IndexFile(uint64, []byte) error
GetFileIndex(uint64) ([]byte, error)
KVStore() db.KVStore
}
// BlockIndexer defines an interface to accept block to build index
BlockIndexer interface {
Start(ctx context.Context) error
Stop(ctx context.Context) error
PutBlock(blk *block.Block) error
DeleteTipBlock(blk *block.Block) error
Commit() error
}
blockDAO struct {
compressBlock bool
kvstore db.KVStore
indexer BlockIndexer
htf db.RangeIndex
kvstores sync.Map //store like map[index]db.KVStore,index from 1...N
topIndex atomic.Value
timerFactory *prometheustimer.TimerFactory
lifecycle lifecycle.Lifecycle
headerCache *cache.ThreadSafeLruCache
bodyCache *cache.ThreadSafeLruCache
footerCache *cache.ThreadSafeLruCache
cfg config.DB
mutex sync.RWMutex // for create new db file
}
)
// NewBlockDAO instantiates a block DAO
func NewBlockDAO(kvstore db.KVStore, indexer BlockIndexer, compressBlock bool, cfg config.DB) BlockDAO {
blockDAO := &blockDAO{
compressBlock: compressBlock,
kvstore: kvstore,
indexer: indexer,
cfg: cfg,
}
if cfg.MaxCacheSize > 0 {
blockDAO.headerCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize)
blockDAO.bodyCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize)
blockDAO.footerCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize)
}
timerFactory, err := prometheustimer.New(
"iotex_block_dao_perf",
"Performance of block DAO",
[]string{"type"},
[]string{"default"},
)
if err != nil {
return nil
}
blockDAO.timerFactory = timerFactory
blockDAO.lifecycle.Add(kvstore)
if indexer != nil {
blockDAO.lifecycle.Add(indexer)
}
return blockDAO
}
// Start starts block DAO and initiates the top height if it doesn't exist
func (dao *blockDAO) Start(ctx context.Context) error {
err := dao.lifecycle.OnStart(ctx)
if err != nil {
return errors.Wrap(err, "failed to start child services")
}
// set init height value
if _, err = dao.kvstore.Get(blockNS, topHeightKey); err != nil &&
errors.Cause(err) == db.ErrNotExist {
if err := dao.kvstore.Put(blockNS, topHeightKey, make([]byte, 8)); err != nil {
return errors.Wrap(err, "failed to write initial value for top height")
}
}
return dao.initStores()
}
func (dao *blockDAO) initStores() error {
cfg := dao.cfg
model, dir := getFileNameAndDir(cfg.DbPath)
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
var maxN uint64
for _, file := range files {
name := file.Name()
lens := len(name)
if lens < patternLen || !strings.Contains(name, model) {
continue
}
num := name[lens-patternLen : lens-suffixLen]
n, err := strconv.Atoi(num)
if err != nil {
continue
}
dao.openDB(uint64(n))
if uint64(n) > maxN {
maxN = uint64(n)
}
}
if maxN == 0 {
maxN = 1
}
dao.topIndex.Store(maxN)
return nil
}
func (dao *blockDAO) Stop(ctx context.Context) error { return dao.lifecycle.OnStop(ctx) }
func (dao *blockDAO) Commit() error {
return nil
}
func (dao *blockDAO) GetBlockHash(height uint64) (hash.Hash256, error) {
return dao.getBlockHash(height)
}
func (dao *blockDAO) GetBlockHeight(hash hash.Hash256) (uint64, error) {
return dao.getBlockHeight(hash)
}
func (dao *blockDAO) GetBlock(hash hash.Hash256) (*block.Block, error) {
return dao.getBlock(hash)
}
func (dao *blockDAO) GetBlockByHeight(height uint64) (*block.Block, error) {
hash, err := dao.getBlockHash(height)
if err != nil {
return nil, err
}
return dao.getBlock(hash)
}
func (dao *blockDAO) GetTipHash() (hash.Hash256, error) {
return dao.getTipHash()
}
func (dao *blockDAO) GetTipHeight() (uint64, error) {
return dao.getTipHeight()
}
func (dao *blockDAO) Header(h hash.Hash256) (*block.Header, error) {
return dao.header(h)
}
func (dao *blockDAO) Body(h hash.Hash256) (*block.Body, error) {
return dao.body(h)
}
func (dao *blockDAO) Footer(h hash.Hash256) (*block.Footer, error) {
return dao.footer(h)
}
func (dao *blockDAO) GetActionByActionHash(h hash.Hash256, height uint64) (action.SealedEnvelope, error) {
bh, err := dao.getBlockHash(height)
if err != nil {
return action.SealedEnvelope{}, err
}
blk, err := dao.body(bh)
if err != nil {
return action.SealedEnvelope{}, err
}
for _, act := range blk.Actions {
if act.Hash() == h {
return act, nil
}
}
return action.SealedEnvelope{}, errors.Errorf("block %d does not have action %x", height, h)
}
func (dao *blockDAO) GetReceiptByActionHash(h hash.Hash256, height uint64) (*action.Receipt, error) {
receipts, err := dao.getReceipts(height)
if err != nil {
return nil, err
}
for _, r := range receipts {
if r.ActionHash == h {
return r, nil
}
}
return nil, errors.Errorf("receipt of action %x isn't found", h)
}
func (dao *blockDAO) GetReceipts(blkHeight uint64) ([]*action.Receipt, error) {
return dao.getReceipts(blkHeight)
}
func (dao *blockDAO) PutBlock(blk *block.Block) error {
if err := dao.putBlock(blk); err != nil {
return err
}
// index the block if there's indexer
if dao.indexer == nil {
return nil
}
if err := dao.indexer.PutBlock(blk); err != nil {
return err
}
return dao.indexer.Commit()
}
func (dao *blockDAO) DeleteBlockToTarget(targetHeight uint64) error {
dao.mutex.Lock()
defer dao.mutex.Unlock()
tipHeight, err := dao.getTipHeight()
if err != nil {
return err
}
for tipHeight > targetHeight {
// Obtain tip block hash
h, err := dao.getTipHash()
if err != nil {
return errors.Wrap(err, "failed to get tip block hash")
}
blk, err := dao.getBlock(h)
if err != nil {
return errors.Wrap(err, "failed to get tip block")
}
// delete block index if there's indexer
if dao.indexer != nil {
if err := dao.indexer.DeleteTipBlock(blk); err != nil {
return err
}
}
if err := dao.deleteTipBlock(); err != nil {
return err
}
tipHeight--
}
return nil
}
func (dao *blockDAO) IndexFile(height uint64, index []byte) error {
dao.mutex.Lock()
defer dao.mutex.Unlock()
if dao.htf == nil {
htf, err := db.NewRangeIndex(dao.kvstore, heightToFileBucket, make([]byte, 8))
if err != nil {
return err
}
dao.htf = htf
}
return dao.htf.Insert(height, index)
}
// GetFileIndex return the db filename
func (dao *blockDAO) GetFileIndex(height uint64) ([]byte, error) {
dao.mutex.RLock()
defer dao.mutex.RUnlock()
if dao.htf == nil {
htf, err := db.NewRangeIndex(dao.kvstore, heightToFileBucket, make([]byte, 8))
if err != nil {
return nil, err
}
dao.htf = htf
}
return dao.htf.Get(height)
}
func (dao *blockDAO) KVStore() db.KVStore {
return dao.kvstore
}
// getBlockHash returns the block hash by height
func (dao *blockDAO) getBlockHash(height uint64) (hash.Hash256, error) {
h := hash.ZeroHash256
if height == 0 {
return h, nil
}
key := heightKey(height)
value, err := dao.kvstore.Get(blockHashHeightMappingNS, key)
if err != nil {
return h, errors.Wrap(err, "failed to get block hash")
}
if len(h) != len(value) {
return h, errors.Wrapf(err, "blockhash is broken with length = %d", len(value))
}
copy(h[:], value)
return h, nil
}
// getBlockHeight returns the block height by hash
func (dao *blockDAO) getBlockHeight(hash hash.Hash256) (uint64, error) {
key := hashKey(hash)
value, err := dao.kvstore.Get(blockHashHeightMappingNS, key)
if err != nil {
return 0, errors.Wrap(err, "failed to get block height")
}
if len(value) == 0 {
return 0, errors.Wrapf(db.ErrNotExist, "height missing for block with hash = %x", hash)
}
return enc.MachineEndian.Uint64(value), nil
}
// getBlock returns a block
func (dao *blockDAO) getBlock(hash hash.Hash256) (*block.Block, error) {
header, err := dao.header(hash)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block header %x", hash)
}
body, err := dao.body(hash)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block body %x", hash)
}
footer, err := dao.footer(hash)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block footer %x", hash)
}
return &block.Block{
Header: *header,
Body: *body,
Footer: *footer,
}, nil
}
func (dao *blockDAO) header(h hash.Hash256) (*block.Header, error) {
if dao.headerCache != nil {
header, ok := dao.headerCache.Get(h)
if ok {
cacheMtc.WithLabelValues("hit_header").Inc()
return header.(*block.Header), nil
}
cacheMtc.WithLabelValues("miss_header").Inc()
}
value, err := dao.getBlockValue(blockHeaderNS, h)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block header %x", h)
}
if dao.compressBlock {
timer := dao.timerFactory.NewTimer("decompress_header")
value, err = compress.Decompress(value)
timer.End()
if err != nil {
return nil, errors.Wrapf(err, "error when decompressing a block header %x", h)
}
}
if len(value) == 0 {
return nil, errors.Wrapf(db.ErrNotExist, "block header %x is missing", h)
}
header := &block.Header{}
if err := header.Deserialize(value); err != nil {
return nil, errors.Wrapf(err, "failed to deserialize block header %x", h)
}
if dao.headerCache != nil {
dao.headerCache.Add(h, header)
}
return header, nil
}
func (dao *blockDAO) body(h hash.Hash256) (*block.Body, error) {
if dao.bodyCache != nil {
body, ok := dao.bodyCache.Get(h)
if ok {
cacheMtc.WithLabelValues("hit_body").Inc()
return body.(*block.Body), nil
}
cacheMtc.WithLabelValues("miss_body").Inc()
}
value, err := dao.getBlockValue(blockBodyNS, h)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block body %x", h)
}
if dao.compressBlock {
timer := dao.timerFactory.NewTimer("decompress_body")
value, err = compress.Decompress(value)
timer.End()
if err != nil {
return nil, errors.Wrapf(err, "error when decompressing a block body %x", h)
}
}
if len(value) == 0 {
return nil, errors.Wrapf(db.ErrNotExist, "block body %x is missing", h)
}
body := &block.Body{}
if err := body.Deserialize(value); err != nil {
return nil, errors.Wrapf(err, "failed to deserialize block body %x", h)
}
if dao.bodyCache != nil {
dao.bodyCache.Add(h, body)
}
return body, nil
}
func (dao *blockDAO) footer(h hash.Hash256) (*block.Footer, error) {
if dao.footerCache != nil {
footer, ok := dao.footerCache.Get(h)
if ok {
cacheMtc.WithLabelValues("hit_footer").Inc()
return footer.(*block.Footer), nil
}
cacheMtc.WithLabelValues("miss_footer").Inc()
}
value, err := dao.getBlockValue(blockFooterNS, h)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block footer %x", h)
}
if dao.compressBlock {
timer := dao.timerFactory.NewTimer("decompress_footer")
value, err = compress.Decompress(value)
timer.End()
if err != nil {
return nil, errors.Wrapf(err, "error when decompressing a block footer %x", h)
}
}
if len(value) == 0 {
return nil, errors.Wrapf(db.ErrNotExist, "block footer %x is missing", h)
}
footer := &block.Footer{}
if err := footer.Deserialize(value); err != nil {
return nil, errors.Wrapf(err, "failed to deserialize block footer %x", h)
}
if dao.footerCache != nil {
dao.footerCache.Add(h, footer)
}
return footer, nil
}
// getTipHeight returns the blockchain height
func (dao *blockDAO) getTipHeight() (uint64, error) {
value, err := dao.kvstore.Get(blockNS, topHeightKey)
if err != nil {
return 0, errors.Wrap(err, "failed to get top height")
}
if len(value) == 0 {
return 0, errors.Wrap(db.ErrNotExist, "blockchain height missing")
}
return enc.MachineEndian.Uint64(value), nil
}
// getTipHash returns the blockchain tip hash
func (dao *blockDAO) getTipHash() (hash.Hash256, error) {
value, err := dao.kvstore.Get(blockNS, topHashKey)
if err != nil {
return hash.ZeroHash256, errors.Wrap(err, "failed to get tip hash")
}
return hash.BytesToHash256(value), nil
}
func (dao *blockDAO) getReceipts(blkHeight uint64) ([]*action.Receipt, error) {
kvstore, _, err := dao.getDBFromHeight(blkHeight)
if err != nil {
return nil, err
}
value, err := kvstore.Get(receiptsNS, byteutil.Uint64ToBytes(blkHeight))
if err != nil {
return nil, errors.Wrapf(err, "failed to get receipts of block %d", blkHeight)
}
if len(value) == 0 {
return nil, errors.Wrap(db.ErrNotExist, "block receipts missing")
}
receiptsPb := &iotextypes.Receipts{}
if err := proto.Unmarshal(value, receiptsPb); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal block receipts")
}
var blockReceipts []*action.Receipt
for _, receiptPb := range receiptsPb.Receipts {
receipt := &action.Receipt{}
receipt.ConvertFromReceiptPb(receiptPb)
blockReceipts = append(blockReceipts, receipt)
}
return blockReceipts, nil
}
// putBlock puts a block
func (dao *blockDAO) putBlock(blk *block.Block) error {
blkHeight := blk.Height()
h, err := dao.getBlockHash(blkHeight)
if h != hash.ZeroHash256 && err == nil {
return errors.Errorf("block %d already exist", blkHeight)
}
serHeader, err := blk.Header.Serialize()
if err != nil {
return errors.Wrap(err, "failed to serialize block header")
}
serBody, err := blk.Body.Serialize()
if err != nil {
return errors.Wrap(err, "failed to serialize block body")
}
serFooter, err := blk.Footer.Serialize()
if err != nil {
return errors.Wrap(err, "failed to serialize block footer")
}
if dao.compressBlock {
timer := dao.timerFactory.NewTimer("compress_header")
serHeader, err = compress.Compress(serHeader)
timer.End()
if err != nil {
return errors.Wrapf(err, "error when compressing a block header")
}
timer = dao.timerFactory.NewTimer("compress_body")
serBody, err = compress.Compress(serBody)
timer.End()
if err != nil {
return errors.Wrapf(err, "error when compressing a block body")
}
timer = dao.timerFactory.NewTimer("compress_footer")
serFooter, err = compress.Compress(serFooter)
timer.End()
if err != nil {
return errors.Wrapf(err, "error when compressing a block footer")
}
}
batchForBlock := db.NewBatch()
hash := blk.HashBlock()
batchForBlock.Put(blockHeaderNS, hash[:], serHeader, "failed to put block header")
batchForBlock.Put(blockBodyNS, hash[:], serBody, "failed to put block body")
batchForBlock.Put(blockFooterNS, hash[:], serFooter, "failed to put block footer")
kv, _, err := dao.getTopDB(blkHeight)
if err != nil {
return err
}
// write receipts
if blk.Receipts != nil {
receipts := iotextypes.Receipts{}
for _, r := range blk.Receipts {
receipts.Receipts = append(receipts.Receipts, r.ConvertToReceiptPb())
}
if receiptsBytes, err := proto.Marshal(&receipts); err == nil {
batchForBlock.Put(receiptsNS, byteutil.Uint64ToBytes(blkHeight), receiptsBytes, "failed to put receipts")
} else {
log.L().Error("failed to serialize receipits for block", zap.Uint64("height", blkHeight))
}
}
if err = kv.Commit(batchForBlock); err != nil {
return err
}
batch := db.NewBatch()
heightValue := byteutil.Uint64ToBytes(blkHeight)
hashKey := hashKey(hash)
batch.Put(blockHashHeightMappingNS, hashKey, heightValue, "failed to put hash -> height mapping")
heightKey := heightKey(blkHeight)
batch.Put(blockHashHeightMappingNS, heightKey, hash[:], "failed to put height -> hash mapping")
tipHeight, err := dao.kvstore.Get(blockNS, topHeightKey)
if err != nil {
return errors.Wrap(err, "failed to get top height")
}
if blkHeight > enc.MachineEndian.Uint64(tipHeight) {
batch.Put(blockNS, topHeightKey, heightValue, "failed to put top height")
batch.Put(blockNS, topHashKey, hash[:], "failed to put top hash")
}
return dao.kvstore.Commit(batch)
}
// deleteTipBlock deletes the tip block
func (dao *blockDAO) deleteTipBlock() error {
// First obtain tip height from db
height, err := dao.getTipHeight()
if err != nil {
return errors.Wrap(err, "failed to get tip height")
}
if height == 0 {
// should not delete genesis block
return errors.New("cannot delete genesis block")
}
// Obtain tip block hash
hash, err := dao.getTipHash()
if err != nil {
return errors.Wrap(err, "failed to get tip block hash")
}
batch := db.NewBatch()
batchForBlock := db.NewBatch()
whichDB, _, err := dao.getDBFromHeight(height)
if err != nil {
return err
}
// Delete hash -> block mapping
batchForBlock.Delete(blockHeaderNS, hash[:], "failed to delete block header")
if dao.headerCache != nil {
dao.headerCache.Remove(hash)
}
batchForBlock.Delete(blockBodyNS, hash[:], "failed to delete block body")
if dao.bodyCache != nil {
dao.bodyCache.Remove(hash)
}
batchForBlock.Delete(blockFooterNS, hash[:], "failed to delete block footer")
if dao.footerCache != nil {
dao.footerCache.Remove(hash)
}
// delete receipt
batchForBlock.Delete(receiptsNS, byteutil.Uint64ToBytes(height), "failed to delete receipt")
// Delete hash -> height mapping
hashKey := hashKey(hash)
batch.Delete(blockHashHeightMappingNS, hashKey, "failed to delete hash -> height mapping")
// Delete height -> hash mapping
heightKey := heightKey(height)
batch.Delete(blockHashHeightMappingNS, heightKey, "failed to delete height -> hash mapping")
// Update tip height
batch.Put(blockNS, topHeightKey, byteutil.Uint64ToBytes(height-1), "failed to put top height")
// Update tip hash
hash2, err := dao.getBlockHash(height - 1)
if err != nil {
return errors.Wrap(err, "failed to get tip block hash")
}
batch.Put(blockNS, topHashKey, hash2[:], "failed to put top hash")
if err := dao.kvstore.Commit(batch); err != nil {
return err
}
return whichDB.Commit(batchForBlock)
}
// getDBFromHash returns db of this block stored
func (dao *blockDAO) getDBFromHash(h hash.Hash256) (db.KVStore, uint64, error) {
height, err := dao.getBlockHeight(h)
if err != nil {
return nil, 0, err
}
return dao.getDBFromHeight(height)
}
func (dao *blockDAO) getTopDB(blkHeight uint64) (kvstore db.KVStore, index uint64, err error) {
if dao.cfg.SplitDBSizeMB == 0 || blkHeight <= dao.cfg.SplitDBHeight {
return dao.kvstore, 0, nil
}
topIndex := dao.topIndex.Load().(uint64)
file, dir := getFileNameAndDir(dao.cfg.DbPath)
if err != nil {
return
}
longFileName := dir + "/" + file + fmt.Sprintf("-%08d", topIndex) + ".db"
dat, err := os.Stat(longFileName)
if err != nil && os.IsNotExist(err) {
// index the height --> file index mapping
if err = dao.IndexFile(blkHeight, byteutil.Uint64ToBytesBigEndian(topIndex)); err != nil {
return
}
// db file does not exist, create it
return dao.openDB(topIndex)
}
// other errors except file does not exist
if err != nil {
return
}
	// file exists, but we need to create a new db
if uint64(dat.Size()) > dao.cfg.SplitDBSize() {
kvstore, index, err = dao.openDB(topIndex + 1)
dao.topIndex.Store(index)
// index the height --> file index mapping
err = dao.IndexFile(blkHeight, byteutil.Uint64ToBytesBigEndian(topIndex))
return
}
	// db exists, need to load it from kvstores
kv, ok := dao.kvstores.Load(topIndex)
if ok {
kvstore, ok = kv.(db.KVStore)
if !ok {
err = errors.New("db convert error")
}
index = topIndex
return
}
	// file exists, but is not opened yet
return dao.openDB(topIndex)
}
func (dao *blockDAO) getDBFromHeight(blkHeight uint64) (kvstore db.KVStore, index uint64, err error) {
if dao.cfg.SplitDBSizeMB == 0 {
return dao.kvstore, 0, nil
}
if blkHeight <= dao.cfg.SplitDBHeight {
return dao.kvstore, 0, nil
}
// get file index
value, err := dao.GetFileIndex(blkHeight)
if err != nil {
return
}
return dao.getDBFromIndex(byteutil.BytesToUint64BigEndian(value))
}
func (dao *blockDAO) getDBFromIndex(idx uint64) (kvstore db.KVStore, index uint64, err error) {
if idx == 0 {
return dao.kvstore, 0, nil
}
kv, ok := dao.kvstores.Load(idx)
if ok {
kvstore, ok = kv.(db.KVStore)
if !ok {
err = errors.New("db convert error")
}
index = idx
return
}
	// if the user removed some db files manually, calling this method will create a new file
return dao.openDB(idx)
}
// getBlockValue gets the block's data from db; if this db fails, it will try the previous one
func (dao *blockDAO) getBlockValue(blockNS string, h hash.Hash256) ([]byte, error) {
whichDB, index, err := dao.getDBFromHash(h)
if err != nil {
return nil, err
}
value, err := whichDB.Get(blockNS, h[:])
if errors.Cause(err) == db.ErrNotExist {
idx := index - 1
if index == 0 {
idx = 0
}
db, _, err := dao.getDBFromIndex(idx)
if err != nil {
return nil, err
}
value, err = db.Get(blockNS, h[:])
if err != nil {
return nil, err
}
}
return value, err
}
// openDB opens the file if it exists, or creates a new file
func (dao *blockDAO) openDB(idx uint64) (kvstore db.KVStore, index uint64, err error) {
if idx == 0 {
return dao.kvstore, 0, nil
}
dao.mutex.Lock()
defer dao.mutex.Unlock()
cfg := dao.cfg
model, _ := getFileNameAndDir(cfg.DbPath)
name := model + fmt.Sprintf("-%08d", idx) + ".db"
// open or create this db file
cfg.DbPath = path.Dir(cfg.DbPath) + "/" + name
kvstore = db.NewBoltDB(cfg)
dao.kvstores.Store(idx, kvstore)
err = kvstore.Start(context.Background())
if err != nil {
return
}
dao.lifecycle.Add(kvstore)
index = idx
return
}
func getFileNameAndDir(p string) (fileName, dir string) {
var withSuffix, suffix string
withSuffix = path.Base(p)
suffix = path.Ext(withSuffix)
fileName = strings.TrimSuffix(withSuffix, suffix)
dir = path.Dir(p)
return
}
func hashKey(h hash.Hash256) []byte {
return append(hashPrefix, h[:]...)
}
func heightKey(height uint64) []byte {
return append(heightPrefix, byteutil.Uint64ToBytes(height)...)
}
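
Editor's note on the kv.Commit -> kv.WriteBatch change in the diff at the top of this entry: the gocritic sloppyReassign warning attached to this entry fires because the previous value of err is never read at that point, so a fresh short declaration is preferred over re-assignment. The sketch below is a hypothetical, self-contained illustration of that pattern; prepare and writeBatch are stand-ins, not real blockdao functions.

package main

import (
	"errors"
	"fmt"
)

// prepare stands in for the earlier calls in putBlock that already declared err.
func prepare() error { return nil }

// writeBatch stands in for kv.WriteBatch(batchForBlock).
func writeBatch() error { return errors.New("disk full") }

func putBlock() error {
	err := prepare()
	if err != nil {
		return err
	}
	// Writing `if err = writeBatch(); err != nil` here would re-assign err even
	// though its previous value is never read again; gocritic's sloppyReassign
	// suggests shadowing it with a fresh declaration instead:
	if err := writeBatch(); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(putBlock()) // prints "disk full"
}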
| 1 | 20,358 | sloppyReassign: re-assignment to `err` can be replaced with `err := kv.WriteBatch(batchForBlock)` (from `gocritic`) | iotexproject-iotex-core | go |
@@ -123,7 +123,11 @@ public class VertxServerResponseToHttpServletResponse extends AbstractHttpServle
@Override
public CompletableFuture<Void> sendPart(Part part) {
DownloadUtils.prepareDownloadHeader(this, part);
-
+ if (part == null) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ future.complete(null);
+ return future;
+ }
return new PumpFromPart(context, part).toWriteStream(serverResponse, null);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.foundation.vertx.http;
import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import javax.servlet.http.Part;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response.StatusType;
import org.apache.servicecomb.foundation.common.http.HttpStatus;
import org.apache.servicecomb.foundation.vertx.stream.PumpFromPart;
import io.vertx.core.Context;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServerResponse;
public class VertxServerResponseToHttpServletResponse extends AbstractHttpServletResponse {
private Context context;
private HttpServerResponse serverResponse;
private StatusType statusType;
public VertxServerResponseToHttpServletResponse(HttpServerResponse serverResponse) {
this.context = Vertx.currentContext();
this.serverResponse = serverResponse;
Objects.requireNonNull(context, "must run in vertx context.");
}
@Override
public void setContentType(String type) {
serverResponse.headers().set(HttpHeaders.CONTENT_TYPE, type);
}
@Override
@Deprecated
public void setStatus(int sc, String sm) {
serverResponse.setStatusCode(sc);
serverResponse.setStatusMessage(sm);
}
@Override
public StatusType getStatusType() {
if (statusType == null) {
statusType = new HttpStatus(serverResponse.getStatusCode(), serverResponse.getStatusMessage());
}
return statusType;
}
@Override
public void addHeader(String name, String value) {
serverResponse.headers().add(name, value);
}
@Override
public void setHeader(String name, String value) {
serverResponse.headers().set(name, value);
}
@Override
public int getStatus() {
return serverResponse.getStatusCode();
}
@Override
public String getContentType() {
return serverResponse.headers().get(HttpHeaders.CONTENT_TYPE);
}
@Override
public String getHeader(String name) {
return serverResponse.headers().get(name);
}
@Override
public Collection<String> getHeaders(String name) {
return serverResponse.headers().getAll(name);
}
@Override
public Collection<String> getHeaderNames() {
return serverResponse.headers().names();
}
@Override
public void flushBuffer() {
if (context == Vertx.currentContext()) {
internalFlushBuffer();
return;
}
context.runOnContext(V -> internalFlushBuffer());
}
public void internalFlushBuffer() {
if (bodyBuffer == null) {
serverResponse.end();
return;
}
serverResponse.end(bodyBuffer);
}
@Override
public CompletableFuture<Void> sendPart(Part part) {
DownloadUtils.prepareDownloadHeader(this, part);
return new PumpFromPart(context, part).toWriteStream(serverResponse, null);
}
@Override
public void setChunked(boolean chunked) {
serverResponse.setChunked(chunked);
}
}
| 1 | 12,435 | using factory method. return CompletableFuture.completedFuture | apache-servicecomb-java-chassis | java |
@@ -31,7 +31,7 @@ storiesOf( 'Global', module )
title={ __( 'Top content over the last 28 days', 'google-site-kit' ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
- footerCtaLabel={ __( 'Analytics', 'google-site-kit' ) }
+ footerCtaLabel={ _x( 'Analytics', 'Service name', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<AnalyticsDashboardWidgetTopPagesTable /> | 1 | /**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
import Layout from 'GoogleComponents/layout/layout';
import AnalyticsDashboardWidgetTopPagesTable from 'GoogleModules/analytics/dashboard/dashboard-widget-top-pages-table.js';
/**
* Internal dependencies
*/
import { googlesitekit as analyticsDashboardData } from '../.storybook/data/wp-admin-admin.php-page=googlesitekit-module-analytics-googlesitekit';
storiesOf( 'Global', module )
.add( 'Data Table', () => {
window.googlesitekit = analyticsDashboardData;
// Load the datacache with data.
setTimeout( () => {
wp.hooks.doAction(
'googlesitekit.moduleLoaded',
'Dashboard'
);
}, 250 );
return (
<Layout
header
footer
title={ __( 'Top content over the last 28 days', 'google-site-kit' ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
footerCtaLabel={ __( 'Analytics', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<AnalyticsDashboardWidgetTopPagesTable />
</Layout>
);
}, {
options: {
readySelector: '.googlesitekit-table-overflow',
delay: 2000, // Wait for table overflow to animate.
},
} );
| 1 | 25,369 | The `_x` function needs to be imported at the top of the file (in addition to `__`) | google-site-kit-wp | js |
@@ -19,7 +19,7 @@ type ofFlow struct {
// The Flow.Table field can be updated by Reset(), which can be called by
// ReplayFlows() when replaying the Flow to OVS. For thread safety, any access
// to Flow.Table should hold the replayMutex read lock.
- ofctrl.Flow
+ *ofctrl.Flow
// matchers is string slice, it is used to generate a readable match string of the Flow.
matchers []string | 1 | package openflow
import (
"fmt"
"strings"
"github.com/contiv/libOpenflow/openflow13"
"github.com/contiv/ofnet/ofctrl"
)
type FlowStates struct {
TableID uint8
PacketCount uint64
DurationNSecond uint32
}
type ofFlow struct {
table *ofTable
// The Flow.Table field can be updated by Reset(), which can be called by
// ReplayFlows() when replaying the Flow to OVS. For thread safety, any access
// to Flow.Table should hold the replayMutex read lock.
ofctrl.Flow
// matchers is string slice, it is used to generate a readable match string of the Flow.
matchers []string
// protocol adds a readable protocol type in the match string of ofFlow.
protocol Protocol
// ctStateString is a temporary variable for the readable ct_state configuration. Its value is changed when the client
// updates the matching condition of "ct_states". When FlowBuilder.Done is called, its value is added into the matchers.
ctStateString string
// ctStates is a temporary variable to maintain openflow13.CTStates. When FlowBuilder.Done is called, it is used to
// set the CtStates field in ofctrl.Flow.Match.
ctStates *openflow13.CTStates
}
// Reset updates the ofFlow.Flow.Table field with ofFlow.table.Table.
// In the case of reconnecting to OVS, the ofnet library creates new OFTable
// objects. Reset() can be called to reset ofFlow.Flow.Table to the right value,
// before replaying the Flow to OVS.
func (f *ofFlow) Reset() {
f.Flow.Table = f.table.Table
}
func (f *ofFlow) Add() error {
err := f.Flow.Send(openflow13.FC_ADD)
if err != nil {
return err
}
f.table.UpdateStatus(1)
return nil
}
func (f *ofFlow) Modify() error {
err := f.Flow.Send(openflow13.FC_MODIFY_STRICT)
if err != nil {
return err
}
f.table.UpdateStatus(0)
return nil
}
func (f *ofFlow) Delete() error {
f.Flow.UpdateInstallStatus(true)
err := f.Flow.Send(openflow13.FC_DELETE_STRICT)
if err != nil {
return err
}
f.table.UpdateStatus(-1)
return nil
}
func (f *ofFlow) Type() EntryType {
return FlowEntry
}
func (f *ofFlow) KeyString() string {
return f.MatchString()
}
func (f *ofFlow) MatchString() string {
repr := fmt.Sprintf("table=%d", f.table.GetID())
if f.protocol != "" {
repr = fmt.Sprintf("%s,%s", repr, f.protocol)
}
if len(f.matchers) > 0 {
repr += fmt.Sprintf(",%s", strings.Join(f.matchers, ","))
}
return repr
}
func (f *ofFlow) FlowPriority() uint16 {
return f.Match.Priority
}
func (f *ofFlow) GetBundleMessage(entryOper OFOperation) (ofctrl.OpenFlowModMessage, error) {
var operation int
switch entryOper {
case AddMessage:
operation = openflow13.FC_ADD
case ModifyMessage:
operation = openflow13.FC_MODIFY_STRICT
case DeleteMessage:
operation = openflow13.FC_DELETE_STRICT
}
message, err := f.Flow.GetBundleMessage(operation)
if err != nil {
return nil, err
}
return message, nil
}
// CopyToBuilder returns a new FlowBuilder that copies the table, protocols,
// matches, and CookieID of the Flow, but does not copy the actions,
// and other private status fields of the ofctrl.Flow, e.g. "realized" and
// "isInstalled". Reset the priority in the new FlowBuilder if it is provided.
func (f *ofFlow) CopyToBuilder(priority uint16) FlowBuilder {
newFlow := ofFlow{
table: f.table,
Flow: ofctrl.Flow{
Table: f.Flow.Table,
CookieID: f.Flow.CookieID,
CookieMask: f.Flow.CookieMask,
Match: f.Flow.Match,
},
matchers: f.matchers,
protocol: f.protocol,
}
if priority > 0 {
newFlow.Flow.Match.Priority = priority
}
return &ofFlowBuilder{newFlow}
}
// ToBuilder returns a new FlowBuilder with all the contents of the original Flow
func (f *ofFlow) ToBuilder() FlowBuilder {
newFlow := ofFlow{
table: f.table,
Flow: f.Flow,
matchers: f.matchers,
protocol: f.protocol,
}
return &ofFlowBuilder{newFlow}
}
func (r *Range) ToNXRange() *openflow13.NXRange {
return openflow13.NewNXRange(int(r[0]), int(r[1]))
}
func (r *Range) Length() uint32 {
return r[1] - r[0] + 1
}
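
Editor's note on the ofctrl.Flow -> *ofctrl.Flow change in the diff at the top of this entry: embedding a pointer instead of a value is the usual answer when go vet's copylocks check complains about copying a struct that contains a mutex, and it also makes every copy of ofFlow share one underlying Flow. Whether ofctrl.Flow really contains a lock is an assumption here; the types in the sketch below are illustrative stand-ins, not the real ofnet types.

package main

import (
	"fmt"
	"sync"
)

// flow stands in for ofctrl.Flow; the mutex is assumed for illustration.
type flow struct {
	mu    sync.Mutex
	Table string
}

// Embedding by value: assigning or passing ofFlowByValue copies the inner
// mutex as well, which is exactly what go vet's copylocks check flags.
type ofFlowByValue struct {
	flow
}

// Embedding a pointer: copies of ofFlowByPointer share a single flow (and a
// single mutex), so vet stays quiet and Reset-style updates are visible to all.
type ofFlowByPointer struct {
	*flow
}

func main() {
	shared := &flow{Table: "t0"}
	a := ofFlowByPointer{flow: shared}
	b := a               // copies only the pointer
	b.Table = "t1"       // updates the shared flow
	fmt.Println(a.Table) // prints "t1"
}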
| 1 | 22,096 | this looks like the only controversial change potentially. @MatthewHinton56 Could let us know why you made this change (which `go vet` warning was triggered)? @wenyingd could you take a look and let us know if you're okay with this change? | antrea-io-antrea | go |
@@ -26,7 +26,7 @@ using pwiz.Skyline.Util;
namespace pwiz.Skyline.Controls.Graphs
{
- public sealed partial class AreaCVToolbar : GraphSummaryToolbar //UserControl // for designer
+ public sealed partial class AreaCVToolbar : GraphSummaryToolbar //GraphSummaryToolbar UserControl // for designer
{
private readonly Timer _timer;
private int _standardTypeCount; | 1 | /*
* Original author: Tobias Rohde <tobiasr .at. uw.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2017 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Windows.Forms;
using pwiz.Skyline.Model;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
namespace pwiz.Skyline.Controls.Graphs
{
public sealed partial class AreaCVToolbar : GraphSummaryToolbar //UserControl // for designer
{
private readonly Timer _timer;
private int _standardTypeCount;
public AreaCVToolbar(GraphSummary graphSummary) :
base(graphSummary)
{
InitializeComponent();
toolStripNumericDetections.NumericUpDownControl.ValueChanged += NumericUpDownControl_ValueChanged;
toolStripComboGroup.SelectedIndexChanged += toolStripComboGroup_SelectedIndexChanged;
_timer = new Timer
{
Interval = 100
};
_timer.Tick += timer_Tick;
}
protected override void OnHandleDestroyed(EventArgs e)
{
_timer.Stop();
_timer.Tick -= timer_Tick;
base.OnHandleDestroyed(e);
}
void timer_Tick(object sender, EventArgs e)
{
_timer.Stop();
_graphSummary.UpdateUIWithoutToolbar();
}
private void toolStripComboGroup_SelectedIndexChanged(object sender, EventArgs e)
{
SetGroupIndex(toolStripComboGroup.SelectedIndex);
}
public void SetGroupIndex(int index)
{
if (index == 0)
{
Program.MainWindow.SetAreaCVAnnotation(null, false);
var results = _graphSummary.DocumentUIContainer.DocumentUI.MeasuredResults;
toolStripNumericDetections.NumericUpDownControl.Maximum =
results != null ? results.Chromatograms.Count : 0;
}
else
{
Program.MainWindow.SetAreaCVAnnotation(toolStripComboGroup.Items[index], false);
var document = _graphSummary.DocumentUIContainer.DocumentUI;
var groupByGroup =
ReplicateValue.FromPersistedString(document.Settings, AreaGraphController.GroupByGroup);
toolStripNumericDetections.NumericUpDownControl.Maximum = AnnotationHelper
.GetReplicateIndices(document, groupByGroup, AreaGraphController.GroupByAnnotation).Length;
}
if (IsCurrentDataCached())
{
_graphSummary.UpdateUIWithoutToolbar();
return;
}
_timer.Stop();
_timer.Start();
}
private void toolStripComboNormalizedTo_SelectedIndexChanged(object sender, EventArgs e)
{
SetNormalizationIndex(toolStripComboNormalizedTo.SelectedIndex);
}
public void SetNormalizationIndex(int index)
{
// TODO(Tobi): Fix this. It causes an error loading a saved layout with an Area CV graph and normalization meth
if (Program.MainWindow == null)
return;
if (index < _standardTypeCount)
{
Program.MainWindow.SetNormalizationMethod(AreaCVNormalizationMethod.ratio, index, false);
}
else
{
index -= _standardTypeCount;
if (!_graphSummary.DocumentUIContainer.DocumentUI.Settings.HasGlobalStandardArea)
++index;
var normalizationMethod = AreaCVNormalizationMethod.none;
switch (index)
{
case 0:
normalizationMethod =
_graphSummary.DocumentUIContainer.Document.Settings.HasGlobalStandardArea
? AreaCVNormalizationMethod.global_standards
: AreaCVNormalizationMethod.medians;
break;
case 1:
normalizationMethod = AreaCVNormalizationMethod.medians;
break;
case 2:
normalizationMethod = AreaCVNormalizationMethod.none;
break;
}
Program.MainWindow.SetNormalizationMethod(normalizationMethod, -1, false);
}
if (IsCurrentDataCached())
{
_graphSummary.UpdateUIWithoutToolbar();
return;
}
_timer.Stop();
_timer.Start();
}
private void NumericUpDownControl_ValueChanged(object sender, EventArgs e)
{
SetMinimumDetections((int)toolStripNumericDetections.NumericUpDownControl.Value);
}
public void SetMinimumDetections(int min)
{
AreaGraphController.MinimumDetections = min;
if (IsCurrentDataCached())
{
_graphSummary.UpdateUIWithoutToolbar();
return;
}
_timer.Stop();
_timer.Start();
}
private bool IsCurrentDataCached()
{
var info = _graphSummary.GraphPanes.FirstOrDefault() as IAreaCVHistogramInfo;
if (info == null)
return false;
var document = _graphSummary.DocumentUIContainer.DocumentUI;
return info.Cache.IsValidFor(document, new AreaCVGraphData.AreaCVGraphSettings(document.Settings, _graphSummary.Type)) &&
info.Cache.Get(ReplicateValue.FromPersistedString(document.Settings, AreaGraphController.GroupByGroup),
AreaGraphController.GroupByAnnotation,
AreaGraphController.MinimumDetections,
AreaGraphController.NormalizationMethod,
AreaGraphController.AreaCVRatioIndex) != null;
}
private void toolStripProperties_Click(object sender, EventArgs e)
{
using (var dlgProperties = new AreaCVToolbarProperties(_graphSummary))
{
if (dlgProperties.ShowDialog(FormEx.GetParentForm(this)) == DialogResult.OK)
_graphSummary.UpdateUI();
}
}
public override bool Visible
{
get { return true; }
}
public override void OnDocumentChanged(SrmDocument oldDocument, SrmDocument newDocument)
{
}
public override void UpdateUI()
{
var document = _graphSummary.DocumentUIContainer.DocumentUI;
if (!document.Settings.HasResults)
return;
var groupsVisible = AreaGraphController.GroupByGroup != null;
toolStripLabel1.Visible = toolStripComboGroup.Visible = groupsVisible;
var detectionsVisiblePrev = toolStripLabel2.Visible && toolStripNumericDetections.Visible && toolStripLabel3.Visible;
var detectionsVisible = AreaGraphController.ShouldUseQValues(document);
toolStripLabel2.Visible = toolStripNumericDetections.Visible = toolStripLabel3.Visible = detectionsVisible;
if (detectionsVisible)
{
toolStripNumericDetections.NumericUpDownControl.Minimum = 2;
if (AreaGraphController.GroupByGroup == null || AreaGraphController.GroupByAnnotation == null)
toolStripNumericDetections.NumericUpDownControl.Maximum = document.MeasuredResults.Chromatograms.Count;
else
toolStripNumericDetections.NumericUpDownControl.Maximum = AnnotationHelper.GetReplicateIndices(document, ReplicateValue.FromPersistedString(document.Settings, AreaGraphController.GroupByGroup), AreaGraphController.GroupByAnnotation).Length;
if (!detectionsVisiblePrev)
toolStripNumericDetections.NumericUpDownControl.Value = 2;
}
if (groupsVisible)
{
var annotations = new[] {Resources.GraphSummary_UpdateToolbar_All}.Concat(
AnnotationHelper.GetPossibleAnnotations(document,
ReplicateValue.FromPersistedString(document.Settings, AreaGraphController.GroupByGroup))
.Except(new object[] {null})).ToArray();
toolStripComboGroup.Items.Clear();
// ReSharper disable once CoVariantArrayConversion
toolStripComboGroup.Items.AddRange(annotations);
if (AreaGraphController.GroupByAnnotation != null)
toolStripComboGroup.SelectedItem = AreaGraphController.GroupByAnnotation;
else
toolStripComboGroup.SelectedIndex = 0;
}
var mods = _graphSummary.DocumentUIContainer.DocumentUI.Settings.PeptideSettings.Modifications;
var standardTypes = mods.RatioInternalStandardTypes;
toolStripComboNormalizedTo.Items.Clear();
_standardTypeCount = 0;
if (mods.HasHeavyModifications)
{
// ReSharper disable once CoVariantArrayConversion
toolStripComboNormalizedTo.Items.AddRange(standardTypes.Select(s => s.Title).ToArray());
_standardTypeCount = standardTypes.Count;
}
var hasGlobalStandard = _graphSummary.DocumentUIContainer.DocumentUI.Settings.HasGlobalStandardArea;
if (hasGlobalStandard)
toolStripComboNormalizedTo.Items.Add(Resources.AreaCVToolbar_UpdateUI_Global_standards);
toolStripComboNormalizedTo.Items.Add(Resources.AreaCVToolbar_UpdateUI_Medians);
toolStripComboNormalizedTo.Items.Add(Resources.AreaCVToolbar_UpdateUI_None);
if (AreaGraphController.NormalizationMethod == AreaCVNormalizationMethod.ratio)
toolStripComboNormalizedTo.SelectedIndex = AreaGraphController.AreaCVRatioIndex;
else
{
var index = _standardTypeCount + (int) AreaGraphController.NormalizationMethod;
if (!hasGlobalStandard)
--index;
toolStripComboNormalizedTo.SelectedIndex = index;
}
}
#region Functional Test Support
public int MinDetections { get { return (int) toolStripNumericDetections.NumericUpDownControl.Minimum; } }
public int Detections { get { return (int) toolStripNumericDetections.NumericUpDownControl.Value; } }
public int MaxDetections { get { return (int) toolStripNumericDetections.NumericUpDownControl.Maximum; } }
public bool DetectionsVisible { get { return toolStripLabel2.Visible && toolStripNumericDetections.Visible && toolStripLabel3.Visible; } }
public bool GroupsVisible { get { return toolStripLabel1.Visible && toolStripComboGroup.Visible; } }
public IEnumerable<object> Annotations
{
get { return toolStripComboGroup.Items.Cast<object>(); }
set
{
toolStripComboGroup.Items.Clear();
// ReSharper disable once CoVariantArrayConversion
toolStripComboGroup.Items.AddRange(value.ToArray());
}
}
public IEnumerable<string> NormalizationMethods
{
get { return toolStripComboNormalizedTo.Items.Cast<string>(); }
set
{
toolStripComboNormalizedTo.Items.Clear();
// ReSharper disable once CoVariantArrayConversion
toolStripComboNormalizedTo.Items.AddRange(value.ToArray());
}
}
#endregion
}
} | 1 | 13,588 | This seems like left-over debugging code. I think I fixed this to no longer be necessary, switching to UserControl for the designer. | ProteoWizard-pwiz | .cs |