| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
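Each record is a fill-in-the-middle (FIM) code-completion example: `prefix` and `suffix` hold the source code surrounding a masked span, `middle` holds the held-out span itself, and `fim_type` labels how the span was chosen, taking one of the four classes seen in the rows below (`conditional_block`, `random_line_split`, `identifier_body`, `identifier_name`). Within each sample row, `|` separates the columns of a single record, and long cells are truncated by the viewer.

A minimal sketch of how a record might be turned into a FIM training prompt follows. The repository id and the PSM sentinel strings are assumptions for illustration, not part of this card; real tokenizers (e.g. StarCoder-style `<fim_prefix>`/`<fim_suffix>`/`<fim_middle>`) define their own tokens:

```python
from datasets import load_dataset

# Hypothetical dataset path -- substitute the real repository id.
ds = load_dataset("org/code-fim-examples", split="train")

# Assumed PSM (prefix-suffix-middle) sentinel strings.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_fim_prompt(row):
    # The model is trained to emit row["middle"] after this prompt.
    return f'{FIM_PREFIX}{row["prefix"]}{FIM_SUFFIX}{row["suffix"]}{FIM_MIDDLE}'

def reassemble(row):
    # Concatenating the three columns reconstructs the original fragment.
    return row["prefix"] + row["middle"] + row["suffix"]

row = ds[0]
print(row["file_name"], row["fim_type"])
print(to_fim_prompt(row)[:120])
```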
indexable.py | the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.indexable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .nameable import Nameable
from .updateable import Updateable
from ..transformations import __fixed__
from operator import delitem
from functools import reduce
from collections import OrderedDict
class Indexable(Nameable, Updateable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!! (As in ties etc.)
Adding a constraint to a Parameter means to tell the highest parent that
the constraint was added and making sure that all parameters covered
by this object are indeed conforming to the constraint.
:func:`constrain()` and :func:`unconstrain()` are main methods here
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._index_operations = OrderedDict()
def __setstate__(self, state):
super(Indexable, self).__setstate__(state)
for name in self._index_operations:
self._add_io(name, self._index_operations[name])
#@property
#def _index_operations(self):
# try:
# return self._index_operations_dict
# except AttributeError:
# self._index_operations_dict = OrderedDict()
# return self._index_operations_dict
#@_index_operations.setter
#def _index_operations(self, io):
# self._index_operations_dict = io
def add_index_operation(self, name, operations):
"""
Add index operation with name to the operations given.
raises: attribute error if operations exist.
"""
if name not in self._index_operations:
|
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
def _add_io(self, name, operations):
self._index_operations[name] = operations
def do_raise(self, x):
self._index_operations.__setitem__(name, x)
self._connect_fixes()
self._notify_parent_change()
#raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name))
setattr(Indexable, name, property(fget=lambda self: self._index_operations[name],
fset=do_raise))
def remove_index_operation(self, name):
if name in self._index_operations:
delitem(self._index_operations, name)
#delattr(self, name)
else:
raise AttributeError("No index operation with the name {}".format(name))
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
#===========================================================================
# Indexable
#===========================================================================
def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
### Global index operations (from highest_parent)
### These indices are for gradchecking, so that we
### can index the optimizer array and manipulate it directly
### The indices here do not reflect the indices in
### index_operations, as index operations handle
### the offset themselves and can be set directly
### without doing the offset.
def _raveled_index_for(self, param):
"""
get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param)
def _raveled_index_for_transformed(self, param):
"""
get the raveled index for a param for the transformed parameter array
(optimizer array).
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
### These indices are just the raveled index for self
### These are in the index_operations are used for them
### The index_operations then handle the offsets themselves
### This makes it easier to test and handle indices
### as the index operations framework is in its own
### corner and can be set significantly better without
### being inside the parameterized scope.
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
######
#===========================================================================
# Tie parameters together
# TODO: create own class for tieing and remapping
#===========================================================================
# def _has_ties(self):
# if self._highest_parent_.tie.tied_param is None:
# return False
# if self.has_parent():
# return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
# return True
#
# def tie_together(self):
# self._highest_parent_.tie.add_tied_parameter(self)
# self._highest_parent_._set_fixed(self,self._raveled_index())
# self._trigger_params_changed()
#===============================================================================
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automized for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper preventing copy code.
This adds the given what (transformation, prior etc) to parameter index operations which.
reconstrained are reconstrained indices.
warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self | self._add_io(name, operations) | conditional_block |
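The record above illustrates the `conditional_block` class: the masked middle is the body of a branch. The lone `|` in the prefix marks the hole, and the held-out statement `self._add_io(name, operations)` belongs under `if name not in self._index_operations:` in `add_index_operation`.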
indexable.py | retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.indexable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .nameable import Nameable
from .updateable import Updateable
from ..transformations import __fixed__
from operator import delitem
from functools import reduce
from collections import OrderedDict
class Indexable(Nameable, Updateable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!! (As in ties etc.)
Adding a constraint to a Parameter means to tell the highest parent that
the constraint was added and making sure that all parameters covered
by this object are indeed conforming to the constraint.
:func:`constrain()` and :func:`unconstrain()` are main methods here
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._index_operations = OrderedDict()
def __setstate__(self, state):
super(Indexable, self).__setstate__(state)
for name in self._index_operations:
self._add_io(name, self._index_operations[name])
#@property
#def _index_operations(self):
# try:
# return self._index_operations_dict
# except AttributeError:
# self._index_operations_dict = OrderedDict()
# return self._index_operations_dict
#@_index_operations.setter
#def _index_operations(self, io):
# self._index_operations_dict = io
def add_index_operation(self, name, operations):
"""
Add index operation with name to the operations given.
raises: attribute error if operations exist.
"""
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
def _add_io(self, name, operations):
self._index_operations[name] = operations
def do_raise(self, x):
self._index_operations.__setitem__(name, x)
self._connect_fixes()
self._notify_parent_change()
#raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name))
setattr(Indexable, name, property(fget=lambda self: self._index_operations[name],
fset=do_raise))
def remove_index_operation(self, name):
if name in self._index_operations:
delitem(self._index_operations, name)
#delattr(self, name)
else:
raise AttributeError("No index operation with the name {}".format(name))
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
#===========================================================================
# Indexable
#=========================================================================== | """
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
### Global index operations (from highest_parent)
### These indices are for gradchecking, so that we
### can index the optimizer array and manipulate it directly
### The indices here do not reflect the indices in
### index_operations, as index operations handle
### the offset themselves and can be set directly
### without doing the offset.
def _raveled_index_for(self, param):
"""
get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param)
def _raveled_index_for_transformed(self, param):
"""
get the raveled index for a param for the transformed parameter array
(optimizer array).
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
### These indices are just the raveled index for self
### These are in the index_operations are used for them
### The index_operations then handle the offsets themselves
### This makes it easier to test and handle indices
### as the index operations framework is in its own
### corner and can be set significantly better without
### being inside the parameterized scope.
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
######
#===========================================================================
# Tie parameters together
# TODO: create own class for tieing and remapping
#===========================================================================
# def _has_ties(self):
# if self._highest_parent_.tie.tied_param is None:
# return False
# if self.has_parent():
# return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
# return True
#
# def tie_together(self):
# self._highest_parent_.tie.add_tied_parameter(self)
# self._highest_parent_._set_fixed(self,self._raveled_index())
# self._trigger_params_changed()
#===============================================================================
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automized for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper preventing copy code.
This adds the given what (transformation, prior etc) to parameter index operations which.
reconstrained are reconstrained indices.
warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self | def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param. | random_line_split |
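`random_line_split` cuts the middle at an arbitrary line boundary rather than a syntactic unit: in the record above, the held-out span opens at the `def _offset_for(self, param):` line and stops partway through the method's docstring, so the suffix resumes mid-docstring.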
indexable.py | retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.indexable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .nameable import Nameable
from .updateable import Updateable
from ..transformations import __fixed__
from operator import delitem
from functools import reduce
from collections import OrderedDict
class Indexable(Nameable, Updateable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!! (As in ties etc.)
Adding a constraint to a Parameter means to tell the highest parent that
the constraint was added and making sure that all parameters covered
by this object are indeed conforming to the constraint.
:func:`constrain()` and :func:`unconstrain()` are main methods here
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._index_operations = OrderedDict()
def __setstate__(self, state):
super(Indexable, self).__setstate__(state)
for name in self._index_operations:
self._add_io(name, self._index_operations[name])
#@property
#def _index_operations(self):
# try:
# return self._index_operations_dict
# except AttributeError:
# self._index_operations_dict = OrderedDict()
# return self._index_operations_dict
#@_index_operations.setter
#def _index_operations(self, io):
# self._index_operations_dict = io
def add_index_operation(self, name, operations):
"""
Add index operation with name to the operations given.
raises: attribute error if operations exist.
"""
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
def _add_io(self, name, operations):
self._index_operations[name] = operations
def do_raise(self, x):
self._index_operations.__setitem__(name, x)
self._connect_fixes()
self._notify_parent_change()
#raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name))
setattr(Indexable, name, property(fget=lambda self: self._index_operations[name],
fset=do_raise))
def remove_index_operation(self, name):
if name in self._index_operations:
delitem(self._index_operations, name)
#delattr(self, name)
else:
raise AttributeError("No index operation with the name {}".format(name))
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
#===========================================================================
# Indexable
#===========================================================================
def _offset_for(self, param):
|
### Global index operations (from highest_parent)
### These indices are for gradchecking, so that we
### can index the optimizer array and manipulate it directly
### The indices here do not reflect the indices in
### index_operations, as index operations handle
### the offset themselves and can be set directly
### without doing the offset.
def _raveled_index_for(self, param):
"""
get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param)
def _raveled_index_for_transformed(self, param):
"""
get the raveled index for a param for the transformed parameter array
(optimizer array).
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
### These indices are just the raveled index for self
### These are in the index_operations are used for them
### The index_operations then handle the offsets themselves
### This makes it easier to test and handle indices
### as the index operations framework is in its own
### corner and can be set significantly better without
### being inside the parameterized scope.
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
######
#===========================================================================
# Tie parameters together
# TODO: create own class for tieing and remapping
#===========================================================================
# def _has_ties(self):
# if self._highest_parent_.tie.tied_param is None:
# return False
# if self.has_parent():
# return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
# return True
#
# def tie_together(self):
# self._highest_parent_.tie.add_tied_parameter(self)
# self._highest_parent_._set_fixed(self,self._raveled_index())
# self._trigger_params_changed()
#===============================================================================
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automized for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper preventing copy code.
This adds the given what (transformation, prior etc) to parameter index operations which.
reconstrained are reconstrained indices.
warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self | """
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0 | identifier_body |
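`identifier_body` masks the entire body of a definition: here the middle is `_offset_for`'s docstring and statements through `return 0`, leaving only the `def` line itself in the prefix.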
indexable.py | the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.indexable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .nameable import Nameable
from .updateable import Updateable
from ..transformations import __fixed__
from operator import delitem
from functools import reduce
from collections import OrderedDict
class Indexable(Nameable, Updateable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!! (As in ties etc.)
Adding a constraint to a Parameter means to tell the highest parent that
the constraint was added and making sure that all parameters covered
by this object are indeed conforming to the constraint.
:func:`constrain()` and :func:`unconstrain()` are main methods here
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._index_operations = OrderedDict()
def __setstate__(self, state):
super(Indexable, self).__setstate__(state)
for name in self._index_operations:
self._add_io(name, self._index_operations[name])
#@property
#def _index_operations(self):
# try:
# return self._index_operations_dict
# except AttributeError:
# self._index_operations_dict = OrderedDict()
# return self._index_operations_dict
#@_index_operations.setter
#def _index_operations(self, io):
# self._index_operations_dict = io
def add_index_operation(self, name, operations):
"""
Add index operation with name to the operations given.
raises: attribute error if operations exist.
"""
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
def _add_io(self, name, operations):
self._index_operations[name] = operations
def do_raise(self, x):
self._index_operations.__setitem__(name, x)
self._connect_fixes()
self._notify_parent_change()
#raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name))
setattr(Indexable, name, property(fget=lambda self: self._index_operations[name],
fset=do_raise))
def | (self, name):
if name in self._index_operations:
delitem(self._index_operations, name)
#delattr(self, name)
else:
raise AttributeError("No index operation with the name {}".format(name))
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
#===========================================================================
# Indexable
#===========================================================================
def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
### Global index operations (from highest_parent)
### These indices are for gradchecking, so that we
### can index the optimizer array and manipulate it directly
### The indices here do not reflect the indices in
### index_operations, as index operations handle
### the offset themselves and can be set directly
### without doing the offset.
def _raveled_index_for(self, param):
"""
get the raveled index for a param
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param)
def _raveled_index_for_transformed(self, param):
"""
get the raveled index for a param for the transformed parameter array
(optimizer array).
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
### These indices are just the raveled index for self
### These are in the index_operations are used for them
### The index_operations then handle the offsets themselves
### This makes it easier to test and handle indices
### as the index operations framework is in its own
### corner and can be set significantly better without
### being inside the parameterized scope.
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
######
#===========================================================================
# Tie parameters together
# TODO: create own class for tieing and remapping
#===========================================================================
# def _has_ties(self):
# if self._highest_parent_.tie.tied_param is None:
# return False
# if self.has_parent():
# return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
# return True
#
# def tie_together(self):
# self._highest_parent_.tie.add_tied_parameter(self)
# self._highest_parent_._set_fixed(self,self._raveled_index())
# self._trigger_params_changed()
#===============================================================================
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automized for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper preventing copy code.
This adds the given what (transformation, prior etc) to parameter index operations which.
reconstrained are reconstrained indices.
warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self | remove_index_operation | identifier_name |
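`identifier_name` masks only a name: the middle is the bare identifier `remove_index_operation`, with the prefix ending at `def ` and the suffix picking up at `(self, name):`.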
parser.go | [2]
b.Variable = name
}
if k == bTest {
// Parse operator.
b.Operator = strings.TrimSpace(parts[1])
// Parse value. Can use a variable.
b.Value = strings.TrimSpace(parts[2])
// Parse offset.
offset, err := strconv.Atoi(strings.TrimSpace(parts[3]))
if err != nil {
return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err)
}
b.Offset = offset
}
// The rest of the options, for all types not b64decode
for i, l := b.Kind.minLen(), len(parts); i < l; i++ {
parts[i] = strings.TrimSpace(parts[i])
b.Options = append(b.Options, parts[i])
}
return b, nil
}
// parseFlowbit parses a flowbit.
func parseFlowbit(s string) (*Flowbit, error) {
parts := strings.Split(s, ",")
if len(parts) < 1 {
return nil, fmt.Errorf("couldn't parse flowbit string: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) {
return nil, fmt.Errorf("invalid action for flowbit: %s", a)
}
fb := &Flowbit{
Action: a,
}
if fb.Action == "noalert" && len(parts) > 1 {
return nil, fmt.Errorf("noalert shouldn't have a value")
}
if len(parts) == 2 {
fb.Value = strings.TrimSpace(parts[1])
}
return fb, nil
}
// parseXbit parses an xbit.
func parseXbit(s string) (*Xbit, error) {
parts := strings.Split(s, ",")
// All xbits must have an action, name and track
if len(parts) < 3 {
return nil, fmt.Errorf("not enough parts for xbits: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) {
return nil, fmt.Errorf("invalid action for xbits: %s", a)
}
xb := &Xbit{
Action: a,
Name: strings.TrimSpace(parts[1]),
}
// Track.
t := strings.Fields(parts[2])
if len(t) != 2 {
return nil, fmt.Errorf("wrong number of parts for track: %v", t)
}
if t[0] != "track" {
return nil, fmt.Errorf("%s should be 'track'", t[0])
}
xb.Track = t[1]
// Expire
if len(parts) == 4 {
e := strings.Fields(parts[3])
if len(e) != 2 {
return nil, fmt.Errorf("wrong number of parts for expire: %v", e)
}
if e[0] != "expire" {
return nil, fmt.Errorf("%s should be 'expire'", e[0])
}
xb.Expire = e[1]
}
return xb, nil
}
// parseFlowint parses a flowint.
func parseFlowint(s string) (*Flowint, error) {
parts := strings.Split(s, ",")
// All flowints must have a name and modifier
if len(parts) < 2 {
return nil, fmt.Errorf("not enough parts for flowint: %s", s)
}
// Ensure all actions are of valid type.
m := strings.TrimSpace(parts[1])
if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) {
return nil, fmt.Errorf("invalid modifier for flowint: %s", m)
}
fi := &Flowint{
Name: strings.TrimSpace(parts[0]),
Modifier: m,
}
if len(parts) == 3 {
fi.Value = strings.TrimSpace(parts[2])
}
return fi, nil
}
// containsUnescaped checks content whether special characters are properly escaped.
func containsUnescaped(s string) bool {
esc := false
for _, b := range s {
if esc {
switch b {
case '\\', ';', '"', ':':
esc = false
default:
return true
}
} else {
switch b {
case '\\':
esc = true
case ';', '"':
return true
}
}
}
return esc
}
func unquote(s string) string {
if strings.IndexByte(s, '"') < 0 {
return s
}
return strings.Replace(s, `\"`, `"`, -1)
}
func inSlice(str string, strings []string) bool {
for _, k := range strings {
if str == k {
return true
}
}
return false
}
// comment decodes a comment (commented rule, or just a comment.)
func (r *Rule) comment(key item, l *lexer) error {
if key.typ != itemComment {
panic("item is not a comment")
}
if r.Disabled {
// ignoring comment for rule with empty action
return nil
}
rule, err := parseRuleAux(key.value, true)
// If there was an error this means the comment is not a rule.
if err != nil {
return fmt.Errorf("this is not a rule: %s", err)
}
// We parsed a rule, this was a comment so set the rule to disabled.
rule.Disabled = true
// Overwrite the rule we're working on with the recently parsed, disabled rule.
*r = *rule
return nil
}
// action decodes an IDS rule option based on its key.
func (r *Rule) action(key item, l *lexer) error {
if key.typ != itemAction {
panic("item is not an action")
}
if !inSlice(key.value, []string{"alert", "drop", "pass"}) {
return fmt.Errorf("invalid action: %v", key.value)
}
r.Action = key.value
return nil
}
// protocol decodes an IDS rule protocol based on its key.
func (r *Rule) protocol(key item, l *lexer) error {
if key.typ != itemProtocol {
panic("item is not a protocol")
}
if !inSlice(key.value, appLayerProtocols) {
return fmt.Errorf("invalid protocol: %v", key.value)
}
r.Protocol = key.value
return nil
}
// network decodes an IDS rule network (networks and ports) based on its key.
func (r *Rule) network(key item, l *lexer) error {
// This is a hack. We use a regexp to replace the outer `,` with `___`
// to give us a discrete string to split on, avoiding the inner `,`.
// Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed.
tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]")
items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___")
// Validate that no items contain spaces.
for _, i := range items {
if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) {
return fmt.Errorf("network component contains spaces: %v", i)
}
}
switch key.typ {
case itemSourceAddress:
if validNetworks(items) {
r.Source.Nets = append(r.Source.Nets, items...)
} else {
return fmt.Errorf("some or all source ips are invalid: %v", items)
}
case itemSourcePort:
if portsValid(items) {
r.Source.Ports = append(r.Source.Ports, items...)
} else {
return fmt.Errorf("some or all source ports are invalid: %v", items)
}
case itemDestinationAddress:
if validNetworks(items) {
r.Destination.Nets = append(r.Destination.Nets, items...)
} else {
return fmt.Errorf("some or all destination ips are invalid: %v", items)
}
case itemDestinationPort:
if portsValid(items) {
r.Destination.Ports = append(r.Destination.Ports, items...)
} else {
return fmt.Errorf("some or all destination ports are invalid: %v", items)
}
default:
panic("item is not a network component")
}
return nil
}
// Validate that every item is between 1 and 65535.
func portsValid(p []string) bool | {
for _, u := range p {
if strings.Count(u, "[") != strings.Count(u, "]") {
// unbalanced groups.
return false
}
u = strings.TrimPrefix(u, "!")
// If this port range is a grouping, check the inner group.
if strings.HasPrefix(u, "[") {
if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) {
continue
}
return false
}
ports := portSplitRE.Split(u, -1)
for _, port := range ports {
port = strings.TrimPrefix(port, "!") | identifier_body |
|
parser.go | (content string) ([]byte, error) {
// Decode and replace all occurrences of hexadecimal content.
var errpanic error
defer func() {
r := recover()
if r != nil {
errpanic = fmt.Errorf("recovered from panic: %v", r)
}
}()
if containsUnescaped(content) {
return nil, fmt.Errorf("invalid special characters escaping")
}
b := escapeContent.ReplaceAllString(content, "$1")
b = hexRE.ReplaceAllStringFunc(b,
func(h string) string {
r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1))
if err != nil {
panic("invalid hexRE regexp")
}
return string(r)
})
return []byte(b), errpanic
}
// parsePCRE parses the components of a PCRE. Returns PCRE struct.
func parsePCRE(s string) (*PCRE, error) {
c := strings.Count(s, "/")
if c < 2 {
return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c)
}
l := strings.LastIndex(s, "/")
if l < 0 {
return nil, fmt.Errorf("couldn't find options in PCRE")
}
i := strings.Index(s, "/")
if i < 0 {
return nil, fmt.Errorf("couldn't find start of pattern")
}
return &PCRE{
Pattern: []byte(s[i+1 : l]),
Options: []byte(s[l+1:]),
}, nil
}
// parseLenMatch parses a LenMatch (like urilen).
func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) {
m := new(LenMatch)
m.Kind = k
switch {
// Simple case, no operators.
case !strings.ContainsAny(s, "><"):
// Ignore options after ','.
numTmp := strings.Split(s, ",")[0]
num, err := strconv.Atoi(strings.TrimSpace(numTmp))
if err != nil {
return nil, fmt.Errorf("%v is not an integer", s)
}
m.Num = num
// Leading operator, single number.
case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"):
m.Operator = s[0:1]
// Strip leading < or >.
numTmp := strings.TrimLeft(s, "><")
// Ignore options after ','.
numTmp = strings.Split(numTmp, ",")[0]
num, err := strconv.Atoi(strings.TrimSpace(numTmp))
if err != nil {
return nil, fmt.Errorf("%v is not an integer", s)
}
m.Num = num
// Min/Max center operator.
case strings.Contains(s, "<>"):
m.Operator = "<>"
parts := strings.Split(s, "<>")
if len(parts) != 2 {
return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts))
}
var min, max int
var err error
min, err = strconv.Atoi(strings.TrimSpace(parts[0]))
if err != nil {
return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0]))
}
maxTmp := strings.Split(parts[1], ",")[0]
max, err = strconv.Atoi(strings.TrimSpace(maxTmp))
if err != nil {
return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp))
}
// Do stuff to handle options here.
m.Min = min
m.Max = max
}
// Parse options:
if strings.Contains(s, ",") {
opts := strings.Split(s, ",")[1:]
for i, o := range opts {
opts[i] = strings.TrimSpace(o)
}
m.Options = opts
}
return m, nil
}
func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) {
if k != b64Decode {
return nil, fmt.Errorf("kind %v is not base64_decode", k)
}
b := new(ByteMatch)
b.Kind = k
// All options to base64_decode are optional, and specified by their keyword.
for _, p := range strings.Split(s, ",") {
v := strings.TrimSpace(p)
switch {
case strings.HasPrefix(v, "bytes"):
b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1])
case strings.HasPrefix(v, "offset"):
val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1])
i, err := strconv.Atoi(val)
if err != nil {
return nil, fmt.Errorf("offset is not an int: %s; %s", val, err)
}
if i < 1 {
return nil, fmt.Errorf("offset must be positive, non-zero values only")
}
b.Offset = i
case strings.HasPrefix(v, "relative"):
b.Options = []string{"relative"}
}
}
return b, nil
}
// parseByteMatch parses a ByteMatch.
func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) {
b := new(ByteMatch)
b.Kind = k
parts := strings.Split(s, ",")
// Num bytes is required for all byteMatchType keywords.
if len(parts) < 1 {
return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts))
}
b.NumBytes = strings.TrimSpace(parts[0])
if len(parts) < b.Kind.minLen() {
return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts))
}
if k == bExtract || k == bJump {
// Parse offset.
offset, err := strconv.Atoi(strings.TrimSpace(parts[1]))
if err != nil {
return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err)
}
b.Offset = offset
}
if k == bExtract {
// Parse variable name.
name := parts[2]
b.Variable = name
}
if k == bTest {
// Parse operator.
b.Operator = strings.TrimSpace(parts[1])
// Parse value. Can use a variable.
b.Value = strings.TrimSpace(parts[2])
// Parse offset.
offset, err := strconv.Atoi(strings.TrimSpace(parts[3]))
if err != nil {
return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err)
}
b.Offset = offset
}
// The rest of the options, for all types not b64decode
for i, l := b.Kind.minLen(), len(parts); i < l; i++ {
parts[i] = strings.TrimSpace(parts[i])
b.Options = append(b.Options, parts[i])
}
return b, nil
}
// parseFlowbit parses a flowbit.
func parseFlowbit(s string) (*Flowbit, error) {
parts := strings.Split(s, ",")
if len(parts) < 1 {
return nil, fmt.Errorf("couldn't parse flowbit string: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) {
return nil, fmt.Errorf("invalid action for flowbit: %s", a)
}
fb := &Flowbit{
Action: a,
}
if fb.Action == "noalert" && len(parts) > 1 {
return nil, fmt.Errorf("noalert shouldn't have a value")
}
if len(parts) == 2 {
fb.Value = strings.TrimSpace(parts[1])
}
return fb, nil
}
// parseXbit parses an xbit.
func parseXbit(s string) (*Xbit, error) {
parts := strings.Split(s, ",")
// All xbits must have an action, name and track
if len(parts) < 3 {
return nil, fmt.Errorf("not enough parts for xbits: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) {
return nil, fmt.Errorf("invalid action for xbits: %s", a)
}
xb := &Xbit{
Action: a,
Name: strings.TrimSpace(parts[1]),
}
// Track.
t := strings.Fields(parts[2])
if len(t) != 2 {
return nil, fmt.Errorf("wrong number of parts for track: %v", t)
}
if t[0] != "track" {
return nil, fmt.Errorf("%s should be 'track'", t[0])
}
xb.Track = t[1]
// Expire
if len(parts) == 4 {
e := strings.Fields(parts[3])
if len(e) != 2 {
return nil, fmt.Errorf("wrong number of parts for expire: %v", e)
}
if e[0] != "expire" {
return nil, fmt.Errorf("%s should be 'expire'", e[0])
}
xb.Expire = e[1]
}
return xb, nil | parseContent | identifier_name |
|
parser.go | group.
if strings.HasPrefix(u, "[") {
if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) {
continue
}
return false
}
ports := portSplitRE.Split(u, -1)
for _, port := range ports {
port = strings.TrimPrefix(port, "!")
if port == "any" || port == "" || strings.HasPrefix(port, "$") {
continue
}
x, err := strconv.Atoi(port)
if err != nil {
return false
}
if x > 65535 || x < 0 {
return false
}
}
}
return true
}
// Validate item is either a valid ip or ip range.
func validNetwork(i string) bool {
_, _, err := net.ParseCIDR(i)
if err == nil {
return true
}
if net.ParseIP(i) != nil {
return true
}
return false
}
// Validate every item is either a valid ip or ip range.
func validNetworks(nets []string) bool {
for _, net := range nets {
if strings.Count(net, "[") != strings.Count(net, "]") {
// unbalanced groups.
return false
}
net = strings.TrimPrefix(net, "!")
// If this network is a grouping, check the inner group.
if strings.HasPrefix(net, "[") || strings.Contains(net, ",") {
if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) {
continue
}
return false
}
switch {
case net == "any":
continue
case strings.HasPrefix(net, "$"):
continue
case !validNetwork(net):
return false
}
}
return true
}
// direction decodes an IDS rule direction based on its key.
func (r *Rule) direction(key item, l *lexer) error {
if key.typ != itemDirection {
panic("item is not a direction")
}
switch key.value {
case "->":
r.Bidirectional = false
case "<>":
r.Bidirectional = true
default:
return fmt.Errorf("invalid direction operator %q", key.value)
}
return nil
}
var dataPosition = pktData
// option decodes an IDS rule option based on its key.
func (r *Rule) option(key item, l *lexer) error {
if key.typ != itemOptionKey {
panic("item is not an option key")
}
switch {
// TODO: Many of these simple tags could be factored into nicer structures.
case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target",
"flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos",
"window",
"threshold", "detection_filter",
"dce_iface", "dce_opnum", "dce_stub_data",
"asn1"}):
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return fmt.Errorf("no valid value for %s tag", key.value)
}
if r.Tags == nil {
r.Tags = make(map[string]string)
}
r.Tags[key.value] = nextItem.value
case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}):
r.Statements = append(r.Statements, key.value)
case inSlice(key.value, tlsTags):
t := &TLSTag{
Key: key.value,
}
nextItem := l.nextItem()
if nextItem.typ == itemNot {
t.Negate = true
nextItem = l.nextItem()
}
t.Value = nextItem.value
r.TLSTags = append(r.TLSTags, t)
case key.value == "stream_size":
nextItem := l.nextItem()
parts := strings.Split(nextItem.value, ",")
if len(parts) != 3 {
return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts))
}
num, err := strconv.Atoi(strings.TrimSpace(parts[2]))
if err != nil {
return fmt.Errorf("comparison number is not an integer: %v", parts[2])
}
r.StreamMatch = &StreamCmp{
Direction: parts[0],
Operator: parts[1],
Number: num,
}
case key.value == "reference":
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return errors.New("no valid value for reference")
}
refs := strings.SplitN(nextItem.value, ",", 2)
if len(refs) != 2 {
return fmt.Errorf("invalid reference definition: %s", refs)
}
r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]})
case key.value == "metadata":
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return errors.New("no valid value for metadata")
}
metas := metaSplitRE.Split(nextItem.value, -1)
for _, kv := range metas {
metaTmp := strings.SplitN(kv, " ", 2)
if len(metaTmp) != 2 {
return fmt.Errorf("invalid metadata definition: %s", metaTmp)
}
r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])})
}
case key.value == "sid":
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return errors.New("no value for option sid")
}
sid, err := strconv.Atoi(nextItem.value)
if err != nil {
return fmt.Errorf("invalid sid %s", nextItem.value)
}
r.SID = sid
case key.value == "rev":
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return errors.New("no value for option rev")
}
rev, err := strconv.Atoi(nextItem.value)
if err != nil {
return fmt.Errorf("invalid rev %s", nextItem.value)
}
r.Revision = rev
case key.value == "msg":
nextItem := l.nextItem()
if nextItem.typ != itemOptionValueString {
return errors.New("no value for option msg")
}
r.Description = nextItem.value
case isStickyBuffer(key.value):
var d DataPos
var err error
if d, err = StickyBuffer(key.value); err != nil {
return err
}
dataPosition = d
case inSlice(key.value, []string{"content", "uricontent"}):
nextItem := l.nextItem()
negate := false
if nextItem.typ == itemNot {
nextItem = l.nextItem()
negate = true
}
if nextItem.typ == itemOptionValueString {
c, err := parseContent(nextItem.value)
if err != nil {
return err
}
var options []*ContentOption
if key.value == "uricontent" {
options = append(options, &ContentOption{Name: "http_uri"})
}
con := &Content{
DataPosition: dataPosition,
Pattern: c,
Negate: negate,
Options: options,
}
r.Matchers = append(r.Matchers, con)
} else {
return fmt.Errorf("invalid type %q for option content", nextItem.typ)
}
case inSlice(key.value, []string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header",
"http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg",
"http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}):
lastContent := r.LastContent()
if lastContent == nil {
return fmt.Errorf("invalid content option %q with no content match", key.value)
}
lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value})
case inSlice(key.value, []string{"depth", "distance", "offset", "within"}):
lastContent := r.LastContent()
if lastContent == nil {
return fmt.Errorf("invalid content option %q with no content match", key.value)
}
nextItem := l.nextItem()
if nextItem.typ != itemOptionValue {
return fmt.Errorf("no value for content option %s", key.value)
}
lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value})
case key.value == "fast_pattern":
lastContent := r.LastContent()
if lastContent == nil {
return fmt.Errorf("invalid content option %q with no content match", key.value)
}
var (
only bool
offset int
length int
)
nextItem := l.nextItem()
if nextItem.typ == itemOptionValue {
v := nextItem.value
switch {
case v == "only": | random_line_split |
||
parser.go | ", b.Kind, parts[1], err)
}
b.Offset = offset
}
if k == bExtract {
// Parse variable name.
name := parts[2]
b.Variable = name
}
if k == bTest {
// Parse operator.
b.Operator = strings.TrimSpace(parts[1])
// Parse value. Can use a variable.
b.Value = strings.TrimSpace(parts[2])
// Parse offset.
offset, err := strconv.Atoi(strings.TrimSpace(parts[3]))
if err != nil {
return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err)
}
b.Offset = offset
}
// The rest of the options, for all types not b64decode
for i, l := b.Kind.minLen(), len(parts); i < l; i++ {
parts[i] = strings.TrimSpace(parts[i])
b.Options = append(b.Options, parts[i])
}
return b, nil
}
// parseFlowbit parses a flowbit.
func parseFlowbit(s string) (*Flowbit, error) {
parts := strings.Split(s, ",")
if len(parts) < 1 {
return nil, fmt.Errorf("couldn't parse flowbit string: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) {
return nil, fmt.Errorf("invalid action for flowbit: %s", a)
}
fb := &Flowbit{
Action: a,
}
if fb.Action == "noalert" && len(parts) > 1 {
return nil, fmt.Errorf("noalert shouldn't have a value")
}
if len(parts) == 2 {
fb.Value = strings.TrimSpace(parts[1])
}
return fb, nil
}
// parseXbit parses an xbit.
func parseXbit(s string) (*Xbit, error) {
parts := strings.Split(s, ",")
// All xbits must have an action, name and track
if len(parts) < 3 {
return nil, fmt.Errorf("not enough parts for xbits: %s", s)
}
// Ensure all actions are of valid type.
a := strings.TrimSpace(parts[0])
if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) {
return nil, fmt.Errorf("invalid action for xbits: %s", a)
}
xb := &Xbit{
Action: a,
Name: strings.TrimSpace(parts[1]),
}
// Track.
t := strings.Fields(parts[2])
if len(t) != 2 {
return nil, fmt.Errorf("wrong number of parts for track: %v", t)
}
if t[0] != "track" {
return nil, fmt.Errorf("%s should be 'track'", t[0])
}
xb.Track = t[1]
// Expire
if len(parts) == 4 {
e := strings.Fields(parts[3])
if len(e) != 2 {
return nil, fmt.Errorf("wrong number of parts for expire: %v", e)
}
if e[0] != "expire" {
return nil, fmt.Errorf("%s should be 'expire'", e[0])
}
xb.Expire = e[1]
}
return xb, nil
}
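// A sketch of a typical xbit string (hypothetical values, for illustration only):
// "set,ET.dropsite,track ip_src,expire 60"
// -> &Xbit{Action: "set", Name: "ET.dropsite", Track: "ip_src", Expire: "60"}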
// parseFlowint parses a flowint.
func parseFlowint(s string) (*Flowint, error) {
parts := strings.Split(s, ",")
// All flowints must have a name and modifier
if len(parts) < 2 {
return nil, fmt.Errorf("not enough parts for flowint: %s", s)
}
// Ensure all actions are of valid type.
m := strings.TrimSpace(parts[1])
if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) {
return nil, fmt.Errorf("invalid modifier for flowint: %s", m)
}
fi := &Flowint{
Name: strings.TrimSpace(parts[0]),
Modifier: m,
}
if len(parts) == 3 {
fi.Value = strings.TrimSpace(parts[2])
}
return fi, nil
}
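// A sketch of a typical flowint string (hypothetical values, for illustration only):
// "http.anomaly.count,+,1"
// -> &Flowint{Name: "http.anomaly.count", Modifier: "+", Value: "1"}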
// containsUnescaped reports whether s contains special characters that are not properly escaped.
func containsUnescaped(s string) bool {
esc := false
for _, b := range s {
if esc {
switch b {
case '\\', ';', '"', ':':
esc = false
default:
return true
}
} else {
switch b {
case '\\':
esc = true
case ';', '"':
return true
}
}
}
return esc
}
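// A few illustrative cases (hypothetical inputs):
// containsUnescaped(`abc`) == false
// containsUnescaped(`a\;b`) == false // escaped semicolon is fine
// containsUnescaped(`a;b`) == true // bare semicolon must be escaped
// containsUnescaped(`a\qb`) == true // `\q` is not a recognized escape sequence
// containsUnescaped(`a\`) == true // trailing lone backslash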
func unquote(s string) string {
if strings.IndexByte(s, '"') < 0 {
return s
}
return strings.Replace(s, `\"`, `"`, -1)
}
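// For example, unquote(`a \"quoted\" value`) returns `a "quoted" value`,
// while a string without any `"` byte is returned unchanged.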
func inSlice(str string, ss []string) bool {
for _, k := range ss {
if str == k {
return true
}
}
return false
}
// comment decodes a comment (either a commented-out rule or a plain comment).
func (r *Rule) comment(key item, l *lexer) error {
if key.typ != itemComment {
panic("item is not a comment")
}
if r.Disabled {
// Ignore the comment if the rule is already disabled.
return nil
}
rule, err := parseRuleAux(key.value, true)
// If there was an error this means the comment is not a rule.
if err != nil {
return fmt.Errorf("this is not a rule: %s", err)
}
// We parsed a rule, this was a comment so set the rule to disabled.
rule.Disabled = true
// Overwrite the rule we're working on with the recently parsed, disabled rule.
*r = *rule
return nil
}
// action decodes an IDS rule option based on its key.
func (r *Rule) action(key item, l *lexer) error {
if key.typ != itemAction {
panic("item is not an action")
}
if !inSlice(key.value, []string{"alert", "drop", "pass"}) {
return fmt.Errorf("invalid action: %v", key.value)
}
r.Action = key.value
return nil
}
// protocol decodes an IDS rule protocol based on its key.
func (r *Rule) protocol(key item, l *lexer) error {
if key.typ != itemProtocol {
panic("item is not a protocol")
}
if !inSlice(key.value, appLayerProtocols) {
return fmt.Errorf("invalid protocol: %v", key.value)
}
r.Protocol = key.value
return nil
}
// network decodes an IDS rule network (networks and ports) based on its key.
func (r *Rule) network(key item, l *lexer) error {
// This is a hack. We use a regexp to replace the outer `,` with `___`
// to give us a discrete string to split on, avoiding the inner `,`.
// Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` is trimmed.
tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]")
items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___")
// Validate that no items contain spaces.
for _, i := range items {
if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) {
return fmt.Errorf("network component contains spaces: %v", i)
}
}
switch key.typ {
case itemSourceAddress:
if validNetworks(items) {
r.Source.Nets = append(r.Source.Nets, items...)
} else {
return fmt.Errorf("some or all source ips are invalid: %v", items)
}
case itemSourcePort:
if portsValid(items) {
r.Source.Ports = append(r.Source.Ports, items...)
} else {
return fmt.Errorf("some or all source ports are invalid: %v", items)
}
case itemDestinationAddress:
if validNetworks(items) {
r.Destination.Nets = append(r.Destination.Nets, items...)
} else {
return fmt.Errorf("some or all destination ips are invalid: %v", items)
}
case itemDestinationPort:
if portsValid(items) {
r.Destination.Ports = append(r.Destination.Ports, items...)
} else {
return fmt.Errorf("some or all destination ports are invalid: %v", items)
}
default:
panic("item is not a network component")
}
return nil
}
// Validate that every item is between 1 and 65535.
func portsValid(p []string) bool {
for _, u := range p {
if strings.Count(u, "[") != strings.Count(u, "]") {
// unbalanced groups.
return false
}
u = strings.TrimPrefix(u, "!")
// If this port range is a grouping, check the inner group.
if strings.HasPrefix(u, "[") {
if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) | {
continue
} | conditional_block |
|
types.go | }
type AddressMeta struct {
Label string `json:"label"`
Value string `json:"value"`
}
type CryptoDetails struct {
Address string `json:"address"`
Txid string `json:"txid"`
}
type DetailFields struct {
CryptoDetails CryptoDetails `json:"crypto_details"`
TradeDetails TradeDetails `json:"trade_details"`
}
type Kind string
const (
KindExchange Kind = "EXCHANGE"
KindFee Kind = "FEE"
KindInterest Kind = "INTEREST"
KindTransfer Kind = "TRANSFER"
)
type MarketInfo struct {
// Base currency code
BaseCurrency string `json:"base_currency"`
// Counter currency code
CounterCurrency string `json:"counter_currency"`
// Fee decimal places
FeeScale int64 `json:"fee_scale"`
// Unique identifier for the market
MarketId string `json:"market_id"`
// Maximum order price
MaxPrice decimal.Decimal `json:"max_price"`
// Maximum order volume
MaxVolume decimal.Decimal `json:"max_volume"`
// Minimum order price
MinPrice decimal.Decimal `json:"min_price"`
// Minimum order volume
MinVolume decimal.Decimal `json:"min_volume"`
// Price decimal places
PriceScale int64 `json:"price_scale"`
// Current market trading status:<br>
// <code>POST_ONLY</code> Trading is indefinitely suspended. This state is
// commonly used when new markets are being launched to give traders enough
// time to setup their orders before trading begins. When in this status,
// orders can only be posted as post-only.<br>
// <code>ACTIVE</code> Trading is fully enabled.<br>
// <code>SUSPENDED</code> Trading has been temporarily suspended due to very
// high volatility. When in this status, orders can only be posted as
// post-only.<br>
TradingStatus TradingStatus `json:"trading_status"`
// Volume decimal places
VolumeScale int64 `json:"volume_scale"`
}
type Order struct {
Base decimal.Decimal `json:"base"`
CompletedTimestamp Time `json:"completed_timestamp"`
Counter decimal.Decimal `json:"counter"`
CreationTimestamp Time `json:"creation_timestamp"`
ExpirationTimestamp Time `json:"expiration_timestamp"`
FeeBase decimal.Decimal `json:"fee_base"`
FeeCounter decimal.Decimal `json:"fee_counter"`
LimitPrice decimal.Decimal `json:"limit_price"`
LimitVolume decimal.Decimal `json:"limit_volume"`
OrderId string `json:"order_id"`
|
// <code>PENDING</code> The order has been placed. Some trades may have
// taken place but the order is not filled yet.<br>
// <code>COMPLETE</code> The order is no longer active. It has been settled
// or has been cancelled.
State OrderState `json:"state"`
// <code>BID</code> bid (buy) limit order.<br>
// <code>ASK</code> ask (sell) limit order.
Type OrderType `json:"type"`
}
type OrderBookEntry struct {
// Limit price at which orders are trading at
Price decimal.Decimal `json:"price"`
// The volume available at the limit price
Volume decimal.Decimal `json:"volume"`
}
type OrderState string
const (
OrderStateComplete OrderState = "COMPLETE"
OrderStatePending OrderState = "PENDING"
)
type OrderType string
const (
OrderTypeAsk OrderType = "ASK"
OrderTypeBid OrderType = "BID"
OrderTypeBuy OrderType = "BUY"
OrderTypeSell OrderType = "SELL"
)
type OrderV2 struct {
// Amount of base filled
Base decimal.Decimal `json:"base"`
// Time of order completion in milliseconds
CompletedTimestamp Time `json:"completed_timestamp"`
// Amount of counter filled
Counter decimal.Decimal `json:"counter"`
// Time of order creation in milliseconds
CreationTimestamp Time `json:"creation_timestamp"`
// Time of order expiration in milliseconds
ExpirationTimestamp Time `json:"expiration_timestamp"`
// Base amount of fees to be charged
FeeBase decimal.Decimal `json:"fee_base"`
// Counter amount of fees to be charged
FeeCounter decimal.Decimal `json:"fee_counter"`
// Limit price to transact
LimitPrice decimal.Decimal `json:"limit_price"`
// Limit volume to transact
LimitVolume decimal.Decimal `json:"limit_volume"`
// The order reference
OrderId string `json:"order_id"`
// Specifies the market
Pair string `json:"pair"`
// The order intention
Side Side `json:"side"`
// The current state of the order
//
// Status meaning:<br>
// <code>AWAITING</code> The order is awaiting to enter the order book.<br>
// <code>PENDING</code> The order is in the order book. Some trades may
// have taken place but the order is not filled yet.<br>
// <code>COMPLETE</code> The order is no longer in the order book. It has
// been settled/filled or has been cancelled.
Status Status `json:"status"`
// Direction to trigger the order
StopDirection StopDirection `json:"stop_direction"`
// Price to trigger the order
StopPrice decimal.Decimal `json:"stop_price"`
// The order type
Type Type `json:"type"`
}
type Side string
const (
SideBuy Side = "BUY"
SideSell Side = "SELL"
)
type Status string
const (
StatusActive Status = "ACTIVE"
StatusAwaiting Status = "AWAITING"
StatusComplete Status = "COMPLETE"
StatusDisabled Status = "DISABLED"
StatusPending Status = "PENDING"
StatusPostonly Status = "POSTONLY"
)
type StopDirection string
const (
StopDirectionAbove StopDirection = "ABOVE"
StopDirectionBelow StopDirection = "BELOW"
StopDirectionRelative_last_trade StopDirection = "RELATIVE_LAST_TRADE"
)
type Ticker struct {
// The lowest ask price
Ask decimal.Decimal `json:"ask"`
// The highest bid price
Bid decimal.Decimal `json:"bid"`
// Last trade price
LastTrade decimal.Decimal `json:"last_trade"`
Pair string `json:"pair"`
// 24h rolling trade volume
Rolling24HourVolume decimal.Decimal `json:"rolling_24_hour_volume"`
// Market current status
//
// <code>ACTIVE</code> when the market is trading normally
//
// <code>POSTONLY</code> when the market has been suspended and only post-only orders will be accepted
//
// <code>DISABLED</code> when the market is shutdown and no orders can be accepted
Status Status `json:"status"`
// Unix timestamp in milliseconds of the tick
Timestamp Time `json:"timestamp"`
}
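// A ticker might decode from a JSON payload like this (hypothetical values):
// {"pair": "XBTZAR", "ask": "652000", "bid": "651500", "last_trade": "651800",
// "rolling_24_hour_volume": "12.5", "status": "ACTIVE", "timestamp": 1620000000000}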
type Trade struct {
Base decimal.Decimal `json:"base"`
Counter decimal.Decimal `json:"counter"`
FeeBase decimal.Decimal `json:"fee_base"`
FeeCounter decimal.Decimal `json:"fee_counter"`
IsBuy bool `json:"is_buy"`
OrderId string `json:"order_id"`
Pair string `json:"pair"`
Price decimal.Decimal `json:"price"`
Sequence int64 `json:"sequence"`
Timestamp Time `json:"timestamp"`
Type OrderType `json:"type"`
Volume decimal.Decimal `json:"volume"`
}
type TradeDetails struct {
// Pair of the market
Pair string `json:"pair"`
// Price at which the volume traded for
Price decimal.Decimal `json:"price"`
// Sequence identifies the trade within a market
Sequence int64 `json:"sequence"`
// Volume is the amount of base traded
Volume decimal.Decimal `json:"volume"`
}
type TradingStatus string
const (
TradingStatusPost_only TradingStatus = "POST_ONLY"
TradingStatusActive TradingStatus = "ACTIVE"
TradingStatusSuspended TradingStatus = "SUSPENDED"
)
type Transaction struct {
AccountId string `json:"account_id"`
Available decimal.Decimal `json:"available"`
AvailableDelta decimal.Decimal `json:"available_delta"`
Balance decimal.Decimal `json:"balance"`
// Transaction amounts computed for convenience.
BalanceDelta decimal.Decimal `json:"balance_delta"`
Currency string `json:"currency"`
// Human-readable description of the transaction.
Description string `json:"description"`
DetailFields DetailFields `json:"detail_fields"`
// Human-readable label-value attributes.
Details map[string]string `json:"details"`
// The kind of the transaction indicates the transaction flow
//
// Kinds explained:<br>
// <code>FEE</code> when transaction is towards Luno fees<br>
// <code>TRANSFER</code> when the transaction is a one way flow of funds, e.g. a deposit or crypto send<br>
// <code>EXCHANGE</code> when the transaction is part of a two way exchange, e.g. a trade or instant buy
Kind Kind `json:"kind"`
RowIndex int64 `json:"row_index"`
Timestamp Time `json:"timestamp"`
}
type Transfer struct {
// Amount that has been credited | // Specifies the market.
Pair string `json:"pair"` | random_line_split |
clipmap.rs | ();
if lod >= config.num_lods {
return VisitStatus::Continue;
}
let offset_from_center = get_offset_from_lod_center(octant, ¢ers);
if lod == 0 || offset_from_center > high_lod_boundary {
// This octant can be rendered at this level of detail.
active_rx(octant_chunk_key(chunk_log2, octant));
VisitStatus::Stop
} else {
// This octant should be rendered with more detail.
VisitStatus::Continue
}
});
}
/// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a
/// camera movement.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum LodChunkUpdate<N> {
Split(SplitChunk<N>),
Merge(MergeChunks<N>),
}
/// A 3-dimensional `LodChunkUpdate`.
pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>;
/// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SplitChunk<N> {
pub old_chunk: ChunkKey<N>,
pub new_chunks: Vec<ChunkKey<N>>,
}
/// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MergeChunks<N> {
pub old_chunks: Vec<ChunkKey<N>>,
pub new_chunk: ChunkKey<N>,
}
/// A transient object used for running the `find_chunk_updates` method on multiple octrees.
pub struct ClipMapUpdate3 {
chunk_log2: i32,
num_lods: u8,
low_lod_boundary: i32,
high_lod_boundary: i32,
old_centers: Vec<Point3i>,
new_centers: Vec<Point3i>,
}
impl ClipMapUpdate3 {
/// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to
/// `new_lod0_center`.
pub fn new(
config: &ClipMapConfig3,
old_lod0_center: ChunkUnits<Point3i>,
new_lod0_center: ChunkUnits<Point3i>,
) -> Self {
Self {
chunk_log2: config.chunk_shape.x().trailing_zeros() as i32,
num_lods: config.num_lods,
low_lod_boundary: config.clip_box_radius,
high_lod_boundary: config.clip_box_radius >> 1,
old_centers: all_lod_centers(old_lod0_center.0, config.num_lods),
new_centers: all_lod_centers(new_lod0_center.0, config.num_lods),
}
}
/// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the
/// clipmap.
pub fn find_chunk_updates(
&self,
octree: &OctreeSet,
mut update_rx: impl FnMut(LodChunkUpdate3),
) {
octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| {
let octant = node.octant();
let lod = octant.exponent();
if lod >= self.num_lods || lod == 0 {
return VisitStatus::Continue;
}
let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers);
let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers);
if old_offset_from_center > self.high_lod_boundary
&& offset_from_center <= self.high_lod_boundary
{
// Increase the detail for this octant.
// Create the higher detail in descendant octants.
let old_chunk = octant_chunk_key(self.chunk_log2, octant);
let new_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.new_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Split(SplitChunk {
old_chunk,
new_chunks,
}));
VisitStatus::Stop
} else if offset_from_center > self.high_lod_boundary
&& old_offset_from_center <= self.high_lod_boundary
{
// Decrease the detail for this octant.
// Delete the higher detail in descendant octants.
let new_chunk = octant_chunk_key(self.chunk_log2, octant);
let old_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.old_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Merge(MergeChunks {
old_chunks,
new_chunk,
}));
VisitStatus::Stop
} else if offset_from_center > self.low_lod_boundary
&& old_offset_from_center > self.low_lod_boundary
{
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
}
}
fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> {
let mut centers = vec![lod0_center; num_lods as usize];
for i in 1..num_lods as usize {
centers[i] = centers[i - 1] >> 1;
}
centers
}
fn find_merge_or_split_descendants(
chunk_log2: i32,
octree: &OctreeSet,
node: &OctreeNode,
centers: &[Point3i],
high_lod_boundary: i32,
) -> Vec<ChunkKey3> {
let mut matching_chunks = Vec::with_capacity(8);
node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| {
let lod = node.octant().exponent();
let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers);
if lod == 0 || old_offset_from_center > high_lod_boundary {
matching_chunks.push(octant_chunk_key(chunk_log2, node.octant()));
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
matching_chunks
}
fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 {
let lod = octant.exponent();
let lod_p = octant.minimum() >> lod;
let lod_center = centers[lod as usize];
(lod_p - lod_center)
// For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates
// symmetric about the center.
//
// Voxel Coordinates
//
// -3 -2 -1 0 1 2 3
// <--|---|---|---|---|---|---|-->
//
// Clipmap Coordinates
//
// -3 -2 -1 1 2 3
// <--|---|---|---|---|---|---|-->
.map_components_unary(|c| if c >= 0 { c + 1 } else | )
.abs()
.max_component()
}
fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 {
let lod = octant.exponent();
ChunkKey {
lod,
minimum: (octant.minimum() << chunk_log2) >> lod,
}
}
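// Worked example (hypothetical values): with chunk_log2 = 4, an octant with
// exponent (lod) 1 and minimum (2, 0, -2) yields a chunk key at lod 1 with
// minimum ((2, 0, -2) << 4) >> 1 = (16, 0, -16).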
// ████████╗███████╗███████╗████████╗
// ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝
// ██║ █████╗ ███████╗ ██║
// ██║ ██╔══╝ ╚════██║ ██║
// ██║ ███████╗███████║ ██║
// ╚═╝ ╚══════╝╚══════╝ ╚═╝
#[cfg(test)]
mod test {
use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet};
use super::*;
use itertools::Itertools;
use std::iter::FromIterator;
#[test]
fn active_chunks_in_lod0_and_lod1() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let lod0_center = ChunkUnits(Point3i::ZERO);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let mut octree = OctreeSet::new_empty(domain);
let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8));
octree.add_extent(&filled_extent);
let active_chunks = ActiveChunks::new(&config, &octree, lod0_center);
let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4))
.iter_points()
.map(|p| ChunkKey | { c } | conditional_block |
clipmap.rs | ();
if lod >= config.num_lods {
return VisitStatus::Continue;
}
let offset_from_center = get_offset_from_lod_center(octant, ¢ers);
if lod == 0 || offset_from_center > high_lod_boundary {
// This octant can be rendered at this level of detail.
active_rx(octant_chunk_key(chunk_log2, octant));
VisitStatus::Stop
} else {
// This octant should be rendered with more detail.
VisitStatus::Continue
}
});
}
/// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a
/// camera movement.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum LodChunkUpdate<N> {
Split(SplitChunk<N>),
Merge(MergeChunks<N>),
}
/// A 3-dimensional `LodChunkUpdate`.
pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>;
/// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SplitChunk<N> {
pub old_chunk: ChunkKey<N>,
pub new_chunks: Vec<ChunkKey<N>>,
}
/// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MergeChunks<N> {
pub old_chunks: Vec<ChunkKey<N>>,
pub new_chunk: ChunkKey<N>,
}
/// A transient object used for running the `find_chunk_updates` method on multiple octrees.
pub struct ClipMapUpdate3 {
chunk_log2: i32,
num_lods: u8,
low_lod_boundary: i32,
high_lod_boundary: i32,
old_centers: Vec<Point3i>,
new_centers: Vec<Point3i>,
}
impl ClipMapUpdate3 {
/// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to
/// `new_lod0_center`.
pub fn new(
config: &ClipMapConfig3,
old_lod0_center: ChunkUnits<Point3i>,
new_lod0_center: ChunkUnits<Point3i>,
) -> Self {
Self {
chunk_log2: config.chunk_shape.x().trailing_zeros() as i32,
num_lods: config.num_lods,
low_lod_boundary: config.clip_box_radius,
high_lod_boundary: config.clip_box_radius >> 1,
old_centers: all_lod_centers(old_lod0_center.0, config.num_lods),
new_centers: all_lod_centers(new_lod0_center.0, config.num_lods),
}
}
/// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the
/// clipmap.
pub fn | (
&self,
octree: &OctreeSet,
mut update_rx: impl FnMut(LodChunkUpdate3),
) {
octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| {
let octant = node.octant();
let lod = octant.exponent();
if lod >= self.num_lods || lod == 0 {
return VisitStatus::Continue;
}
let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers);
let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers);
if old_offset_from_center > self.high_lod_boundary
&& offset_from_center <= self.high_lod_boundary
{
// Increase the detail for this octant.
// Create the higher detail in descendant octants.
let old_chunk = octant_chunk_key(self.chunk_log2, octant);
let new_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.new_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Split(SplitChunk {
old_chunk,
new_chunks,
}));
VisitStatus::Stop
} else if offset_from_center > self.high_lod_boundary
&& old_offset_from_center <= self.high_lod_boundary
{
// Decrease the detail for this octant.
// Delete the higher detail in descendant octants.
let new_chunk = octant_chunk_key(self.chunk_log2, octant);
let old_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.old_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Merge(MergeChunks {
old_chunks,
new_chunk,
}));
VisitStatus::Stop
} else if offset_from_center > self.low_lod_boundary
&& old_offset_from_center > self.low_lod_boundary
{
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
}
}
fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> {
let mut centers = vec![lod0_center; num_lods as usize];
for i in 1..num_lods as usize {
centers[i] = centers[i - 1] >> 1;
}
centers
}
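// E.g. (hypothetical values) all_lod_centers(PointN([5, -3, 9]), 3) yields
// [(5, -3, 9), (2, -2, 4), (1, -1, 2)]: each LOD halves the previous center
// with an arithmetic right shift, flooring toward negative infinity.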
fn find_merge_or_split_descendants(
chunk_log2: i32,
octree: &OctreeSet,
node: &OctreeNode,
centers: &[Point3i],
high_lod_boundary: i32,
) -> Vec<ChunkKey3> {
let mut matching_chunks = Vec::with_capacity(8);
node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| {
let lod = node.octant().exponent();
let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers);
if lod == 0 || old_offset_from_center > high_lod_boundary {
matching_chunks.push(octant_chunk_key(chunk_log2, node.octant()));
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
matching_chunks
}
fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 {
let lod = octant.exponent();
let lod_p = octant.minimum() >> lod;
let lod_center = centers[lod as usize];
(lod_p - lod_center)
// For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates
// symmetric about the center.
//
// Voxel Coordinates
//
// -3 -2 -1 0 1 2 3
// <--|---|---|---|---|---|---|-->
//
// Clipmap Coordinates
//
// -3 -2 -1 1 2 3
// <--|---|---|---|---|---|---|-->
.map_components_unary(|c| if c >= 0 { c + 1 } else { c })
.abs()
.max_component()
}
fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 {
let lod = octant.exponent();
ChunkKey {
lod,
minimum: (octant.minimum() << chunk_log2) >> lod,
}
}
// ████████╗███████╗███████╗████████╗
// ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝
// ██║ █████╗ ███████╗ ██║
// ██║ ██╔══╝ ╚════██║ ██║
// ██║ ███████╗███████║ ██║
// ╚═╝ ╚══════╝╚══════╝ ╚═╝
#[cfg(test)]
mod test {
use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet};
use super::*;
use itertools::Itertools;
use std::iter::FromIterator;
#[test]
fn active_chunks_in_lod0_and_lod1() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let lod0_center = ChunkUnits(Point3i::ZERO);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let mut octree = OctreeSet::new_empty(domain);
let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8));
octree.add_extent(&filled_extent);
let active_chunks = ActiveChunks::new(&config, &octree, lod0_center);
let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4))
.iter_points()
.map(|p| ChunkKey {
| find_chunk_updates | identifier_name |
clipmap.rs | , &self.old_centers);
let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers);
if old_offset_from_center > self.high_lod_boundary
&& offset_from_center <= self.high_lod_boundary
{
// Increase the detail for this octant.
// Create the higher detail in descendant octants.
let old_chunk = octant_chunk_key(self.chunk_log2, octant);
let new_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.new_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Split(SplitChunk {
old_chunk,
new_chunks,
}));
VisitStatus::Stop
} else if offset_from_center > self.high_lod_boundary
&& old_offset_from_center <= self.high_lod_boundary
{
// Decrease the detail for this octant.
// Delete the higher detail in descendant octants.
let new_chunk = octant_chunk_key(self.chunk_log2, octant);
let old_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.old_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Merge(MergeChunks {
old_chunks,
new_chunk,
}));
VisitStatus::Stop
} else if offset_from_center > self.low_lod_boundary
&& old_offset_from_center > self.low_lod_boundary
{
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
}
}
fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> {
let mut centers = vec![lod0_center; num_lods as usize];
for i in 1..num_lods as usize {
centers[i] = centers[i - 1] >> 1;
}
centers
}
fn find_merge_or_split_descendants(
chunk_log2: i32,
octree: &OctreeSet,
node: &OctreeNode,
centers: &[Point3i],
high_lod_boundary: i32,
) -> Vec<ChunkKey3> {
let mut matching_chunks = Vec::with_capacity(8);
node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| {
let lod = node.octant().exponent();
let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers);
if lod == 0 || old_offset_from_center > high_lod_boundary {
matching_chunks.push(octant_chunk_key(chunk_log2, node.octant()));
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
matching_chunks
}
fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 {
let lod = octant.exponent();
let lod_p = octant.minimum() >> lod;
let lod_center = centers[lod as usize];
(lod_p - lod_center)
// For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates
// symmetric about the center.
//
// Voxel Coordinates
//
// -3 -2 -1 0 1 2 3
// <--|---|---|---|---|---|---|-->
//
// Clipmap Coordinates
//
// -3 -2 -1 1 2 3
// <--|---|---|---|---|---|---|-->
.map_components_unary(|c| if c >= 0 { c + 1 } else { c })
.abs()
.max_component()
}
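// Worked example (hypothetical values): if lod_p - lod_center = (-2, 0, 3),
// the bias maps it to (-2, 1, 4), abs() gives (2, 1, 4), and the returned
// offset is the max component, 4.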
fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 {
let lod = octant.exponent();
ChunkKey {
lod,
minimum: (octant.minimum() << chunk_log2) >> lod,
}
}
// ████████╗███████╗███████╗████████╗
// ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝
// ██║ █████╗ ███████╗ ██║
// ██║ ██╔══╝ ╚════██║ ██║
// ██║ ███████╗███████║ ██║
// ╚═╝ ╚══════╝╚══════╝ ╚═╝
#[cfg(test)]
mod test {
use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet};
use super::*;
use itertools::Itertools;
use std::iter::FromIterator;
#[test]
fn active_chunks_in_lod0_and_lod1() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let lod0_center = ChunkUnits(Point3i::ZERO);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let mut octree = OctreeSet::new_empty(domain);
let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8));
octree.add_extent(&filled_extent);
let active_chunks = ActiveChunks::new(&config, &octree, lod0_center);
let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4))
.iter_points()
.map(|p| ChunkKey {
minimum: p * CHUNK_SHAPE,
lod: 0,
});
let mut lod1_set = OctreeSet::new_empty(domain);
lod1_set.add_extent(&Extent3i::from_min_and_shape(
Point3i::fill(-2),
Point3i::fill(4),
));
lod1_set.subtract_extent(&Extent3i::from_min_and_shape(
Point3i::fill(-1),
Point3i::fill(2),
));
let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey {
minimum: p * CHUNK_SHAPE,
lod: 1,
});
let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set));
assert_eq!(active_chunks.keys, expected_keys);
}
#[test]
fn no_updates_when_center_does_not_move() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let octree = OctreeSet::new_full(domain);
let centers = [
[0, 0, 0],
[2, 0, 0],
[-2, 0, 0],
[0, 2, 0],
[0, -2, 0],
[0, 0, 2],
[0, 0, -2],
];
for p in centers.iter().cloned() {
let center = ChunkUnits(PointN(p));
ClipMapUpdate3::new(&config, center, center)
.find_chunk_updates(&octree, |_update| panic!("Fail"));
}
}
#[test]
fn updates_are_consistent_with_active_chunks() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let octree = OctreeSet::new_full(domain);
validate_update_path(
&config,
&octree,
&[
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[-1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
[0, -1, 0],
[0, 0, 0],
[0, 0, 1],
[0, 0, 0],
[0, 0, -1],
],
);
}
fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) {
let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0])));
for (p1, p2) in path.iter().cloned().tuple_windows() {
let old_lod0_center = ChunkUnits(PointN(p1));
let new_lod0_center = ChunkUnits(PointN(p2)); |
ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center)
.find_chunk_updates(octree, |update| active_chunks.apply_update(update));
| random_line_split |
|
clipmap.rs | ();
if lod >= config.num_lods {
return VisitStatus::Continue;
}
let offset_from_center = get_offset_from_lod_center(octant, ¢ers);
if lod == 0 || offset_from_center > high_lod_boundary {
// This octant can be rendered at this level of detail.
active_rx(octant_chunk_key(chunk_log2, octant));
VisitStatus::Stop
} else {
// This octant should be rendered with more detail.
VisitStatus::Continue
}
});
}
/// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a
/// camera movement.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum LodChunkUpdate<N> {
Split(SplitChunk<N>),
Merge(MergeChunks<N>),
}
/// A 3-dimensional `LodChunkUpdate`.
pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>;
/// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SplitChunk<N> {
pub old_chunk: ChunkKey<N>,
pub new_chunks: Vec<ChunkKey<N>>,
}
/// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has
/// moved.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MergeChunks<N> {
pub old_chunks: Vec<ChunkKey<N>>,
pub new_chunk: ChunkKey<N>,
}
/// A transient object used for running the `find_chunk_updates` method on multiple octrees.
pub struct ClipMapUpdate3 {
chunk_log2: i32,
num_lods: u8,
low_lod_boundary: i32,
high_lod_boundary: i32,
old_centers: Vec<Point3i>,
new_centers: Vec<Point3i>,
}
impl ClipMapUpdate3 {
/// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to
/// `new_lod0_center`.
pub fn new(
config: &ClipMapConfig3,
old_lod0_center: ChunkUnits<Point3i>,
new_lod0_center: ChunkUnits<Point3i>,
) -> Self {
Self {
chunk_log2: config.chunk_shape.x().trailing_zeros() as i32,
num_lods: config.num_lods,
low_lod_boundary: config.clip_box_radius,
high_lod_boundary: config.clip_box_radius >> 1,
old_centers: all_lod_centers(old_lod0_center.0, config.num_lods),
new_centers: all_lod_centers(new_lod0_center.0, config.num_lods),
}
}
/// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the
/// clipmap.
pub fn find_chunk_updates(
&self,
octree: &OctreeSet,
mut update_rx: impl FnMut(LodChunkUpdate3),
) {
octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| {
let octant = node.octant();
let lod = octant.exponent();
if lod >= self.num_lods || lod == 0 {
return VisitStatus::Continue;
}
let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers);
let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers);
if old_offset_from_center > self.high_lod_boundary
&& offset_from_center <= self.high_lod_boundary
{
// Increase the detail for this octant.
// Create the higher detail in descendant octants.
let old_chunk = octant_chunk_key(self.chunk_log2, octant);
let new_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.new_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Split(SplitChunk {
old_chunk,
new_chunks,
}));
VisitStatus::Stop
} else if offset_from_center > self.high_lod_boundary
&& old_offset_from_center <= self.high_lod_boundary
{
// Decrease the detail for this octant.
// Delete the higher detail in descendant octants.
let new_chunk = octant_chunk_key(self.chunk_log2, octant);
let old_chunks = find_merge_or_split_descendants(
self.chunk_log2,
octree,
node,
&self.old_centers,
self.high_lod_boundary,
);
update_rx(LodChunkUpdate::Merge(MergeChunks {
old_chunks,
new_chunk,
}));
VisitStatus::Stop
} else if offset_from_center > self.low_lod_boundary
&& old_offset_from_center > self.low_lod_boundary
{
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
}
}
fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> {
let mut centers = vec![lod0_center; num_lods as usize];
for i in 1..num_lods as usize {
centers[i] = centers[i - 1] >> 1;
}
centers
}
fn find_merge_or_split_descendants(
chunk_log2: i32,
octree: &OctreeSet,
node: &OctreeNode,
centers: &[Point3i],
high_lod_boundary: i32,
) -> Vec<ChunkKey3> {
let mut matching_chunks = Vec::with_capacity(8);
node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| {
let lod = node.octant().exponent();
let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers);
if lod == 0 || old_offset_from_center > high_lod_boundary {
matching_chunks.push(octant_chunk_key(chunk_log2, node.octant()));
VisitStatus::Stop
} else {
VisitStatus::Continue
}
});
matching_chunks
}
fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 | .max_component()
}
fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 {
let lod = octant.exponent();
ChunkKey {
lod,
minimum: (octant.minimum() << chunk_log2) >> lod,
}
}
// ████████╗███████╗███████╗████████╗
// ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝
// ██║ █████╗ ███████╗ ██║
// ██║ ██╔══╝ ╚════██║ ██║
// ██║ ███████╗███████║ ██║
// ╚═╝ ╚══════╝╚══════╝ ╚═╝
#[cfg(test)]
mod test {
use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet};
use super::*;
use itertools::Itertools;
use std::iter::FromIterator;
#[test]
fn active_chunks_in_lod0_and_lod1() {
let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE);
let lod0_center = ChunkUnits(Point3i::ZERO);
let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32));
let mut octree = OctreeSet::new_empty(domain);
let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8));
octree.add_extent(&filled_extent);
let active_chunks = ActiveChunks::new(&config, &octree, lod0_center);
let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4))
.iter_points()
.map(|p| ChunkKey {
| {
let lod = octant.exponent();
let lod_p = octant.minimum() >> lod;
let lod_center = centers[lod as usize];
(lod_p - lod_center)
// For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates
// symmetric about the center.
//
// Voxel Coordinates
//
// -3 -2 -1 0 1 2 3
// <--|---|---|---|---|---|---|-->
//
// Clipmap Coordinates
//
// -3 -2 -1 1 2 3
// <--|---|---|---|---|---|---|-->
.map_components_unary(|c| if c >= 0 { c + 1 } else { c })
.abs() | identifier_body |
message.go | session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err)
return nil
}
if id == "" {
return nil
}
if time.Since(mc.recipientID[id]) > models.UserActivePeriod {
if err := models.PingUserActiveAt(ctx, id); err != nil {
session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err)
}
mc.recipientID[id] = time.Now()
}
return nil
}
type TransferMemoInst struct {
Action string `json:"a"`
Param1 string `json:"p1"`
Param2 string `json:"p2"`
}
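// The instruction is expected to arrive as JSON in the transfer memo,
// e.g. (hypothetical values): {"a":"rewards","p1":"<recipient user id>","p2":"<opaque extra parameter>"}
// Param2 is passed through to models.CreateRewardsMessage unchanged.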
func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error {
go distribute(ctx)
go loopPendingMessage(ctx)
go handlePendingParticipants(ctx)
go handleExpiredPackets(ctx)
go schedulePluginCronJob(ctx)
user, err := mixin.NewUser(
config.AppConfig.Mixin.ClientId,
config.AppConfig.Mixin.SessionId,
config.AppConfig.Mixin.SessionKey,
)
if err != nil {
panic(err)
}
mc := &MessageContext{
user: user,
bc: broadcastChan,
recipientID: map[string]time.Time{},
}
for {
b := mixin.NewBlazeClient(user)
if err := b.Loop(ctx, mc); err != nil {
session.Logger(ctx).Error(err)
}
session.Logger(ctx).Info("connection loop end")
time.Sleep(300 * time.Millisecond)
}
}
func | (ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error {
id, err := bot.UuidFromString(transfer.TraceId)
if err != nil {
return nil
}
user, err := models.FindUser(ctx, userId)
if user == nil || err != nil {
log.Println("No such a user", userId)
return err
}
if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" {
if inst.Action == "rewards" {
return handleRewardsPayment(ctx, mc, transfer, user, inst)
} else {
log.Println("Unknown instruction", inst)
}
} else {
log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err)
if user.TraceId == transfer.TraceId {
log.Println("New legacy payment", userId, transfer.TraceId)
if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId {
return user.Payment(ctx)
}
for _, asset := range config.AppConfig.System.AccpetPaymentAssetList {
if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId {
return user.Payment(ctx)
}
}
} else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil {
log.Println("New order received", userId, transfer.TraceId)
return handleOrderPayment(ctx, mc, transfer, order)
} else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil {
log.Println("New packet paid", userId, transfer.TraceId, id)
return err
} else if packet.State == models.PacketStatePaid {
log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId)
return sendAppCard(ctx, mc, packet)
}
}
return nil
}
func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) {
var data *TransferMemoInst
err := json.Unmarshal([]byte(transfer.Memo), &data)
return data, err
}
func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error {
userId := inst.Param1
targetUser, err := models.FindUser(ctx, userId)
if err != nil {
log.Println("can't find user to reward", userId, err)
return nil
}
memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10)
log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId)
traceID := transfer.SnapshotId
in := &bot.TransferInput{
AssetId: transfer.AssetId,
RecipientId: targetUser.UserId,
Amount: number.FromString(transfer.Amount),
TraceId: traceID,
Memo: memo,
}
if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil {
log.Println("can't transfer to recipient", err)
return err
}
if user.UserId != targetUser.UserId {
if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil {
log.Println("can't record tip", err)
// return err
}
if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil {
log.Println("can't create rewards message", err)
// return err
}
}
return nil
}
func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error {
if order.PayMethod == models.PayMethodMixin &&
number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) &&
order.AssetId == transfer.AssetId {
_, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId)
if err != nil {
log.Println(err)
return err
}
}
return nil
}
func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error {
description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName)
if strings.TrimSpace(packet.User.FullName) == "" {
description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc
}
if count := utf8.RuneCountInString(description); count > 100 {
name := string([]rune(packet.User.FullName)[:16])
description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name)
}
host := config.AppConfig.Service.HTTPResourceHost
if config.AppConfig.System.RouterMode == config.RouterModeHash {
host = host + config.RouterModeHashSymbol
}
card, err := json.Marshal(map[string]string{
"icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256",
"title": config.AppConfig.MessageTemplate.GroupRedPacket,
"description": description,
"action": host + "/packets/" + packet.PacketId,
})
if err != nil {
return session.BlazeServerError(ctx, err)
}
t := time.Now()
u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()}
_, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t)
if err != nil {
return session.BlazeServerError(ctx, err)
}
return nil
}
func handleExpiredPackets(ctx context.Context) {
var limit = 100
for {
packetIds, err := models.ListExpiredPackets(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, id := range packetIds {
packet, err := models.SendPacketRefundTransfer(ctx, id)
if err != nil {
session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err)
break
}
if packet != nil {
session.Logger(ctx).Infof("REFUND %v\n", id)
}
}
if len(packetIds) < limit {
time.Sleep(300 * time.Millisecond)
continue
}
}
}
func schedulePluginCronJob(ctx context.Context) {
plugin.RunCron()
}
func handlePendingParticipants(ctx context.Context) {
var limit = 100
for {
participants, err := models.ListPendingParticipants(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, p := range participants {
err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount)
if err != nil {
session.Logger(ctx).Error(err)
break
| handleTransfer | identifier_name |
message.go | session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err)
return nil
}
if id == "" {
return nil
}
if time.Since(mc.recipientID[id]) > models.UserActivePeriod {
if err := models.PingUserActiveAt(ctx, id); err != nil {
session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err)
}
mc.recipientID[id] = time.Now()
}
return nil
}
type TransferMemoInst struct {
Action string `json:"a"`
Param1 string `json:"p1"`
Param2 string `json:"p2"`
}
func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error {
go distribute(ctx)
go loopPendingMessage(ctx)
go handlePendingParticipants(ctx)
go handleExpiredPackets(ctx)
go schedulePluginCronJob(ctx)
user, err := mixin.NewUser(
config.AppConfig.Mixin.ClientId,
config.AppConfig.Mixin.SessionId,
config.AppConfig.Mixin.SessionKey,
)
if err != nil {
panic(err)
}
mc := &MessageContext{
user: user,
bc: broadcastChan,
recipientID: map[string]time.Time{},
}
for {
b := mixin.NewBlazeClient(user)
if err := b.Loop(ctx, mc); err != nil {
session.Logger(ctx).Error(err)
}
session.Logger(ctx).Info("connection loop end")
time.Sleep(300 * time.Millisecond)
}
}
func handleTransfer(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error {
id, err := bot.UuidFromString(transfer.TraceId)
if err != nil {
return nil
}
user, err := models.FindUser(ctx, userId)
if user == nil || err != nil {
log.Println("No such a user", userId)
return err
}
if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" {
if inst.Action == "rewards" {
return handleRewardsPayment(ctx, mc, transfer, user, inst)
} else {
log.Println("Unknown instruction", inst)
}
} else {
log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err)
if user.TraceId == transfer.TraceId {
log.Println("New legacy payment", userId, transfer.TraceId)
if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId {
return user.Payment(ctx)
}
for _, asset := range config.AppConfig.System.AccpetPaymentAssetList {
if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId {
return user.Payment(ctx)
}
}
} else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil {
log.Println("New order received", userId, transfer.TraceId)
return handleOrderPayment(ctx, mc, transfer, order)
} else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil {
log.Println("New packet paid", userId, transfer.TraceId, id)
return err
} else if packet.State == models.PacketStatePaid {
log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId)
return sendAppCard(ctx, mc, packet)
}
}
return nil
}
func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) {
var data *TransferMemoInst
err := json.Unmarshal([]byte(transfer.Memo), &data)
return data, err
}
func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error {
userId := inst.Param1
targetUser, err := models.FindUser(ctx, userId)
if err != nil {
log.Println("can't find user to reward", userId, err)
return nil
}
memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10)
log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId)
traceID := transfer.SnapshotId
in := &bot.TransferInput{
AssetId: transfer.AssetId,
RecipientId: targetUser.UserId,
Amount: number.FromString(transfer.Amount),
TraceId: traceID,
Memo: memo,
}
if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil {
log.Println("can't transfer to recipient", err)
return err
}
if user.UserId != targetUser.UserId {
if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil {
log.Println("can't record tip", err)
// return err | }
if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil {
log.Println("can't create rewards message", err)
// return err
}
}
return nil
}
func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error {
if order.PayMethod == models.PayMethodMixin &&
number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) &&
order.AssetId == transfer.AssetId {
_, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId)
if err != nil {
log.Println(err)
return err
}
}
return nil
}
func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error {
description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName)
if strings.TrimSpace(packet.User.FullName) == "" {
description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc
}
if count := utf8.RuneCountInString(description); count > 100 {
name := string([]rune(packet.User.FullName)[:16])
description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name)
}
host := config.AppConfig.Service.HTTPResourceHost
if config.AppConfig.System.RouterMode == config.RouterModeHash {
host = host + config.RouterModeHashSymbol
}
card, err := json.Marshal(map[string]string{
"icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256",
"title": config.AppConfig.MessageTemplate.GroupRedPacket,
"description": description,
"action": host + "/packets/" + packet.PacketId,
})
if err != nil {
return session.BlazeServerError(ctx, err)
}
t := time.Now()
u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()}
_, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t)
if err != nil {
return session.BlazeServerError(ctx, err)
}
return nil
}
func handleExpiredPackets(ctx context.Context) {
var limit = 100
for {
packetIds, err := models.ListExpiredPackets(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, id := range packetIds {
packet, err := models.SendPacketRefundTransfer(ctx, id)
if err != nil {
session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err)
break
}
if packet != nil {
session.Logger(ctx).Infof("REFUND %v\n", id)
}
}
if len(packetIds) < limit {
time.Sleep(300 * time.Millisecond)
continue
}
}
}
func schedulePluginCronJob(ctx context.Context) {
plugin.RunCron()
}
func handlePendingParticipants(ctx context.Context) {
var limit = 100
for {
participants, err := models.ListPendingParticipants(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, p := range participants {
err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount)
if err != nil {
session.Logger(ctx).Error(err)
break
}
| random_line_split |
|
message.go | session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err)
return nil
}
if id == "" {
return nil
}
if time.Since(mc.recipientID[id]) > models.UserActivePeriod {
if err := models.PingUserActiveAt(ctx, id); err != nil {
session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err)
}
mc.recipientID[id] = time.Now()
}
return nil
}
type TransferMemoInst struct {
Action string `json:"a"`
Param1 string `json:"p1"`
Param2 string `json:"p2"`
}
func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error {
go distribute(ctx)
go loopPendingMessage(ctx)
go handlePendingParticipants(ctx)
go handleExpiredPackets(ctx)
go schedulePluginCronJob(ctx)
user, err := mixin.NewUser(
config.AppConfig.Mixin.ClientId,
config.AppConfig.Mixin.SessionId,
config.AppConfig.Mixin.SessionKey,
)
if err != nil {
panic(err)
}
mc := &MessageContext{
user: user,
bc: broadcastChan,
recipientID: map[string]time.Time{},
}
for {
b := mixin.NewBlazeClient(user)
if err := b.Loop(ctx, mc); err != nil {
session.Logger(ctx).Error(err)
}
session.Logger(ctx).Info("connection loop end")
time.Sleep(300 * time.Millisecond)
}
}
func handleTransfer(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error {
id, err := bot.UuidFromString(transfer.TraceId)
if err != nil {
return nil
}
user, err := models.FindUser(ctx, userId)
if user == nil || err != nil {
log.Println("No such a user", userId)
return err
}
if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" {
if inst.Action == "rewards" {
return handleRewardsPayment(ctx, mc, transfer, user, inst)
} else {
log.Println("Unknown instruction", inst)
}
} else {
log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err)
if user.TraceId == transfer.TraceId {
log.Println("New legacy payment", userId, transfer.TraceId)
if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId {
return user.Payment(ctx)
}
for _, asset := range config.AppConfig.System.AccpetPaymentAssetList {
if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId {
return user.Payment(ctx)
}
}
} else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil {
log.Println("New order received", userId, transfer.TraceId)
return handleOrderPayment(ctx, mc, transfer, order)
} else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil {
log.Println("New packet paid", userId, transfer.TraceId, id)
return err
} else if packet.State == models.PacketStatePaid {
log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId)
return sendAppCard(ctx, mc, packet)
}
}
return nil
}
func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) {
var data *TransferMemoInst
err := json.Unmarshal([]byte(transfer.Memo), &data)
return data, err
}
func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error | }
if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil {
log.Println("can't transfer to recipient", err)
return err
}
if user.UserId != targetUser.UserId {
if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil {
log.Println("can't record tip", err)
// return err
}
if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil {
log.Println("can't create rewards message", err)
// return err
}
}
return nil
}
func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error {
if order.PayMethod == models.PayMethodMixin &&
number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) &&
order.AssetId == transfer.AssetId {
_, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId)
if err != nil {
log.Println(err)
return err
}
}
return nil
}
func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error {
description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName)
if strings.TrimSpace(packet.User.FullName) == "" {
description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc
}
if count := utf8.RuneCountInString(description); count > 100 {
name := string([]rune(packet.User.FullName)[:16])
description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name)
}
host := config.AppConfig.Service.HTTPResourceHost
if config.AppConfig.System.RouterMode == config.RouterModeHash {
host = host + config.RouterModeHashSymbol
}
card, err := json.Marshal(map[string]string{
"icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256",
"title": config.AppConfig.MessageTemplate.GroupRedPacket,
"description": description,
"action": host + "/packets/" + packet.PacketId,
})
if err != nil {
return session.BlazeServerError(ctx, err)
}
t := time.Now()
u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()}
_, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t)
if err != nil {
return session.BlazeServerError(ctx, err)
}
return nil
}
func handleExpiredPackets(ctx context.Context) {
var limit = 100
for {
packetIds, err := models.ListExpiredPackets(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, id := range packetIds {
packet, err := models.SendPacketRefundTransfer(ctx, id)
if err != nil {
session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err)
break
}
if packet != nil {
session.Logger(ctx).Infof("REFUND %v\n", id)
}
}
if len(packetIds) < limit {
time.Sleep(300 * time.Millisecond)
continue
}
}
}
func schedulePluginCronJob(ctx context.Context) {
plugin.RunCron()
}
func handlePendingParticipants(ctx context.Context) {
var limit = 100
for {
participants, err := models.ListPendingParticipants(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, p := range participants {
err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount)
if err != nil {
session.Logger(ctx).Error(err)
break
| {
userId := inst.Param1
targetUser, err := models.FindUser(ctx, userId)
if err != nil {
log.Println("can't find user to reward", userId, err)
return nil
}
memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10)
log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId)
var traceID string
traceID = transfer.SnapshotId
if err != nil {
return errors.New("generate trace id failed")
}
in := &bot.TransferInput{
AssetId: transfer.AssetId,
RecipientId: targetUser.UserId,
Amount: number.FromString(transfer.Amount),
TraceId: traceID,
Memo: memo, | identifier_body |
message.go | nil
}
user, err := models.FindUser(ctx, userId)
if user == nil || err != nil {
log.Println("No such a user", userId)
return err
}
if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" {
if inst.Action == "rewards" {
return handleRewardsPayment(ctx, mc, transfer, user, inst)
} else {
log.Println("Unknown instruction", inst)
}
} else {
log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err)
if user.TraceId == transfer.TraceId {
log.Println("New legacy payment", userId, transfer.TraceId)
if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId {
return user.Payment(ctx)
}
for _, asset := range config.AppConfig.System.AccpetPaymentAssetList {
if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId {
return user.Payment(ctx)
}
}
} else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil {
log.Println("New order received", userId, transfer.TraceId)
return handleOrderPayment(ctx, mc, transfer, order)
} else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil {
log.Println("New packet paid", userId, transfer.TraceId, id)
return err
} else if packet.State == models.PacketStatePaid {
log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId)
return sendAppCard(ctx, mc, packet)
}
}
return nil
}
func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) {
var data *TransferMemoInst
err := json.Unmarshal([]byte(transfer.Memo), &data)
return data, err
}
func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error {
userId := inst.Param1
targetUser, err := models.FindUser(ctx, userId)
if err != nil {
log.Println("can't find user to reward", userId, err)
return nil
}
memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10)
log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId)
var traceID string
traceID = transfer.SnapshotId
if err != nil {
return errors.New("generate trace id failed")
}
in := &bot.TransferInput{
AssetId: transfer.AssetId,
RecipientId: targetUser.UserId,
Amount: number.FromString(transfer.Amount),
TraceId: traceID,
Memo: memo,
}
if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil {
log.Println("can't transfer to recipient", err)
return err
}
if user.UserId != targetUser.UserId {
if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil {
log.Println("can't record tip", err)
// return err
}
if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil {
log.Println("can't create rewards message", err)
// return err
}
}
return nil
}
func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error {
if order.PayMethod == models.PayMethodMixin &&
number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) &&
order.AssetId == transfer.AssetId {
_, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId)
if err != nil {
log.Println(err)
return err
}
}
return nil
}
func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error {
description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName)
if strings.TrimSpace(packet.User.FullName) == "" {
description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc
}
if count := utf8.RuneCountInString(description); count > 100 {
name := string([]rune(packet.User.FullName)[:16])
description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name)
}
host := config.AppConfig.Service.HTTPResourceHost
if config.AppConfig.System.RouterMode == config.RouterModeHash {
host = host + config.RouterModeHashSymbol
}
card, err := json.Marshal(map[string]string{
"icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256",
"title": config.AppConfig.MessageTemplate.GroupRedPacket,
"description": description,
"action": host + "/packets/" + packet.PacketId,
})
if err != nil {
return session.BlazeServerError(ctx, err)
}
t := time.Now()
u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()}
_, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t)
if err != nil {
return session.BlazeServerError(ctx, err)
}
return nil
}
func handleExpiredPackets(ctx context.Context) {
var limit = 100
for {
packetIds, err := models.ListExpiredPackets(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, id := range packetIds {
packet, err := models.SendPacketRefundTransfer(ctx, id)
if err != nil {
session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err)
break
}
if packet != nil {
session.Logger(ctx).Infof("REFUND %v\n", id)
}
}
if len(packetIds) < limit {
time.Sleep(300 * time.Millisecond)
continue
}
}
}
func schedulePluginCronJob(ctx context.Context) {
plugin.RunCron()
}
func handlePendingParticipants(ctx context.Context) {
var limit = 100
for {
participants, err := models.ListPendingParticipants(ctx, limit)
if err != nil {
session.Logger(ctx).Error(err)
time.Sleep(300 * time.Millisecond)
continue
}
for _, p := range participants {
err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount)
if err != nil {
session.Logger(ctx).Error(err)
break
}
}
if len(participants) < limit {
time.Sleep(300 * time.Millisecond)
continue
}
}
}
func handleMessage(ctx context.Context, mc *MessageContext, message *mixin.MessageView, broadcastChan chan WsBroadcastMessage) error {
user, err := models.FindUser(ctx, message.UserID)
if err != nil {
return err
}
if user == nil || user.State != models.PaymentStatePaid {
return sendHelpMessage(ctx, user, mc, message)
}
if time.Since(user.ActiveAt) > models.UserActivePeriod {
err = models.PingUserActiveAt(ctx, user.UserId)
if err != nil {
session.Logger(ctx).Error("handleMessage PingUserActiveAt", err)
}
}
if user.SubscribedAt.IsZero() {
return sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsUnsubscribe)
}
dataBytes, err := base64.StdEncoding.DecodeString(message.Data)
if err != nil {
return session.BadDataError(ctx)
} else if len(dataBytes) < 10 {
if strings.ToUpper(string(dataBytes)) == config.AppConfig.MessageTemplate.MessageCommandsInfo {
if count, err := models.SubscribersCount(ctx); err != nil {
return err
} else {
return sendTextMessage(ctx, mc, message.ConversationID, fmt.Sprintf(config.AppConfig.MessageTemplate.MessageCommandsInfoResp, count))
}
}
}
// broadcast
if isBroadcastOn, err := models.ReadBroadcastProperty(ctx); err == nil && isBroadcastOn == "on" {
go func() {
if bmsg, err := decodeMessage(ctx, user, message); err == nil | {
broadcastChan <- bmsg
} | conditional_block |
|
main.go | }
}
func (e *Editor) debugRowRunes() {
if e.debug {
i := 0
for i < e.n {
_, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes())
i += 1
}
}
}
// Terminal
func makeRaw(fd int) *unix.Termios {
termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA)
if err != nil {
panic(err)
}
termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil {
panic(err)
}
return termios
}
func (e *Editor) restoreTerminal(fd int) {
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil {
panic(err)
}
}
func getWindowSize(fd int) (int, int) {
ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
panic(err)
}
return int(ws.Col), int(ws.Row)
}
func (e *Editor) initTerminal() {
e.flush()
e.writeHelpMenu(helpMessage)
e.writeStatusBar()
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) writeHelpMenu(message string) {
prevRow, prevCol := e.crow, e.ccol
for i, ch := range message {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte(string(ch)))
}
for i := len(message); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte{' '})
}
e.moveCursor(prevRow, prevCol)
}
func (e *Editor) writeStatusBar() {
e.setBgColor(BgCyan)
defer e.setBgColor(BgBlack)
// Write file name
for i, ch := range e.filePath {
e.moveCursor(e.terminal.height, i)
e.write([]byte(string(ch)))
}
// Write Spacer
for i := len(e.filePath); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height, i)
e.write([]byte{' '})
}
}
// Views
func (e *Editor) write(b []byte) {
syscall.Write(0, b)
}
func (e *Editor) writeWithColor(b []byte, colors []color) {
var newBuf []byte
for i, c := range colors {
s := fmt.Sprintf("\033[%dm", c)
newBuf = append(newBuf, []byte(s)...)
newBuf = append(newBuf, b[i])
}
syscall.Write(0, newBuf)
}
func (e *Editor) highlight(b []byte) []color {
colors := make([]color, len(b))
for i := range colors {
colors[i] = DummyColor
}
// ASCII-only
ascii := string(b)
// Keywords
for key := range keywordColor {
index := strings.Index(ascii, string(key))
if index != -1 {
for i := 0; i < len(string(key)); i += 1 {
colors[index+i] = keywordColor[key]
}
}
}
// String Literal
isStringLit := false
for i, b := range ascii {
if b == '"' || isStringLit {
if b == '"' {
isStringLit = !isStringLit
}
colors[i] = FgGreen
}
}
return colors
}
func (e *Editor) writeRow(r *Row) {
var buf []byte
for _, r := range r.chars.Runes() {
buf = append(buf, []byte(string(r))...)
}
e.moveCursor(e.crow, 0)
e.flushRow()
// If the extension of fileName is .go, write with highlights.
if filepath.Ext(e.filePath) == ".go" {
colors := e.highlight(buf)
e.writeWithColor(buf, colors)
} else {
e.write(buf)
}
}
func (e *Editor) flush() {
e.write([]byte("\033[2J"))
}
func (e *Editor) flushRow() {
e.write([]byte("\033[2K"))
}
func (e *Editor) setBgColor(color color) {
s := fmt.Sprintf("\033[%dm", color)
e.write([]byte(s))
}
func (e *Editor) moveCursor(row, col int) {
s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin
e.write([]byte(s))
}
func (e *Editor) updateRowRunes(row *Row) {
if e.crow < e.terminal.height {
e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes())
e.writeRow(row)
}
}
func (e *Editor) refreshAllRows() {
for i := 0; i < e.terminal.height; i += 1 {
e.crow = i
e.writeRow(e.rows[e.scroolrow+i])
}
}
func (e *Editor) setRowPos(row int) {
if row >= e.n {
row = e.n - 1
}
if row < 0 {
if e.scroolrow > 0 {
e.scroolrow -= 1
e.refreshAllRows()
}
row = 0
}
if row >= e.terminal.height {
if row+e.scroolrow <= e.n {
e.scroolrow += 1
}
row = e.terminal.height - 1
e.refreshAllRows()
}
e.crow = row
e.moveCursor(row, e.ccol)
}
func (e *Editor) setColPos(col int) {
if col < 0 {
col = 0
}
if col >= e.currentRow().visibleLen() {
col = e.currentRow().visibleLen()
}
if col >= e.terminal.width {
col = e.terminal.width - 1 | }
func (e *Editor) setRowCol(row int, col int) {
if row > e.n && col > e.currentRow().visibleLen() {
return
}
e.setRowPos(row)
e.setColPos(col)
}
// Models
func (r *Row) deleteAt(col int) {
if col >= r.len() {
return
}
r.chars.DeleteAt(col)
}
func (r *Row) insertAt(colPos int, newRune rune) {
if colPos > r.len() {
colPos = r.len()
}
r.chars.InsertAt(colPos, newRune)
}
func (r *Row) len() int { return r.chars.Len() }
func (r *Row) visibleLen() int { return r.chars.VisibleLen() }
func (e *Editor) currentRow() *Row {
return e.rows[e.crow + e.scroolrow]
}
func (e *Editor) deleteRune(row *Row, col int) {
row.deleteAt(col)
e.updateRowRunes(row)
e.setRowCol(e.crow, e.ccol - 1)
}
func (e *Editor) insertRune(row *Row, col int, newRune rune) {
row.insertAt(col, newRune)
e.updateRowRunes(row)
}
func (e *Editor) deleteRow(row int) {
e.rows = append(e.rows[:row], e.rows[row+1:]...)
e.n -= 1
prevRowPos := e.crow
e.refreshAllRows()
e.crow = prevRowPos
}
func (e *Editor) replaceRune(row int, newRune []rune) {
gt := NewGapTable(128)
for _, r := range newRune {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
e.rows[row] = r
prevRowPos := e.crow
e.crow = row - e.scroolrow
e.updateRowRunes(r)
e.crow = prevRowPos
}
func (e *Editor) insertRow(row int, runes []rune) {
gt := NewGapTable(128)
for _, r := range runes {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
// https://github.com/golang/go/wiki/SliceTricks
e.rows = append(e | }
e.ccol = col
e.moveCursor(e.crow, e.ccol) | random_line_split |
main.go | .write(buf)
}
}
func (e *Editor) flush() {
e.write([]byte("\033[2J"))
}
func (e *Editor) flushRow() {
e.write([]byte("\033[2K"))
}
func (e *Editor) setBgColor(color color) {
s := fmt.Sprintf("\033[%dm", color)
e.write([]byte(s))
}
func (e *Editor) moveCursor(row, col int) {
s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin
e.write([]byte(s))
}
func (e *Editor) updateRowRunes(row *Row) {
if e.crow < e.terminal.height {
e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes())
e.writeRow(row)
}
}
func (e *Editor) refreshAllRows() {
for i := 0; i < e.terminal.height; i += 1 {
e.crow = i
e.writeRow(e.rows[e.scroolrow+i])
}
}
func (e *Editor) setRowPos(row int) {
if row >= e.n {
row = e.n - 1
}
if row < 0 {
if e.scroolrow > 0 {
e.scroolrow -= 1
e.refreshAllRows()
}
row = 0
}
if row >= e.terminal.height {
if row+e.scroolrow <= e.n {
e.scroolrow += 1
}
row = e.terminal.height - 1
e.refreshAllRows()
}
e.crow = row
e.moveCursor(row, e.ccol)
}
func (e *Editor) setColPos(col int) {
if col < 0 {
col = 0
}
if col >= e.currentRow().visibleLen() {
col = e.currentRow().visibleLen()
}
if col >= e.terminal.width {
col = e.terminal.width - 1
}
e.ccol = col
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) setRowCol(row int, col int) {
if row > e.n && col > e.currentRow().visibleLen() {
return
}
e.setRowPos(row)
e.setColPos(col)
}
// Models
func (r *Row) deleteAt(col int) {
if col >= r.len() {
return
}
r.chars.DeleteAt(col)
}
func (r *Row) insertAt(colPos int, newRune rune) {
if colPos > r.len() {
colPos = r.len()
}
r.chars.InsertAt(colPos, newRune)
}
func (r *Row) len() int { return r.chars.Len() }
func (r *Row) visibleLen() int { return r.chars.VisibleLen() }
func (e *Editor) currentRow() *Row {
return e.rows[e.crow + e.scroolrow]
}
func (e *Editor) deleteRune(row *Row, col int) {
row.deleteAt(col)
e.updateRowRunes(row)
e.setRowCol(e.crow, e.ccol - 1)
}
func (e *Editor) insertRune(row *Row, col int, newRune rune) {
row.insertAt(col, newRune)
e.updateRowRunes(row)
}
func (e *Editor) deleteRow(row int) {
e.rows = append(e.rows[:row], e.rows[row+1:]...)
e.n -= 1
prevRowPos := e.crow
e.refreshAllRows()
e.crow = prevRowPos
}
func (e *Editor) replaceRune(row int, newRune []rune) {
gt := NewGapTable(128)
for _, r := range newRune {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
e.rows[row] = r
prevRowPos := e.crow
e.crow = row - e.scroolrow
e.updateRowRunes(r)
e.crow = prevRowPos
}
func (e *Editor) insertRow(row int, runes []rune) {
gt := NewGapTable(128)
for _, r := range runes {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
// https://github.com/golang/go/wiki/SliceTricks
e.rows = append(e.rows[:row], append([]*Row{ r }, e.rows[row:]...)...)
e.n += 1
e.reallocBufferIfNeeded()
prevRowPos := e.crow
e.refreshAllRows()
e.crow = prevRowPos
}
func (e *Editor) reallocBufferIfNeeded() {
if e.n == len(e.rows) {
newCap := cap(e.rows) * 2
newRows := make([]*Row, newCap)
copy(newRows, e.rows)
e.rows = newRows
e.debugPrint("DEBUG: realloc occurred")
}
}
func (e *Editor) numberOfRunesInRow() int { return e.currentRow().chars.Len() }
func (e *Editor) backspace() {
row := e.currentRow()
if e.ccol == 0 {
if e.crow + e.scroolrow > 0 {
prevRowPos := e.crow + e.scroolrow - 1
prevRow := e.rows[prevRowPos]
// Update the previous row.
newRunes := append([]rune{}, prevRow.chars.Runes()[:prevRow.len()-1]...)
newRunes = append(newRunes, row.chars.Runes()...)
e.replaceRune(prevRowPos, newRunes)
// Delete the current row
currentRowPos := e.crow + e.scroolrow
e.deleteRow(currentRowPos)
e.setRowCol(e.crow - 1, prevRow.len() - 1)
}
} else {
e.deleteRune(row, e.ccol - 1)
}
e.debugRowRunes()
}
func (e *Editor) back() {
if e.ccol == 0 {
if e.crow > 0 {
e.setRowCol(e.crow-1, e.rows[e.crow+e.scroolrow-1].visibleLen())
}
} else {
e.setRowCol(e.crow, e.ccol-1)
}
}
func (e *Editor) next() {
if e.ccol >= e.currentRow().visibleLen() {
if e.crow+1 < e.n {
e.setRowCol(e.crow+1, 0)
}
} else {
e.setRowCol(e.crow, e.ccol+1)
}
}
func (e *Editor) newLine() {
// Insert the new row.
currentLineRowPos := e.crow + e.scroolrow
currentLineRow := e.rows[currentLineRowPos]
newLineRowPos := e.crow + e.scroolrow + 1
nextRowRunes := append([]rune{}, currentLineRow.chars.Runes()[e.ccol:]...)
e.insertRow(newLineRowPos, nextRowRunes)
// Update the current row.
currentRowNewRunes := append([]rune{}, currentLineRow.chars.Runes()[:e.ccol]...)
currentRowNewRunes = append(currentRowNewRunes, '\n')
e.replaceRune(e.crow + e.scroolrow, currentRowNewRunes)
e.setRowCol(e.crow + 1, 0)
e.debugRowRunes()
}
func existsFile(filename string) bool {
_, err := os.Stat(filename)
return err == nil
}
func saveFile(filePath string, rows []*Row) {
sb := strings.Builder{}
for _, r := range rows {
if r.len() >= 1 {
for _, ch := range r.chars.Runes() {
sb.WriteRune(ch)
}
}
}
_ = ioutil.WriteFile(filePath, []byte(sb.String()), 0644)
}
func loadFile(filePath string) *Editor {
e := &Editor{
crow: 0,
ccol: 0,
scroolrow: 0,
filePath: filePath,
keyChan: make(chan rune),
timeChan: make(chan messageType),
n: 1,
}
rows := makeRows()
bytes, err := ioutil.ReadFile(filePath)
if err != nil {
panic(err)
}
gt := NewGapTable(128)
for _, b := range bytes | {
// Treat TAB as 4 spaces.
if b == Tab {
gt.AppendRune(rune(0x20))
gt.AppendRune(rune(0x20))
gt.AppendRune(rune(0x20))
gt.AppendRune(rune(0x20))
continue
}
// ASCII-only
gt.AppendRune(rune(b))
if b == '\n' {
rows[e.n-1] = &Row{chars: gt}
e.n += 1
gt = NewGapTable(128)
}
} | conditional_block |
|
main.go | }
}
func (e *Editor) debugRowRunes() {
if e.debug {
i := 0
for i < e.n {
_, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes())
i += 1
}
}
}
// Terminal
func makeRaw(fd int) *unix.Termios {
termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA)
if err != nil {
panic(err)
}
termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil {
panic(err)
}
return termios
}
func (e *Editor) restoreTerminal(fd int) {
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil {
panic(err)
}
}
func getWindowSize(fd int) (int, int) {
ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
panic(err)
}
return int(ws.Col), int(ws.Row)
}
func (e *Editor) initTerminal() {
e.flush()
e.writeHelpMenu(helpMessage)
e.writeStatusBar()
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) writeHelpMenu(message string) {
prevRow, prevCol := e.crow, e.ccol
for i, ch := range message {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte(string(ch)))
}
for i := len(message); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte{' '})
}
e.moveCursor(prevRow, prevCol)
}
func (e *Editor) writeStatusBar() {
e.setBgColor(BgCyan)
defer e.setBgColor(BgBlack)
// Write file name
for i, ch := range e.filePath {
e.moveCursor(e.terminal.height, i)
e.write([]byte(string(ch)))
}
// Write Spacer
for i := len(e.filePath); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height, i)
e.write([]byte{' '})
}
}
// Views
func (e *Editor) write(b []byte) {
syscall.Write(0, b)
}
func (e *Editor) writeWithColor(b []byte, colors []color) {
var newBuf []byte
for i, c := range colors {
s := fmt.Sprintf("\033[%dm", c)
newBuf = append(newBuf, []byte(s)...)
newBuf = append(newBuf, b[i])
}
syscall.Write(0, newBuf)
}
func (e *Editor) highlight(b []byte) []color {
colors := make([]color, len(b))
for i := range colors {
colors[i] = DummyColor
}
// ASCII-only
ascii := string(b)
// Keywords
for key := range keywordColor {
index := strings.Index(ascii, string(key))
if index != -1 {
for i := 0; i < len(string(key)); i += 1 {
colors[index+i] = keywordColor[key]
}
}
}
// String Literal
isStringLit := false
for i, b := range ascii {
if b == '"' || isStringLit {
if b == '"' {
isStringLit = !isStringLit
}
colors[i] = FgGreen
}
}
return colors
}
func (e *Editor) writeRow(r *Row) {
var buf []byte
for _, r := range r.chars.Runes() {
buf = append(buf, []byte(string(r))...)
}
e.moveCursor(e.crow, 0)
e.flushRow()
// If the extension of fileName is .go, write with highlights.
if filepath.Ext(e.filePath) == ".go" {
colors := e.highlight(buf)
e.writeWithColor(buf, colors)
} else {
e.write(buf)
}
}
func (e *Editor) flush() {
e.write([]byte("\033[2J"))
}
func (e *Editor) flushRow() {
e.write([]byte("\033[2K"))
}
func (e *Editor) setBgColor(color color) {
s := fmt.Sprintf("\033[%dm", color)
e.write([]byte(s))
}
func (e *Editor) | (row, col int) {
s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin
e.write([]byte(s))
}
func (e *Editor) updateRowRunes(row *Row) {
if e.crow < e.terminal.height {
e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes())
e.writeRow(row)
}
}
func (e *Editor) refreshAllRows() {
for i := 0; i < e.terminal.height; i += 1 {
e.crow = i
e.writeRow(e.rows[e.scroolrow+i])
}
}
func (e *Editor) setRowPos(row int) {
if row >= e.n {
row = e.n - 1
}
if row < 0 {
if e.scroolrow > 0 {
e.scroolrow -= 1
e.refreshAllRows()
}
row = 0
}
if row >= e.terminal.height {
if row+e.scroolrow <= e.n {
e.scroolrow += 1
}
row = e.terminal.height - 1
e.refreshAllRows()
}
e.crow = row
e.moveCursor(row, e.ccol)
}
func (e *Editor) setColPos(col int) {
if col < 0 {
col = 0
}
if col >= e.currentRow().visibleLen() {
col = e.currentRow().visibleLen()
}
if col >= e.terminal.width {
col = e.terminal.width - 1
}
e.ccol = col
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) setRowCol(row int, col int) {
if row > e.n && col > e.currentRow().visibleLen() {
return
}
e.setRowPos(row)
e.setColPos(col)
}
// Models
func (r *Row) deleteAt(col int) {
if col >= r.len() {
return
}
r.chars.DeleteAt(col)
}
func (r *Row) insertAt(colPos int, newRune rune) {
if colPos > r.len() {
colPos = r.len()
}
r.chars.InsertAt(colPos, newRune)
}
func (r *Row) len() int { return r.chars.Len() }
func (r *Row) visibleLen() int { return r.chars.VisibleLen() }
func (e *Editor) currentRow() *Row {
return e.rows[e.crow + e.scroolrow]
}
func (e *Editor) deleteRune(row *Row, col int) {
row.deleteAt(col)
e.updateRowRunes(row)
e.setRowCol(e.crow, e.ccol - 1)
}
func (e *Editor) insertRune(row *Row, col int, newRune rune) {
row.insertAt(col, newRune)
e.updateRowRunes(row)
}
func (e *Editor) deleteRow(row int) {
e.rows = append(e.rows[:row], e.rows[row+1:]...)
e.n -= 1
prevRowPos := e.crow
e.refreshAllRows()
e.crow = prevRowPos
}
func (e *Editor) replaceRune(row int, newRune []rune) {
gt := NewGapTable(128)
for _, r := range newRune {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
e.rows[row] = r
prevRowPos := e.crow
e.crow = row - e.scroolrow
e.updateRowRunes(r)
e.crow = prevRowPos
}
func (e *Editor) insertRow(row int, runes []rune) {
gt := NewGapTable(128)
for _, r := range runes {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
// https://github.com/golang/go/wiki/SliceTricks
e.rows = | moveCursor | identifier_name |
main.go | }
}
func (e *Editor) debugRowRunes() {
if e.debug {
i := 0
for i < e.n {
_, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes())
i += 1
}
}
}
// Terminal
func makeRaw(fd int) *unix.Termios {
termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA)
if err != nil {
panic(err)
}
termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil {
panic(err)
}
return termios
}
func (e *Editor) restoreTerminal(fd int) {
if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil {
panic(err)
}
}
func getWindowSize(fd int) (int, int) {
ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
panic(err)
}
return int(ws.Col), int(ws.Row)
}
func (e *Editor) initTerminal() {
e.flush()
e.writeHelpMenu(helpMessage)
e.writeStatusBar()
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) writeHelpMenu(message string) {
prevRow, prevCol := e.crow, e.ccol
for i, ch := range message {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte(string(ch)))
}
for i := len(message); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height+1, i)
e.write([]byte{' '})
}
e.moveCursor(prevRow, prevCol)
}
func (e *Editor) writeStatusBar() {
e.setBgColor(BgCyan)
defer e.setBgColor(BgBlack)
// Write file name
for i, ch := range e.filePath {
e.moveCursor(e.terminal.height, i)
e.write([]byte(string(ch)))
}
// Write Spacer
for i := len(e.filePath); i < e.terminal.width; i++ {
e.moveCursor(e.terminal.height, i)
e.write([]byte{' '})
}
}
// Views
func (e *Editor) write(b []byte) {
syscall.Write(0, b)
}
func (e *Editor) writeWithColor(b []byte, colors []color) {
var newBuf []byte
for i, c := range colors {
s := fmt.Sprintf("\033[%dm", c)
newBuf = append(newBuf, []byte(s)...)
newBuf = append(newBuf, b[i])
}
syscall.Write(0, newBuf)
}
func (e *Editor) highlight(b []byte) []color {
colors := make([]color, len(b))
for i := range colors {
colors[i] = DummyColor
}
// ASCII-only
ascii := string(b)
// Keywords
for key := range keywordColor {
index := strings.Index(ascii, string(key))
if index != -1 {
for i := 0; i < len(string(key)); i += 1 {
colors[index+i] = keywordColor[key]
}
}
}
// String Literal
isStringLit := false
for i, b := range ascii {
if b == '"' || isStringLit {
if b == '"' {
isStringLit = !isStringLit
}
colors[i] = FgGreen
}
}
return colors
}
func (e *Editor) writeRow(r *Row) {
var buf []byte
for _, r := range r.chars.Runes() {
buf = append(buf, []byte(string(r))...)
}
e.moveCursor(e.crow, 0)
e.flushRow()
// If the extension of fileName is .go, write with highlights.
if filepath.Ext(e.filePath) == ".go" {
colors := e.highlight(buf)
e.writeWithColor(buf, colors)
} else {
e.write(buf)
}
}
func (e *Editor) flush() {
e.write([]byte("\033[2J"))
}
func (e *Editor) flushRow() {
e.write([]byte("\033[2K"))
}
func (e *Editor) setBgColor(color color) {
s := fmt.Sprintf("\033[%dm", color)
e.write([]byte(s))
}
func (e *Editor) moveCursor(row, col int) {
s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin
e.write([]byte(s))
}
func (e *Editor) updateRowRunes(row *Row) {
if e.crow < e.terminal.height {
e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes())
e.writeRow(row)
}
}
func (e *Editor) refreshAllRows() {
for i := 0; i < e.terminal.height; i += 1 {
e.crow = i
e.writeRow(e.rows[e.scroolrow+i])
}
}
func (e *Editor) setRowPos(row int) {
if row >= e.n {
row = e.n - 1
}
if row < 0 {
if e.scroolrow > 0 {
e.scroolrow -= 1
e.refreshAllRows()
}
row = 0
}
if row >= e.terminal.height {
if row+e.scroolrow <= e.n {
e.scroolrow += 1
}
row = e.terminal.height - 1
e.refreshAllRows()
}
e.crow = row
e.moveCursor(row, e.ccol)
}
func (e *Editor) setColPos(col int) {
if col < 0 {
col = 0
}
if col >= e.currentRow().visibleLen() {
col = e.currentRow().visibleLen()
}
if col >= e.terminal.width {
col = e.terminal.width - 1
}
e.ccol = col
e.moveCursor(e.crow, e.ccol)
}
func (e *Editor) setRowCol(row int, col int) {
if row > e.n && col > e.currentRow().visibleLen() {
return
}
e.setRowPos(row)
e.setColPos(col)
}
// Models
func (r *Row) deleteAt(col int) |
func (r *Row) insertAt(colPos int, newRune rune) {
if colPos > r.len() {
colPos = r.len()
}
r.chars.InsertAt(colPos, newRune)
}
func (r *Row) len() int { return r.chars.Len() }
func (r *Row) visibleLen() int { return r.chars.VisibleLen() }
func (e *Editor) currentRow() *Row {
return e.rows[e.crow + e.scroolrow]
}
func (e *Editor) deleteRune(row *Row, col int) {
row.deleteAt(col)
e.updateRowRunes(row)
e.setRowCol(e.crow, e.ccol - 1)
}
func (e *Editor) insertRune(row *Row, col int, newRune rune) {
row.insertAt(col, newRune)
e.updateRowRunes(row)
}
func (e *Editor) deleteRow(row int) {
e.rows = append(e.rows[:row], e.rows[row+1:]...)
e.n -= 1
prevRowPos := e.crow
e.refreshAllRows()
e.crow = prevRowPos
}
func (e *Editor) replaceRune(row int, newRune []rune) {
gt := NewGapTable(128)
for _, r := range newRune {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
e.rows[row] = r
prevRowPos := e.crow
e.crow = row - e.scroolrow
e.updateRowRunes(r)
e.crow = prevRowPos
}
func (e *Editor) insertRow(row int, runes []rune) {
gt := NewGapTable(128)
for _, r := range runes {
gt.AppendRune(r)
}
r := &Row{
chars: gt,
}
// https://github.com/golang/go/wiki/SliceTricks
e.rows = | {
if col >= r.len() {
return
}
r.chars.DeleteAt(col)
} | identifier_body |
decoder.go | 9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string!
// patNextFormatUSpecifier is a regex to find next format u specifier in a string
// It does also match %%u positions!
//patNextFormatUSpecifier = `(?:%[0-9]*u)`
patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string!
// patNextFormatISpecifier is a regex to find next format i specifier in a string
// It does also match %%i positions!
patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string!
// patNextFormatXSpecifier is a regex to find next format x specifier in a string
// It does also match %%x positions!
// patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))`
patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string!
// patNextFormatFSpecifier is a regex to find next format f specifier in a string
// It does also match %%f positions!
patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string!
// patNextFormatBoolSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string!
// patNextFormatPointerSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string!
// hints is the help information in case of errors.
Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?"
// DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d "
//
// DefaultStamp16 = "ms_µs" // "tim: %2d_%03d "
UnsignedFormatSpecifier = 0 // %u -> %d
SignedFormatSpecifier = 1 //
FloatFormatSpecifier = 2 // %f and relatives
BooleanFormatSpecifier = 3 // a %t (bool) found
PointerFormatSpecifier = 4 // a %p (pointer) found
)
var (
// Verbose gives more information on output if set. The value is injected from main packages.
Verbose bool
// ShowID is used as format string for displaying the first trice ID at the start of each line if not "".
ShowID string
// decoder.LastTriceID is last decoded ID. It is used for switch -showID.
LastTriceID id.TriceID
// TestTableMode is a special option for easy decoder test table generation.
TestTableMode bool
// Unsigned if true, forces hex and in values printed as unsigned values.
Unsigned bool
matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier)
matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier)
matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier)
matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier)
matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier)
matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier)
matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier)
DebugOut = false // DebugOut enables debug information.
DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder.
InitialCycle = true // InitialCycle is a helper for the cycle counter automatic.
TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value.
TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part.
TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting.
TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps.
TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps.
TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps.
TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed.
ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed.
ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed.
ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed.
LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name.
TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type.
TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete)
PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS)
IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder)
NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting.
)
// New abstracts the function type for a new decoder.
type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder
// Decoder is providing a byte reader returning decoded trice's.
// SetInput allows switching the input stream to a different source.
type Decoder interface {
io.Reader
SetInput(io.Reader)
}
// DecoderData is the common data struct for all decoders.
type DecoderData struct {
W io.Writer // io.Stdout or the like
In io.Reader // in is the inner reader, which is used to get raw bytes
InnerBuffer []byte // avoid repeated allocation (trex)
IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation.
B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices.
B0 []byte // initial value for B
Endian bool // endian is true for LittleEndian and false for BigEndian
TriceSize int // trice head and payload size as number of bytes
ParamSpace int // trice payload size after head
SLen int // string length for TRICE_S
Lut id.TriceIDLookUp // id look-up map for translation
LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher
Li id.TriceIDLookUpLI // location information map
Trice id.TriceFmt // id.TriceFmt // received trice
}
// SetInput allows switching the input stream to a different source.
//
// This function is for easier testing with cycle counters.
func (p *DecoderData) SetInput(r io.Reader) {
p.In = r
}
// ReadU16 returns the 2 b bytes as uint16 according the specified endianness
func (p *DecoderData) ReadU16(b []byte) uint16 {
if p.Endian {
return binary.LittleEndian.Uint16(b)
}
return binary.BigEndian.Uint16(b)
}
// ReadU32 returns the 4 b bytes as uint32 according the specified endianness
func (p *DecoderData) ReadU32(b []byte) uint32 {
if p.Endian {
return binary.LittleEndian.Uint32(b)
}
return binary.BigEndian.Uint32(b)
}
// ReadU64 returns the 8 b bytes as uint64 according the specified endianness
func (p *DecoderData) ReadU64(b []byte) uint64 {
if p.Endian {
return binary.LittleEndian.Uint64(b)
}
return binary.BigEndian.Uint64(b)
}
// UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o.
//
// If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers.
// Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1.
// If a float format specifier was found at position k, u[k] is 2,
// http://www.cplusplus.com/reference/cstdio/printf/
// https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output
func UR | eplaceN(i | identifier_name |
|
decoder.go | |X|b|p|t)` // assumes no `%%` inside string!
// patNextFormatFSpecifier is a regex to find next format f specifier in a string
// It does also match %%f positions!
patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string!
// patNextFormatBoolSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string!
// patNextFormatPointerSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string!
// hints is the help information in case of errors.
Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?"
// DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d "
//
// DefaultStamp16 = "ms_µs" // "tim: %2d_%03d "
UnsignedFormatSpecifier = 0 // %u -> %d
SignedFormatSpecifier = 1 //
FloatFormatSpecifier = 2 // %f and relatives
BooleanFormatSpecifier = 3 // a %t (bool) found
PointerFormatSpecifier = 4 // a %p (pointer) found
)
var (
// Verbose gives more information on output if set. The value is injected from main packages.
Verbose bool
// ShowID is used as format string for displaying the first trice ID at the start of each line if not "".
ShowID string
// decoder.LastTriceID is last decoded ID. It is used for switch -showID.
LastTriceID id.TriceID
// TestTableMode is a special option for easy decoder test table generation.
TestTableMode bool
// Unsigned if true, forces hex and in values printed as unsigned values.
Unsigned bool
matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier)
matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier)
matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier)
matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier)
matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier)
matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier)
matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier)
DebugOut = false // DebugOut enables debug information.
DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder.
InitialCycle = true // InitialCycle is a helper for the cycle counter automatic.
TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value.
TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part.
TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting.
TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps.
TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps.
TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps.
TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed.
ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed.
ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed.
ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed.
LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name.
TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type.
TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete)
PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS)
IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder)
NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting.
)
// New abstracts the function type for a new decoder.
type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder
// Decoder is providing a byte reader returning decoded trice's.
// SetInput allows switching the input stream to a different source.
type Decoder interface {
io.Reader
SetInput(io.Reader)
}
// DecoderData is the common data struct for all decoders.
type DecoderData struct {
W io.Writer // io.Stdout or the like
In io.Reader // in is the inner reader, which is used to get raw bytes
InnerBuffer []byte // avoid repeated allocation (trex)
IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation.
B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices.
B0 []byte // initial value for B
Endian bool // endian is true for LittleEndian and false for BigEndian
TriceSize int // trice head and payload size as number of bytes
ParamSpace int // trice payload size after head
SLen int // string length for TRICE_S
Lut id.TriceIDLookUp // id look-up map for translation
LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher
Li id.TriceIDLookUpLI // location information map
Trice id.TriceFmt // id.TriceFmt // received trice
}
// SetInput allows switching the input stream to a different source.
//
// This function is for easier testing with cycle counters.
func (p *DecoderData) SetInput(r io.Reader) {
p.In = r
}
// ReadU16 returns the 2 b bytes as uint16 according the specified endianness
func (p *DecoderData) ReadU16(b []byte) uint16 {
if p.Endian {
return binary.LittleEndian.Uint16(b)
}
return binary.BigEndian.Uint16(b)
}
// ReadU32 returns the 4 b bytes as uint32 according the specified endianness
func (p *DecoderData) ReadU32(b []byte) uint32 {
if p.Endian {
return binary.LittleEndian.Uint32(b)
}
return binary.BigEndian.Uint32(b)
}
// ReadU64 returns the 8 b bytes as uint64 according the specified endianness
func (p *DecoderData) ReadU64(b []byte) uint64 {
if p.Endian {
return binary.LittleEndian.Uint64(b)
}
return binary.BigEndian.Uint64(b)
}
// UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o.
//
// If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers.
// Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1.
// If a float format specifier was found at position k, u[k] is 2,
// http://www.cplusplus.com/reference/cstdio/printf/
// https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output
func UReplaceN(i string) (o string, u []int) {
| o = i
i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster
var offset int
for {
s := i[offset:] // remove processed part
loc := matchNextFormatSpecifier.FindStringIndex(s)
if nil == loc { // no (more) fm found
return
}
offset += loc[1] // track position
fm := s[loc[0]:loc[1]]
locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm)
if nil != locPointer { // a %p found
// This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut.
// There are false positive windows vet warnings:
// https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer
// https://github.com/golang/go/issues/41205
// As workaround replace %p with %x in the format strings.
// Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false`
// But that we simply ignore right now. | identifier_body |
|
decoder.go | O|p|q|u|x|X|n|b))`
//patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b|t)` // assumes no `%%` inside string!
patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string!
// patNextFormatUSpecifier is a regex to find next format u specifier in a string
// It does also match %%u positions!
//patNextFormatUSpecifier = `(?:%[0-9]*u)`
patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string!
// patNextFormatISpecifier is a regex to find next format i specifier in a string
// It does also match %%i positions!
patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string!
// patNextFormatXSpecifier is a regex to find next format x specifier in a string
// It does also match %%x positions!
// patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))`
patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string!
// patNextFormatFSpecifier is a regex to find next format f specifier in a string
// It does also match %%f positions!
patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string!
// patNextFormatBoolSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string!
// patNextFormatPointerSpecifier is a regex to find next format f specifier in a string
// It does also match %%t positions!
patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string!
// hints is the help information in case of errors.
Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?"
// DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d "
//
// DefaultStamp16 = "ms_µs" // "tim: %2d_%03d "
UnsignedFormatSpecifier = 0 // %u -> %d
SignedFormatSpecifier = 1 //
FloatFormatSpecifier = 2 // %f and relatives
BooleanFormatSpecifier = 3 // a %t (bool) found
PointerFormatSpecifier = 4 // a %p (pointer) found
)
var (
// Verbose gives more information on output if set. The value is injected from main packages.
Verbose bool
// ShowID is used as format string for displaying the first trice ID at the start of each line if not "".
ShowID string
// decoder.LastTriceID is last decoded ID. It is used for switch -showID.
LastTriceID id.TriceID
// TestTableMode is a special option for easy decoder test table generation.
TestTableMode bool
// Unsigned if true, forces hex and in values printed as unsigned values.
Unsigned bool
matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier)
matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier)
matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier)
matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier)
matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier)
matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier)
matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier)
DebugOut = false // DebugOut enables debug information.
DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder.
InitialCycle = true // InitialCycle is a helper for the cycle counter automatic.
TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value.
TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part.
TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting.
TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps.
TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps.
TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps.
TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed.
ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed.
ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed.
ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed.
LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name.
TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type.
TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete)
PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS)
IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder)
NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting.
)
// New abstracts the function type for a new decoder.
type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder
// Decoder is providing a byte reader returning decoded trice's.
// SetInput allows switching the input stream to a different source.
type Decoder interface {
io.Reader
SetInput(io.Reader)
}
// DecoderData is the common data struct for all decoders.
type DecoderData struct {
W io.Writer // io.Stdout or the like
In io.Reader // in is the inner reader, which is used to get raw bytes
InnerBuffer []byte // avoid repeated allocation (trex)
IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation.
B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices.
B0 []byte // initial value for B
Endian bool // endian is true for LittleEndian and false for BigEndian
TriceSize int // trice head and payload size as number of bytes
ParamSpace int // trice payload size after head
SLen int // string length for TRICE_S
Lut id.TriceIDLookUp // id look-up map for translation
LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher
Li id.TriceIDLookUpLI // location information map
Trice id.TriceFmt // id.TriceFmt // received trice
} |
// SetInput allows switching the input stream to a different source.
//
// This function is for easier testing with cycle counters.
func (p *DecoderData) SetInput(r io.Reader) {
p.In = r
}
// ReadU16 returns the 2 b bytes as uint16 according the specified endianness
func (p *DecoderData) ReadU16(b []byte) uint16 {
if p.Endian {
return binary.LittleEndian.Uint16(b)
}
return binary.BigEndian.Uint16(b)
}
// ReadU32 returns the 4 b bytes as uint32 according the specified endianness
func (p *DecoderData) ReadU32(b []byte) uint32 {
if p.Endian {
return binary.LittleEndian.Uint32(b)
}
return binary.BigEndian.Uint32(b)
}
// ReadU64 returns the 8 b bytes as uint64 according the specified endianness
func (p *DecoderData) ReadU64(b []byte) uint64 {
if p.Endian {
return binary.LittleEndian.Uint64(b)
}
return binary.BigEndian.Uint64(b)
}
// UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o.
//
// If a replacement took place on position | random_line_split |
|
decoder.go | bool
// ShowID is used as the format string for displaying the first trice ID at the start of each line if not empty.
ShowID string
// LastTriceID is the last decoded ID. It is used for the -showID switch.
LastTriceID id.TriceID
// TestTableMode is a special option for easy decoder test table generation.
TestTableMode bool
// Unsigned, if true, forces hex and bin values to be printed as unsigned values.
Unsigned bool
matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier)
matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier)
matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier)
matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier)
matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier)
matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier)
matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier)
DebugOut = false // DebugOut enables debug information.
DumpLineByteCount int // DumpLineByteCount is the number of bytes per line for the dumpDec decoder.
InitialCycle = true // InitialCycle is a helper for the automatic cycle counter.
TargetTimestamp uint64 // TargetTimestamp contains the target-specific timestamp value.
TargetLocation uint32 // TargetLocation contains a 16-bit file ID in the high part and a 16-bit line number in the low part.
TargetStamp string // TargetStamp is the target timestamps' time base for default formatting.
TargetStamp32 string // TargetStamp32 is the format string for 32-bit target timestamps.
TargetStamp16 string // TargetStamp16 is the format string for 16-bit target timestamps.
TargetStamp0 string // TargetStamp0 is the format string for target timestamps of size 0.
TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when the TargetTimeStampUnit flag was passed.
ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when the TargetTimeStamp32 flag was passed.
ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when the TargetTimeStamp16 flag was passed.
ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when the TargetTimeStamp0 flag was passed.
LocationInformationFormatString string // LocationInformationFormatString is the format string for the target location: line number and file name.
TargetTimestampSize int // TargetTimestampSize is set depending on the trice type.
TargetLocationExists bool // TargetLocationExists is set depending on p.COBSModeDescriptor. (obsolete)
PackageFraming string // PackageFraming is the framing used for packing. Valid values: COBS, TCOBS, TCOBSv1 (same as TCOBS).
IDBits = 14 // IDBits holds the count of bits used for the ID (used at least in trexDecoder).
NewlineIndent = -1 // NewlineIndent is used when formatting trice messages whose format string contains several newlines.
)
// New abstracts the function type for a new decoder.
type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder
// Decoder is providing a byte reader returning decoded trice's.
// SetInput allows switching the input stream to a different source.
type Decoder interface {
io.Reader
SetInput(io.Reader)
}
// DecoderData is the common data struct for all decoders.
type DecoderData struct {
W io.Writer // io.Stdout or the like
In io.Reader // in is the inner reader, which is used to get raw bytes
InnerBuffer []byte // avoid repeated allocation (trex)
IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation.
B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices.
B0 []byte // initial value for B
Endian bool // endian is true for LittleEndian and false for BigEndian
TriceSize int // trice head and payload size as number of bytes
ParamSpace int // trice payload size after head
SLen int // string length for TRICE_S
Lut id.TriceIDLookUp // id look-up map for translation
LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher
Li id.TriceIDLookUpLI // location information map
Trice id.TriceFmt // id.TriceFmt // received trice
}
// SetInput allows switching the input stream to a different source.
//
// This function is for easier testing with cycle counters.
func (p *DecoderData) SetInput(r io.Reader) {
p.In = r
}
// ReadU16 returns the 2 b bytes as uint16 according the specified endianness
func (p *DecoderData) ReadU16(b []byte) uint16 {
if p.Endian {
return binary.LittleEndian.Uint16(b)
}
return binary.BigEndian.Uint16(b)
}
// ReadU32 returns the 4 b bytes as uint32 according the specified endianness
func (p *DecoderData) ReadU32(b []byte) uint32 {
if p.Endian {
return binary.LittleEndian.Uint32(b)
}
return binary.BigEndian.Uint32(b)
}
// ReadU64 returns the 8 b bytes as uint64 according the specified endianness
func (p *DecoderData) ReadU64(b []byte) uint64 {
if p.Endian {
return binary.LittleEndian.Uint64(b)
}
return binary.BigEndian.Uint64(b)
}
// UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o.
//
// If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers.
// Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1.
// If a float format specifier was found at position k, u[k] is 2,
// http://www.cplusplus.com/reference/cstdio/printf/
// https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output
func UReplaceN(i string) (o string, u []int) {
o = i
i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster
var offset int
for {
s := i[offset:] // remove processed part
loc := matchNextFormatSpecifier.FindStringIndex(s)
if nil == loc { // no (more) fm found
return
}
offset += loc[1] // track position
fm := s[loc[0]:loc[1]]
locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm)
if nil != locPointer { // a %p found
// This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut.
// There are false positive windows vet warnings:
// https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer
// https://github.com/golang/go/issues/41205
// As workaround replace %p with %x in the format strings.
// Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false`
// But that we simply ignore right now.
o = o[:offset-1] + "x" + o[offset:] // replace %np -> %nx
u = append(u, PointerFormatSpecifier) // pointer value
continue
}
locBool := matchNextFormatBoolSpecifier.FindStringIndex(fm)
if nil != locBool { // a %t found
u = append(u, BooleanFormatSpecifier) // bool value
continue
}
locF := matchNextFormatFSpecifier.FindStringIndex(fm)
if nil != locF { // a %nf found
u = append(u, FloatFormatSpecifier) // float value
continue
}
locU := matchNextFormatUSpecifier.FindStringIndex(fm)
if nil != locU { // a %nu found
o = o[:offset-1] + "d" + o[offset:] // replace %nu -> %nd
u = append(u, UnsignedFormatSpecifier) // no negative values
continue
}
locI := matchNextFormatISpecifier.FindStringIndex(fm)
if nil != locI { // a %ni found
o = o[:offset-1] + "d" + o[offset:] // replace %ni -> %nd
u = append(u, SignedFormatSpecifier) // also negative values
continue
}
locX := matchNextFormatXSpecifier.FindStringIndex(fm)
if nil != locX { | // a %nx, %nX or, %no, %nO or %nb found
if Unsigned {
u = append(u, 0) // no negative values
} else {
u = append(u, 1) // also negative values
}
continue
}
| conditional_block |
|
pool.rs | 1, Ordering::Relaxed);
// SAFETY: We cannot permit the reuse of thread IDs since reusing a
// thread ID might result in more than one thread "owning" a pool,
// and thus, permit accessing a mutable value from multiple threads
// simultaneously without synchronization. The intent of this panic is
// to be a sanity check. It is not expected that the thread ID space
// will actually be exhausted in practice.
//
// This checks that the counter never wraps around, since atomic
// addition wraps around on overflow.
if next == 0 {
panic!("regex: thread ID allocation space exhausted");
}
next
};
);
/// The type of the function used to create values in a pool when the pool is
/// empty and the caller requests one.
type CreateFn<T> =
Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>;
/// A simple thread safe pool for reusing values.
///
/// Getting a value out comes with a guard. When that guard is dropped, the
/// value is automatically put back in the pool.
///
/// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means
/// that T can use interior mutability. This is possible because a pool is
/// guaranteed to provide a value to exactly one thread at any time.
///
/// Currently, a pool never contracts in size. Its size is proportional to the
/// number of simultaneous uses.
pub struct Pool<T> {
/// A stack of T values to hand out. These are used when a Pool is
/// accessed by a thread that didn't create it.
stack: Mutex<Vec<Box<T>>>,
/// A function to create more T values when stack is empty and a caller
/// has requested a T.
create: CreateFn<T>,
/// The ID of the thread that owns this pool. The owner is the thread
/// that makes the first call to 'get'. When the owner calls 'get', it
/// gets 'owner_val' directly instead of returning a T from 'stack'.
/// See comments elsewhere for details, but this is intended to be an
/// optimization for the common case that makes getting a T faster.
///
/// It is initialized to a value of zero (an impossible thread ID) as a
/// sentinel to indicate that it is unowned.
owner: AtomicUsize,
/// A value to return when the caller is in the same thread that created
/// the Pool.
owner_val: T,
}
// SAFETY: Since we want to use a Pool from multiple threads simultaneously
// behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T>
// would be Sync. However, since we use a Pool to store mutable scratch space,
// we wind up using a T that has interior mutability and is thus itself not
// Sync. So what we *really* want is for our Pool<T> to by Sync even when T is
// not Sync (but is at least Send).
//
// The only non-sync aspect of a Pool is its 'owner_val' field, which is used
// to implement faster access to a pool value in the common case of a pool
// being accessed in the same thread in which it was created. The 'stack' field
// is also shared, but a Mutex<T> where T: Send is already Sync. So we only
// need to worry about 'owner_val'.
//
// The key is to guarantee that 'owner_val' can only ever be accessed from one
// thread. In our implementation below, we guarantee this by only returning the
// 'owner_val' when the ID of the current thread matches the ID of the thread
// that created the Pool. Since this can only ever be one thread, it follows
// that only one thread can access 'owner_val' at any point in time. Thus, it
// is safe to declare that Pool<T> is Sync when T is Send.
//
// NOTE: It would also be possible to make the owning thread be the *first*
// thread that tries to get a value out of a Pool. However, the current
// implementation is a little simpler and it's not clear if making the first
// thread (rather than the creating thread) is meaningfully better.
//
// If there is a way to achieve our performance goals using safe code, then
// I would very much welcome a patch. As it stands, the implementation below
// tries to balance safety with performance. The case where a Regex is used
// from multiple threads simultaneously will suffer a bit since getting a cache
// will require unlocking a mutex.
unsafe impl<T: Send> Sync for Pool<T> {}
impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
f.debug_struct("Pool")
.field("stack", &self.stack)
.field("owner", &self.owner)
.field("owner_val", &self.owner_val)
.finish()
}
}
/// A guard that is returned when a caller requests a value from the pool.
///
/// The purpose of the guard is to use RAII to automatically put the value back
/// in the pool once it's dropped.
#[derive(Debug)]
pub struct PoolGuard<'a, T: Send> {
/// The pool that this guard is attached to.
pool: &'a Pool<T>,
/// This is None when the guard represents the special "owned" value. In
/// which case, the value is retrieved from 'pool.owner_val'.
value: Option<Box<T>>,
}
impl<T: Send> Pool<T> {
/// Create a new pool. The given closure is used to create values in the
/// pool when necessary.
pub fn new(create: CreateFn<T>) -> Pool<T> {
let owner = AtomicUsize::new(0);
let owner_val = create();
Pool { stack: Mutex::new(vec![]), create, owner, owner_val }
}
/// Get a value from the pool. The caller is guaranteed to have exclusive
/// access to the given value.
///
/// Note that there is no guarantee provided about which value in the
/// pool is returned. That is, calling get, dropping the guard (causing
/// the value to go back into the pool) and then calling get again is NOT
/// guaranteed to return the same value received in the first get call.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn get(&self) -> PoolGuard<'_, T> {
// Our fast path checks if the caller is the thread that "owns" this
// pool. Or stated differently, whether it is the first thread that
// tried to extract a value from the pool. If it is, then we can return
// a T to the caller without going through a mutex.
//
// SAFETY: We must guarantee that only one thread gets access to this
// value. Since a thread is uniquely identified by the THREAD_ID thread
// local, it follows that is the caller's thread ID is equal to the
// owner, then only one thread may receive this value.
let caller = THREAD_ID.with(|id| *id);
let owner = self.owner.load(Ordering::Relaxed);
if caller == owner {
return self.guard_owned();
}
self.get_slow(caller, owner)
}
/// This is the "slow" version that goes through a mutex to pop an
/// allocated value off a stack to return to the caller. (Or, if the stack
/// is empty, a new value is created.)
///
/// If the pool has no owner, then this will set the owner.
#[cold]
fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> {
use std::sync::atomic::Ordering::Relaxed;
if owner == 0 {
// The sentinel 0 value means this pool is not yet owned. We
// try to atomically set the owner. If we do, then this thread
// becomes the owner and we can return a guard that represents
// the special T for the owner.
let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed);
if res.is_ok() {
return self.guard_owned();
}
}
let mut stack = self.stack.lock().unwrap();
let value = match stack.pop() {
None => Box::new((self.create)()),
Some(value) => value,
};
self.guard_stack(value)
}
/// Puts a value back into the pool. Callers don't need to call this. Once
/// the guard that's returned by 'get' is dropped, it is put back into the
/// pool automatically.
fn put(&self, value: Box<T>) {
let mut stack = self.stack.lock().unwrap();
stack.push(value);
}
/// Create a guard that represents the special owned T.
fn guard_owned(&self) -> PoolGuard<'_, T> {
PoolGuard { pool: self, value: None }
}
/// Create a guard that contains a value from the pool's stack.
fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T> | {
PoolGuard { pool: self, value: Some(value) }
} | identifier_body |
|
pool.rs | /regex/issues/362
// for example. (Why do I want it to be simple? Well, I suppose what I mean is,
// "use as much safe code as possible to minimize risk and be as sure as I can
// be that it is correct.")
//
// My guess is that the thread_local design is probably not appropriate for
// regex since its memory usage scales to the number of active threads that
// have used a regex, where as the pool below scales to the number of threads
// that simultaneously use a regex. While neither case permits contraction,
// since we own the pool data structure below, we can add contraction if a
// clear use case pops up in the wild. More pressingly though, it seems that
// there are at least some use case patterns where one might have many threads
// sitting around that might have used a regex at one point. While thread_local
// does try to reuse space previously used by a thread that has since stopped,
// its maximal memory usage still scales with the total number of active
// threads. In contrast, the pool below scales with the total number of threads
// *simultaneously* using the pool. The hope is that this uses less memory
// overall. And if it doesn't, we can hopefully tune it somehow.
//
// It seems that these sort of conditions happen frequently
// in FFI inside of other more "managed" languages. This was
// mentioned in the issue linked above, and also mentioned here:
// https://github.com/BurntSushi/rure-go/issues/3. And in particular, users
// confirm that disabling the use of thread_local resolves the leak.
//
// There were other weaker reasons for moving off of thread_local as well.
// Namely, at the time, I was looking to reduce dependencies. And for something
// like regex, maintenance can be simpler when we own the full dependency tree.
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
/// An atomic counter used to allocate thread IDs.
static COUNTER: AtomicUsize = AtomicUsize::new(1);
thread_local!(
/// A thread local used to assign an ID to a thread.
static THREAD_ID: usize = {
let next = COUNTER.fetch_add(1, Ordering::Relaxed);
// SAFETY: We cannot permit the reuse of thread IDs since reusing a
// thread ID might result in more than one thread "owning" a pool,
// and thus, permit accessing a mutable value from multiple threads
// simultaneously without synchronization. The intent of this panic is
// to be a sanity check. It is not expected that the thread ID space
// will actually be exhausted in practice.
//
// This checks that the counter never wraps around, since atomic
// addition wraps around on overflow.
if next == 0 {
panic!("regex: thread ID allocation space exhausted");
}
next
};
);
/// The type of the function used to create values in a pool when the pool is
/// empty and the caller requests one.
type CreateFn<T> =
Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>;
/// A simple thread safe pool for reusing values.
///
/// Getting a value out comes with a guard. When that guard is dropped, the
/// value is automatically put back in the pool.
///
/// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means
/// that T can use interior mutability. This is possible because a pool is
/// guaranteed to provide a value to exactly one thread at any time.
///
/// Currently, a pool never contracts in size. Its size is proportional to the
/// number of simultaneous uses.
pub struct | <T> {
/// A stack of T values to hand out. These are used when a Pool is
/// accessed by a thread that didn't create it.
stack: Mutex<Vec<Box<T>>>,
/// A function to create more T values when stack is empty and a caller
/// has requested a T.
create: CreateFn<T>,
/// The ID of the thread that owns this pool. The owner is the thread
/// that makes the first call to 'get'. When the owner calls 'get', it
/// gets 'owner_val' directly instead of returning a T from 'stack'.
/// See comments elsewhere for details, but this is intended to be an
/// optimization for the common case that makes getting a T faster.
///
/// It is initialized to a value of zero (an impossible thread ID) as a
/// sentinel to indicate that it is unowned.
owner: AtomicUsize,
/// A value to return when the caller is in the same thread that created
/// the Pool.
owner_val: T,
}
// SAFETY: Since we want to use a Pool from multiple threads simultaneously
// behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T>
// would be Sync. However, since we use a Pool to store mutable scratch space,
// we wind up using a T that has interior mutability and is thus itself not
// Sync. So what we *really* want is for our Pool<T> to by Sync even when T is
// not Sync (but is at least Send).
//
// The only non-sync aspect of a Pool is its 'owner_val' field, which is used
// to implement faster access to a pool value in the common case of a pool
// being accessed in the same thread in which it was created. The 'stack' field
// is also shared, but a Mutex<T> where T: Send is already Sync. So we only
// need to worry about 'owner_val'.
//
// The key is to guarantee that 'owner_val' can only ever be accessed from one
// thread. In our implementation below, we guarantee this by only returning the
// 'owner_val' when the ID of the current thread matches the ID of the thread
// that created the Pool. Since this can only ever be one thread, it follows
// that only one thread can access 'owner_val' at any point in time. Thus, it
// is safe to declare that Pool<T> is Sync when T is Send.
//
// NOTE: It would also be possible to make the owning thread be the *first*
// thread that tries to get a value out of a Pool. However, the current
// implementation is a little simpler and it's not clear if making the first
// thread (rather than the creating thread) is meaningfully better.
//
// If there is a way to achieve our performance goals using safe code, then
// I would very much welcome a patch. As it stands, the implementation below
// tries to balance safety with performance. The case where a Regex is used
// from multiple threads simultaneously will suffer a bit since getting a cache
// will require unlocking a mutex.
unsafe impl<T: Send> Sync for Pool<T> {}
impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
f.debug_struct("Pool")
.field("stack", &self.stack)
.field("owner", &self.owner)
.field("owner_val", &self.owner_val)
.finish()
}
}
/// A guard that is returned when a caller requests a value from the pool.
///
/// The purpose of the guard is to use RAII to automatically put the value back
/// in the pool once it's dropped.
#[derive(Debug)]
pub struct PoolGuard<'a, T: Send> {
/// The pool that this guard is attached to.
pool: &'a Pool<T>,
/// This is None when the guard represents the special "owned" value. In
/// which case, the value is retrieved from 'pool.owner_val'.
value: Option<Box<T>>,
}
impl<T: Send> Pool<T> {
/// Create a new pool. The given closure is used to create values in the
/// pool when necessary.
pub fn new(create: CreateFn<T>) -> Pool<T> {
let owner = AtomicUsize::new(0);
let owner_val = create();
Pool { stack: Mutex::new(vec![]), create, owner, owner_val }
}
/// Get a value from the pool. The caller is guaranteed to have exclusive
/// access to the given value.
///
/// Note that there is no guarantee provided about which value in the
/// pool is returned. That is, calling get, dropping the guard (causing
/// the value to go back into the pool) and then calling get again is NOT
/// guaranteed to return the same value received in the first get call.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn get(&self) -> PoolGuard<'_, T> {
// Our fast path checks if the caller is the thread that "owns" this
// pool. Or stated differently, whether it is the first thread that
// tried to extract a value from the pool. If it is, then we can return
// a T to the caller without going through a mutex.
//
// SAFETY: We must guarantee that only one thread gets access to this
// value. Since a thread is uniquely identified by the THREAD_ID thread
// local, it follows that is the caller's thread ID is equal to | Pool | identifier_name |
pool.rs | -lang/regex/issues/362
// for example. (Why do I want it to be simple? Well, I suppose what I mean is,
// "use as much safe code as possible to minimize risk and be as sure as I can
// be that it is correct.")
//
// My guess is that the thread_local design is probably not appropriate for
// regex since its memory usage scales to the number of active threads that
// have used a regex, where as the pool below scales to the number of threads
// that simultaneously use a regex. While neither case permits contraction,
// since we own the pool data structure below, we can add contraction if a
// clear use case pops up in the wild. More pressingly though, it seems that
// there are at least some use case patterns where one might have many threads
// sitting around that might have used a regex at one point. While thread_local
// does try to reuse space previously used by a thread that has since stopped,
// its maximal memory usage still scales with the total number of active
// threads. In contrast, the pool below scales with the total number of threads
// *simultaneously* using the pool. The hope is that this uses less memory
// overall. And if it doesn't, we can hopefully tune it somehow.
//
// It seems that these sort of conditions happen frequently
// in FFI inside of other more "managed" languages. This was
// mentioned in the issue linked above, and also mentioned here:
// https://github.com/BurntSushi/rure-go/issues/3. And in particular, users
// confirm that disabling the use of thread_local resolves the leak.
//
// There were other weaker reasons for moving off of thread_local as well.
// Namely, at the time, I was looking to reduce dependencies. And for something
// like regex, maintenance can be simpler when we own the full dependency tree.
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
/// An atomic counter used to allocate thread IDs.
static COUNTER: AtomicUsize = AtomicUsize::new(1);
thread_local!(
/// A thread local used to assign an ID to a thread.
static THREAD_ID: usize = {
let next = COUNTER.fetch_add(1, Ordering::Relaxed);
// SAFETY: We cannot permit the reuse of thread IDs since reusing a
// thread ID might result in more than one thread "owning" a pool,
// and thus, permit accessing a mutable value from multiple threads
// simultaneously without synchronization. The intent of this panic is
// to be a sanity check. It is not expected that the thread ID space
// will actually be exhausted in practice.
//
// This checks that the counter never wraps around, since atomic
// addition wraps around on overflow.
if next == 0 {
panic!("regex: thread ID allocation space exhausted");
}
next
};
);
/// The type of the function used to create values in a pool when the pool is
/// empty and the caller requests one.
type CreateFn<T> =
Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>;
/// A simple thread safe pool for reusing values.
///
/// Getting a value out comes with a guard. When that guard is dropped, the
/// value is automatically put back in the pool.
///
/// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means
/// that T can use interior mutability. This is possible because a pool is
/// guaranteed to provide a value to exactly one thread at any time.
///
/// Currently, a pool never contracts in size. Its size is proportional to the
/// number of simultaneous uses.
pub struct Pool<T> {
/// A stack of T values to hand out. These are used when a Pool is
/// accessed by a thread that didn't create it.
stack: Mutex<Vec<Box<T>>>,
/// A function to create more T values when stack is empty and a caller
/// has requested a T.
create: CreateFn<T>, | /// gets 'owner_val' directly instead of returning a T from 'stack'.
/// See comments elsewhere for details, but this is intended to be an
/// optimization for the common case that makes getting a T faster.
///
/// It is initialized to a value of zero (an impossible thread ID) as a
/// sentinel to indicate that it is unowned.
owner: AtomicUsize,
/// A value to return when the caller is in the same thread that created
/// the Pool.
owner_val: T,
}
// SAFETY: Since we want to use a Pool from multiple threads simultaneously
// behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T>
// would be Sync. However, since we use a Pool to store mutable scratch space,
// we wind up using a T that has interior mutability and is thus itself not
// Sync. So what we *really* want is for our Pool<T> to by Sync even when T is
// not Sync (but is at least Send).
//
// The only non-sync aspect of a Pool is its 'owner_val' field, which is used
// to implement faster access to a pool value in the common case of a pool
// being accessed in the same thread in which it was created. The 'stack' field
// is also shared, but a Mutex<T> where T: Send is already Sync. So we only
// need to worry about 'owner_val'.
//
// The key is to guarantee that 'owner_val' can only ever be accessed from one
// thread. In our implementation below, we guarantee this by only returning the
// 'owner_val' when the ID of the current thread matches the ID of the thread
// that created the Pool. Since this can only ever be one thread, it follows
// that only one thread can access 'owner_val' at any point in time. Thus, it
// is safe to declare that Pool<T> is Sync when T is Send.
//
// NOTE: It would also be possible to make the owning thread be the *first*
// thread that tries to get a value out of a Pool. However, the current
// implementation is a little simpler and it's not clear if making the first
// thread (rather than the creating thread) is meaningfully better.
//
// If there is a way to achieve our performance goals using safe code, then
// I would very much welcome a patch. As it stands, the implementation below
// tries to balance safety with performance. The case where a Regex is used
// from multiple threads simultaneously will suffer a bit since getting a cache
// will require unlocking a mutex.
unsafe impl<T: Send> Sync for Pool<T> {}
impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
f.debug_struct("Pool")
.field("stack", &self.stack)
.field("owner", &self.owner)
.field("owner_val", &self.owner_val)
.finish()
}
}
/// A guard that is returned when a caller requests a value from the pool.
///
/// The purpose of the guard is to use RAII to automatically put the value back
/// in the pool once it's dropped.
#[derive(Debug)]
pub struct PoolGuard<'a, T: Send> {
/// The pool that this guard is attached to.
pool: &'a Pool<T>,
/// This is None when the guard represents the special "owned" value. In
/// which case, the value is retrieved from 'pool.owner_val'.
value: Option<Box<T>>,
}
impl<T: Send> Pool<T> {
/// Create a new pool. The given closure is used to create values in the
/// pool when necessary.
pub fn new(create: CreateFn<T>) -> Pool<T> {
let owner = AtomicUsize::new(0);
let owner_val = create();
Pool { stack: Mutex::new(vec![]), create, owner, owner_val }
}
/// Get a value from the pool. The caller is guaranteed to have exclusive
/// access to the given value.
///
/// Note that there is no guarantee provided about which value in the
/// pool is returned. That is, calling get, dropping the guard (causing
/// the value to go back into the pool) and then calling get again is NOT
/// guaranteed to return the same value received in the first get call.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn get(&self) -> PoolGuard<'_, T> {
// Our fast path checks if the caller is the thread that "owns" this
// pool. Or stated differently, whether it is the first thread that
// tried to extract a value from the pool. If it is, then we can return
// a T to the caller without going through a mutex.
//
// SAFETY: We must guarantee that only one thread gets access to this
// value. Since a thread is uniquely identified by the THREAD_ID thread
// local, it follows that is the caller's thread ID is equal to the | /// The ID of the thread that owns this pool. The owner is the thread
/// that makes the first call to 'get'. When the owner calls 'get', it | random_line_split |
common.py | rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}"
DEVICE = None
def set_device(device):
global DEVICE
DEVICE = device
if torch.cuda.is_available():
print(torch.cuda.get_device_properties(0))
print('Using', DEVICE)
def get_device():
global DEVICE
return DEVICE
def set_seed(seed):
# note: there are another nuances for gpu and multi-gpu
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def clear_or_create_directory(dir_name):
""" ignoring all possible errors """
shutil.rmtree(dir_name, ignore_errors=True)
cntr = 0
while True:
try:
os.makedirs(dir_name, exist_ok=True)
return
except OSError:
if cntr < 10:
# some windows bug?
cntr += 1
from time import sleep
sleep(0.1 * cntr)
else:
raise
class SummarizationDataset(Dataset):
def __init__(self, texts, titles):
self.texts = texts
self.titles = titles
def __getitem__(self, item):
return self.texts[item], self.titles[item]
def __len__(self):
return len(self.texts)
class SimpleVocabulary:
def __init__(self, all_words, max_vocab_size, pretrained_words=None):
helper_symbols = ["<PAD>", "<UNK>", "<EOS>"]
self.PAD_IDX = 0
self.UNK_IDX = 1
self.EOS_IDX = 2
counts = Counter(all_words)
print(f'Number of unique input words: {len(counts)}')
words = [w for w, c in counts.most_common(max_vocab_size)]
num_words_added = len(helper_symbols)
if pretrained_words is not None:
pretrained_words = set(pretrained_words).difference(set(words))
num_words_added += len(pretrained_words)
assert max_vocab_size >= num_words_added
words = words[:-num_words_added]
print(
f'SimpleVocabulary:\n'
f'{len(words)} words from input data,\n'
f'{len(helper_symbols)} helper words,\n'
f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,'
)
words = helper_symbols + words + (pretrained_words if pretrained_words is not None else [])
print(f'{len(words)} words total')
self.itos = words
self.stoi = {s: i for i, s in enumerate(self.itos)}
def encode(self, text):
return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX]
def __iter__(self):
return iter(self.itos)
def __len__(self):
return len(self.itos)
def encode_text(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = [tokenizer.encode(
txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts]
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
def encode_text_end(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = []
for txt in texts:
enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0)
enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]])
enc_texts.append(enc)
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
class CollateFnStart:
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
class CollateFnEnd:
""" takes end of text """
def __init__(self, tokenizer, max_len_src, max_len_tgt):
|
def __call__(self, batch):
return (
encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
def decode_text(tokenizer, vocab_ids):
return tokenizer.decode(
vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True)
def nltk_stem_sentence_rus(sentence):
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
stemmer = RussianStemmer()
russian_stopwords = set(stopwords.words("russian"))
tokens = word_tokenize(sentence, language='russian')
tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords]
stems = [stemmer.stem(t) for t in tokens]
return ' '.join(stems)
def lemmatize_sentence_rus(sentence):
from nltk.corpus import stopwords
from pymystem3 import Mystem
my_stem = Mystem()
russian_stopwords = set(stopwords.words("russian"))
lemmas = my_stem.lemmatize(sentence)
lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords]
return ' '.join(lemmas)
def lemmatize_sentences_rus(sentences):
"""much faster than call lemmatize_sentence_rus in cycle"""
split = 'fks2hwras1ma39hka766gbk'
chunk_size = 10000
def handle_chunk(sentences_chunk):
all_sents = (' ' + split + ' ').join(sentences_chunk)
all_lemmas = lemmatize_sentence_rus(all_sents).split()
chunk_res = [[]]
for lemma in all_lemmas:
if lemma == split:
chunk_res.append([])
else:
chunk_res[-1].append(lemma)
return chunk_res
res = []
i = 0
while i < len(sentences):
if len(sentences) > chunk_size:
print(f'Lemmatization: Done for {i} from {len(sentences)} sentences')
i_step = min(chunk_size, len(sentences) - i)
res.extend(handle_chunk(sentences[i:i + i_step]))
i += i_step
assert len(res) == len(sentences)
res = [' '.join(arr) for arr in res]
return res
def lemmatize_texts_rus(texts):
"""split each text to sentences and lemmatize them"""
sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts]
texts_lengths = [len(t) for t in sentenized]
sentences = [s for t in sentenized for s in t]
sentences_lemm = lemmatize_sentences_rus(sentences)
texts_lemm = []
pos = 0
for text_length in texts_lengths:
texts_lemm.append(sentences_lemm[pos:pos + text_length])
pos += text_length
assert pos == len(sentences)
assert len(sentenized) == len(texts_lemm)
assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm))
return texts_lemm, sentenized
def lemmatize_text_rus(text):
"""split text to sentences and lemmatize them"""
text_lemm, text_sent = lemmatize_texts_rus([text])
text_lemm, text_sent = text_lemm[0], text_sent[0]
return text_lemm, text_sent
def get_num_lines_in_file(file_path, *args, **kwargs):
with open(file_path, *args, **kwargs) as f:
return sum(1 for _ in f)
class ConsoleColors:
Map = {
'PINK': '\033[95m',
'BLUE': '\033[34m',
'YELLOW': '\033[93m',
'RED': '\033[31m',
'GREEN': '\033[92m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'ITALIC': '\ | self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt | identifier_body |
common.py | rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}"
DEVICE = None
def set_device(device):
global DEVICE
DEVICE = device
if torch.cuda.is_available():
print(torch.cuda.get_device_properties(0))
print('Using', DEVICE)
def get_device():
global DEVICE
return DEVICE
def set_seed(seed):
# note: there are another nuances for gpu and multi-gpu
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def clear_or_create_directory(dir_name):
""" ignoring all possible errors """
shutil.rmtree(dir_name, ignore_errors=True)
cntr = 0
while True:
try:
os.makedirs(dir_name, exist_ok=True)
return
except OSError:
if cntr < 10:
# some windows bug?
cntr += 1
from time import sleep
sleep(0.1 * cntr)
else:
raise
class SummarizationDataset(Dataset):
def __init__(self, texts, titles):
self.texts = texts
self.titles = titles
def __getitem__(self, item):
return self.texts[item], self.titles[item]
def __len__(self):
return len(self.texts)
class SimpleVocabulary:
def __init__(self, all_words, max_vocab_size, pretrained_words=None):
helper_symbols = ["<PAD>", "<UNK>", "<EOS>"]
self.PAD_IDX = 0
self.UNK_IDX = 1
self.EOS_IDX = 2
counts = Counter(all_words)
print(f'Number of unique input words: {len(counts)}')
words = [w for w, c in counts.most_common(max_vocab_size)]
num_words_added = len(helper_symbols)
if pretrained_words is not None:
pretrained_words = set(pretrained_words).difference(set(words))
num_words_added += len(pretrained_words)
assert max_vocab_size >= num_words_added
words = words[:-num_words_added]
print(
f'SimpleVocabulary:\n'
f'{len(words)} words from input data,\n'
f'{len(helper_symbols)} helper words,\n'
f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,'
)
words = helper_symbols + words + (pretrained_words if pretrained_words is not None else [])
print(f'{len(words)} words total')
self.itos = words
self.stoi = {s: i for i, s in enumerate(self.itos)}
def encode(self, text):
return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX]
def __iter__(self):
return iter(self.itos)
def __len__(self):
return len(self.itos)
def | (tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = [tokenizer.encode(
txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts]
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
def encode_text_end(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = []
for txt in texts:
enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0)
enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]])
enc_texts.append(enc)
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
class CollateFnStart:
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
class CollateFnEnd:
""" takes end of text """
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
def decode_text(tokenizer, vocab_ids):
return tokenizer.decode(
vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True)
def nltk_stem_sentence_rus(sentence):
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
stemmer = RussianStemmer()
russian_stopwords = set(stopwords.words("russian"))
tokens = word_tokenize(sentence, language='russian')
tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords]
stems = [stemmer.stem(t) for t in tokens]
return ' '.join(stems)
def lemmatize_sentence_rus(sentence):
from nltk.corpus import stopwords
from pymystem3 import Mystem
my_stem = Mystem()
russian_stopwords = set(stopwords.words("russian"))
lemmas = my_stem.lemmatize(sentence)
lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords]
return ' '.join(lemmas)
def lemmatize_sentences_rus(sentences):
"""much faster than call lemmatize_sentence_rus in cycle"""
split = 'fks2hwras1ma39hka766gbk'
chunk_size = 10000
def handle_chunk(sentences_chunk):
all_sents = (' ' + split + ' ').join(sentences_chunk)
all_lemmas = lemmatize_sentence_rus(all_sents).split()
chunk_res = [[]]
for lemma in all_lemmas:
if lemma == split:
chunk_res.append([])
else:
chunk_res[-1].append(lemma)
return chunk_res
res = []
i = 0
while i < len(sentences):
if len(sentences) > chunk_size:
print(f'Lemmatization: Done for {i} from {len(sentences)} sentences')
i_step = min(chunk_size, len(sentences) - i)
res.extend(handle_chunk(sentences[i:i + i_step]))
i += i_step
assert len(res) == len(sentences)
res = [' '.join(arr) for arr in res]
return res
def lemmatize_texts_rus(texts):
"""split each text to sentences and lemmatize them"""
sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts]
texts_lengths = [len(t) for t in sentenized]
sentences = [s for t in sentenized for s in t]
sentences_lemm = lemmatize_sentences_rus(sentences)
texts_lemm = []
pos = 0
for text_length in texts_lengths:
texts_lemm.append(sentences_lemm[pos:pos + text_length])
pos += text_length
assert pos == len(sentences)
assert len(sentenized) == len(texts_lemm)
assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm))
return texts_lemm, sentenized
def lemmatize_text_rus(text):
"""split text to sentences and lemmatize them"""
text_lemm, text_sent = lemmatize_texts_rus([text])
text_lemm, text_sent = text_lemm[0], text_sent[0]
return text_lemm, text_sent
def get_num_lines_in_file(file_path, *args, **kwargs):
with open(file_path, *args, **kwargs) as f:
return sum(1 for _ in f)
class ConsoleColors:
Map = {
'PINK': '\033[95m',
'BLUE': '\033[34m',
'YELLOW': '\033[93m',
'RED': '\033[31m',
'GREEN': '\033[92m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'ITALIC': | encode_text | identifier_name |
common.py |
for k1, d in res.items():
for k2 in d:
res[k1][k2] /= len(rouges)
return res
def str_rouge(rg):
return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}"
DEVICE = None
def set_device(device):
global DEVICE
DEVICE = device
if torch.cuda.is_available():
print(torch.cuda.get_device_properties(0))
print('Using', DEVICE)
def get_device():
global DEVICE
return DEVICE
def set_seed(seed):
# note: there are another nuances for gpu and multi-gpu
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def clear_or_create_directory(dir_name):
""" ignoring all possible errors """
shutil.rmtree(dir_name, ignore_errors=True)
cntr = 0
while True:
try:
os.makedirs(dir_name, exist_ok=True)
return
except OSError:
if cntr < 10:
# some windows bug?
cntr += 1
from time import sleep
sleep(0.1 * cntr)
else:
raise
class SummarizationDataset(Dataset):
def __init__(self, texts, titles):
self.texts = texts
self.titles = titles
def __getitem__(self, item):
return self.texts[item], self.titles[item]
def __len__(self):
return len(self.texts)
class SimpleVocabulary:
def __init__(self, all_words, max_vocab_size, pretrained_words=None):
helper_symbols = ["<PAD>", "<UNK>", "<EOS>"]
self.PAD_IDX = 0
self.UNK_IDX = 1
self.EOS_IDX = 2
counts = Counter(all_words)
print(f'Number of unique input words: {len(counts)}')
words = [w for w, c in counts.most_common(max_vocab_size)]
num_words_added = len(helper_symbols)
if pretrained_words is not None:
pretrained_words = set(pretrained_words).difference(set(words))
num_words_added += len(pretrained_words)
assert max_vocab_size >= num_words_added
words = words[:-num_words_added]
print(
f'SimpleVocabulary:\n'
f'{len(words)} words from input data,\n'
f'{len(helper_symbols)} helper words,\n'
f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,'
)
words = helper_symbols + words + (pretrained_words if pretrained_words is not None else [])
print(f'{len(words)} words total')
self.itos = words
self.stoi = {s: i for i, s in enumerate(self.itos)}
def encode(self, text):
return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX]
def __iter__(self):
return iter(self.itos)
def __len__(self):
return len(self.itos)
def encode_text(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = [tokenizer.encode(
txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts]
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
def encode_text_end(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = []
for txt in texts:
enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0)
enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]])
enc_texts.append(enc)
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
class CollateFnStart:
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
class CollateFnEnd:
""" takes end of text """
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
def decode_text(tokenizer, vocab_ids):
return tokenizer.decode(
vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True)
def nltk_stem_sentence_rus(sentence):
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
stemmer = RussianStemmer()
russian_stopwords = set(stopwords.words("russian"))
tokens = word_tokenize(sentence, language='russian')
tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords]
stems = [stemmer.stem(t) for t in tokens]
return ' '.join(stems)
def lemmatize_sentence_rus(sentence):
from nltk.corpus import stopwords
from pymystem3 import Mystem
my_stem = Mystem()
russian_stopwords = set(stopwords.words("russian"))
lemmas = my_stem.lemmatize(sentence)
lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords]
return ' '.join(lemmas)
def lemmatize_sentences_rus(sentences):
"""much faster than call lemmatize_sentence_rus in cycle"""
split = 'fks2hwras1ma39hka766gbk'
chunk_size = 10000
def handle_chunk(sentences_chunk):
all_sents = (' ' + split + ' ').join(sentences_chunk)
all_lemmas = lemmatize_sentence_rus(all_sents).split()
chunk_res = [[]]
for lemma in all_lemmas:
if lemma == split:
chunk_res.append([])
else:
chunk_res[-1].append(lemma)
return chunk_res
res = []
i = 0
while i < len(sentences):
if len(sentences) > chunk_size:
print(f'Lemmatization: Done for {i} from {len(sentences)} sentences')
i_step = min(chunk_size, len(sentences) - i)
res.extend(handle_chunk(sentences[i:i + i_step]))
i += i_step
assert len(res) == len(sentences)
res = [' '.join(arr) for arr in res]
return res
def lemmatize_texts_rus(texts):
"""split each text to sentences and lemmatize them"""
sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts]
texts_lengths = [len(t) for t in sentenized]
sentences = [s for t in sentenized for s in t]
sentences_lemm = lemmatize_sentences_rus(sentences)
texts_lemm = []
pos = 0
for text_length in texts_lengths:
texts_lemm.append(sentences_lemm[pos:pos + text_length])
pos += text_length
assert pos == len(sentences)
assert len(sentenized) == len(texts_lemm)
assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm))
return texts_lemm, sentenized
def lemmatize_text_rus(text):
"""split text to sentences and lemmatize them"""
text_lemm, text_sent = lemmatize_texts_rus([text])
text_lemm, text_sent = text_lemm[0], text_sent[0]
return text_lemm, text_sent
def get_num_lines_in_file(file_path, *args, **kwargs):
with open(file_path, *args, **kwargs) as f:
return sum(1 for _ in f)
class ConsoleColors:
Map = {
'PINK': '\03 | for k2 in d:
res[k1][k2] += item[k1][k2] | conditional_block |
|
common.py | def str_rouge(rg):
return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}"
DEVICE = None
def set_device(device):
global DEVICE
DEVICE = device
if torch.cuda.is_available():
print(torch.cuda.get_device_properties(0))
print('Using', DEVICE)
def get_device():
global DEVICE
return DEVICE
def set_seed(seed):
# note: there are another nuances for gpu and multi-gpu
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def clear_or_create_directory(dir_name):
""" ignoring all possible errors """
shutil.rmtree(dir_name, ignore_errors=True)
cntr = 0
while True:
try:
os.makedirs(dir_name, exist_ok=True)
return
except OSError:
if cntr < 10:
# some windows bug?
cntr += 1
from time import sleep
sleep(0.1 * cntr)
else:
raise
class SummarizationDataset(Dataset):
def __init__(self, texts, titles):
self.texts = texts
self.titles = titles
def __getitem__(self, item):
return self.texts[item], self.titles[item]
def __len__(self):
return len(self.texts)
class SimpleVocabulary:
def __init__(self, all_words, max_vocab_size, pretrained_words=None):
helper_symbols = ["<PAD>", "<UNK>", "<EOS>"]
self.PAD_IDX = 0
self.UNK_IDX = 1
self.EOS_IDX = 2
counts = Counter(all_words)
print(f'Number of unique input words: {len(counts)}')
words = [w for w, c in counts.most_common(max_vocab_size)]
num_words_added = len(helper_symbols)
if pretrained_words is not None:
pretrained_words = set(pretrained_words).difference(set(words))
num_words_added += len(pretrained_words)
assert max_vocab_size >= num_words_added
words = words[:-num_words_added]
print(
f'SimpleVocabulary:\n'
f'{len(words)} words from input data,\n'
f'{len(helper_symbols)} helper words,\n'
f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,'
)
words = helper_symbols + words + (pretrained_words if pretrained_words is not None else [])
print(f'{len(words)} words total')
self.itos = words
self.stoi = {s: i for i, s in enumerate(self.itos)}
def encode(self, text):
return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX]
def __iter__(self):
return iter(self.itos)
def __len__(self):
return len(self.itos)
def encode_text(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = [tokenizer.encode(
txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts]
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
def encode_text_end(tokenizer, texts, max_len=None):
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list)
if max_len is None:
max_len = 999999999
enc_texts = []
for txt in texts:
enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0)
enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]])
enc_texts.append(enc)
texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id)
return texts_batch
class CollateFnStart:
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
class CollateFnEnd:
""" takes end of text """
def __init__(self, tokenizer, max_len_src, max_len_tgt):
self.tokenizer = tokenizer
self.max_len_src = max_len_src
self.max_len_tgt = max_len_tgt
def __call__(self, batch):
return (
encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src),
encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt)
)
def decode_text(tokenizer, vocab_ids):
return tokenizer.decode(
vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True)
def nltk_stem_sentence_rus(sentence):
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
stemmer = RussianStemmer()
russian_stopwords = set(stopwords.words("russian"))
tokens = word_tokenize(sentence, language='russian')
tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords]
stems = [stemmer.stem(t) for t in tokens]
return ' '.join(stems)
def lemmatize_sentence_rus(sentence):
from nltk.corpus import stopwords
from pymystem3 import Mystem
my_stem = Mystem()
russian_stopwords = set(stopwords.words("russian"))
lemmas = my_stem.lemmatize(sentence)
lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords]
return ' '.join(lemmas)
def lemmatize_sentences_rus(sentences):
"""much faster than call lemmatize_sentence_rus in cycle"""
split = 'fks2hwras1ma39hka766gbk'
chunk_size = 10000
def handle_chunk(sentences_chunk):
all_sents = (' ' + split + ' ').join(sentences_chunk)
all_lemmas = lemmatize_sentence_rus(all_sents).split()
chunk_res = [[]]
for lemma in all_lemmas:
if lemma == split:
chunk_res.append([])
else:
chunk_res[-1].append(lemma)
return chunk_res
res = []
i = 0
while i < len(sentences):
if len(sentences) > chunk_size:
print(f'Lemmatization: Done for {i} from {len(sentences)} sentences')
i_step = min(chunk_size, len(sentences) - i)
res.extend(handle_chunk(sentences[i:i + i_step]))
i += i_step
assert len(res) == len(sentences)
res = [' '.join(arr) for arr in res]
return res
def lemmatize_texts_rus(texts):
"""split each text to sentences and lemmatize them"""
sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts]
texts_lengths = [len(t) for t in sentenized]
sentences = [s for t in sentenized for s in t]
sentences_lemm = lemmatize_sentences_rus(sentences)
texts_lemm = []
pos = 0
for text_length in texts_lengths:
texts_lemm.append(sentences_lemm[pos:pos + text_length])
pos += text_length
assert pos == len(sentences)
assert len(sentenized) == len(texts_lemm)
assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm))
return texts_lemm, sentenized
def lemmatize_text_rus(text):
"""split text to sentences and lemmatize them"""
text_lemm, text_sent = lemmatize_texts_rus([text])
text_lemm, text_sent = text_lemm[0], text_sent[0]
return text_lemm, text_sent
def get_num_lines_in_file(file_path, *args, **kwargs):
with open(file_path, *args, **kwargs) as f:
return sum(1 for _ in f)
class ConsoleColors:
Map = {
'PINK': '\033[95m',
'BLUE': '\033[34m',
'YELLOW': '\033[93m',
'RED': '\033[31m',
'GREEN': '\033[92m',
| random_line_split |
||
manager.py | in your current directory).",
"create-tables":
"Deletes the currently existing user and content tables and recreates them from DDL.",
"import-ratings":
"Run an express job to import movie ratings",
"import-user-info":
"Run an express job to import user information",
"import-movie-info":
"Run an express job to import movie information",
"train-item-item-cf":
"Calculate item-item similarities",
"register-freshener":
"Register freshener for scoring function",
"help-actions":
"Print this help",
}
jars = (
# 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar',
        # 'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar',
'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar',
# Needed on classpath for registering the scoring function and executing it.
'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar'
)
# This is the path from movie_advisor_home
ddls = (
'layout/src/main/resources/users.ddl',
'layout/src/main/resources/movies.ddl',
)
express_jar = 'express/target/express-1.0-SNAPSHOT.jar'
# assert set(actions_help.keys()) == set(possible_actions)
def _help_actions(self):
""" Print detailed information about how the different actions work """
actions_str = ""
for (key, value) in self.actions_help.items():
|
print(actions_str)
sys.exit(0)
def _setup_parser(self):
""" Add actions for the command-line arguments parser """
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \
"\n\t".join(self.possible_actions))
# TODO: Detailed help information that prints out all of the available actions and their
# assumptions
parser.add_argument(
"action",
nargs='*',
help="Action to take")
parser.add_argument(
'--bento-home',
help='Location of bento box',
default='kiji-bento-ebi')
parser.add_argument(
'--bento-tgz',
help='Bento TAR file name',
default='kiji-bento-ebi-2.0.2-release.tar.gz')
parser.add_argument(
'--movie-advisor-home',
help='Location of checkout of WibiData MovieAdvisor github repo',
default='movie-advisor')
# Set up dates for training, testing, etc.
parser.add_argument(
'--train-start-date',
default='2013-11-01')
parser.add_argument(
'--train-end-date',
default='2013-11-15')
parser.add_argument(
'--test-start-date',
default='2013-11-16')
parser.add_argument(
'--test-end-date',
default='2013-11-30')
parser.add_argument(
"--backtest-results-file",
default="backtest.txt")
parser.add_argument(
"--kill-bento",
action="store_true",
default=False,
help="Automatically kill existing BentoBox processes.")
parser.add_argument(
"--show-classpath",
action="store_true",
default=False,
help="Echo $KIJI_CLASSPATH and exit")
return parser
def _setup_environment_vars(self, opts):
""" Set up useful variables (would be environment vars outside of the script) """
# Check that these directories actually exist
assert os.path.isdir(opts.movie_advisor_home)
#if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)
self.movie_advisor_home = opts.movie_advisor_home
self.bento_home = opts.bento_home
self.bento_tgz = opts.bento_tgz
self.kiji_uri = "kiji://.env/tutorial"
# "express job" takes a jar file as an argument
assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))
# Set the classpath for all of the commands that we'll run
jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]
for jar in jarsFullPaths: assert os.path.isfile(jar)
classpath = ":".join(jarsFullPaths)
os.environ['KIJI_CLASSPATH'] = classpath
if opts.show_classpath:
print("export KIJI_CLASSPATH=%s" % classpath)
sys.exit(0)
def _parse_options(self, args):
""" Parse the command-line options and configure the script appropriately """
parser = self._setup_parser()
opts = parser.parse_args(args)
self.actions = opts.action
for action in self.actions:
assert action in self.possible_actions, "Action %s is not a known action for the script" % action
self.b_kill_bento = opts.kill_bento
if 'help-actions' in self.actions: self._help_actions()
self._setup_environment_vars(opts)
self.backtest_results_file = opts.backtest_results_file
def _exit_if_bento_still_running(self):
jps_results = run('jps')
if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento:
assert False, "Please kill all bento-related jobs (run 'jps' to get a list)"
# Kill all of the bento processes
for line in jps_results.splitlines():
toks = line.split()
if len(toks) == 1: continue
assert len(toks) == 2, toks
(pid, job) = toks
if job == 'Jps': continue
cmd = "kill -9 " + pid
run(cmd)
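# Note: the loop above kills every JVM that jps lists (except jps itself),
# not only BentoBox minicluster processes.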
def _do_action_bento_setup(self):
""" Install the BentoBox, install Kiji, etc. """
self._exit_if_bento_still_running()
cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format(
bento_dir=self.bento_home,
bento_tar=self.bento_tgz)
print(run(cmd))
for command_suffix in ["-env.sh", ""]:
kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix)
bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix)
if not os.path.isfile(kiji_env):
assert os.path.isfile(bento_env)
cmd = 'cp {bento_env} {kiji_env}'.format(
bento_env=bento_env,
kiji_env=kiji_env)
run(cmd)
cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format(
bento_dir=self.bento_home,
)
print(run(cmd))
assert os.path.isdir(self.bento_home)
def _run_express_job(self, class_name, options=""):
"""
Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying
dates, kiji table, etc.).
"""
cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}"
cmd = cmd.format(
bento_home=self.bento_home,
jar=os.path.join(self.movie_advisor_home, self.express_jar),
myclass=class_name,
kiji_uri=self.kiji_uri,
) + " " + options
print(run(cmd))
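# The composed command looks roughly like this (the class name here is a
# placeholder, not one of the real job classes):
#   source kiji-bento-ebi/bin/kiji-env.sh; express job \
#     movie-advisor/express/target/express-1.0-SNAPSHOT.jar \
#     org.kiji.examples.ImportRatings --kiji kiji://.env/tutorial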
def _run_kiji_job(self, cmd):
cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format(
bento_home=self.bento_home, cmd=cmd)
print(run(cmd))
def _scan_table(self, uri):
""" Scan this table and print out a couple of rows as a sanity check """
cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format(
kiji_uri=self.kiji_uri,
uri=uri)
self._run_kiji_job(cmd)
def _do_action_tables_create(self):
""" Run the schema shell to create the tables """
schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell")
assert os.path.isfile(schema_shell), schema_shell
# Delete the table first!
cmd = (
"kiji delete --target={kiji_uri} --interactive=false; " +
"kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri)
self._run_kiji_job(cmd)
for ddl in self.ddls:
ddl_full_path = os.path.join(self.movie_advisor_home, ddl)
assert os.path.isfile(ddl_full_path)
cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format(
schema_shell=schema_shell,
kiji_uri=self.kiji_uri,
ddl_full_path=ddl_full_path)
self._ | actions_str += "command: %s\n%s\n\n" % (key, value) | conditional_block |
manager.py |
class TimeInterval:
def __init__(self, start, end):
""" TODO: Check times formatting properly here """
self.start = start
self.end = end
class MovieAdvisorManager:
# Put these into a sensible order
possible_actions = [
'help-actions',
'install-bento',
'create-tables',
'import-ratings',
'import-user-info',
'import-movie-info',
'train-item-item-cf',
'register-freshener',
]
actions_help = {
"install-bento":
"Will set up a bento box for you. Assumes that you are in a directory with a tar.gz "
"file for the latest bento build. This command will rm -rf your current bento box "
"(which is also assumed to be in your current directory).",
"create-tables":
"Deletes the currently existing user and content tables and recreates them from DDL.",
"import-ratings":
"Run an express job to import movie ratings",
"import-user-info":
"Run an express job to import user information",
"import-movie-info":
"Run an express job to import movie information",
"train-item-item-cf":
"Calculate item-item similarities",
"register-freshener":
"Register freshener for scoring function",
"help-actions":
"Print this help",
}
jars = (
# 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar',
#'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar',
'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar',
# Needed on classpath for registering the scoring function and executing it.
'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar'
)
# This is the path from movie_advisor_home
ddls = (
'layout/src/main/resources/users.ddl',
'layout/src/main/resources/movies.ddl',
)
express_jar = 'express/target/express-1.0-SNAPSHOT.jar'
# assert set(actions_help.keys()) == set(possible_actions)
def _help_actions(self):
""" Print detailed information about how the different actions work """
actions_str = ""
for (key, value) in self.actions_help.items():
actions_str += "command: %s\n%s\n\n" % (key, value)
print(actions_str)
sys.exit(0)
def _setup_parser(self):
""" Add actions for the command-line arguments parser """
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \
"\n\t".join(self.possible_actions))
# TODO: Detailed help information that prints out all of the available actions and their
# assumptions
parser.add_argument(
"action",
nargs='*',
help="Action to take")
parser.add_argument(
'--bento-home',
help='Location of bento box',
default='kiji-bento-ebi')
parser.add_argument(
'--bento-tgz',
help='Bento TAR file name',
default='kiji-bento-ebi-2.0.2-release.tar.gz')
parser.add_argument(
'--movie-advisor-home',
help='Location of checkout of WibiData MovieAdvisor github repo',
default='movie-advisor')
# Set up dates for training, testing, etc.
parser.add_argument(
'--train-start-date',
default='2013-11-01')
parser.add_argument(
'--train-end-date',
default='2013-11-15')
parser.add_argument(
'--test-start-date',
default='2013-11-16')
parser.add_argument(
'--test-end-date',
default='2013-11-30')
parser.add_argument(
"--backtest-results-file",
default="backtest.txt")
parser.add_argument(
"--kill-bento",
action="store_true",
default=False,
help="Automatically kill existing BentoBox processes.")
parser.add_argument(
"--show-classpath",
action="store_true",
default=False,
help="Echo $KIJI_CLASSPATH and exit")
return parser
def _setup_environment_vars(self, opts):
""" Set up useful variables (would be environment vars outside of the script) """
# Check that these directories actually exist
assert os.path.isdir(opts.movie_advisor_home)
#if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)
self.movie_advisor_home = opts.movie_advisor_home
self.bento_home = opts.bento_home
self.bento_tgz = opts.bento_tgz
self.kiji_uri = "kiji://.env/tutorial"
# "express job" takes a jar file as an argument
assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))
# Set the classpath for all of the commands that we'll run
jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]
for jar in jarsFullPaths: assert os.path.isfile(jar)
classpath = ":".join(jarsFullPaths)
os.environ['KIJI_CLASSPATH'] = classpath
if opts.show_classpath:
print("export KIJI_CLASSPATH=%s" % classpath)
sys.exit(0)
def _parse_options(self, args):
""" Parse the command-line options and configure the script appropriately """
parser = self._setup_parser()
opts = parser.parse_args(args)
self.actions = opts.action
for action in self.actions:
assert action in self.possible_actions, "Action %s is not a known action for the script" % action
self.b_kill_bento = opts.kill_bento
if 'help-actions' in self.actions: self._help_actions()
self._setup_environment_vars(opts)
self.backtest_results_file = opts.backtest_results_file
def _exit_if_bento_still_running(self):
jps_results = run('jps')
if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento:
assert False, "Please kill all bento-related jobs (run 'jps' to get a list)"
# Kill all of the bento processes
for line in jps_results.splitlines():
toks = line.split()
if len(toks) == 1: continue
assert len(toks) == 2, toks
(pid, job) = toks
if job == 'Jps': continue
cmd = "kill -9 " + pid
run(cmd)
def _do_action_bento_setup(self):
""" Install the BentoBox, install Kiji, etc. """
self._exit_if_bento_still_running()
cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format(
bento_dir=self.bento_home,
bento_tar=self.bento_tgz)
print(run(cmd))
for command_suffix in ["-env.sh", ""]:
kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix)
bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix)
if not os.path.isfile(kiji_env):
assert os.path.isfile(bento_env)
cmd = 'cp {bento_env} {kiji_env}'.format(
bento_env=bento_env,
kiji_env=kiji_env)
run(cmd)
cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format(
bento_dir=self.bento_home,
)
print(run(cmd))
assert os.path.isdir(self.bento_home)
def _run_express_job(self, class_name, options=""):
"""
Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying
dates, kiji table, etc.).
"""
cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}"
cmd = cmd.format(
bento_home=self.bento_home,
jar=os.path.join(self.movie_advisor_home, self.express_jar),
myclass=class_name,
kiji_uri=self.kiji_uri,
) + " " + options
print(run(cmd))
def _run_kiji_job(self, cmd):
cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format(
bento_home=self.bento_home, cmd=cmd)
print(run(cmd))
def _scan_table(self, uri):
""" Scan this table and print out a couple of rows as a sanity check """
cmd = 'kiji | print(cmd)
try:
res = subprocess.check_output(cmd, shell=True).decode('utf-8')
except subprocess.CalledProcessError as cpe:
print("Error runn command " + cmd)
print("Output = " + cpe.output.decode('utf-8'))
raise cpe
return res | identifier_body |
|
manager.py | in your current directory).",
"create-tables":
"Deletes the currently existing user and content tables and recreates them from DDL.",
"import-ratings":
"Run an express job to import movie ratings",
"import-user-info":
"Run an express job to import user information",
"import-movie-info":
"Run an express job to import movie information",
"train-item-item-cf":
"Calculate item-item similarities",
"register-freshener":
"Register freshener for scoring function",
"help-actions":
"Print this help",
}
jars = (
# 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar',
#'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar',
'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar',
# Needed on classpath for registering the scoring function and executing it.
'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar'
)
# This is the path from movie_advisor_home
ddls = (
'layout/src/main/resources/users.ddl',
'layout/src/main/resources/movies.ddl',
)
express_jar = 'express/target/express-1.0-SNAPSHOT.jar'
# assert set(actions_help.keys()) == set(possible_actions)
def _help_actions(self):
""" Print detailed information about how the different actions work """
actions_str = ""
for (key, value) in self.actions_help.items():
actions_str += "command: %s\n%s\n\n" % (key, value)
print(actions_str)
sys.exit(0)
def _setup_parser(self):
""" Add actions for the command-line arguments parser """
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \
"\n\t".join(self.possible_actions))
# TODO: Detailed help information that prints out all of the available actions and their
# assumptions
parser.add_argument(
"action",
nargs='*',
help="Action to take")
parser.add_argument(
'--bento-home',
help='Location of bento box',
default='kiji-bento-ebi')
parser.add_argument(
'--bento-tgz',
help='Bento TAR file name',
default='kiji-bento-ebi-2.0.2-release.tar.gz')
parser.add_argument(
'--movie-advisor-home',
help='Location of checkout of WibiData MovieAdvisor github repo',
default='movie-advisor')
# Set up dates for training, testing, etc.
parser.add_argument(
'--train-start-date',
default='2013-11-01')
parser.add_argument(
'--train-end-date',
default='2013-11-15')
parser.add_argument(
'--test-start-date',
default='2013-11-16')
parser.add_argument(
'--test-end-date',
default='2013-11-30')
parser.add_argument(
"--backtest-results-file",
default="backtest.txt")
parser.add_argument(
"--kill-bento",
action="store_true",
default=False,
help="Automatically kill existing BentoBox processes.")
parser.add_argument(
"--show-classpath",
action="store_true",
default=False,
help="Echo $KIJI_CLASSPATH and exit")
return parser
def _setup_environment_vars(self, opts):
""" Set up useful variables (would be environment vars outside of the script) """
# Check that these directories actually exist
assert os.path.isdir(opts.movie_advisor_home)
#if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)
self.movie_advisor_home = opts.movie_advisor_home
self.bento_home = opts.bento_home
self.bento_tgz = opts.bento_tgz
self.kiji_uri = "kiji://.env/tutorial"
# "express job" takes a jar file as an argument
assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))
# Set the classpath for all of the commands that we'll run
jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]
for jar in jarsFullPaths: assert os.path.isfile(jar)
classpath = ":".join(jarsFullPaths)
os.environ['KIJI_CLASSPATH'] = classpath
if opts.show_classpath:
print("export KIJI_CLASSPATH=%s" % classpath)
sys.exit(0)
def _parse_options(self, args):
""" Parse the command-line options and configure the script appropriately """
parser = self._setup_parser()
opts = parser.parse_args(args)
self.actions = opts.action
for action in self.actions:
assert action in self.possible_actions, "Action %s is not a known action for the script" % action
self.b_kill_bento = opts.kill_bento
if 'help-actions' in self.actions: self._help_actions()
self._setup_environment_vars(opts)
self.backtest_results_file = opts.backtest_results_file
def _exit_if_bento_still_running(self):
jps_results = run('jps')
if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento:
assert False, "Please kill all bento-related jobs (run 'jps' to get a list)"
# Kill all of the bento processes
for line in jps_results.splitlines():
toks = line.split()
if len(toks) == 1: continue
assert len(toks) == 2, toks
(pid, job) = toks
if job == 'Jps': continue
cmd = "kill -9 " + pid
run(cmd)
def _do_action_bento_setup(self):
""" Install the BentoBox, install Kiji, etc. """
self._exit_if_bento_still_running()
cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format(
bento_dir=self.bento_home,
bento_tar=self.bento_tgz)
print(run(cmd))
for command_suffix in ["-env.sh", ""]:
kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix)
bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix)
if not os.path.isfile(kiji_env):
assert os.path.isfile(bento_env)
cmd = 'cp {bento_env} {kiji_env}'.format(
bento_env=bento_env,
kiji_env=kiji_env)
run(cmd)
cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format(
bento_dir=self.bento_home,
)
print(run(cmd))
assert os.path.isdir(self.bento_home)
def | (self, class_name, options=""):
"""
Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying
dates, kiji table, etc.).
"""
cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}"
cmd = cmd.format(
bento_home=self.bento_home,
jar=os.path.join(self.movie_advisor_home, self.express_jar),
myclass=class_name,
kiji_uri=self.kiji_uri,
) + " " + options
print(run(cmd))
def _run_kiji_job(self, cmd):
cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format(
bento_home=self.bento_home, cmd=cmd)
print(run(cmd))
def _scan_table(self, uri):
""" Scan this table and print out a couple of rows as a sanity check """
cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format(
kiji_uri=self.kiji_uri,
uri=uri)
self._run_kiji_job(cmd)
def _do_action_tables_create(self):
""" Run the schema shell to create the tables """
schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell")
assert os.path.isfile(schema_shell), schema_shell
# Delete the table first!
cmd = (
"kiji delete --target={kiji_uri} --interactive=false; " +
"kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri)
self._run_kiji_job(cmd)
for ddl in self.ddls:
ddl_full_path = os.path.join(self.movie_advisor_home, ddl)
assert os.path.isfile(ddl_full_path)
cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format(
schema_shell=schema_shell,
kiji_uri=self.kiji_uri,
ddl_full_path=ddl_full_path)
self._run | _run_express_job | identifier_name |
manager.py | in your current directory).",
"create-tables":
"Deletes the currently existing user and content tables and recreates them from DDL.",
"import-ratings":
"Run an express job to import movie ratings",
"import-user-info":
"Run an express job to import user information",
"import-movie-info":
"Run an express job to import movie information",
"train-item-item-cf":
"Calculate item-item similarities",
"register-freshener":
"Register freshener for scoring function",
"help-actions":
"Print this help",
}
jars = (
# 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar',
#'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar',
'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar',
# Needed on classpath for registering the scoring function and executing it.
'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar'
)
# This is the path from movie_advisor_home
ddls = (
'layout/src/main/resources/users.ddl',
'layout/src/main/resources/movies.ddl',
)
express_jar = 'express/target/express-1.0-SNAPSHOT.jar'
# assert set(actions_help.keys()) == set(possible_actions)
def _help_actions(self):
""" Print detailed information about how the different actions work """
actions_str = ""
for (key, value) in self.actions_help.items():
actions_str += "command: %s\n%s\n\n" % (key, value)
print(actions_str)
sys.exit(0)
def _setup_parser(self):
""" Add actions for the command-line arguments parser """
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \
"\n\t".join(self.possible_actions))
# TODO: Detailed help information that prints out all of the available actions and their
# assumptions
parser.add_argument(
"action",
nargs='*',
help="Action to take")
parser.add_argument(
'--bento-home',
help='Location of bento box',
default='kiji-bento-ebi')
parser.add_argument(
'--bento-tgz',
help='Bento TAR file name',
default='kiji-bento-ebi-2.0.2-release.tar.gz')
parser.add_argument(
'--movie-advisor-home',
help='Location of checkout of WibiData MovieAdvisor github repo',
default='movie-advisor')
# Set up dates for training, testing, etc.
parser.add_argument(
'--train-start-date',
default='2013-11-01')
parser.add_argument(
'--train-end-date',
default='2013-11-15')
parser.add_argument(
'--test-start-date',
default='2013-11-16')
parser.add_argument(
'--test-end-date',
default='2013-11-30')
parser.add_argument(
"--backtest-results-file",
default="backtest.txt")
parser.add_argument(
"--kill-bento",
action="store_true",
default=False,
help="Automatically kill existing BentoBox processes.")
parser.add_argument(
"--show-classpath",
action="store_true",
default=False,
help="Echo $KIJI_CLASSPATH and exit")
return parser
def _setup_environment_vars(self, opts):
""" Set up useful variables (would be environment vars outside of the script) """ | #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)
self.movie_advisor_home = opts.movie_advisor_home
self.bento_home = opts.bento_home
self.bento_tgz = opts.bento_tgz
self.kiji_uri = "kiji://.env/tutorial"
# "express job" takes a jar file as an argument
assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))
# Set the classpath for all of the commands that we'll run
jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]
for jar in jarsFullPaths: assert os.path.isfile(jar)
classpath = ":".join(jarsFullPaths)
os.environ['KIJI_CLASSPATH'] = classpath
if opts.show_classpath:
print("export KIJI_CLASSPATH=%s" % classpath)
sys.exit(0)
def _parse_options(self, args):
""" Parse the command-line options and configure the script appropriately """
parser = self._setup_parser()
opts = parser.parse_args(args)
self.actions = opts.action
for action in self.actions:
assert action in self.possible_actions, "Action %s is not a known action for the script" % action
self.b_kill_bento = opts.kill_bento
if 'help-actions' in self.actions: self._help_actions()
self._setup_environment_vars(opts)
self.backtest_results_file = opts.backtest_results_file
def _exit_if_bento_still_running(self):
jps_results = run('jps')
if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento:
assert False, "Please kill all bento-related jobs (run 'jps' to get a list)"
# Kill all of the bento processes
for line in jps_results.splitlines():
toks = line.split()
if len(toks) == 1: continue
assert len(toks) == 2, toks
(pid, job) = toks
if job == 'Jps': continue
cmd = "kill -9 " + pid
run(cmd)
def _do_action_bento_setup(self):
""" Install the BentoBox, install Kiji, etc. """
self._exit_if_bento_still_running()
cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format(
bento_dir=self.bento_home,
bento_tar=self.bento_tgz)
print(run(cmd))
for command_suffix in ["-env.sh", ""]:
kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix)
bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix)
if not os.path.isfile(kiji_env):
assert os.path.isfile(bento_env)
cmd = 'cp {bento_env} {kiji_env}'.format(
bento_env=bento_env,
kiji_env=kiji_env)
run(cmd)
cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format(
bento_dir=self.bento_home,
)
print(run(cmd))
assert os.path.isdir(self.bento_home)
def _run_express_job(self, class_name, options=""):
"""
Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying
dates, kiji table, etc.).
"""
cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}"
cmd = cmd.format(
bento_home=self.bento_home,
jar=os.path.join(self.movie_advisor_home, self.express_jar),
myclass=class_name,
kiji_uri=self.kiji_uri,
) + " " + options
print(run(cmd))
def _run_kiji_job(self, cmd):
cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format(
bento_home=self.bento_home, cmd=cmd)
print(run(cmd))
def _scan_table(self, uri):
""" Scan this table and print out a couple of rows as a sanity check """
cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format(
kiji_uri=self.kiji_uri,
uri=uri)
self._run_kiji_job(cmd)
def _do_action_tables_create(self):
""" Run the schema shell to create the tables """
schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell")
assert os.path.isfile(schema_shell), schema_shell
# Delete the table first!
cmd = (
"kiji delete --target={kiji_uri} --interactive=false; " +
"kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri)
self._run_kiji_job(cmd)
for ddl in self.ddls:
ddl_full_path = os.path.join(self.movie_advisor_home, ddl)
assert os.path.isfile(ddl_full_path)
cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format(
schema_shell=schema_shell,
kiji_uri=self.kiji_uri,
ddl_full_path=ddl_full_path)
self._run | # Check that these directories actually exist
assert os.path.isdir(opts.movie_advisor_home)
| random_line_split |
reader_test.go | /4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB
AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F
oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/
6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG
HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7
qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC
kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz
gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H
VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk
r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU
73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo=
-----END RSA PRIVATE KEY-----
`
PublicKeyError = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia
v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc
XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne
5vbA+63vRCnrc8QuYwIDAQAC
-----END PUBLIC KEY-----`
)
func MakeRootfsImageArtifact(version int, signed bool,
hasScripts bool) (io.Reader, error) {
upd, err := MakeFakeUpdate(TestUpdateFileContent)
if err != nil {
return nil, err
}
defer os.Remove(upd)
comp := artifact.NewCompressorGzip()
art := bytes.NewBuffer(nil)
var aw *awriter.Writer
if !signed {
aw = awriter.NewWriter(art, comp)
} else {
s := artifact.NewSigner([]byte(PrivateKey))
aw = awriter.NewWriterSigned(art, comp, s)
}
var u handlers.Composer
switch version {
case 1:
u = handlers.NewRootfsV1(upd, comp)
case 2:
u = handlers.NewRootfsV2(upd, comp)
}
scr := artifact.Scripts{}
if hasScripts {
s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_")
if err != nil {
return nil, err
}
defer os.Remove(s.Name())
if _, err = io.WriteString(s, "execute me!"); err != nil {
return nil, err
}
if err := scr.Add(s.Name()); err != nil {
return nil, err
}
}
updates := &awriter.Updates{U: []handlers.Composer{u}}
err = aw.WriteArtifact("mender", version, []string{"vexpress"},
"mender-1.1", updates, &scr)
if err != nil {
return nil, err
}
return art, nil
}
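// Read-side sketch for the artifact produced above, using the same API this
// test file exercises (error handling elided):
//   r := NewReader(art)
//   _ = r.RegisterHandler(handlers.NewRootfsInstaller())
//   err := r.ReadArtifact() // parses the artifact and dispatches updates to handlers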
func TestReadArtifact(t *testing.T) {
updFileContent := bytes.NewBuffer(nil)
copy := func(r io.Reader, f *handlers.DataFile) error {
_, err := io.Copy(updFileContent, r)
return err
}
rfh := handlers.NewRootfsInstaller()
rfh.InstallHandler = copy
tc := []struct {
version int
signed bool
handler handlers.Installer
verifier artifact.Verifier
readError error
}{
{1, false, rfh, nil, nil},
{2, false, rfh, nil, nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)),
errors.New("reader: invalid signature: crypto/rsa: verification error")},
// test that we do not need a verifier for signed artifact
{2, true, rfh, nil, nil},
}
// first create an archive that we will be able to read
for _, test := range tc {
art, err := MakeRootfsImageArtifact(test.version, test.signed, false)
assert.NoError(t, err)
aReader := NewReader(art)
if test.handler != nil {
aReader.RegisterHandler(test.handler)
}
if test.verifier != nil {
aReader.VerifySignatureCallback = test.verifier.Verify
}
err = aReader.ReadArtifact()
if test.readError != nil {
assert.Equal(t, test.readError.Error(), err.Error())
continue
}
assert.NoError(t, err)
assert.Equal(t, TestUpdateFileContent, updFileContent.String())
devComp := aReader.GetCompatibleDevices()
assert.Len(t, devComp, 1)
assert.Equal(t, "vexpress", devComp[0])
if test.handler != nil {
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType())
}
assert.Equal(t, "mender-1.1", aReader.GetArtifactName())
// clean the buffer
updFileContent.Reset()
}
}
func TestReadSigned(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader := NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: verify signature callback not registered")
art, err = MakeRootfsImageArtifact(2, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact, but no signature file found")
art, err = MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader = NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
art, err = MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact")
}
func TestRegisterMultipleHandlers(t *testing.T) {
aReader := NewReader(nil)
err := aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.NoError(t, err)
err = aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.Error(t, err)
err = aReader.RegisterHandler(nil)
assert.Error(t, err)
}
func TestReadNoHandler(t *testing.T) {
art, err := MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader := NewReader(art) | err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType())
}
func TestReadBroken(t *testing.T) {
broken := []byte("this is broken artifact")
buf := bytes.NewBuffer(broken)
aReader := NewReader(buf)
err := aReader.ReadArtifact()
assert.Error(t, err)
aReader = NewReader(nil)
err = aReader.ReadArtifact()
assert.Error(t, err)
}
func TestReadWithScripts(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, false, true)
assert.NoError(t, err)
aReader := New | random_line_split |
|
reader_test.go | C
kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz
gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H
VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk
r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU
73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo=
-----END RSA PRIVATE KEY-----
`
PublicKeyError = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia
v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc
XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne
5vbA+63vRCnrc8QuYwIDAQAC
-----END PUBLIC KEY-----`
)
func MakeRootfsImageArtifact(version int, signed bool,
hasScripts bool) (io.Reader, error) {
upd, err := MakeFakeUpdate(TestUpdateFileContent)
if err != nil {
return nil, err
}
defer os.Remove(upd)
comp := artifact.NewCompressorGzip()
art := bytes.NewBuffer(nil)
var aw *awriter.Writer
if !signed {
aw = awriter.NewWriter(art, comp)
} else {
s := artifact.NewSigner([]byte(PrivateKey))
aw = awriter.NewWriterSigned(art, comp, s)
}
var u handlers.Composer
switch version {
case 1:
u = handlers.NewRootfsV1(upd, comp)
case 2:
u = handlers.NewRootfsV2(upd, comp)
}
scr := artifact.Scripts{}
if hasScripts {
s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_")
if err != nil {
return nil, err
}
defer os.Remove(s.Name())
if _, err = io.WriteString(s, "execute me!"); err != nil {
return nil, err
}
if err := scr.Add(s.Name()); err != nil {
return nil, err
}
}
updates := &awriter.Updates{U: []handlers.Composer{u}}
err = aw.WriteArtifact("mender", version, []string{"vexpress"},
"mender-1.1", updates, &scr)
if err != nil {
return nil, err
}
return art, nil
}
func TestReadArtifact(t *testing.T) {
updFileContent := bytes.NewBuffer(nil)
copy := func(r io.Reader, f *handlers.DataFile) error {
_, err := io.Copy(updFileContent, r)
return err
}
rfh := handlers.NewRootfsInstaller()
rfh.InstallHandler = copy
tc := []struct {
version int
signed bool
handler handlers.Installer
verifier artifact.Verifier
readError error
}{
{1, false, rfh, nil, nil},
{2, false, rfh, nil, nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)),
errors.New("reader: invalid signature: crypto/rsa: verification error")},
// test that we do not need a verifier for signed artifact
{2, true, rfh, nil, nil},
}
// first create an archive that we will be able to read
for _, test := range tc {
art, err := MakeRootfsImageArtifact(test.version, test.signed, false)
assert.NoError(t, err)
aReader := NewReader(art)
if test.handler != nil {
aReader.RegisterHandler(test.handler)
}
if test.verifier != nil {
aReader.VerifySignatureCallback = test.verifier.Verify
}
err = aReader.ReadArtifact()
if test.readError != nil {
assert.Equal(t, test.readError.Error(), err.Error())
continue
}
assert.NoError(t, err)
assert.Equal(t, TestUpdateFileContent, updFileContent.String())
devComp := aReader.GetCompatibleDevices()
assert.Len(t, devComp, 1)
assert.Equal(t, "vexpress", devComp[0])
if test.handler != nil {
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType())
}
assert.Equal(t, "mender-1.1", aReader.GetArtifactName())
// clean the buffer
updFileContent.Reset()
}
}
func TestReadSigned(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader := NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: verify signature callback not registered")
art, err = MakeRootfsImageArtifact(2, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact, but no signature file found")
art, err = MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader = NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
art, err = MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact")
}
func TestRegisterMultipleHandlers(t *testing.T) {
aReader := NewReader(nil)
err := aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.NoError(t, err)
err = aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.Error(t, err)
err = aReader.RegisterHandler(nil)
assert.Error(t, err)
}
func TestReadNoHandler(t *testing.T) {
art, err := MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader := NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType())
}
func TestReadBroken(t *testing.T) {
broken := []byte("this is broken artifact")
buf := bytes.NewBuffer(broken)
aReader := NewReader(buf)
err := aReader.ReadArtifact()
assert.Error(t, err)
aReader = NewReader(nil)
err = aReader.ReadArtifact()
assert.Error(t, err)
}
func TestReadWithScripts(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, false, true)
assert.NoError(t, err)
aReader := NewReader(art)
noExec := 0
aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error {
noExec++
assert.Contains(t, info.Name(), "ArtifactInstall_Enter_10_")
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, r)
assert.NoError(t, err)
assert.Equal(t, "execute me!", buf.String())
return nil
}
err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Equal(t, 1, noExec)
}
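// MakeFakeUpdate writes data to a temp file and returns its path; callers are
// responsible for removing the file (see the defer in MakeRootfsImageArtifact).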
func MakeFakeUpdate(data string) (string, error) {
f, err := ioutil.TempFile("", "test_update")
if err != nil {
return "", err
}
defer f.Close()
if len(data) > 0 {
if _, err := f.WriteString(data); err != nil {
return "", err
}
}
return f.Name(), nil
}
type installer struct {
Data *handlers.DataFile
}
func (i *installer) GetUpdateFiles() [](*handlers.DataFile) {
return [](*handlers.DataFile){i.Data}
}
func (i *installer) GetType() string {
return ""
}
func (i *installer) Copy() handlers.Installer {
return i
}
func (i *installer) ReadHeader(r io.Reader, path string) error | {
return nil
} | identifier_body |
|
reader_test.go | PAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC
kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz
gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H
VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk
r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU
73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo=
-----END RSA PRIVATE KEY-----
`
PublicKeyError = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia
v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc
XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne
5vbA+63vRCnrc8QuYwIDAQAC
-----END PUBLIC KEY-----`
)
func MakeRootfsImageArtifact(version int, signed bool,
hasScripts bool) (io.Reader, error) {
upd, err := MakeFakeUpdate(TestUpdateFileContent)
if err != nil {
return nil, err
}
defer os.Remove(upd)
comp := artifact.NewCompressorGzip()
art := bytes.NewBuffer(nil)
var aw *awriter.Writer
if !signed {
aw = awriter.NewWriter(art, comp)
} else {
s := artifact.NewSigner([]byte(PrivateKey))
aw = awriter.NewWriterSigned(art, comp, s)
}
var u handlers.Composer
switch version {
case 1:
u = handlers.NewRootfsV1(upd, comp)
case 2:
u = handlers.NewRootfsV2(upd, comp)
}
scr := artifact.Scripts{}
if hasScripts {
s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_")
if err != nil {
return nil, err
}
defer os.Remove(s.Name())
if _, err = io.WriteString(s, "execute me!"); err != nil {
return nil, err
}
if err := scr.Add(s.Name()); err != nil {
return nil, err
}
}
updates := &awriter.Updates{U: []handlers.Composer{u}}
err = aw.WriteArtifact("mender", version, []string{"vexpress"},
"mender-1.1", updates, &scr)
if err != nil {
return nil, err
}
return art, nil
}
func TestReadArtifact(t *testing.T) {
updFileContent := bytes.NewBuffer(nil)
copy := func(r io.Reader, f *handlers.DataFile) error {
_, err := io.Copy(updFileContent, r)
return err
}
rfh := handlers.NewRootfsInstaller()
rfh.InstallHandler = copy
tc := []struct {
version int
signed bool
handler handlers.Installer
verifier artifact.Verifier
readError error
}{
{1, false, rfh, nil, nil},
{2, false, rfh, nil, nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)),
errors.New("reader: invalid signature: crypto/rsa: verification error")},
// test that we do not need a verifier for signed artifact
{2, true, rfh, nil, nil},
}
// first create an archive that we will be able to read
for _, test := range tc {
art, err := MakeRootfsImageArtifact(test.version, test.signed, false)
assert.NoError(t, err)
aReader := NewReader(art)
if test.handler != nil {
aReader.RegisterHandler(test.handler)
}
if test.verifier != nil {
aReader.VerifySignatureCallback = test.verifier.Verify
}
err = aReader.ReadArtifact()
if test.readError != nil {
assert.Equal(t, test.readError.Error(), err.Error())
continue
}
assert.NoError(t, err)
assert.Equal(t, TestUpdateFileContent, updFileContent.String())
devComp := aReader.GetCompatibleDevices()
assert.Len(t, devComp, 1)
assert.Equal(t, "vexpress", devComp[0])
if test.handler != nil {
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType())
}
assert.Equal(t, "mender-1.1", aReader.GetArtifactName())
// clean the buffer
updFileContent.Reset()
}
}
func TestReadSigned(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader := NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: verify signature callback not registered")
art, err = MakeRootfsImageArtifact(2, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact, but no signature file found")
art, err = MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader = NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
art, err = MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact")
}
func TestRegisterMultipleHandlers(t *testing.T) {
aReader := NewReader(nil)
err := aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.NoError(t, err)
err = aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.Error(t, err)
err = aReader.RegisterHandler(nil)
assert.Error(t, err)
}
func TestReadNoHandler(t *testing.T) {
art, err := MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader := NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType())
}
func TestReadBroken(t *testing.T) {
broken := []byte("this is broken artifact")
buf := bytes.NewBuffer(broken)
aReader := NewReader(buf)
err := aReader.ReadArtifact()
assert.Error(t, err)
aReader = NewReader(nil)
err = aReader.ReadArtifact()
assert.Error(t, err)
}
func TestReadWithScripts(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, false, true)
assert.NoError(t, err)
aReader := NewReader(art)
noExec := 0
aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error {
noExec++
assert.Contains(t, info.Name(), "ArtifactInstall_Enter_10_")
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, r)
assert.NoError(t, err)
assert.Equal(t, "execute me!", buf.String())
return nil
}
err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Equal(t, 1, noExec)
}
func MakeFakeUpdate(data string) (string, error) {
f, err := ioutil.TempFile("", "test_update")
if err != nil {
return "", err
}
defer f.Close()
if len(data) > 0 {
if _, err := f.WriteString(data); err != nil {
return "", err
}
}
return f.Name(), nil
}
type installer struct {
Data *handlers.DataFile
}
func (i *installer) GetUpdateFiles() [](*handlers.DataFile) {
return [](*handlers.DataFile){i.Data}
}
func (i *installer) | GetType | identifier_name |
|
reader_test.go | 4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB
AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F
oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/
6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG
HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7
qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC
kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz
gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H
VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk
r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU
73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo=
-----END RSA PRIVATE KEY-----
`
PublicKeyError = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia
v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc
XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne
5vbA+63vRCnrc8QuYwIDAQAC
-----END PUBLIC KEY-----`
)
func MakeRootfsImageArtifact(version int, signed bool,
hasScripts bool) (io.Reader, error) {
upd, err := MakeFakeUpdate(TestUpdateFileContent)
if err != nil {
return nil, err
}
defer os.Remove(upd)
comp := artifact.NewCompressorGzip()
art := bytes.NewBuffer(nil)
var aw *awriter.Writer
if !signed {
aw = awriter.NewWriter(art, comp)
} else {
s := artifact.NewSigner([]byte(PrivateKey))
aw = awriter.NewWriterSigned(art, comp, s)
}
var u handlers.Composer
switch version {
case 1:
u = handlers.NewRootfsV1(upd, comp)
case 2:
u = handlers.NewRootfsV2(upd, comp)
}
scr := artifact.Scripts{}
if hasScripts {
s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_")
if err != nil {
return nil, err
}
defer os.Remove(s.Name())
if _, err = io.WriteString(s, "execute me!"); err != nil {
return nil, err
}
if err := scr.Add(s.Name()); err != nil {
return nil, err
}
}
updates := &awriter.Updates{U: []handlers.Composer{u}}
err = aw.WriteArtifact("mender", version, []string{"vexpress"},
"mender-1.1", updates, &scr)
if err != nil {
return nil, err
}
return art, nil
}
func TestReadArtifact(t *testing.T) {
updFileContent := bytes.NewBuffer(nil)
copy := func(r io.Reader, f *handlers.DataFile) error {
_, err := io.Copy(updFileContent, r)
return err
}
rfh := handlers.NewRootfsInstaller()
rfh.InstallHandler = copy
tc := []struct {
version int
signed bool
handler handlers.Installer
verifier artifact.Verifier
readError error
}{
{1, false, rfh, nil, nil},
{2, false, rfh, nil, nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil},
{2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)),
errors.New("reader: invalid signature: crypto/rsa: verification error")},
// test that we do not need a verifier for signed artifact
{2, true, rfh, nil, nil},
}
// first create an archive that we will be able to read
for _, test := range tc {
art, err := MakeRootfsImageArtifact(test.version, test.signed, false)
assert.NoError(t, err)
aReader := NewReader(art)
if test.handler != nil {
aReader.RegisterHandler(test.handler)
}
if test.verifier != nil {
aReader.VerifySignatureCallback = test.verifier.Verify
}
err = aReader.ReadArtifact()
if test.readError != nil |
assert.NoError(t, err)
assert.Equal(t, TestUpdateFileContent, updFileContent.String())
devComp := aReader.GetCompatibleDevices()
assert.Len(t, devComp, 1)
assert.Equal(t, "vexpress", devComp[0])
if test.handler != nil {
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType())
}
assert.Equal(t, "mender-1.1", aReader.GetArtifactName())
// clean the buffer
updFileContent.Reset()
}
}
func TestReadSigned(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader := NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: verify signature callback not registered")
art, err = MakeRootfsImageArtifact(2, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact, but no signature file found")
art, err = MakeRootfsImageArtifact(2, true, false)
assert.NoError(t, err)
aReader = NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
art, err = MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader = NewReaderSigned(art)
err = aReader.ReadArtifact()
assert.Error(t, err)
assert.Contains(t, err.Error(),
"reader: expecting signed artifact")
}
func TestRegisterMultipleHandlers(t *testing.T) {
aReader := NewReader(nil)
err := aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.NoError(t, err)
err = aReader.RegisterHandler(handlers.NewRootfsInstaller())
assert.Error(t, err)
err = aReader.RegisterHandler(nil)
assert.Error(t, err)
}
func TestReadNoHandler(t *testing.T) {
art, err := MakeRootfsImageArtifact(1, false, false)
assert.NoError(t, err)
aReader := NewReader(art)
err = aReader.ReadArtifact()
assert.NoError(t, err)
assert.Len(t, aReader.GetHandlers(), 1)
assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType())
}
func TestReadBroken(t *testing.T) {
broken := []byte("this is broken artifact")
buf := bytes.NewBuffer(broken)
aReader := NewReader(buf)
err := aReader.ReadArtifact()
assert.Error(t, err)
aReader = NewReader(nil)
err = aReader.ReadArtifact()
assert.Error(t, err)
}
func TestReadWithScripts(t *testing.T) {
art, err := MakeRootfsImageArtifact(2, false, true)
assert.NoError(t, err)
aReader := | {
assert.Equal(t, test.readError.Error(), err.Error())
continue
} | conditional_block |
analysisPlots_TWZ_nLep.py | Level, logFile = None)
if args.small: args.plot_directory += "_small"
if args.noData: args.plot_directory += "_noData"
if args.normalize: args.plot_directory += "_normalize"
#
# Make samples, will be searched for in the postProcessing directory
#
if args.year == 2016:
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/"
signals = []
#
# Text on the plots
#
def drawObjects( plotData, dataMCScale, lumi_scale ):
tex = ROOT.TLatex()
tex.SetNDC()
tex.SetTextSize(0.04)
tex.SetTextAlign(11) # align left (ROOT code 11 = left/bottom)
lines = [
(0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'),
(0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
]
return [tex.DrawLatex(*l) for l in lines]
scaling = { i+1:0 for i in range(len(signals)) }
def drawPlots(plots, mode, dataMCScale):
for log in [False, True]:
plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection)
for plot in plots:
if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
if not args.noData:
if mode == "all": plot.histos[1][0].legendText = "Data"
if mode == "SF": plot.histos[1][0].legendText = "Data (SF)"
extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png']
plotting.draw(plot,
plot_directory = plot_directory_,
extensions = extensions_,
ratio = {'yRange':(0.1,1.9)} if not args.noData else None,
logX = False, logY = log, sorting = True,
yRange = (0.03, "auto") if log else (0.001, "auto"),
scaling = scaling if args.normalize else {},
legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ),
copyIndexPHP = True,
)
def getLeptonSelection( mode ):
# if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1"
if mode =="nLep": return "(1)"
#
# Read variables and sequences
#
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I",
"lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I",
"Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F",
"Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I",
]
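# Branch descriptors follow the RootTools "name/type" convention (F = float,
# I = int); bracketed entries such as "lep[pt/F,...]" declare per-object
# collections that are read out as flat arrays.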
sequence = []
def getLooseLeptonMult( event, sample ):
leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))]
lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ]
event.nLepLoose = len(lepLoose)
sequence.append( getLooseLeptonMult )
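# Functions appended to `sequence` are evaluated once per event by the plotting
# framework's event loop, so derived quantities such as event.nLepLoose are
# available when the plots are filled.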
#
# Loop over channels
#
yields = {}
allPlots = {}
allModes = ['nLep']
for index, mode in enumerate(allModes):
yields[mode] = {}
logger.info("Working on mode %s", mode)
if not args.noData:
data_sample = Run2016 if args.year == 2016 else Run2017
data_sample.texName = "data"
data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)])
data_sample.name = "data"
data_sample.read_variables = ["evt/I","run/I"]
data_sample.style = styles.errorStyle(ROOT.kBlack)
lumi_scale = data_sample.lumi/1000
if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0
weight_ = lambda event, sample: event.weight
lumi_scale = 300
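# NB: this hard-coded value overrides both the data-driven luminosity and the
# year-based default set above, presumably for a 300/fb projection study.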
TTZ_mc = TTZtoLLNuNu
if args.year == 2016:
mc = [ yt_TWW ]
# mc = [ dim6top_TTW ]
# mc = [ TWZ ]
# mc = [ yt_TWZ ]
# mc = [ yt_TWZ_filter, yt_TWZ ]
# mc = [ yt_TWZ_filter ]
for sample in mc: sample.style = styles.fillStyle(sample.color)
for sample in mc + signals:
sample.scale = lumi_scale
#sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F']
sample.weight = lambda event, sample: 1
# if args.year == 2016:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# else:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# tr = triggerSelector(args.year)
sample.setSelectionString(getLeptonSelection(mode))
#sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")])
if not args.noData:
stack = Stack(mc, data_sample)
else:
| stack = Stack(mc) | conditional_block |
|
analysisPlots_TWZ_nLep.py | # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions
argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" )
argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" )
args = argParser.parse_args()
# PU reweighting on the fly
from TopEFT.Tools.puProfileCache import puProfile
from TopEFT.Tools.puReweighting import getReweightingFunction
from TopEFT.samples.helpers import fromHeppySample
#
# Logger
#
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
if args.small: args.plot_directory += "_small"
if args.noData: args.plot_directory += "_noData"
if args.normalize: args.plot_directory += "_normalize"
#
# Make samples, will be searched for in the postProcessing directory
#
if args.year == 2016:
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/"
signals = []
#
# Text on the plots
#
def | ( plotData, dataMCScale, lumi_scale ):
tex = ROOT.TLatex()
tex.SetNDC()
tex.SetTextSize(0.04)
tex.SetTextAlign(11) # align left, bottom
lines = [
(0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'),
(0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
]
return [tex.DrawLatex(*l) for l in lines]
scaling = { i+1:0 for i in range(len(signals)) }
def drawPlots(plots, mode, dataMCScale):
for log in [False, True]:
plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection)
for plot in plots:
if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
if not args.noData:
if mode == "all": plot.histos[1][0].legendText = "Data"
if mode == "SF": plot.histos[1][0].legendText = "Data (SF)"
extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png']
plotting.draw(plot,
plot_directory = plot_directory_,
extensions = extensions_,
ratio = {'yRange':(0.1,1.9)} if not args.noData else None,
logX = False, logY = log, sorting = True,
yRange = (0.03, "auto") if log else (0.001, "auto"),
scaling = scaling if args.normalize else {},
legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ),
copyIndexPHP = True,
)
def getLeptonSelection( mode ):
# if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1"
if mode == "nLep": return "(1)"
#
# Read variables and sequences
#
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I",
"lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I",
"Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F",
"Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I",
]
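# A note on the branch specs above (RootTools convention; hedged summary): a
# trailing "/F" reads a float leaf and "/I" an int leaf, while "coll[a/F,b/I]"
# declares a flat vector branch whose per-object fields can be unpacked, e.g.
#   jets = [getObjDict(event, 'jet_', ['pt','eta'], i) for i in range(event.njet)]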
sequence = []
def getLooseLeptonMult( event, sample ):
leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))]
lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ]
event.nLepLoose = len(lepLoose)
sequence.append( getLooseLeptonMult )
#
# Loop over channels
#
yields = {}
allPlots = {}
allModes = ['nLep']
for index, mode in enumerate(allModes):
yields[mode] = {}
logger.info("Working on mode %s", mode)
if not args.noData:
data_sample = Run2016 if args.year == 2016 else Run2017
data_sample.texName = "data"
data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)])
data_sample.name = "data"
data_sample.read_variables = ["evt/I","run/I"]
data_sample.style = styles.errorStyle(ROOT.kBlack)
lumi_scale = data_sample.lumi/1000
if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0
weight_ = lambda event, sample: event.weight
lumi_scale = 300
TTZ_mc = TTZtoLLNuNu
if args.year == 2016:
mc = [ yt_TWW ]
# mc = [ dim6top_TTW ]
# mc = [ TWZ ]
# mc = [ yt_TWZ ]
# mc = [ yt_TWZ_filter, yt_TWZ ]
# mc = [ yt_TWZ_filter ]
for sample in mc: sample.style = styles.fillStyle(sample.color)
for sample in mc + signals:
sample.scale = lumi_scale
#sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F']
sample.weight = lambda event, sample: 1
# if args.year == 2016:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.re | drawObjects | identifier_name |
analysisPlots_TWZ_nLep.py | # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions
argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" )
argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" )
args = argParser.parse_args()
# PU reweighting on the fly
from TopEFT.Tools.puProfileCache import puProfile
from TopEFT.Tools.puReweighting import getReweightingFunction
from TopEFT.samples.helpers import fromHeppySample
#
# Logger
#
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
if args.small: args.plot_directory += "_small"
if args.noData: args.plot_directory += "_noData"
if args.normalize: args.plot_directory += "_normalize"
#
# Make samples, will be searched for in the postProcessing directory
#
if args.year == 2016:
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/"
signals = []
#
# Text on the plots
#
def drawObjects( plotData, dataMCScale, lumi_scale ):
tex = ROOT.TLatex()
tex.SetNDC()
tex.SetTextSize(0.04)
tex.SetTextAlign(11) # align left, bottom
lines = [
(0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'),
(0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
]
return [tex.DrawLatex(*l) for l in lines]
scaling = { i+1:0 for i in range(len(signals)) }
def drawPlots(plots, mode, dataMCScale):
for log in [False, True]:
plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection)
for plot in plots:
if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
if not args.noData:
if mode == "all": plot.histos[1][0].legendText = "Data"
if mode == "SF": plot.histos[1][0].legendText = "Data (SF)"
extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png']
plotting.draw(plot,
plot_directory = plot_directory_,
extensions = extensions_,
ratio = {'yRange':(0.1,1.9)} if not args.noData else None,
logX = False, logY = log, sorting = True,
yRange = (0.03, "auto") if log else (0.001, "auto"),
scaling = scaling if args.normalize else {},
legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ),
copyIndexPHP = True,
)
def getLeptonSelection( mode ):
# if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1"
if mode == "nLep": return "(1)"
#
# Read variables and sequences
#
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I",
"lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I",
"Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F",
"Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I",
]
sequence = []
def getLooseLeptonMult( event, sample ):
|
sequence.append( getLooseLeptonMult )
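# Functions registered in `sequence` run once per event before filling, so
# derived quantities such as event.nLepLoose are available to the plots below.
# A minimal, illustrative sketch of adding another derived variable:
#   def getNJet30(event, sample):
#       event.nJet30 = sum(1 for i in range(event.njet) if event.jet_pt[i] > 30)
#   sequence.append(getNJet30)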
#
# Loop over channels
#
yields = {}
allPlots = {}
allModes = ['nLep']
for index, mode in enumerate(allModes):
yields[mode] = {}
logger.info("Working on mode %s", mode)
if not args.noData:
data_sample = Run2016 if args.year == 2016 else Run2017
data_sample.texName = "data"
data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)])
data_sample.name = "data"
data_sample.read_variables = ["evt/I","run/I"]
data_sample.style = styles.errorStyle(ROOT.kBlack)
lumi_scale = data_sample.lumi/1000
if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0
weight_ = lambda event, sample: event.weight
lumi_scale = 300
TTZ_mc = TTZtoLLNuNu
if args.year == 2016:
mc = [ yt_TWW ]
# mc = [ dim6top_TTW ]
# mc = [ TWZ ]
# mc = [ yt_TWZ ]
# mc = [ yt_TWZ_filter, yt_TWZ ]
# mc = [ yt_TWZ_filter ]
for sample in mc: sample.style = styles.fillStyle(sample.color)
for sample in mc + signals:
sample.scale = lumi_scale
#sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F']
sample.weight = lambda event, sample: 1
# if args.year == 2016:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.re | leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))]
lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ]
event.nLepLoose = len(lepLoose) | identifier_body |
analysisPlots_TWZ_nLep.py | ightingFunction
from TopEFT.samples.helpers import fromHeppySample
#
# Logger
#
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
if args.small: args.plot_directory += "_small"
if args.noData: args.plot_directory += "_noData"
if args.normalize: args.plot_directory += "_normalize"
#
# Make samples, will be searched for in the postProcessing directory
#
if args.year == 2016:
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/"
signals = []
#
# Text on the plots
#
def drawObjects( plotData, dataMCScale, lumi_scale ):
tex = ROOT.TLatex()
tex.SetNDC()
tex.SetTextSize(0.04)
tex.SetTextAlign(11) # align left, bottom
lines = [
(0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'),
(0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
]
return [tex.DrawLatex(*l) for l in lines]
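# Note: with SetNDC() the (x, y) pairs above are pad fractions in [0, 1], so
# (0.15, 0.95) anchors the label near the top-left corner regardless of axes.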
scaling = { i+1:0 for i in range(len(signals)) }
def drawPlots(plots, mode, dataMCScale):
for log in [False, True]:
plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection)
for plot in plots:
if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
if not args.noData:
if mode == "all": plot.histos[1][0].legendText = "Data"
if mode == "SF": plot.histos[1][0].legendText = "Data (SF)"
extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png']
plotting.draw(plot,
plot_directory = plot_directory_,
extensions = extensions_,
ratio = {'yRange':(0.1,1.9)} if not args.noData else None,
logX = False, logY = log, sorting = True,
yRange = (0.03, "auto") if log else (0.001, "auto"),
scaling = scaling if args.normalize else {},
legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ),
copyIndexPHP = True,
)
def getLeptonSelection( mode ):
# if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1"
if mode == "nLep": return "(1)"
#
# Read variables and sequences
#
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I",
"lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I",
"Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F",
"Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I",
]
sequence = []
def getLooseLeptonMult( event, sample ):
leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))]
lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ]
event.nLepLoose = len(lepLoose)
sequence.append( getLooseLeptonMult )
#
# Loop over channels
#
yields = {}
allPlots = {}
allModes = ['nLep']
for index, mode in enumerate(allModes):
yields[mode] = {}
logger.info("Working on mode %s", mode)
if not args.noData:
data_sample = Run2016 if args.year == 2016 else Run2017
data_sample.texName = "data"
data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)])
data_sample.name = "data"
data_sample.read_variables = ["evt/I","run/I"]
data_sample.style = styles.errorStyle(ROOT.kBlack)
lumi_scale = data_sample.lumi/1000
if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0
weight_ = lambda event, sample: event.weight
lumi_scale = 300
TTZ_mc = TTZtoLLNuNu
if args.year == 2016:
mc = [ yt_TWW ]
# mc = [ dim6top_TTW ]
# mc = [ TWZ ]
# mc = [ yt_TWZ ]
# mc = [ yt_TWZ_filter, yt_TWZ ]
# mc = [ yt_TWZ_filter ]
for sample in mc: sample.style = styles.fillStyle(sample.color)
for sample in mc + signals:
sample.scale = lumi_scale
#sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F']
sample.weight = lambda event, sample: 1
# if args.year == 2016:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# else: | # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# tr = triggerSelector(args.year) | random_line_split |
|
main.go | LoadByte(addr uint32) uint8 {
return cpu.memory.LoadByte(addr)
}
func (cpu *Cpu) StoreWord(addr uint32, v uint32) {
cpu.memory.StoreWord(addr, v)
}
func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) {
cpu.memory.StoreHalfWord(addr, v)
}
func (cpu *Cpu) StoreByte(addr uint32, v uint8) {
cpu.memory.StoreByte(addr, v)
}
func (cpu *Cpu) IsValidCsr(csr uint32) bool {
if csr == CsrHalt {
return true
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle,
CsrCycleh,
CsrTime,
CsrTimeh,
CsrInstret,
CsrInstreth:
return true
}
if priv != CsrM {
return false
}
switch csr {
case CsrTvec,
CsrTval,
CsrCause,
CsrEpc,
CsrScratch:
return true
}
return false
}
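// CSR numbering here follows the RISC-V convention: bits [9:8] of the CSR
// address encode the required privilege level (0x300 selects machine mode,
// i.e. CsrM here), and addresses from 0xC00 upward form the read-only counter
// block, which is why writes to them are rejected.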
func (cpu *Cpu) GetCsr(csr uint32) uint32 {
if csr == CsrHalt {
return cpu.haltValue
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle:
return uint32(cpu.cycles)
case CsrCycleh:
return uint32(cpu.cycles >> 32)
case CsrTime:
return uint32(cpu.ticks)
case CsrTimeh:
return uint32(cpu.ticks >> 32)
case CsrInstret:
return uint32(cpu.instret)
case CsrInstreth:
return uint32(cpu.instret >> 32)
}
// we only have machine mode csrs for everything else
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
switch csr {
case CsrTvec:
return cpu.mtvec & 0xfffffffc
case CsrTval:
return cpu.mtval
case CsrCause:
return cpu.mcause
case CsrEpc:
return cpu.mepc & 0xfffffffe
case CsrScratch:
return cpu.mscratch
default:
fmt.Printf("invalid csr: 0x%03x\n", csr)
}
return 0
}
func (cpu *Cpu) SetCsr(csr uint32, v uint32) {
if csr == CsrHalt {
cpu.halt = true
cpu.haltValue = v
return
}
priv := csr & ^uint32(0xcff) // save priv
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
csr &= 0xcff // ignore priv
switch csr {
case CsrTvec:
cpu.mtvec = v & 0xfffffffc
case CsrCause:
cpu.mcause = v
case CsrTval:
cpu.mtval = v
case CsrScratch:
cpu.mscratch = v
case CsrEpc:
cpu.mepc = v & 0xfffffffe
}
// writes to unknown machine-mode CSRs are silently ignored
}
func (cpu *Cpu) Reset() {
for i := range cpu.registers {
cpu.registers[i] = 0
}
cpu.pc = cpu.initialAddr
cpu.halt = false
cpu.cycles = 0
cpu.ticks = 0
cpu.instret = 0
cpu.mtvec = 0
cpu.mcause = 0
cpu.mepc = 0
cpu.mtval = 0
cpu.mscratch = 0
}
func (cpu *Cpu) GetReg(idx uint8) uint32 {
if idx == 0 {
return 0
} else if idx > 0 && idx < 32 {
return cpu.registers[idx]
}
panic(fmt.Sprint("invalid register ", idx))
}
func (cpu *Cpu) SetReg(idx uint8, v uint32) {
if idx == 0 {
// do nothing
} else if idx > 0 && idx < 32 {
cpu.registers[idx] = v
} else {
panic(fmt.Sprint("invalid register ", idx))
}
}
func (cpu *Cpu) Execute() {
for !cpu.halt {
cpu.Step()
}
}
func (cpu *Cpu) Halt() {
cpu.halt = true
}
func (cpu *Cpu) Debug() string {
res := ""
for i := uint8(1); i < 32; i++ {
res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i))
}
res += fmt.Sprintf("pc: 0x%08x ", cpu.pc)
return res
}
func (cpu *Cpu) fetch() uint32 {
inst := cpu.LoadWord(cpu.pc)
cpu.pc += 4
return inst
}
func (cpu *Cpu) decode(inst uint32) {
// traps may only be raised during the decode phase, so the trap helper is
// deliberately scoped to this function rather than exposed on Cpu
trap := func(cause uint32, value uint32) {
cpu.SetCsr(CsrTval|CsrM, value)
cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4)
cpu.pc = cpu.GetCsr(CsrTvec | CsrM)
cpu.SetCsr(CsrCause|CsrM, cause)
cpu.cycles += 1
cpu.ticks += 1
}
opcode := inst & 0x7f
decode:
switch opcode {
case OP_IMM:
_, rd, funct, rs1, imm := itype(inst)
rs1v := cpu.GetReg(rs1)
var res uint32
switch funct {
case FUNCT_ADDI:
res = rs1v + imm
case FUNCT_SLTI:
if int32(rs1v) < int32(imm) {
res = 1
} else {
res = 0
}
case FUNCT_SLTUI:
if rs1v < imm {
res = 1
} else {
res = 0
}
case FUNCT_XORI:
res = rs1v ^ imm
case FUNCT_ANDI:
res = rs1v & imm
case FUNCT_ORI:
res = rs1v | imm
case FUNCT_SLLI:
res = rs1v << imm
case FUNCT_SRXI:
if imm&0x400 != 0 {
// Go performs an arithmetic (sign-preserving) right shift on signed ints
res = uint32(int32(rs1v) >> (imm & 0x1f))
} else {
res = rs1v >> (imm & 0x1f)
}
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(rd, res)
case OP_LUI:
_, rd, imm := utype(inst)
cpu.SetReg(rd, imm<<12)
case OP_AUIPC:
_, rd, imm := utype(inst)
cpu.SetReg(rd, cpu.pc+(imm<<12)-4)
case OP:
_, rd, funct3, rs1, rs2, funct7 := rtype(inst)
rs1v := cpu.GetReg(rs1)
rs2v := cpu.GetReg(rs2)
var res uint32
switch funct3 {
case FUNCT_ADD_SUB:
if funct7&0x20 == 0 {
res = rs1v + rs2v
} else {
res = rs1v - rs2v
}
case FUNCT_SLT:
if int32(rs1v) < int32(rs2v) {
res = 1
} else {
res = 0
}
case FUNCT_SLTU:
if rs1v < rs2v {
res = 1
} else {
res = 0
}
case FUNCT_AND:
res = rs1v & rs2v
case FUNCT_OR:
res = rs1v | rs2v
case FUNCT_XOR:
res = rs1v ^ rs2v
case FUNCT_SLL:
res = rs1v << (rs2v & 0x1f)
case FUNCT_SRX:
if funct7&0x20 == 0 {
res = rs1v >> (rs2v & 0x1f)
} else {
res = uint32(int32(rs1v) >> (rs2v & 0x1f))
}
default: | trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(rd, res) | random_line_split |
|
main.go | ) {
s.StoreByte(addr, uint8(v))
}
func (s *MmioSerial) StoreByte(addr uint32, v uint8) {
if s.w == nil {
return
}
b := []uint8{v}
s.w.Write(b)
}
type Cpu struct {
initialAddr uint32
registers [32]uint32
pc uint32
memory Memory
halt bool
cycles uint64
ticks uint64
instret uint64
mtvec uint32
mcause uint32
mepc uint32
mtval uint32
mscratch uint32
haltValue uint32
}
func New(memory Memory, initialAddr uint32) *Cpu {
cpu := &Cpu{}
cpu.initialAddr = initialAddr
cpu.memory = memory
cpu.Reset()
return cpu
}
func (cpu *Cpu) LoadWord(addr uint32) uint32 {
return cpu.memory.LoadWord(addr)
}
func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 {
return cpu.memory.LoadHalfWord(addr)
}
func (cpu *Cpu) LoadByte(addr uint32) uint8 {
return cpu.memory.LoadByte(addr)
}
func (cpu *Cpu) StoreWord(addr uint32, v uint32) {
cpu.memory.StoreWord(addr, v)
}
func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) {
cpu.memory.StoreHalfWord(addr, v)
}
func (cpu *Cpu) StoreByte(addr uint32, v uint8) {
cpu.memory.StoreByte(addr, v)
}
func (cpu *Cpu) IsValidCsr(csr uint32) bool {
if csr == CsrHalt {
return true
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle,
CsrCycleh,
CsrTime,
CsrTimeh,
CsrInstret,
CsrInstreth:
return true
}
if priv != CsrM {
return false
}
switch csr {
case CsrTvec,
CsrTval,
CsrCause,
CsrEpc,
CsrScratch:
return true
}
return false
}
func (cpu *Cpu) GetCsr(csr uint32) uint32 {
if csr == CsrHalt {
return cpu.haltValue
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle:
return uint32(cpu.cycles)
case CsrCycleh:
return uint32(cpu.cycles >> 32)
case CsrTime:
return uint32(cpu.ticks)
case CsrTimeh:
return uint32(cpu.ticks >> 32)
case CsrInstret:
return uint32(cpu.instret)
case CsrInstreth:
return uint32(cpu.instret >> 32)
}
// we only have machine mode csrs for everything else
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
switch csr {
case CsrTvec:
return cpu.mtvec & 0xfffffffc
case CsrTval:
return cpu.mtval
case CsrCause:
return cpu.mcause
case CsrEpc:
return cpu.mepc & 0xfffffffe
case CsrScratch:
return cpu.mscratch
default:
fmt.Printf("invalid csr: 0x%03x\n", csr)
}
return 0
}
func (cpu *Cpu) SetCsr(csr uint32, v uint32) {
if csr == CsrHalt {
cpu.halt = true
cpu.haltValue = v
return
}
priv := csr & ^uint32(0xcff) // save priv
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
csr &= 0xcff // ignore priv
switch csr {
case CsrTvec:
cpu.mtvec = v & 0xfffffffc
case CsrCause:
cpu.mcause = v
case CsrTval:
cpu.mtval = v
case CsrScratch:
cpu.mscratch = v
case CsrEpc:
cpu.mepc = v & 0xfffffffe
}
// writes to unknown machine-mode CSRs are silently ignored
}
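// CsrHalt acts as a nonstandard software halt device: writing any value stops
// the Execute loop, and the written value is preserved and readable back via
// GetCsr(CsrHalt), e.g. as an exit status.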
func (cpu *Cpu) Reset() {
for i := range cpu.registers {
cpu.registers[i] = 0
}
cpu.pc = cpu.initialAddr
cpu.halt = false
cpu.cycles = 0
cpu.ticks = 0
cpu.instret = 0
cpu.mtvec = 0
cpu.mcause = 0
cpu.mepc = 0
cpu.mtval = 0
cpu.mscratch = 0
}
func (cpu *Cpu) GetReg(idx uint8) uint32 {
if idx == 0 {
return 0
} else if idx > 0 && idx < 32 {
return cpu.registers[idx]
}
panic(fmt.Sprint("invalid register ", idx))
}
func (cpu *Cpu) SetReg(idx uint8, v uint32) {
if idx == 0 {
// do nothing
} else if idx > 0 && idx < 32 {
cpu.registers[idx] = v
} else {
panic(fmt.Sprint("invalid register ", idx))
}
}
func (cpu *Cpu) Execute() {
for !cpu.halt {
cpu.Step()
}
}
func (cpu *Cpu) Halt() {
cpu.halt = true
}
func (cpu *Cpu) Debug() string {
res := ""
for i := uint8(1); i < 32; i++ {
res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i))
}
res += fmt.Sprintf("pc: 0x%08x ", cpu.pc)
return res
}
func (cpu *Cpu) fetch() uint32 {
inst := cpu.LoadWord(cpu.pc)
cpu.pc += 4
return inst
}
func (cpu *Cpu) decode(inst uint32) {
// traps may only be raised during the decode phase, so the trap helper is
// deliberately scoped to this function rather than exposed on Cpu
trap := func(cause uint32, value uint32) {
cpu.SetCsr(CsrTval|CsrM, value)
cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4)
cpu.pc = cpu.GetCsr(CsrTvec | CsrM)
cpu.SetCsr(CsrCause|CsrM, cause)
cpu.cycles += 1
cpu.ticks += 1
}
opcode := inst & 0x7f
decode:
switch opcode {
case OP_IMM:
_, rd, funct, rs1, imm := itype(inst)
rs1v := cpu.GetReg(rs1)
var res uint32
switch funct {
case FUNCT_ADDI:
res = rs1v + imm
case FUNCT_SLTI:
if int32(rs1v) < int32(imm) {
res = 1
} else {
res = 0
}
case FUNCT_SLTUI:
if rs1v < imm {
res = 1
} else {
res = 0
}
case FUNCT_XORI:
res = rs1v ^ imm
case FUNCT_ANDI:
res = rs1v & imm
case FUNCT_ORI:
res = rs1v | imm
case FUNCT_SLLI:
res = rs1v << imm
case FUNCT_SRXI:
if imm&0x400 != 0 {
// Go performs an arithmetic (sign-preserving) right shift on signed ints
res = uint32(int32(rs1v) >> (imm & 0x1f))
} else {
res = rs1v >> (imm & 0x1f)
}
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(rd, res)
case OP_LUI:
_, rd, imm := utype(inst)
cpu.SetReg(rd, imm<<12)
case OP_AUIPC:
_, rd, imm := utype(inst)
cpu.SetReg(rd, cpu.pc+(imm<<12)-4)
case OP:
_, rd, funct3, rs1, rs2, funct7 := rtype(inst)
rs1v := cpu.GetReg(rs1)
rs2v := cpu.GetReg(rs2)
var res uint32
switch funct3 {
case FUNCT_ADD_SUB:
if funct7&0x20 == 0 | {
res = rs1v + rs2v
} | conditional_block |
|
main.go | 8x ", _RegNames[i], cpu.GetReg(i))
}
res += fmt.Sprintf("pc: 0x%08x ", cpu.pc)
return res
}
func (cpu *Cpu) fetch() uint32 {
inst := cpu.LoadWord(cpu.pc)
cpu.pc += 4
return inst
}
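// Note: pc is advanced eagerly here, which is why the jump and branch cases
// in decode compensate with "- 4" when computing relative targets.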
func (cpu *Cpu) decode(inst uint32) {
// traps may only be raised during the decode phase, so the trap helper is
// deliberately scoped to this function rather than exposed on Cpu
trap := func(cause uint32, value uint32) {
cpu.SetCsr(CsrTval|CsrM, value)
cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4)
cpu.pc = cpu.GetCsr(CsrTvec | CsrM)
cpu.SetCsr(CsrCause|CsrM, cause)
cpu.cycles += 1
cpu.ticks += 1
}
opcode := inst & 0x7f
decode:
switch opcode {
case OP_IMM:
_, rd, funct, rs1, imm := itype(inst)
rs1v := cpu.GetReg(rs1)
var res uint32
switch funct {
case FUNCT_ADDI:
res = rs1v + imm
case FUNCT_SLTI:
if int32(rs1v) < int32(imm) {
res = 1
} else {
res = 0
}
case FUNCT_SLTUI:
if rs1v < imm {
res = 1
} else {
res = 0
}
case FUNCT_XORI:
res = rs1v ^ imm
case FUNCT_ANDI:
res = rs1v & imm
case FUNCT_ORI:
res = rs1v | imm
case FUNCT_SLLI:
res = rs1v << imm
case FUNCT_SRXI:
if imm&0x400 != 0 {
// Go performs an arithmetic (sign-preserving) right shift on signed ints
res = uint32(int32(rs1v) >> (imm & 0x1f))
} else {
res = rs1v >> (imm & 0x1f)
}
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(rd, res)
case OP_LUI:
_, rd, imm := utype(inst)
cpu.SetReg(rd, imm<<12)
case OP_AUIPC:
_, rd, imm := utype(inst)
cpu.SetReg(rd, cpu.pc+(imm<<12)-4)
case OP:
_, rd, funct3, rs1, rs2, funct7 := rtype(inst)
rs1v := cpu.GetReg(rs1)
rs2v := cpu.GetReg(rs2)
var res uint32
switch funct3 {
case FUNCT_ADD_SUB:
if funct7&0x20 == 0 {
res = rs1v + rs2v
} else {
res = rs1v - rs2v
}
case FUNCT_SLT:
if int32(rs1v) < int32(rs2v) {
res = 1
} else {
res = 0
}
case FUNCT_SLTU:
if rs1v < rs2v {
res = 1
} else {
res = 0
}
case FUNCT_AND:
res = rs1v & rs2v
case FUNCT_OR:
res = rs1v | rs2v
case FUNCT_XOR:
res = rs1v ^ rs2v
case FUNCT_SLL:
res = rs1v << (rs2v & 0x1f)
case FUNCT_SRX:
if funct7&0x20 == 0 {
res = rs1v >> (rs2v & 0x1f)
} else {
res = uint32(int32(rs1v) >> (rs2v & 0x1f))
}
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(rd, res)
case OP_JAL:
_, rd, imm := jtype(inst)
cpu.SetReg(rd, cpu.pc)
cpu.pc += imm - 4
case OP_JALR:
_, rd, _, rs1, imm := itype(inst)
rs1v := cpu.GetReg(rs1)
cpu.SetReg(rd, cpu.pc)
cpu.pc = (rs1v + imm) & 0xfffffffe
case OP_BRANCH:
_, funct3, rs1, rs2, imm := btype(inst)
rs1v := cpu.GetReg(rs1)
rs2v := cpu.GetReg(rs2)
var shouldBranch bool
switch funct3 {
case FUNCT_BEQ:
shouldBranch = rs1v == rs2v
case FUNCT_BNE:
shouldBranch = rs1v != rs2v
case FUNCT_BLT:
shouldBranch = int32(rs1v) < int32(rs2v)
case FUNCT_BLTU:
shouldBranch = rs1v < rs2v
case FUNCT_BGE:
shouldBranch = int32(rs1v) >= int32(rs2v)
case FUNCT_BGEU:
shouldBranch = rs1v >= rs2v
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
if shouldBranch {
cpu.pc += imm - 4
}
case OP_LOAD:
_, dest, width, base, imm := itype(inst)
addr := cpu.GetReg(base) + imm
var res uint32
switch width {
case 0: // LB
res = signExtend(uint32(cpu.LoadByte(addr)), 8)
case 1: // LH
res = signExtend(uint32(cpu.LoadHalfWord(addr)), 16)
case 2: // LW
res = cpu.LoadWord(addr)
case 4: // LBU
res = uint32(cpu.LoadByte(addr))
case 5: // LHU
res = uint32(cpu.LoadHalfWord(addr))
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
cpu.SetReg(dest, res)
case OP_STORE:
_, funct, rs1, rs2, imm := stype(inst)
addr := cpu.GetReg(rs1) + imm
rs2v := cpu.GetReg(rs2)
switch funct {
case 0: // SB
cpu.StoreByte(addr, uint8(rs2v))
case 1: // SH
cpu.StoreHalfWord(addr, uint16(rs2v))
case 2: // SW
cpu.StoreWord(addr, rs2v)
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
case OP_SYSTEM:
_, rd, funct3, rs1, imm := itype(inst)
switch funct3 {
case FUNCT_CSRRW, FUNCT_CSRRS, FUNCT_CSRRC:
csr := imm & 0xfff
if !cpu.IsValidCsr(csr) {
trap(ExceptionIllegalInstruction, inst)
break decode
}
// check if we are trying to write to an RO csr
isReadOnly := csr > 0xc00
if isReadOnly && rs1 != 0 {
trap(ExceptionIllegalInstruction, inst)
break decode
}
csrv := cpu.GetCsr(csr)
rs1v := cpu.GetReg(rs1)
cpu.SetReg(rd, csrv)
if rs1 != 0 {
switch funct3 {
case FUNCT_CSRRW:
csrv = rs1v
case FUNCT_CSRRS:
csrv = csrv & rs1v
case FUNCT_CSRRC:
csrv = csrv & (^rs1v)
}
cpu.SetCsr(csr, csrv)
}
case FUNCT_PRIV:
switch imm {
case PRIV_ECALL:
trap(ExceptionEcallM, cpu.pc-4)
break decode
case PRIV_EBREAK:
trap(ExceptionBreakpoint, cpu.pc-4)
break decode
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
default:
trap(ExceptionIllegalInstruction, inst)
break decode
}
default:
trap(ExceptionIllegalInstruction, inst)
}
cpu.cycles += 1
cpu.ticks += 1
cpu.instret += 1
}
func (cpu *Cpu) Step() {
if cpu.halt {
return
}
inst := cpu.fetch()
cpu.decode(inst)
}
func bitrange(inst uint32, fromBit, len uint) uint32 {
return (inst >> fromBit) & ((1 << len) - 1)
}
func signExtend(n uint32, bit uint) uint32 {
if n&(1<<bit) != 0 {
n |= ^((1 << bit) - 1)
}
return n
}
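// Worked example: signExtend(0x80, 7) fills bits 8..31, yielding 0xFFFFFF80
// (-128 as int32), and bitrange(inst, 7, 5) extracts the 5-bit rd field from
// bits 7..11 of an instruction word.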
func | btype | identifier_name |
|
main.go | binary.LittleEndian.Uint16(mem.memory[addr : addr+2])
}
func (mem *Ram) LoadByte(addr uint32) uint8 {
return mem.memory[addr]
}
func (mem *Ram) StoreWord(addr uint32, v uint32) {
binary.LittleEndian.PutUint32(mem.memory[addr:addr+4], v)
}
func (mem *Ram) StoreHalfWord(addr uint32, v uint16) {
binary.LittleEndian.PutUint16(mem.memory[addr:addr+2], v)
}
func (mem *Ram) StoreByte(addr uint32, v uint8) {
mem.memory[addr] = v
}
type Range struct {
Addr, Size uint32
Memory Memory
}
type Mmu struct {
ranges []Range
}
func NewMmu() *Mmu {
return &Mmu{}
}
func (mmu *Mmu) AddRange(addr, size uint32, mem Memory) {
//@todo: sanity checks
mmu.ranges = append(mmu.ranges, Range{addr, size, mem})
}
func (mmu *Mmu) findRange(addr uint32) (*Range, uint32) {
for _, r := range mmu.ranges {
if addr >= r.Addr && addr < (r.Addr+r.Size) {
return &r, addr - r.Addr
}
}
return nil, 0
}
func (mmu *Mmu) LoadWord(addr uint32) uint32 {
r, addr := mmu.findRange(addr)
if r != nil {
return r.Memory.LoadWord(addr)
}
return 0
}
func (mmu *Mmu) LoadHalfWord(addr uint32) uint16 {
r, addr := mmu.findRange(addr)
if r != nil {
return r.Memory.LoadHalfWord(addr)
}
return 0
}
func (mmu *Mmu) LoadByte(addr uint32) uint8 {
r, addr := mmu.findRange(addr)
if r != nil {
return r.Memory.LoadByte(addr)
}
return 0
}
func (mmu *Mmu) StoreWord(addr uint32, v uint32) {
r, addr := mmu.findRange(addr)
if r != nil {
r.Memory.StoreWord(addr, v)
}
}
func (mmu *Mmu) StoreHalfWord(addr uint32, v uint16) {
r, addr := mmu.findRange(addr)
if r != nil {
r.Memory.StoreHalfWord(addr, v)
}
}
func (mmu *Mmu) StoreByte(addr uint32, v uint8) {
r, addr := mmu.findRange(addr)
if r != nil {
r.Memory.StoreByte(addr, v)
}
}
type MmioSerial struct {
w io.Writer
r io.Reader
}
func (s *MmioSerial) LoadWord(addr uint32) uint32 {
return uint32(s.LoadByte(addr))
}
func (s *MmioSerial) LoadHalfWord(addr uint32) uint16 {
return uint16(s.LoadByte(addr))
}
func (s *MmioSerial) LoadByte(addr uint32) uint8 {
if s.r == nil {
return 0
}
var b [1]uint8
s.r.Read(b[:])
return b[0]
}
func (s *MmioSerial) StoreWord(addr uint32, v uint32) {
s.StoreByte(addr, uint8(v))
}
func (s *MmioSerial) StoreHalfWord(addr uint32, v uint16) {
s.StoreByte(addr, uint8(v))
}
func (s *MmioSerial) StoreByte(addr uint32, v uint8) {
if s.w == nil {
return
}
b := []uint8{v}
s.w.Write(b)
}
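// Hedged wiring sketch (the address and size are illustrative assumptions,
// not taken from the original source):
//
//	mmu := NewMmu()
//	mmu.AddRange(0x10000000, 4, &MmioSerial{w: os.Stdout, r: os.Stdin})
//
// Loads and stores the CPU issues in that window are then routed to the
// serial device through the Mmu methods above.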
type Cpu struct {
initialAddr uint32
registers [32]uint32
pc uint32
memory Memory
halt bool
cycles uint64
ticks uint64
instret uint64
mtvec uint32
mcause uint32
mepc uint32
mtval uint32
mscratch uint32
haltValue uint32
}
func New(memory Memory, initialAddr uint32) *Cpu {
cpu := &Cpu{}
cpu.initialAddr = initialAddr
cpu.memory = memory
cpu.Reset()
return cpu
}
func (cpu *Cpu) LoadWord(addr uint32) uint32 {
return cpu.memory.LoadWord(addr)
}
func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 {
return cpu.memory.LoadHalfWord(addr)
}
func (cpu *Cpu) LoadByte(addr uint32) uint8 {
return cpu.memory.LoadByte(addr)
}
func (cpu *Cpu) StoreWord(addr uint32, v uint32) {
cpu.memory.StoreWord(addr, v)
}
func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) {
cpu.memory.StoreHalfWord(addr, v)
}
func (cpu *Cpu) StoreByte(addr uint32, v uint8) {
cpu.memory.StoreByte(addr, v)
}
func (cpu *Cpu) IsValidCsr(csr uint32) bool {
if csr == CsrHalt {
return true
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle,
CsrCycleh,
CsrTime,
CsrTimeh,
CsrInstret,
CsrInstreth:
return true
}
if priv != CsrM {
return false
}
switch csr {
case CsrTvec,
CsrTval,
CsrCause,
CsrEpc,
CsrScratch:
return true
}
return false
}
func (cpu *Cpu) GetCsr(csr uint32) uint32 |
// we only have machine mode csrs for everything else
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
switch csr {
case CsrTvec:
return cpu.mtvec & 0xfffffffc
case CsrTval:
return cpu.mtval
case CsrCause:
return cpu.mcause
case CsrEpc:
return cpu.mepc & 0xfffffffe
case CsrScratch:
return cpu.mscratch
default:
fmt.Printf("invalid csr: 0x%03x\n", csr)
}
return 0
}
func (cpu *Cpu) SetCsr(csr uint32, v uint32) {
if csr == CsrHalt {
cpu.halt = true
cpu.haltValue = v
return
}
priv := csr & ^uint32(0xcff) // save priv
if priv != CsrM {
panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr))
}
csr &= 0xcff // ignore priv
switch csr {
case CsrTvec:
cpu.mtvec = v & 0xfffffffc
case CsrCause:
cpu.mcause = v
case CsrTval:
cpu.mtval = v
case CsrScratch:
cpu.mscratch = v
case CsrEpc:
cpu.mepc = v & 0xfffffffe
}
// writes to unknown machine-mode CSRs are silently ignored
}
func (cpu *Cpu) Reset() {
for i := range cpu.registers {
cpu.registers[i] = 0
}
cpu.pc = cpu.initialAddr
cpu.halt = false
cpu.cycles = 0
cpu.ticks = 0
cpu.instret = 0
cpu.mtvec = 0
cpu.mcause = 0
cpu.mepc = 0
cpu.mtval = 0
cpu.mscratch = 0
}
func (cpu *Cpu) GetReg(idx uint8) uint32 {
if idx == 0 {
return 0
} else if idx > 0 && idx < 32 {
return cpu.registers[idx]
}
panic(fmt.Sprint("invalid register ", idx))
}
func (cpu *Cpu) SetReg(idx uint8, v uint32) {
if idx == 0 {
// do nothing
} else if idx > 0 && idx < 32 {
cpu.registers[idx] | {
if csr == CsrHalt {
return cpu.haltValue
}
priv := csr & ^uint32(0xcff) // save priv
csr &= 0xcff // ignore priv
switch csr {
case CsrCycle:
return uint32(cpu.cycles)
case CsrCycleh:
return uint32(cpu.cycles >> 32)
case CsrTime:
return uint32(cpu.ticks)
case CsrTimeh:
return uint32(cpu.ticks >> 32)
case CsrInstret:
return uint32(cpu.instret)
case CsrInstreth:
return uint32(cpu.instret >> 32)
} | identifier_body |
storer.go | store.NewTxChunkStore(txStore, sharky)
if err := txChunkStore.Recover(); err != nil {
return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err)
}
return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil
}
func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
txnRepo, commit, rollback := repo.NewTx(ctx)
c, err := cache.New(ctx, txnRepo, capacity)
if err != nil {
return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback()))
}
return c, commit()
}
type noopRadiusSetter struct{}
func (noopRadiusSetter) SetStorageRadius(uint8) {}
func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) {
store, err := initStore(basePath, opts)
if err != nil {
return err
}
defer store.Close()
sharkyBasePath := path.Join(basePath, sharkyPath)
var sharkyRecover *sharky.Recovery
// open sharky recovery only if a sharky dir already exists; on a fresh node
// sharkyRecover stays nil and the epoch migration below runs effectively empty
if _, err := os.Stat(sharkyBasePath); err == nil {
sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize)
if err != nil {
return err
}
defer sharkyRecover.Close()
}
logger := opts.Logger.WithName("epochmigration").Register()
var rs reservePutter
if opts.ReserveCapacity > 0 {
rs, err = reserve.New(
opts.Address,
store,
opts.ReserveCapacity,
noopRadiusSetter{},
logger,
func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error {
return nil
},
)
if err != nil {
return err
}
}
defer func() {
if sharkyRecover != nil {
retErr = errors.Join(retErr, sharkyRecover.Save())
}
}()
return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger)
}
const lockKeyNewSession string = "new_session"
// Options provides a container to configure different things in the storer.
type Options struct {
// Options for the underlying levelDB index store.
LdbStats atomic.Pointer[prometheus.HistogramVec]
LdbOpenFilesLimit uint64
LdbBlockCacheCapacity uint64
LdbWriteBufferSize uint64
LdbDisableSeeksCompaction bool
CacheCapacity uint64
Logger log.Logger
Address swarm.Address
WarmupDuration time.Duration
Batchstore postage.Storer
ValidStamp postage.ValidStampFn
RadiusSetter topology.SetStorageRadiuser
StateStore storage.StateStorer
ReserveCapacity int
ReserveWakeUpDuration time.Duration
}
func defaultOptions() *Options {
return &Options{
LdbOpenFilesLimit: defaultOpenFilesLimit,
LdbBlockCacheCapacity: defaultBlockCacheCapacity,
LdbWriteBufferSize: defaultWriteBufferSize,
LdbDisableSeeksCompaction: defaultDisableSeeksCompaction,
CacheCapacity: defaultCacheCapacity,
Logger: log.Noop,
ReserveCapacity: 4_194_304, // 2^22 chunks
ReserveWakeUpDuration: time.Minute * 15,
}
}
// DB implements all the component stores described above.
type DB struct {
logger log.Logger
metrics metrics
repo storage.Repository
lock *multex.Multex
cacheObj *cache.Cache
retrieval retrieval.Interface
pusherFeed chan *pusher.Op
quit chan struct{}
bgCacheLimiter chan struct{}
bgCacheLimiterWg sync.WaitGroup
dbCloser io.Closer
subscriptionsWG sync.WaitGroup
events *events.Subscriber
directUploadLimiter chan struct{}
reserve *reserve.Reserve
inFlight *util.WaitingCounter
reserveBinEvents *events.Subscriber
baseAddr swarm.Address
batchstore postage.Storer
validStamp postage.ValidStampFn
setSyncerOnce sync.Once
syncer Syncer
opts workerOpts
}
type workerOpts struct {
warmupDuration time.Duration
wakeupDuration time.Duration
}
// New returns a newly constructed DB object which implements all the above
// component stores.
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
var (
repo storage.Repository
err error
dbCloser io.Closer
)
if opts == nil {
opts = defaultOptions()
}
if opts.Logger == nil {
opts.Logger = log.Noop
}
lock := multex.New()
metrics := newMetrics()
opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats)
locker := func(addr swarm.Address) func() {
lock.Lock(addr.ByteString())
return func() {
lock.Unlock(addr.ByteString())
}
}
if dirPath == "" {
repo, dbCloser, err = initInmemRepository(locker)
if err != nil {
return nil, err
}
} else {
// only perform migration if not done already
if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil {
err = performEpochMigration(ctx, dirPath, opts)
if err != nil {
return nil, err
}
}
repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts)
if err != nil {
return nil, err
}
}
sharkyBasePath := ""
if dirPath != "" {
sharkyBasePath = path.Join(dirPath, sharkyPath)
}
err = migration.Migrate(
repo.IndexStore(),
localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()),
)
if err != nil {
return nil, err
}
cacheObj, err := initCache(ctx, opts.CacheCapacity, repo)
if err != nil {
return nil, err
}
logger := opts.Logger.WithName(loggerName).Register()
db := &DB{
metrics: metrics,
logger: logger,
baseAddr: opts.Address,
repo: repo,
lock: lock,
cacheObj: cacheObj,
retrieval: noopRetrieval{},
pusherFeed: make(chan *pusher.Op),
quit: make(chan struct{}),
bgCacheLimiter: make(chan struct{}, 16),
dbCloser: dbCloser,
batchstore: opts.Batchstore,
validStamp: opts.ValidStamp,
events: events.NewSubscriber(),
reserveBinEvents: events.NewSubscriber(),
opts: workerOpts{
warmupDuration: opts.WarmupDuration,
wakeupDuration: opts.ReserveWakeUpDuration,
},
directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes),
inFlight: new(util.WaitingCounter),
}
if db.validStamp == nil {
db.validStamp = postage.ValidStamp(db.batchstore)
}
if opts.ReserveCapacity > 0 {
rs, err := reserve.New(
opts.Address,
repo.IndexStore(),
opts.ReserveCapacity,
opts.RadiusSetter,
logger,
func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error {
defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }()
db.lock.Lock(cacheAccessLockKey)
defer db.lock.Unlock(cacheAccessLockKey)
return cacheObj.MoveFromReserve(ctx, store, addrs...)
},
)
if err != nil {
return nil, err
}
db.reserve = rs
db.metrics.StorageRadius.Set(float64(rs.Radius()))
db.metrics.ReserveSize.Set(float64(rs.Size()))
}
db.metrics.CacheSize.Set(float64(db.cacheObj.Size()))
// Clean up any dirty state in the upload and pinning stores; this can
// happen after a dirty shutdown.
err = errors.Join(
upload.CleanupDirty(db),
pinstore.CleanupDirty(db),
)
if err != nil {
return nil, err
}
return db, nil
}
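// exampleOpen is a hedged usage sketch, not part of the original API surface;
// the directory and the option tweak are illustrative assumptions.
func exampleOpen(ctx context.Context) (*DB, error) {
	opts := defaultOptions()
	opts.CacheCapacity = 500_000 // e.g. a smaller cache for a constrained node
	return New(ctx, "/tmp/bee-storer", opts)
}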
// Metrics returns set of prometheus collectors.
func (db *DB) Metrics() []prometheus.Collector {
collectors := m.PrometheusCollectorsFromFields(db.metrics)
if v, ok := db.repo.(m.Collector); ok {
collectors = append(collectors, v.Metrics()...)
}
return collectors
}
func (db *DB) Close() error {
close(db.quit)
bgReserveWorkersClosed := make(chan struct{})
go func() {
defer close(bgReserveWorkersClosed)
if c := db.inFlight.Wait(5 * time.Second); c > 0 | {
db.logger.Warning("db shutting down with running goroutines")
} | conditional_block |
|
storer.go | indexes which
// will keep track of the chunk in the cache. | Cache() storage.Putter
}
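// Usage sketch (hedged): a chunk fetched from the network can be kept locally
// through the cache putter, e.g. err := db.Cache().Put(ctx, ch).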
// NetStore is a logical component of the storer that deals with network. It will
// push/retrieve chunks from the network.
type NetStore interface {
// DirectUpload provides a session which can be used to push chunks directly
// to the network.
DirectUpload() PutterSession
// Download provides a getter which can be used to download data. If the data
// is found locally, its returned immediately, otherwise it is retrieved from
// the network.
Download(pin bool) storage.Getter
// PusherFeed is the feed for direct push chunks. This can be used by the
// pusher component to push out the chunks.
PusherFeed() <-chan *pusher.Op
}
var _ Reserve = (*DB)(nil)
// Reserve is a logical component of the storer that deals with reserve
// content. It will implement all the core functionality required for the protocols.
type Reserve interface {
ReserveStore
EvictBatch(ctx context.Context, batchID []byte) error
ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error)
ReserveSize() int
}
// ReserveIterator is a helper interface which can be used to iterate over all
// the chunks in the reserve.
type ReserveIterator interface {
ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
}
// ReserveStore is a logical component of the storer that deals with reserve
// content. It will implement all the core functionality required for the protocols.
type ReserveStore interface {
ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error)
ReserveHas(addr swarm.Address, batchID []byte) (bool, error)
ReservePutter() storage.Putter
SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
ReserveLastBinIDs() ([]uint64, error)
RadiusChecker
}
// RadiusChecker provides the radius related functionality.
type RadiusChecker interface {
IsWithinStorageRadius(addr swarm.Address) bool
StorageRadius() uint8
}
// LocalStore is a read-only ChunkStore. It can be used to check whether a
// chunk is known locally, but it cannot tell the context of the chunk
// (whether it is pinned, uploaded, etc.).
type LocalStore interface {
ChunkStore() storage.ReadOnlyChunkStore
}
// Debugger is a helper interface which can be used to debug the storer.
type Debugger interface {
DebugInfo(context.Context) (Info, error)
}
type memFS struct {
afero.Fs
}
func (m *memFS) Open(path string) (fs.File, error) {
return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
}
type dirFS struct {
basedir string
}
func (d *dirFS) Open(path string) (fs.File, error) {
return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644)
}
var sharkyNoOfShards = 32
var ErrDBQuit = errors.New("db quit")
type closerFn func() error
func (c closerFn) Close() error { return c() }
func closer(closers ...io.Closer) io.Closer {
return closerFn(func() error {
var err error
for _, closer := range closers {
err = errors.Join(err, closer.Close())
}
return err
})
}
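// closer lets several stores be shut down as a single io.Closer with their
// errors joined, as in the repository initializers in this file, e.g.
// closer(store, sharky, recoveryCloser).Close().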
func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) {
store, err := leveldbstore.New("", nil)
if err != nil {
return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err)
}
sharky, err := sharky.New(
&memFS{Fs: afero.NewMemMapFs()},
sharkyNoOfShards,
swarm.SocMaxChunkSize,
)
if err != nil {
return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err)
}
txStore := leveldbstore.NewTxStore(store)
txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky)
return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil
}
// loggerName is the tree path name of the logger for this package.
const loggerName = "storer"
// Default options for levelDB.
const (
defaultOpenFilesLimit = uint64(256)
defaultBlockCacheCapacity = uint64(32 * 1024 * 1024)
defaultWriteBufferSize = uint64(32 * 1024 * 1024)
defaultDisableSeeksCompaction = false
defaultCacheCapacity = uint64(1_000_000)
defaultBgCacheWorkers = 16
indexPath = "indexstore"
sharkyPath = "sharky"
)
func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) {
ldbBasePath := path.Join(basePath, indexPath)
if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) {
err := os.MkdirAll(ldbBasePath, 0777)
if err != nil {
return nil, err
}
}
store, err := leveldbstore.New(ldbBasePath, &opt.Options{
OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit),
BlockCacheCapacity: int(opts.LdbBlockCacheCapacity),
WriteBuffer: int(opts.LdbWriteBufferSize),
DisableSeeksCompaction: opts.LdbDisableSeeksCompaction,
CompactionL0Trigger: 8,
Filter: filter.NewBloomFilter(64),
})
if err != nil {
return nil, fmt.Errorf("failed creating levelDB index store: %w", err)
}
return store, nil
}
func initDiskRepository(
ctx context.Context,
basePath string,
locker storage.ChunkLocker,
opts *Options,
) (storage.Repository, io.Closer, error) {
store, err := initStore(basePath, opts)
if err != nil {
return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err)
}
if opts.LdbStats.Load() != nil {
go func() {
ldbStats := opts.LdbStats.Load()
logger := log.NewLogger(loggerName).Register()
ticker := time.NewTicker(15 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
stats := new(leveldb.DBStats)
switch err := store.DB().Stats(stats); {
case errors.Is(err, leveldb.ErrClosed):
return
case err != nil:
logger.Error(err, "snapshot levelDB stats")
default:
ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount))
ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds())
ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots))
ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators))
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite))
ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead))
ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize))
ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount))
ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp))
ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp))
ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp))
ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp))
for i := 0; i < len(stats.LevelSizes); i++ {
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds())
}
}
}
}
}()
}
sharkyBasePath := path.Join(basePath, sharkyPath)
if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) {
err := os.Mkdir(sharky | random_line_split |
|
storer.go | , error)
ReserveHas(addr swarm.Address, batchID []byte) (bool, error)
ReservePutter() storage.Putter
SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
ReserveLastBinIDs() ([]uint64, error)
RadiusChecker
}
// RadiusChecker provides the radius related functionality.
type RadiusChecker interface {
IsWithinStorageRadius(addr swarm.Address) bool
StorageRadius() uint8
}
// LocalStore is a read-only ChunkStore. It can be used to check whether a
// chunk is known locally, but it cannot tell the context of the chunk
// (whether it is pinned, uploaded, etc.).
type LocalStore interface {
ChunkStore() storage.ReadOnlyChunkStore
}
// Debugger is a helper interface which can be used to debug the storer.
type Debugger interface {
DebugInfo(context.Context) (Info, error)
}
type memFS struct {
afero.Fs
}
func (m *memFS) Open(path string) (fs.File, error) {
return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
}
type dirFS struct {
basedir string
}
func (d *dirFS) Open(path string) (fs.File, error) {
return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644)
}
var sharkyNoOfShards = 32
var ErrDBQuit = errors.New("db quit")
type closerFn func() error
func (c closerFn) Close() error { return c() }
func closer(closers ...io.Closer) io.Closer {
return closerFn(func() error {
var err error
for _, closer := range closers {
err = errors.Join(err, closer.Close())
}
return err
})
}
func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) {
store, err := leveldbstore.New("", nil)
if err != nil {
return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err)
}
sharky, err := sharky.New(
&memFS{Fs: afero.NewMemMapFs()},
sharkyNoOfShards,
swarm.SocMaxChunkSize,
)
if err != nil {
return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err)
}
txStore := leveldbstore.NewTxStore(store)
txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky)
return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil
}
// loggerName is the tree path name of the logger for this package.
const loggerName = "storer"
// Default options for levelDB.
const (
defaultOpenFilesLimit = uint64(256)
defaultBlockCacheCapacity = uint64(32 * 1024 * 1024)
defaultWriteBufferSize = uint64(32 * 1024 * 1024)
defaultDisableSeeksCompaction = false
defaultCacheCapacity = uint64(1_000_000)
defaultBgCacheWorkers = 16
indexPath = "indexstore"
sharkyPath = "sharky"
)
func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) {
ldbBasePath := path.Join(basePath, indexPath)
if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) {
err := os.MkdirAll(ldbBasePath, 0777)
if err != nil {
return nil, err
}
}
store, err := leveldbstore.New(ldbBasePath, &opt.Options{
OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit),
BlockCacheCapacity: int(opts.LdbBlockCacheCapacity),
WriteBuffer: int(opts.LdbWriteBufferSize),
DisableSeeksCompaction: opts.LdbDisableSeeksCompaction,
CompactionL0Trigger: 8,
Filter: filter.NewBloomFilter(64),
})
if err != nil {
return nil, fmt.Errorf("failed creating levelDB index store: %w", err)
}
return store, nil
}
func initDiskRepository(
ctx context.Context,
basePath string,
locker storage.ChunkLocker,
opts *Options,
) (storage.Repository, io.Closer, error) {
store, err := initStore(basePath, opts)
if err != nil {
return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err)
}
if opts.LdbStats.Load() != nil {
go func() {
ldbStats := opts.LdbStats.Load()
logger := log.NewLogger(loggerName).Register()
ticker := time.NewTicker(15 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
stats := new(leveldb.DBStats)
switch err := store.DB().Stats(stats); {
case errors.Is(err, leveldb.ErrClosed):
return
case err != nil:
logger.Error(err, "snapshot levelDB stats")
default:
ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount))
ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds())
ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots))
ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators))
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite))
ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead))
ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize))
ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount))
ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp))
ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp))
ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp))
ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp))
for i := 0; i < len(stats.LevelSizes); i++ {
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i]))
ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds())
}
}
}
}
}()
}
sharkyBasePath := path.Join(basePath, sharkyPath)
if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) {
err := os.Mkdir(sharkyBasePath, 0777)
if err != nil {
return nil, nil, err
}
}
recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts)
if err != nil {
return nil, nil, fmt.Errorf("failed to recover sharky: %w", err)
}
sharky, err := sharky.New(
&dirFS{basedir: sharkyBasePath},
sharkyNoOfShards,
swarm.SocMaxChunkSize,
)
if err != nil {
return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err)
}
txStore := leveldbstore.NewTxStore(store)
if err := txStore.Recover(); err != nil {
return nil, nil, fmt.Errorf("failed to recover index store: %w", err)
}
txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky)
if err := txChunkStore.Recover(); err != nil {
return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err)
}
return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil
}
func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
txnRepo, commit, rollback := repo.NewTx(ctx)
c, err := cache.New(ctx, txnRepo, capacity)
if err != nil {
return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback()))
}
return c, commit()
}
type noopRadiusSetter struct{}
func (noopRadiusSetter) | SetStorageRadius | identifier_name |
|
storer.go | Store, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil
}
func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
txnRepo, commit, rollback := repo.NewTx(ctx)
c, err := cache.New(ctx, txnRepo, capacity)
if err != nil {
return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback()))
}
return c, commit()
}
type noopRadiusSetter struct{}
func (noopRadiusSetter) SetStorageRadius(uint8) {}
func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) {
store, err := initStore(basePath, opts)
if err != nil {
return err
}
defer store.Close()
sharkyBasePath := path.Join(basePath, sharkyPath)
var sharkyRecover *sharky.Recovery
// if this is a fresh node then perform an empty epoch migration
if _, err := os.Stat(sharkyBasePath); err == nil {
sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize)
if err != nil {
return err
}
defer sharkyRecover.Close()
}
logger := opts.Logger.WithName("epochmigration").Register()
var rs reservePutter
if opts.ReserveCapacity > 0 {
rs, err = reserve.New(
opts.Address,
store,
opts.ReserveCapacity,
noopRadiusSetter{},
logger,
func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error {
return nil
},
)
if err != nil {
return err
}
}
defer func() {
if sharkyRecover != nil {
retErr = errors.Join(retErr, sharkyRecover.Save())
}
}()
return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger)
}
const lockKeyNewSession string = "new_session"
// Options provides a container to configure different things in the storer.
type Options struct {
// These are options related to levelDB. Currently, the underlying storage used is levelDB.
LdbStats atomic.Pointer[prometheus.HistogramVec]
LdbOpenFilesLimit uint64
LdbBlockCacheCapacity uint64
LdbWriteBufferSize uint64
LdbDisableSeeksCompaction bool
CacheCapacity uint64
Logger log.Logger
Address swarm.Address
WarmupDuration time.Duration
Batchstore postage.Storer
ValidStamp postage.ValidStampFn
RadiusSetter topology.SetStorageRadiuser
StateStore storage.StateStorer
ReserveCapacity int
ReserveWakeUpDuration time.Duration
}
func defaultOptions() *Options {
return &Options{
LdbOpenFilesLimit: defaultOpenFilesLimit,
LdbBlockCacheCapacity: defaultBlockCacheCapacity,
LdbWriteBufferSize: defaultWriteBufferSize,
LdbDisableSeeksCompaction: defaultDisableSeeksCompaction,
CacheCapacity: defaultCacheCapacity,
Logger: log.Noop,
ReserveCapacity: 4_194_304, // 2^22 chunks
ReserveWakeUpDuration: time.Minute * 15,
}
}
// DB implements all the component stores described above.
type DB struct {
logger log.Logger
metrics metrics
repo storage.Repository
lock *multex.Multex
cacheObj *cache.Cache
retrieval retrieval.Interface
pusherFeed chan *pusher.Op
quit chan struct{}
bgCacheLimiter chan struct{}
bgCacheLimiterWg sync.WaitGroup
dbCloser io.Closer
subscriptionsWG sync.WaitGroup
events *events.Subscriber
directUploadLimiter chan struct{}
reserve *reserve.Reserve
inFlight *util.WaitingCounter
reserveBinEvents *events.Subscriber
baseAddr swarm.Address
batchstore postage.Storer
validStamp postage.ValidStampFn
setSyncerOnce sync.Once
syncer Syncer
opts workerOpts
}
type workerOpts struct {
warmupDuration time.Duration
wakeupDuration time.Duration
}
// New returns a newly constructed DB object which implements all the above
// component stores.
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
var (
repo storage.Repository
err error
dbCloser io.Closer
)
if opts == nil {
opts = defaultOptions()
}
if opts.Logger == nil {
opts.Logger = log.Noop
}
lock := multex.New()
metrics := newMetrics()
opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats)
locker := func(addr swarm.Address) func() {
lock.Lock(addr.ByteString())
return func() {
lock.Unlock(addr.ByteString())
}
}
if dirPath == "" {
repo, dbCloser, err = initInmemRepository(locker)
if err != nil {
return nil, err
}
} else {
// only perform migration if not done already
if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil {
err = performEpochMigration(ctx, dirPath, opts)
if err != nil {
return nil, err
}
}
repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts)
if err != nil {
return nil, err
}
}
sharkyBasePath := ""
if dirPath != "" {
sharkyBasePath = path.Join(dirPath, sharkyPath)
}
err = migration.Migrate(
repo.IndexStore(),
localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()),
)
if err != nil {
return nil, err
}
cacheObj, err := initCache(ctx, opts.CacheCapacity, repo)
if err != nil {
return nil, err
}
logger := opts.Logger.WithName(loggerName).Register()
db := &DB{
metrics: metrics,
logger: logger,
baseAddr: opts.Address,
repo: repo,
lock: lock,
cacheObj: cacheObj,
retrieval: noopRetrieval{},
pusherFeed: make(chan *pusher.Op),
quit: make(chan struct{}),
bgCacheLimiter: make(chan struct{}, 16),
dbCloser: dbCloser,
batchstore: opts.Batchstore,
validStamp: opts.ValidStamp,
events: events.NewSubscriber(),
reserveBinEvents: events.NewSubscriber(),
opts: workerOpts{
warmupDuration: opts.WarmupDuration,
wakeupDuration: opts.ReserveWakeUpDuration,
},
directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes),
inFlight: new(util.WaitingCounter),
}
if db.validStamp == nil {
db.validStamp = postage.ValidStamp(db.batchstore)
}
if opts.ReserveCapacity > 0 {
rs, err := reserve.New(
opts.Address,
repo.IndexStore(),
opts.ReserveCapacity,
opts.RadiusSetter,
logger,
func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error {
defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }()
db.lock.Lock(cacheAccessLockKey)
defer db.lock.Unlock(cacheAccessLockKey)
return cacheObj.MoveFromReserve(ctx, store, addrs...)
},
)
if err != nil {
return nil, err
}
db.reserve = rs
db.metrics.StorageRadius.Set(float64(rs.Radius()))
db.metrics.ReserveSize.Set(float64(rs.Size()))
}
db.metrics.CacheSize.Set(float64(db.cacheObj.Size()))
// Cleanup any dirty state in upload and pinning stores, this could happen
// in case of dirty shutdowns
err = errors.Join(
upload.CleanupDirty(db),
pinstore.CleanupDirty(db),
)
if err != nil {
return nil, err
}
return db, nil
}
// Metrics returns set of prometheus collectors.
func (db *DB) Metrics() []prometheus.Collector {
collectors := m.PrometheusCollectorsFromFields(db.metrics)
if v, ok := db.repo.(m.Collector); ok {
collectors = append(collectors, v.Metrics()...)
}
return collectors
}
func (db *DB) Close() error | {
close(db.quit)
bgReserveWorkersClosed := make(chan struct{})
go func() {
defer close(bgReserveWorkersClosed)
if c := db.inFlight.Wait(5 * time.Second); c > 0 {
db.logger.Warning("db shutting down with running goroutines")
}
}()
bgCacheWorkersClosed := make(chan struct{})
go func() {
defer close(bgCacheWorkersClosed)
db.bgCacheLimiterWg.Wait()
}()
var err error
closerDone := make(chan struct{})
go func() { | identifier_body |
|
userrole.go | func (m *UserRoleMod) AllowedTypes() base.MessageType {
return m.allowedTypes
}
func (m *UserRoleMod) AllowDMs() bool {
return m.allowDMs
}
func (m *UserRoleMod) Hook() error {
m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) {
refreshTicker := time.NewTicker(time.Hour)
go func() {
for range refreshTicker.C {
for _, g := range m.bot.Discord.Guilds() {
if g.Unavailable {
continue
}
var userRoles []*database.UserRole
err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID)
if err != nil {
continue
}
for _, ur := range userRoles {
hasRole := false
for _, gr := range g.Roles {
if gr.ID == ur.RoleID {
hasRole = true
break
}
}
if !hasRole {
m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID)
}
}
}
}
}()
})
m.RegisterCommand(NewSetUserRoleCommand(m))
m.RegisterCommand(NewMyRoleCommand(m))
//m.RegisterCommand(NewListUserRolesCommand(m))
return nil
}
func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) {
m.Lock()
defer m.Unlock()
if _, ok := m.commands[cmd.Name]; ok {
panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name()))
}
m.commands[cmd.Name] = cmd
}
func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "setuserrole",
Description: "Binds, unbinds or changes a userrole bind to a user",
Triggers: []string{"m?setuserrole"},
Usage: "m?setuserrole 1231231231231 cool role",
Cooldown: 3,
RequiredPerms: discordgo.PermissionManageRoles,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.setuserroleCommand,
}
}
func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 3 {
return
}
targetMember, err := msg.GetMemberAtArg(1)
if err != nil {
msg.Reply("could not find that user")
return
}
if targetMember.User.Bot {
msg.Reply("Bots dont get to join the fun")
return
}
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply(err.Error())
return
}
var selectedRole *discordgo.Role
for _, role := range g.Roles {
if role.ID == msg.Args()[2] {
selectedRole = role
} else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) {
selectedRole = role
}
}
if selectedRole == nil {
msg.Reply("Could not find that role!")
return
}
userRole := &database.UserRole{}
err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID)
switch err {
case nil:
if selectedRole.ID == userRole.RoleID {
m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String()))
} else {
m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID)
msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name))
}
case sql.ErrNoRows:
m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String()))
default:
fmt.Println(err)
msg.Reply("there was an error, please try again")
}
}
func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "myrole",
Description: "Displays a users bound role, or lets the user change the name or color of their bound role",
Triggers: []string{"m?myrole"},
Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff",
Cooldown: 3,
RequiredPerms: 0,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.myroleCommand,
}
}
func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 1 {
return
}
var (
err error
oldRole *discordgo.Role
target *discordgo.Member
)
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply("some error occurred")
return
}
switch la := msg.LenArgs(); {
case la > 2:
if msg.Args()[1] != "name" && msg.Args()[1] != "color" {
return
}
if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow {
msg.Reply("I am missing 'manage roles' permissions!")
return
}
ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID())
if err != nil && err != sql.ErrNoRows {
m.log.Error("error fetching user role", zap.Error(err))
msg.Reply("there was an error, please try again")
return
} else if err == sql.ErrNoRows {
msg.Reply("No custom role set.")
return
}
for _, role := range g.Roles {
if role.ID == ur.RoleID {
oldRole = role
}
}
if oldRole == nil {
msg.Reply("couldnt find role")
return
}
if msg.Args()[1] == "name" {
newName := strings.Join(msg.RawArgs()[2:], " ")
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical})
return
}
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: oldRole.Color,
Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName),
}
msg.ReplyEmbed(embed)
} else if msg.Args()[1] == "color" {
clr := msg.Args()[2]
if strings.HasPrefix(clr, "#") {
clr = clr[1:]
}
color, err := strconv.ParseInt(clr, 16, 64)
if err != nil || color < 0 || color > 0xFFFFFF {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical})
return
}
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: int(color),
//Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)),
Description | }
func (m *UserRoleMod) Commands() map[string]*base.ModCommand {
return m.commands
} | random_line_split |
|
userrole.go | iscord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) {
refreshTicker := time.NewTicker(time.Hour)
go func() {
for range refreshTicker.C {
for _, g := range m.bot.Discord.Guilds() {
if g.Unavailable {
continue
}
var userRoles []*database.UserRole
err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID)
if err != nil {
continue
}
for _, ur := range userRoles {
hasRole := false
for _, gr := range g.Roles {
if gr.ID == ur.RoleID {
hasRole = true
break
}
}
if !hasRole {
m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID)
}
}
}
}
}()
})
m.RegisterCommand(NewSetUserRoleCommand(m))
m.RegisterCommand(NewMyRoleCommand(m))
//m.RegisterCommand(NewListUserRolesCommand(m))
return nil
}
func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) {
m.Lock()
defer m.Unlock()
if _, ok := m.commands[cmd.Name]; ok {
panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name()))
}
m.commands[cmd.Name] = cmd
}
func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand |
func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 3 {
return
}
targetMember, err := msg.GetMemberAtArg(1)
if err != nil {
msg.Reply("could not find that user")
return
}
if targetMember.User.Bot {
msg.Reply("Bots dont get to join the fun")
return
}
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply(err.Error())
return
}
var selectedRole *discordgo.Role
for _, role := range g.Roles {
if role.ID == msg.Args()[2] {
selectedRole = role
} else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) {
selectedRole = role
}
}
if selectedRole == nil {
msg.Reply("Could not find that role!")
return
}
userRole := &database.UserRole{}
err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID)
switch err {
case nil:
if selectedRole.ID == userRole.RoleID {
m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String()))
} else {
m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID)
msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name))
}
case sql.ErrNoRows:
m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String()))
default:
fmt.Println(err)
msg.Reply("there was an error, please try again")
}
}
func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "myrole",
Description: "Displays a users bound role, or lets the user change the name or color of their bound role",
Triggers: []string{"m?myrole"},
Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff",
Cooldown: 3,
RequiredPerms: 0,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.myroleCommand,
}
}
func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 1 {
return
}
var (
err error
oldRole *discordgo.Role
target *discordgo.Member
)
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply("some error occurred")
return
}
switch la := msg.LenArgs(); {
case la > 2:
if msg.Args()[1] != "name" && msg.Args()[1] != "color" {
return
}
if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow {
msg.Reply("I am missing 'manage roles' permissions!")
return
}
ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID())
if err != nil && err != sql.ErrNoRows {
m.log.Error("error fetching user role", zap.Error(err))
msg.Reply("there was an error, please try again")
return
} else if err == sql.ErrNoRows {
msg.Reply("No custom role set.")
return
}
for _, role := range g.Roles {
if role.ID == ur.RoleID {
oldRole = role
}
}
if oldRole == nil {
msg.Reply("couldnt find role")
return
}
if msg.Args()[1] == "name" {
newName := strings.Join(msg.RawArgs()[2:], " ")
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical})
return
}
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: oldRole.Color,
Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName),
}
msg.ReplyEmbed(embed)
} else if msg.Args()[1] == "color" {
clr := msg.Args()[2]
if strings.HasPrefix(clr, "#") {
clr = clr[1:]
}
color, err := strconv.ParseInt(clr, 16, 64)
if err != nil || color < 0 || color > 0xFFFFFF {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical})
return
}
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: int(color),
//Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)),
Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)),
}
msg.ReplyEmbed(embed)
}
return
case la == 1:
target = msg.Member()
| {
return &base.ModCommand{
Mod: m,
Name: "setuserrole",
Description: "Binds, unbinds or changes a userrole bind to a user",
Triggers: []string{"m?setuserrole"},
Usage: "m?setuserrole 1231231231231 cool role",
Cooldown: 3,
RequiredPerms: discordgo.PermissionManageRoles,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.setuserroleCommand,
}
} | identifier_body |
userrole.go | () bool {
return m.allowDMs
}
func (m *UserRoleMod) Hook() error {
m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) {
refreshTicker := time.NewTicker(time.Hour)
go func() {
for range refreshTicker.C {
for _, g := range m.bot.Discord.Guilds() {
if g.Unavailable {
continue
}
var userRoles []*database.UserRole
err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID)
if err != nil {
continue
}
for _, ur := range userRoles {
hasRole := false
for _, gr := range g.Roles {
if gr.ID == ur.RoleID {
hasRole = true
break
}
}
if !hasRole {
m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID)
}
}
}
}
}()
})
m.RegisterCommand(NewSetUserRoleCommand(m))
m.RegisterCommand(NewMyRoleCommand(m))
//m.RegisterCommand(NewListUserRolesCommand(m))
return nil
}
func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) {
m.Lock()
defer m.Unlock()
if _, ok := m.commands[cmd.Name]; ok {
panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name()))
}
m.commands[cmd.Name] = cmd
}
func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "setuserrole",
Description: "Binds, unbinds or changes a userrole bind to a user",
Triggers: []string{"m?setuserrole"},
Usage: "m?setuserrole 1231231231231 cool role",
Cooldown: 3,
RequiredPerms: discordgo.PermissionManageRoles,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.setuserroleCommand,
}
}
func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 3 {
return
}
targetMember, err := msg.GetMemberAtArg(1)
if err != nil {
msg.Reply("could not find that user")
return
}
if targetMember.User.Bot {
msg.Reply("Bots dont get to join the fun")
return
}
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply(err.Error())
return
}
var selectedRole *discordgo.Role
for _, role := range g.Roles {
if role.ID == msg.Args()[2] {
selectedRole = role
} else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) {
selectedRole = role
}
}
if selectedRole == nil {
msg.Reply("Could not find that role!")
return
}
userRole := &database.UserRole{}
err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID)
switch err {
case nil:
if selectedRole.ID == userRole.RoleID {
m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String()))
} else {
m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID)
msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name))
}
case sql.ErrNoRows:
m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String()))
default:
fmt.Println(err)
msg.Reply("there was an error, please try again")
}
}
func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "myrole",
Description: "Displays a users bound role, or lets the user change the name or color of their bound role",
Triggers: []string{"m?myrole"},
Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff",
Cooldown: 3,
RequiredPerms: 0,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.myroleCommand,
}
}
func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 1 {
return
}
var (
err error
oldRole *discordgo.Role
target *discordgo.Member
)
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply("some error occurred")
return
}
switch la := msg.LenArgs(); {
case la > 2:
if msg.Args()[1] != "name" && msg.Args()[1] != "color" {
return
}
if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow {
msg.Reply("I am missing 'manage roles' permissions!")
return
}
ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID())
if err != nil && err != sql.ErrNoRows {
m.log.Error("error fetching user role", zap.Error(err))
msg.Reply("there was an error, please try again")
return
} else if err == sql.ErrNoRows {
msg.Reply("No custom role set.")
return
}
for _, role := range g.Roles {
if role.ID == ur.RoleID {
oldRole = role
}
}
if oldRole == nil {
msg.Reply("couldnt find role")
return
}
if msg.Args()[1] == "name" {
newName := strings.Join(msg.RawArgs()[2:], " ")
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical})
return
}
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: oldRole.Color,
Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName),
}
msg.ReplyEmbed(embed)
} else if msg.Args()[1] == "color" {
clr := msg.Args()[2]
if strings.HasPrefix(clr, "#") {
clr = clr[1:]
}
color, err := strconv.ParseInt(clr, 16, 64)
if err != nil || color < 0 || color > 0xFFFFFF {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical})
return
}
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: int(color),
//Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)),
Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)),
}
| AllowDMs | identifier_name |
|
userrole.go | iscord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) {
refreshTicker := time.NewTicker(time.Hour)
go func() {
for range refreshTicker.C {
for _, g := range m.bot.Discord.Guilds() {
if g.Unavailable {
continue
}
var userRoles []*database.UserRole
err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID)
if err != nil {
continue
}
for _, ur := range userRoles {
hasRole := false
for _, gr := range g.Roles {
if gr.ID == ur.RoleID {
hasRole = true
break
}
}
if !hasRole {
m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID)
}
}
}
}
}()
})
m.RegisterCommand(NewSetUserRoleCommand(m))
m.RegisterCommand(NewMyRoleCommand(m))
//m.RegisterCommand(NewListUserRolesCommand(m))
return nil
}
func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) {
m.Lock()
defer m.Unlock()
if _, ok := m.commands[cmd.Name]; ok {
panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name()))
}
m.commands[cmd.Name] = cmd
}
func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "setuserrole",
Description: "Binds, unbinds or changes a userrole bind to a user",
Triggers: []string{"m?setuserrole"},
Usage: "m?setuserrole 1231231231231 cool role",
Cooldown: 3,
RequiredPerms: discordgo.PermissionManageRoles,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.setuserroleCommand,
}
}
func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 3 {
return
}
targetMember, err := msg.GetMemberAtArg(1)
if err != nil {
msg.Reply("could not find that user")
return
}
if targetMember.User.Bot {
msg.Reply("Bots dont get to join the fun")
return
}
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply(err.Error())
return
}
var selectedRole *discordgo.Role
for _, role := range g.Roles {
if role.ID == msg.Args()[2] {
selectedRole = role
} else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) {
selectedRole = role
}
}
if selectedRole == nil {
msg.Reply("Could not find that role!")
return
}
userRole := &database.UserRole{}
err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID)
switch err {
case nil:
if selectedRole.ID == userRole.RoleID {
m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String()))
} else {
m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID)
msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name))
}
case sql.ErrNoRows:
m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID)
msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String()))
default:
fmt.Println(err)
msg.Reply("there was an error, please try again")
}
}
func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand {
return &base.ModCommand{
Mod: m,
Name: "myrole",
Description: "Displays a users bound role, or lets the user change the name or color of their bound role",
Triggers: []string{"m?myrole"},
Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff",
Cooldown: 3,
RequiredPerms: 0,
RequiresOwner: false,
AllowedTypes: base.MessageTypeCreate,
AllowDMs: false,
Enabled: true,
Run: m.myroleCommand,
}
}
func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) {
if msg.LenArgs() < 1 {
return
}
var (
err error
oldRole *discordgo.Role
target *discordgo.Member
)
g, err := msg.Discord.Guild(msg.Message.GuildID)
if err != nil {
msg.Reply("some error occurred")
return
}
switch la := msg.LenArgs(); {
case la > 2:
if msg.Args()[1] != "name" && msg.Args()[1] != "color" {
return
}
if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow {
msg.Reply("I am missing 'manage roles' permissions!")
return
}
ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID())
if err != nil && err != sql.ErrNoRows {
m.log.Error("error fetching user role", zap.Error(err))
msg.Reply("there was an error, please try again")
return
} else if err == sql.ErrNoRows {
msg.Reply("No custom role set.")
return
}
for _, role := range g.Roles {
if role.ID == ur.RoleID {
oldRole = role
}
}
if oldRole == nil {
msg.Reply("couldnt find role")
return
}
if msg.Args()[1] == "name" | else if msg.Args()[1] == "color" {
clr := msg.Args()[2]
if strings.HasPrefix(clr, "#") {
clr = clr[1:]
}
color, err := strconv.ParseInt(clr, 16, 64)
if err != nil || color < 0 || color > 0xFFFFFF {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical})
return
}
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: int(color),
//Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)),
Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)),
}
msg.ReplyEmbed(embed)
}
return
case la == 1:
target = msg.Member()
case | {
newName := strings.Join(msg.RawArgs()[2:], " ")
_, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable)
if err != nil {
if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) {
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical})
return
}
msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical})
return
}
embed := &discordgo.MessageEmbed{
Color: oldRole.Color,
Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName),
}
msg.ReplyEmbed(embed)
} | conditional_block |
threejs_adapter.js | this.getDependencies(node.mulwapp_create_spec),
'props' : {},
'children' : {}
};
if (node instanceof THREE.Object3D) {
var conf = this.config.shareConf(node, undefined, root);
// Return if this object is not to be synchronized
if (!conf) return;
// If called by a parent, set the relation
if (parentNode) {
parentNode.children[node.mulwapp_guid] = true;
}
// Set properties in the doc node
if (conf.watch_props) {
for (var i = 0; i < conf.watch_props.length; i++) {
var prop = conf.watch_props[i];
var val = prop.split('.').reduce(function (prev, step) {
return prev[step];
}, node);
docNode.props[prop] = val;
}
}
// Recurse on children
for (var i = 0; i < node.children.length; i++) {
aux.call(this, node.children[i], docNode);
}
}
// Recurse on dependencies from create spec
for (var i = 0; i < docNode.dependencies.length; i++) {
aux.call(this, this.allLocalObjects[docNode.dependencies[i]], undefined);
}
doc[node.mulwapp_guid] = docNode;
}).call(this, root, undefined);
return doc;
}
/**
* Intercepts constructor calls to create a create specification before
* creating the object.
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object
* @param {Array} constructors - A list of constructors to intercept
*/
ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) {
if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated';
CONSTRUCTORS_DECORATED = true;
var _this = this;
constructors.forEach(function (name) {
var backupName = '_' + name;
// Backup the original constructor somewhere
THREE[backupName] = THREE[name];
// Override with your own, then call the original
THREE[name] = function () {
// Decorate constructor
if (!this._mulwapp_remote_create) {
if (mulwapp.applicationInitializationOngoing) {
this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid;
_this.nextIncrementalGuid++;
}
var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments);
this.mulwapp_create_spec = spec;
_this.allLocalObjects[this.mulwapp_guid] = this;
}
// Call original constructor
THREE[backupName].apply(this, arguments);
}
// Extend the original class
THREE[name].prototype = Object.create(THREE[backupName].prototype);
});
}
/**
* Generate the specification that is used by remote peers to replay
* object creation.
* @param {string} name - The name of the object type
* @param {string} guid - The mulwapp_guid of the object
* @param {Array} argum - The arguments given to the local constructor
*/
ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) {
var args = [];
// Argum is not an Array, but function parameters which is 'array like'
for (var i = 0; i < argum.length; i++) {
var arg = argum[i];
if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) {
args.push({primitive: false, value: arg.mulwapp_guid});
} else {
args.push({primitive: true, value: arg});
}
}
return {type: name, mulwapp_guid: guid, args: args}
}
/**
* Constructs an object from a specification made by Mulwapp.generateCreateSpec
* @param {Object} spec Specification needed to create the object.
* @return {Object} The object created
*/
ThreeAdapter.prototype.constructorReplayer = function (spec) {
function F(args) |
F.prototype = THREE[spec.type].prototype;
// Parse argument list
var args = [];
spec.args.forEach(function (e) {
if (e.primitive) args.push(e.value);
else args.push(this.lookupNodeByGuid(e.value));
}, this);
// Create object
var o = new F(args);
o.mulwapp_guid = spec.mulwapp_guid;
this.allLocalObjects[spec.mulwapp_guid] = o;
return o;
}
/**
*
*/
ThreeAdapter.prototype.modelUpdater = function (op) {
var setProp = function (node, prop, val) {
var propPath = prop.split('.');
propPath.slice(0, -1).forEach(function (step) {
node = node[step];
});
node[propPath[propPath.length - 1]] = val;
}
var node = this.lookupNodeByGuid(op.guid);
if (op.type == 'update prop') {
setProp(node, op.key, op.val);
}
else if (op.type == 'insert child') {
var child = this.lookupNodeByGuid(op.key);
node.add(child);
}
else if (op.type == 'delete child') {
var child = this.lookupNodeByGuid(op.key);
node.remove(child);
}
else if (op.type == 'insert object') {
this.constructorReplayer(op.val.extra);
}
else if (op.type == 'delete object') {
delete this.allLocalObjects[op.guid];
}
}
/**
*
*/
ThreeAdapter.prototype.lookupNodeByGuid = function (guid) {
return this.allLocalObjects[guid];
}
ThreeAdapter.prototype.getConstructors = function () {
return [
// "REVISION",
// "log",
// "warn",
// "error",
// "MOUSE",
// "CullFaceNone",
// "CullFaceBack",
// "CullFaceFront",
// "CullFaceFrontBack",
// "FrontFaceDirectionCW",
// "FrontFaceDirectionCCW",
// "BasicShadowMap",
// "PCFShadowMap",
// "PCFSoftShadowMap",
// "FrontSide",
// "BackSide",
// "DoubleSide",
// "NoShading",
// "FlatShading",
// "SmoothShading",
// "NoColors",
// "FaceColors",
// "VertexColors",
// "NoBlending",
// "NormalBlending",
// "AdditiveBlending",
// "SubtractiveBlending",
// "MultiplyBlending",
// "CustomBlending",
// "AddEquation",
// "SubtractEquation",
// "ReverseSubtractEquation",
// "MinEquation",
// "MaxEquation",
// "ZeroFactor",
// "OneFactor",
// "SrcColorFactor",
// "OneMinusSrcColorFactor",
// "SrcAlphaFactor",
// "OneMinusSrcAlphaFactor",
// "DstAlphaFactor",
// "OneMinusDstAlphaFactor",
// "DstColorFactor",
// "OneMinusDstColorFactor",
// "SrcAlphaSaturateFactor",
// "MultiplyOperation",
// "MixOperation",
// "AddOperation",
// "UVMapping",
// "CubeReflectionMapping",
// "CubeRefractionMapping",
// "EquirectangularReflectionMapping",
// "EquirectangularRefractionMapping",
// "SphericalReflectionMapping",
// "RepeatWrapping",
// "ClampToEdgeWrapping",
// "MirroredRepeatWrapping",
// "NearestFilter",
// "NearestMipMapNearestFilter",
// "NearestMipMapLinearFilter",
// "LinearFilter",
// "LinearMipMapNearestFilter",
// "LinearMipMapLinearFilter",
// "UnsignedByteType",
// "ByteType",
// "ShortType",
// "UnsignedShortType",
// "IntType",
// "UnsignedIntType",
// "FloatType",
// "HalfFloatType",
// "UnsignedShort4444Type",
// "UnsignedShort5551Type",
// "UnsignedShort565Type",
// "AlphaFormat",
// "RGBFormat",
// "RGBAFormat",
// "LuminanceFormat",
// "LuminanceAlphaFormat",
// "RGBEFormat",
// "RGB_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT3_Format",
// "RGBA_S3TC_DXT5_Format",
// "RGB_PVRTC_4BPPV1_Format",
// "RGB_PVRTC_2BPPV1_Format",
// "RGBA_PVRTC_4BPPV1_Format",
// "RGBA_PVRTC_2BPPV1_Format",
// "Projector",
// "CanvasRenderer",
// "Color",
// "ColorKeywords",
// "Quaternion",
// "Vector2",
// "Vector3",
// "Vector4",
| {
this._mulwapp_remote_create = true;
this.mulwapp_create_spec = spec;
return THREE[spec.type].apply(this, args);
} | identifier_body |
threejs_adapter.js | }).call(this, root, undefined);
return doc;
}
/**
* Intercepts constructor calls to create a create specification before
* creating the object.
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object
* @param {Array} constructors - A list of constructors to intercept
*/
ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) {
if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated';
CONSTRUCTORS_DECORATED = true;
var _this = this;
constructors.forEach(function (name) {
var backupName = '_' + name;
// Backup the original constructor somewhere
THREE[backupName] = THREE[name];
// Override with your own, then call the original
THREE[name] = function () {
// Decorate constructor
if (!this._mulwapp_remote_create) {
if (mulwapp.applicationInitializationOngoing) {
this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid;
_this.nextIncrementalGuid++;
}
var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments);
this.mulwapp_create_spec = spec;
_this.allLocalObjects[this.mulwapp_guid] = this;
}
// Call original constructor
THREE[backupName].apply(this, arguments);
}
// Extend the original class
THREE[name].prototype = Object.create(THREE[backupName].prototype);
});
}
/**
* Generate the specification that is used by remote peers to replay
* object creation.
* @param {string} name - The name of the object type
* @param {string} guid - The mulwapp_guid of the object
* @param {Array} argum - The arguments given to the local constructor
*/
ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) {
var args = [];
// Argum is not an Array, but function parameters which is 'array like'
for (var i = 0; i < argum.length; i++) {
var arg = argum[i];
if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) {
args.push({primitive: false, value: arg.mulwapp_guid});
} else {
args.push({primitive: true, value: arg});
}
}
return {type: name, mulwapp_guid: guid, args: args}
}
/**
* Constructs an object from a specification made by Mulwapp.generateCreateSpec
* @param {Object} spec Specification needed to create the object.
* @return {Object} The object created
*/
ThreeAdapter.prototype.constructorReplayer = function (spec) {
function F(args) {
this._mulwapp_remote_create = true;
this.mulwapp_create_spec = spec;
return THREE[spec.type].apply(this, args);
}
F.prototype = THREE[spec.type].prototype;
// Parse argument list
var args = [];
spec.args.forEach(function (e) {
if (e.primitive) args.push(e.value);
else args.push(this.lookupNodeByGuid(e.value));
}, this);
// Create object
var o = new F(args);
o.mulwapp_guid = spec.mulwapp_guid;
this.allLocalObjects[spec.mulwapp_guid] = o;
return o;
}
/**
*
*/
ThreeAdapter.prototype.modelUpdater = function (op) {
var setProp = function (node, prop, val) {
var propPath = prop.split('.');
propPath.slice(0, -1).forEach(function (step) {
node = node[step];
});
node[propPath[propPath.length - 1]] = val;
}
var node = this.lookupNodeByGuid(op.guid);
if (op.type == 'update prop') {
setProp(node, op.key, op.val);
}
else if (op.type == 'insert child') {
var child = this.lookupNodeByGuid(op.key);
node.add(child);
}
else if (op.type == 'delete child') {
var child = this.lookupNodeByGuid(op.key);
node.remove(child);
}
else if (op.type == 'insert object') {
this.constructorReplayer(op.val.extra);
}
else if (op.type == 'delete object') {
delete this.allLocalObjects[op.guid];
}
}
/**
*
*/
ThreeAdapter.prototype.lookupNodeByGuid = function (guid) {
return this.allLocalObjects[guid];
}
ThreeAdapter.prototype.getConstructors = function () {
return [
// "REVISION",
// "log",
// "warn",
// "error",
// "MOUSE",
// "CullFaceNone",
// "CullFaceBack",
// "CullFaceFront",
// "CullFaceFrontBack",
// "FrontFaceDirectionCW",
// "FrontFaceDirectionCCW",
// "BasicShadowMap",
// "PCFShadowMap",
// "PCFSoftShadowMap",
// "FrontSide",
// "BackSide",
// "DoubleSide",
// "NoShading",
// "FlatShading",
// "SmoothShading",
// "NoColors",
// "FaceColors",
// "VertexColors",
// "NoBlending",
// "NormalBlending",
// "AdditiveBlending",
// "SubtractiveBlending",
// "MultiplyBlending",
// "CustomBlending",
// "AddEquation",
// "SubtractEquation",
// "ReverseSubtractEquation",
// "MinEquation",
// "MaxEquation",
// "ZeroFactor",
// "OneFactor",
// "SrcColorFactor",
// "OneMinusSrcColorFactor",
// "SrcAlphaFactor",
// "OneMinusSrcAlphaFactor",
// "DstAlphaFactor",
// "OneMinusDstAlphaFactor",
// "DstColorFactor",
// "OneMinusDstColorFactor",
// "SrcAlphaSaturateFactor",
// "MultiplyOperation",
// "MixOperation",
// "AddOperation",
// "UVMapping",
// "CubeReflectionMapping",
// "CubeRefractionMapping",
// "EquirectangularReflectionMapping",
// "EquirectangularRefractionMapping",
// "SphericalReflectionMapping",
// "RepeatWrapping",
// "ClampToEdgeWrapping",
// "MirroredRepeatWrapping",
// "NearestFilter",
// "NearestMipMapNearestFilter",
// "NearestMipMapLinearFilter",
// "LinearFilter",
// "LinearMipMapNearestFilter",
// "LinearMipMapLinearFilter",
// "UnsignedByteType",
// "ByteType",
// "ShortType",
// "UnsignedShortType",
// "IntType",
// "UnsignedIntType",
// "FloatType",
// "HalfFloatType",
// "UnsignedShort4444Type",
// "UnsignedShort5551Type",
// "UnsignedShort565Type",
// "AlphaFormat",
// "RGBFormat",
// "RGBAFormat",
// "LuminanceFormat",
// "LuminanceAlphaFormat",
// "RGBEFormat",
// "RGB_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT3_Format",
// "RGBA_S3TC_DXT5_Format",
// "RGB_PVRTC_4BPPV1_Format",
// "RGB_PVRTC_2BPPV1_Format",
// "RGBA_PVRTC_4BPPV1_Format",
// "RGBA_PVRTC_2BPPV1_Format",
// "Projector",
// "CanvasRenderer",
// "Color",
// "ColorKeywords",
// "Quaternion",
// "Vector2",
// "Vector3",
// "Vector4",
// "Euler",
// "Line3",
// "Box2",
// "Box3",
// "Matrix3",
// "Matrix4",
// "Ray",
// "Sphere",
// "Frustum",
// "Plane",
// "Math",
// "Spline",
// "Triangle",
// "Clock",
// "EventDispatcher",
// "Raycaster",
// "Object3D",
// "Object3DIdCount",
// "Face3",
// "Face4",
// "BufferAttribute",
// "Int8Attribute",
// "Uint8Attribute",
// "Uint8ClampedAttribute",
// "Int16Attribute",
// "Uint16Attribute",
// "Int32Attribute",
// "Uint32Attribute",
// "Float32Attribute",
// "Float64Attribute",
// "DynamicBufferAttribute",
"BufferGeometry",
// "Geometry",
// "GeometryIdCount",
// "Camera",
// "CubeCamera",
"OrthographicCamera",
"PerspectiveCamera",
// "Light",
"AmbientLight",
// "AreaLight",
"DirectionalLight", | // "HemisphereLight",
"PointLight",
// "SpotLight",
// "Cache",
// "Loader", | random_line_split |
|
threejs_adapter.js | this.getDependencies(node.mulwapp_create_spec),
'props' : {},
'children' : {}
};
if (node instanceof THREE.Object3D) {
var conf = this.config.shareConf(node, undefined, root);
// Return if this object is not to be synchronized
if (!conf) return;
// If called by a parent, set the relation
if (parentNode) {
parentNode.children[node.mulwapp_guid] = true;
}
// Set properties in the doc node
if (conf.watch_props) {
for (var i = 0; i < conf.watch_props.length; i++) {
var prop = conf.watch_props[i];
var val = prop.split('.').reduce(function (prev, step) {
return prev[step];
}, node);
docNode.props[prop] = val;
}
}
// Recurse on children
for (var i = 0; i < node.children.length; i++) {
aux.call(this, node.children[i], docNode);
}
}
// Recurse on dependencies from create spec
for (var i = 0; i < docNode.dependencies.length; i++) {
aux.call(this, this.allLocalObjects[docNode.dependencies[i]], undefined);
}
doc[node.mulwapp_guid] = docNode;
}).call(this, root, undefined);
return doc;
}
/**
* Intercepts constructor calls to create a create specification before
* creating the object.
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object
* @param {Array} constructors - A list of constructors to intercept
*/
ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) {
if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated';
CONSTRUCTORS_DECORATED = true;
var _this = this;
constructors.forEach(function (name) {
var backupName = '_' + name;
// Backup the original constructor somewhere
THREE[backupName] = THREE[name];
// Override with your own, then call the original
THREE[name] = function () {
// Decorate constructor
if (!this._mulwapp_remote_create) {
if (mulwapp.applicationInitializationOngoing) {
this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid;
_this.nextIncrementalGuid++;
}
var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments);
this.mulwapp_create_spec = spec;
_this.allLocalObjects[this.mulwapp_guid] = this;
}
// Call original constructor
THREE[backupName].apply(this, arguments);
}
// Extend the original class
THREE[name].prototype = Object.create(THREE[backupName].prototype);
});
}
/**
* Generate the specification that is used by remote peers to replay
* object creation.
* @param {string} name - The name of the object type
* @param {string} guid - The mulwapp_guid of the object
* @param {Array} argum - The arguments given to the local constructor
*/
ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) {
var args = [];
// Argum is not an Array, but function parameters which is 'array like'
for (var i = 0; i < argum.length; i++) {
var arg = argum[i];
if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) {
args.push({primitive: false, value: arg.mulwapp_guid});
} else {
args.push({primitive: true, value: arg});
}
}
return {type: name, mulwapp_guid: guid, args: args}
}
/**
* Constructs an object from a specification made by Mulwapp.generateCreateSpec
* @param {Object} spec Specification needed to create the object.
* @return {Object} The object created
*/
ThreeAdapter.prototype.constructorReplayer = function (spec) {
function | (args) {
this._mulwapp_remote_create = true;
this.mulwapp_create_spec = spec;
return THREE[spec.type].apply(this, args);
}
F.prototype = THREE[spec.type].prototype;
// Parse argument list
var args = [];
spec.args.forEach(function (e) {
if (e.primitive) args.push(e.value);
else args.push(this.lookupNodeByGuid(e.value));
}, this);
// Create object
var o = new F(args);
o.mulwapp_guid = spec.mulwapp_guid;
this.allLocalObjects[spec.mulwapp_guid] = o;
return o;
}
/**
*
*/
ThreeAdapter.prototype.modelUpdater = function (op) {
var setProp = function (node, prop, val) {
var propPath = prop.split('.');
propPath.slice(0, -1).forEach(function (step) {
node = node[step];
});
node[propPath[propPath.length - 1]] = val;
}
var node = this.lookupNodeByGuid(op.guid);
if (op.type == 'update prop') {
setProp(node, op.key, op.val);
}
else if (op.type == 'insert child') {
var child = this.lookupNodeByGuid(op.key);
node.add(child);
}
else if (op.type == 'delete child') {
var child = this.lookupNodeByGuid(op.key);
node.remove(child);
}
else if (op.type == 'insert object') {
this.constructorReplayer(op.val.extra);
}
else if (op.type == 'delete object') {
delete this.allLocalObjects[op.guid];
}
}
/**
*
*/
ThreeAdapter.prototype.lookupNodeByGuid = function (guid) {
return this.allLocalObjects[guid];
}
ThreeAdapter.prototype.getConstructors = function () {
return [
// "REVISION",
// "log",
// "warn",
// "error",
// "MOUSE",
// "CullFaceNone",
// "CullFaceBack",
// "CullFaceFront",
// "CullFaceFrontBack",
// "FrontFaceDirectionCW",
// "FrontFaceDirectionCCW",
// "BasicShadowMap",
// "PCFShadowMap",
// "PCFSoftShadowMap",
// "FrontSide",
// "BackSide",
// "DoubleSide",
// "NoShading",
// "FlatShading",
// "SmoothShading",
// "NoColors",
// "FaceColors",
// "VertexColors",
// "NoBlending",
// "NormalBlending",
// "AdditiveBlending",
// "SubtractiveBlending",
// "MultiplyBlending",
// "CustomBlending",
// "AddEquation",
// "SubtractEquation",
// "ReverseSubtractEquation",
// "MinEquation",
// "MaxEquation",
// "ZeroFactor",
// "OneFactor",
// "SrcColorFactor",
// "OneMinusSrcColorFactor",
// "SrcAlphaFactor",
// "OneMinusSrcAlphaFactor",
// "DstAlphaFactor",
// "OneMinusDstAlphaFactor",
// "DstColorFactor",
// "OneMinusDstColorFactor",
// "SrcAlphaSaturateFactor",
// "MultiplyOperation",
// "MixOperation",
// "AddOperation",
// "UVMapping",
// "CubeReflectionMapping",
// "CubeRefractionMapping",
// "EquirectangularReflectionMapping",
// "EquirectangularRefractionMapping",
// "SphericalReflectionMapping",
// "RepeatWrapping",
// "ClampToEdgeWrapping",
// "MirroredRepeatWrapping",
// "NearestFilter",
// "NearestMipMapNearestFilter",
// "NearestMipMapLinearFilter",
// "LinearFilter",
// "LinearMipMapNearestFilter",
// "LinearMipMapLinearFilter",
// "UnsignedByteType",
// "ByteType",
// "ShortType",
// "UnsignedShortType",
// "IntType",
// "UnsignedIntType",
// "FloatType",
// "HalfFloatType",
// "UnsignedShort4444Type",
// "UnsignedShort5551Type",
// "UnsignedShort565Type",
// "AlphaFormat",
// "RGBFormat",
// "RGBAFormat",
// "LuminanceFormat",
// "LuminanceAlphaFormat",
// "RGBEFormat",
// "RGB_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT1_Format",
// "RGBA_S3TC_DXT3_Format",
// "RGBA_S3TC_DXT5_Format",
// "RGB_PVRTC_4BPPV1_Format",
// "RGB_PVRTC_2BPPV1_Format",
// "RGBA_PVRTC_4BPPV1_Format",
// "RGBA_PVRTC_2BPPV1_Format",
// "Projector",
// "CanvasRenderer",
// "Color",
// "ColorKeywords",
// "Quaternion",
// "Vector2",
// "Vector3",
// "Vector4",
// | F | identifier_name |
main.py | IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the main part of the property widget.
"""
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.models.properties import PropertiesModel
from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget
__version__ = "$Revision-Id:$"
class PropertyWidget(QtGui.QWidget, Ui_propertyWidget):
""" Implements the main part of the property widget. """
def __init__(self, parent):
""" @see: L{QWidget<PyQt4.QtGui.QWidget>} """
QtGui.QWidget.__init__(self, parent)
Ui_propertyWidget.__init__(self)
self.setupUi(self)
self._model = None
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot)
self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot)
self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot)
self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot)
def _propertyStateChangedSlot(self):
"""
Handles changes of properties of the model and updates
the button enabled states in accordance with the selection.
"""
self._updateButtonStates()
def _updateSlot(self, index):
"""
Slot is called when the data of a property entry has changed.
@param index: The index of the changed property entry.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if index.isValid():
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def _selectionChangedSlot(self, _):
"""
Slot is called when the selection of property entries has changed.
"""
self._updateButtonStates()
def _updateButtonStates(self):
"""
Updates the enabled state of the add, edit, clear, revert and delete buttons
in accordance with the selected properties.
"""
indexes = self.propertiesTableView.selectionModel().selectedIndexes()
self._setInitialButtonState()
if not self._model.isReadOnly and len(indexes) > 0:
# An action stays enabled only if every selected property supports it
canBeCleared = isDeletable = isRevertable = True
for index in indexes:
if index.isValid():
canBeCleared &= self._model.canBeCleared(index)
isDeletable &= self._model.isDeleteable(index)
isRevertable &= self._model.isRevertable(index)
# Enable / disable buttons
if len(indexes) == 1:
self.editButton.setEnabled(bool(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable))
self.clearValueButton.setEnabled(canBeCleared)
self.deleteButton.setEnabled(isDeletable)
self.revertButton.setEnabled(isRevertable)
self.addButton.setEnabled(True)
def _setInitialButtonState(self):
""" Sets the initial button state. """
self.addButton.setEnabled(not self._model.isReadOnly)
self.editButton.setEnabled(False)
self.clearValueButton.setEnabled(False)
self.deleteButton.setEnabled(False)
self.revertButton.setEnabled(False)
def _addClickedSlot(self):
""" Slot is called when the add button is used. """
index = self._model.add()
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model
self._editClickedSlot()
def _editClickedSlot(self):
""" Slot is called when the edit button is used. """
index = self.propertiesTableView.selectionModel().currentIndex()
if index.isValid():
self.propertiesTableView.edit(index)
def _clearValueClickedSlot(self):
""" Slot is called when the set empty button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.clearValue(index)
def _determinePropertyRows(self):
""" Determines the indexes of the property rows selected by the user. """
selectedIndexes = list()
rows = list() # used to check for / avoid multiple entries
for index in self.propertiesTableView.selectionModel().selectedIndexes():
if not index.row() in rows:
selectedIndexes.append(index)
rows.append(index.row())
selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True)
return selectedIndexes
def _deleteClickedSlot(self):
""" Slot is called when the delete button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.remove(index)
def _revertClickedSlot(self):
""" Slot is called when the revert button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.revert(index)
def _refreshClickedSlot(self):
""" Slot is called when the refresh button is used. """
if self._model.dirty:
button = QtGui.QMessageBox.information(self, self.tr("Refresh information"),
self.tr("All changes will be lost after the update.\n Do you want to continue?"),
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No,
QtGui.QMessageBox.Yes)
if button == QtGui.QMessageBox.No:
return
self._model.refresh()
self.propertiesTableView.setSortingEnabled(True)
def _setModel(self, model):
"""
Sets the model.
@param model: Model representing a set of properties.
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>}
"""
self._model = model
self.propertiesTableView.setModel(model)
self._setInitialButtonState()
column, order = self._model.sortProperties
self.propertiesTableView.horizontalHeader().setSortIndicator(column, order)
self.propertiesTableView.setSortingEnabled(True)
propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE,
constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE]
self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self))
self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot)
self.connect(self.propertiesTableView.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self._selectionChangedSlot)
self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL),
self._propertyStateChangedSlot)
def _getModel(self):
|
def activateRefreshButton(self):
""" Activates the refresh button. """
self.refreshButton.show()
def deactivateRefreshButton(self):
""" De-activates the refresh button. """
self.refreshButton.hide()
model = property(_getModel, _setModel)
class _PropertyItemDelegate(QtGui.QStyledItemDelegate):
"""
This item delegate has to choose the right editor for the expected property type
and has to handle the conversion of the editor input to a proper model format.
"""
def __init__(self, propertyTypes, model, parent=None):
"""
Constructor.
@param propertyTypes: Property types available for this property
@type propertyTypes: C{list} of C{unicode}
@param parent: Parent object of the delegate.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._factory = EditorFactory()
self._propertyTypes = propertyTypes
self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot )
self._currentEditedRow = -1
self._currentEditedColumn = -1
self._model = model
def _handleEditorClosedSlot(self, _, hint):
""" Handles the closing of editor to remove added property entries without property name. """
if hint == QtGui.QAbstractItemDelegate.RevertModelCache \
and self._currentEditedColumn == 0:
index = self._model.index(self._currentEditedRow, self._currentEditedColumn)
index.model().setData(index, QtCore.QVariant(None))
def createEditor(self, parent, _, index):
""" @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor | """ Getter of the property model. """
return self._model | identifier_body |
main.py | WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the main part of the property widget.
"""
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.models.properties import PropertiesModel
from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget
__version__ = "$Revision-Id:$"
class PropertyWidget(QtGui.QWidget, Ui_propertyWidget):
""" Implements the main part of the property widget. """
def __init__(self, parent):
""" @see: L{QWidget<PyQt4.QtGui.QWidget>} """
QtGui.QWidget.__init__(self, parent)
Ui_propertyWidget.__init__(self)
self.setupUi(self)
self._model = None
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot)
self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot)
self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot)
self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot)
def _propertyStateChangedSlot(self):
"""
Handles changes of properties of the model and updates
the button enabled states in accordance to the selection.
"""
self._updateButtonStates()
def _updateSlot(self, index):
"""
Slot is called when data of property entry has changed.
@param index: The index of the selected index.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if index.isValid():
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def _selectionChangedSlot(self, _):
"""
Slot is called when the selected property entries changed.
"""
self._updateButtonStates()
def _updateButtonStates(self):
"""
Updates the enabled state of the add, edit, clear, revert and delete buttons
in accordance to the selected properties.
"""
indexes = self.propertiesTableView.selectionModel().selectedIndexes()
self._setInitialButtonState()
if not self._model.isReadOnly and len(indexes) > 0:
canBeCleared = isDeletable = isRevertable = True
for index in indexes:
if index.isValid():
canBeCleared &= self._model.canBeCleared(index)
isDeletable &= self._model.isDeleteable(index)
isRevertable &= self._model.isRevertable(index)
# Enable / disable buttons
if len(indexes) == 1:
self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable)
self.clearValueButton.setEnabled(canBeCleared)
self.deleteButton.setEnabled(isDeletable)
self.revertButton.setEnabled(isRevertable)
self.addButton.setEnabled(True)
def _setInitialButtonState(self):
""" Sets the initial button state. """
self.addButton.setEnabled(not self._model.isReadOnly)
self.editButton.setEnabled(False)
self.clearValueButton.setEnabled(False)
self.deleteButton.setEnabled(False)
self.revertButton.setEnabled(False)
def _addClickedSlot(self):
""" Slot is called when the add button is used. """
index = self._model.add()
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model
self._editClickedSlot()
def _editClickedSlot(self):
""" Slot is called when the edit button is used. """
index = self.propertiesTableView.selectionModel().currentIndex()
if index.isValid():
self.propertiesTableView.edit(index)
def _clearValueClickedSlot(self):
""" Slot is called when the set empty button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.clearValue(index)
def _determinePropertyRows(self):
""" Determines the indexes of the property rows selected by the user. """
selectedIndexes = list()
rows = list() # used to check for / avoid multiple entries
for index in self.propertiesTableView.selectionModel().selectedIndexes():
if not index.row() in rows:
selectedIndexes.append(index)
rows.append(index.row())
selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True)
return selectedIndexes
def _deleteClickedSlot(self):
""" Slot is called when the delete button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.remove(index)
def _revertClickedSlot(self):
""" Slot is called when the revert button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.revert(index)
def _refreshClickedSlot(self):
""" Slot is called when the refresh button is used. """
if self._model.dirty:
button = QtGui.QMessageBox.information(self, self.tr("Refresh information"),
self.tr("All changes will be lost after the update.\n Do you want to continue?"),
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No,
QtGui.QMessageBox.Yes)
if button == QtGui.QMessageBox.No:
return
self._model.refresh()
self.propertiesTableView.setSortingEnabled(True)
def _setModel(self, model):
"""
Sets the model.
@param model: Model representing a set of properties.
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>}
"""
self._model = model
self.propertiesTableView.setModel(model)
self._setInitialButtonState()
column, order = self._model.sortProperties
self.propertiesTableView.horizontalHeader().setSortIndicator(column, order)
self.propertiesTableView.setSortingEnabled(True)
propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE,
constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE]
self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self))
self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot)
self.connect(self.propertiesTableView.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self._selectionChangedSlot)
self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL),
self._propertyStateChangedSlot)
def _getModel(self):
""" Getter of the property model. """
return self._model
def activateRefreshButton(self):
""" Activates the refresh button. """
self.refreshButton.show()
def deactivateRefreshButton(self):
""" De-activates the refresh button. """
self.refreshButton.hide()
model = property(_getModel, _setModel)
class _PropertyItemDelegate(QtGui.QStyledItemDelegate):
"""
This item delegate has to choose the right editor for the expected property type
and has to handle the conversion of the editor input to a proper model format.
"""
def | (self, propertyTypes, model, parent=None):
"""
Constructor.
@param propertyTypes: Property types available for this property
@type propertyTypes: C{list} of C{unicode}
@param parent: Parent object of the delegate.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._factory = EditorFactory()
self._propertyTypes = propertyTypes
self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot )
self._currentEditedRow = -1
self._currentEditedColumn = -1
self._model = model
def _handleEditorClosedSlot(self, _, hint):
""" Handles the closing of editor to remove added property entries without property name. """
if hint == QtGui.QAbstractItemDelegate.RevertModelCache \
and self._currentEditedColumn == 0:
index = self._model.index(self._currentEditedRow, self._currentEditedColumn)
index.model().setData(index, QtCore.QVariant(None))
def createEditor(self, parent, _, index):
""" @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor | __init__ | identifier_name |
main.py | WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the main part of the property widget.
"""
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.models.properties import PropertiesModel
from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget
__version__ = "$Revision-Id:$"
class PropertyWidget(QtGui.QWidget, Ui_propertyWidget):
""" Implements the main part of the property widget. """
def __init__(self, parent):
""" @see: L{QWidget<PyQt4.QtGui.QWidget>} """
QtGui.QWidget.__init__(self, parent)
Ui_propertyWidget.__init__(self)
self.setupUi(self)
self._model = None
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot)
self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot)
self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot)
self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot)
def _propertyStateChangedSlot(self):
"""
Handles changes of properties of the model and updates
the button enabled states in accordance to the selection.
"""
self._updateButtonStates()
def _updateSlot(self, index):
"""
Slot is called when data of property entry has changed.
@param index: The index of the selected index.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if index.isValid():
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def _selectionChangedSlot(self, _):
"""
Slot is called when the selected property entries changed.
"""
self._updateButtonStates()
def _updateButtonStates(self):
"""
Updates the enabled state of the add, edit, clear, revert and delete buttons
in accordance to the selected properties.
"""
indexes = self.propertiesTableView.selectionModel().selectedIndexes()
self._setInitialButtonState()
if not self._model.isReadOnly and len(indexes) > 0:
canBeCleared = isDeletable = isRevertable = True
for index in indexes:
if index.isValid():
canBeCleared &= self._model.canBeCleared(index)
isDeletable &= self._model.isDeleteable(index)
isRevertable &= self._model.isRevertable(index)
# Enable / disable buttons
if len(indexes) == 1:
self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable)
self.clearValueButton.setEnabled(canBeCleared)
self.deleteButton.setEnabled(isDeletable)
self.revertButton.setEnabled(isRevertable)
self.addButton.setEnabled(True)
def _setInitialButtonState(self):
""" Sets the initial button state. """
self.addButton.setEnabled(not self._model.isReadOnly)
self.editButton.setEnabled(False)
self.clearValueButton.setEnabled(False)
self.deleteButton.setEnabled(False)
self.revertButton.setEnabled(False)
def _addClickedSlot(self):
""" Slot is called when the add button is used. """
index = self._model.add()
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model
self._editClickedSlot()
def _editClickedSlot(self):
""" Slot is called when the edit button is used. """
index = self.propertiesTableView.selectionModel().currentIndex()
if index.isValid():
self.propertiesTableView.edit(index)
def _clearValueClickedSlot(self):
""" Slot is called when the set empty button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.clearValue(index)
def _determinePropertyRows(self):
""" Determines the indexes of the property rows selected by the user. """
selectedIndexes = list()
rows = list() # used to check for / avoid multiple entries
for index in self.propertiesTableView.selectionModel().selectedIndexes():
if not index.row() in rows:
selectedIndexes.append(index)
rows.append(index.row())
selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True)
return selectedIndexes
def _deleteClickedSlot(self):
""" Slot is called when the delete button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.remove(index)
def _revertClickedSlot(self):
""" Slot is called when the revert button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.revert(index)
def _refreshClickedSlot(self):
""" Slot is called when the refresh button is used. """
if self._model.dirty:
button = QtGui.QMessageBox.information(self, self.tr("Refresh information"),
self.tr("All changes will be lost after the update.\n Do you want to continue?"),
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No,
QtGui.QMessageBox.Yes)
if button == QtGui.QMessageBox.No:
return
self._model.refresh()
self.propertiesTableView.setSortingEnabled(True)
def _setModel(self, model):
"""
Sets the model.
@param model: Model representing a set of properties.
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>}
"""
self._model = model
self.propertiesTableView.setModel(model)
self._setInitialButtonState()
column, order = self._model.sortProperties
self.propertiesTableView.horizontalHeader().setSortIndicator(column, order)
self.propertiesTableView.setSortingEnabled(True)
propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE,
constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE]
self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self))
self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot)
self.connect(self.propertiesTableView.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self._selectionChangedSlot)
self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL),
self._propertyStateChangedSlot)
def _getModel(self):
""" Getter of the property model. """
return self._model
def activateRefreshButton(self):
""" Activates the refresh button. """
self.refreshButton.show()
def deactivateRefreshButton(self):
""" De-activates the refresh button. """
self.refreshButton.hide()
model = property(_getModel, _setModel)
class _PropertyItemDelegate(QtGui.QStyledItemDelegate):
"""
| and has to handle the conversion of the editor input to a proper model format.
"""
def __init__(self, propertyTypes, model, parent=None):
"""
Constructor.
@param propertyTypes: Property types available for this property
@type propertyTypes: C{list} of C{unicode}
@param parent: Parent object of the delegate.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._factory = EditorFactory()
self._propertyTypes = propertyTypes
self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot )
self._currentEditedRow = -1
self._currentEditedColumn = -1
self._model = model
def _handleEditorClosedSlot(self, _, hint):
""" Handles the closing of editor to remove added property entries without property name. """
if hint == QtGui.QAbstractItemDelegate.RevertModelCache \
and self._currentEditedColumn == 0:
index = self._model.index(self._currentEditedRow, self._currentEditedColumn)
index.model().setData(index, QtCore.QVariant(None))
def createEditor(self, parent, _, index):
""" @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor | This item delegate has to choose the right editor for the expected property type
| random_line_split |
main.py | WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the main part of the property widget.
"""
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.models.properties import PropertiesModel
from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget
__version__ = "$Revision-Id:$"
class PropertyWidget(QtGui.QWidget, Ui_propertyWidget):
""" Implements the main part of the property widget. """
def __init__(self, parent):
""" @see: L{QWidget<PyQt4.QtGui.QWidget>} """
QtGui.QWidget.__init__(self, parent)
Ui_propertyWidget.__init__(self)
self.setupUi(self)
self._model = None
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot)
self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot)
self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot)
self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot)
def _propertyStateChangedSlot(self):
"""
Handles changes of properties of the model and updates
the button enabled states in accordance to the selection.
"""
self._updateButtonStates()
def _updateSlot(self, index):
"""
Slot is called when data of property entry has changed.
@param index: The index of the selected index.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if index.isValid():
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def _selectionChangedSlot(self, _):
"""
Slot is called when the selected property entries changed.
"""
self._updateButtonStates()
def _updateButtonStates(self):
"""
Updates the enabled state of the add, edit, clear, revert and delete buttons
in accordance to the selected properties.
"""
indexes = self.propertiesTableView.selectionModel().selectedIndexes()
self._setInitialButtonState()
if not self._model.isReadOnly and len(indexes) > 0:
canBeCleared = isDeletable = isRevertable = True
for index in indexes:
if index.isValid():
canBeCleared &= self._model.canBeCleared(index)
isDeletable &= self._model.isDeleteable(index)
isRevertable &= self._model.isRevertable(index)
# Enable / disable buttons
if len(indexes) == 1:
self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable)
self.clearValueButton.setEnabled(canBeCleared)
self.deleteButton.setEnabled(isDeletable)
self.revertButton.setEnabled(isRevertable)
self.addButton.setEnabled(True)
def _setInitialButtonState(self):
""" Sets the initial button state. """
self.addButton.setEnabled(not self._model.isReadOnly)
self.editButton.setEnabled(False)
self.clearValueButton.setEnabled(False)
self.deleteButton.setEnabled(False)
self.revertButton.setEnabled(False)
def _addClickedSlot(self):
""" Slot is called when the add button is used. """
index = self._model.add()
self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model
self._editClickedSlot()
def _editClickedSlot(self):
""" Slot is called when the edit button is used. """
index = self.propertiesTableView.selectionModel().currentIndex()
if index.isValid():
self.propertiesTableView.edit(index)
def _clearValueClickedSlot(self):
""" Slot is called when the set empty button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.clearValue(index)
def _determinePropertyRows(self):
""" Determines the indexes of the property rows selected by the user. """
selectedIndexes = list()
rows = list() # used to check for / avoid multiple entries
for index in self.propertiesTableView.selectionModel().selectedIndexes():
if not index.row() in rows:
selectedIndexes.append(index)
rows.append(index.row())
selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True)
return selectedIndexes
def _deleteClickedSlot(self):
""" Slot is called when the delete button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
if index.isValid():
self._model.remove(index)
def _revertClickedSlot(self):
""" Slot is called when the revert button is used. """
selectedIndexes = self._determinePropertyRows()
for index in selectedIndexes:
|
def _refreshClickedSlot(self):
""" Slot is called when the refresh button is used. """
if self._model.dirty:
button = QtGui.QMessageBox.information(self, self.tr("Refresh information"),
self.tr("All changes will be lost after the update.\n Do you want to continue?"),
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No,
QtGui.QMessageBox.Yes)
if button == QtGui.QMessageBox.No:
return
self._model.refresh()
self.propertiesTableView.setSortingEnabled(True)
def _setModel(self, model):
"""
Sets the model.
@param model: Model representing a set of properties.
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>}
"""
self._model = model
self.propertiesTableView.setModel(model)
self._setInitialButtonState()
column, order = self._model.sortProperties
self.propertiesTableView.horizontalHeader().setSortIndicator(column, order)
self.propertiesTableView.setSortingEnabled(True)
propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE,
constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE]
self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self))
self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot)
self.connect(self.propertiesTableView.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self._selectionChangedSlot)
self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL),
self._propertyStateChangedSlot)
def _getModel(self):
""" Getter of the property model. """
return self._model
def activateRefreshButton(self):
""" Activates the refresh button. """
self.refreshButton.show()
def deactivateRefreshButton(self):
""" De-activates the refresh button. """
self.refreshButton.hide()
model = property(_getModel, _setModel)
class _PropertyItemDelegate(QtGui.QStyledItemDelegate):
"""
This item delegate has to choose the right editor for the expected property type
and has to handle the conversion of the editor input to a proper model format.
"""
def __init__(self, propertyTypes, model, parent=None):
"""
Constructor.
@param propertyTypes: Property types available for this property
@type propertyTypes: C{list} of C{unicode}
@param parent: Parent object of the delegate.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QStyledItemDelegate.__init__(self, parent)
self._factory = EditorFactory()
self._propertyTypes = propertyTypes
self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot )
self._currentEditedRow = -1
self._currentEditedColumn = -1
self._model = model
def _handleEditorClosedSlot(self, _, hint):
""" Handles the closing of editor to remove added property entries without property name. """
if hint == QtGui.QAbstractItemDelegate.RevertModelCache \
and self._currentEditedColumn == 0:
index = self._model.index(self._currentEditedRow, self._currentEditedColumn)
index.model().setData(index, QtCore.QVariant(None))
def createEditor(self, parent, _, index):
""" @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor | if index.isValid():
self._model.revert(index) | conditional_block |
local.go | .Wrap(err, "adding base layer to image")
}
return nil
}
func (i *Image) Label(key string) (string, error) {
labels := i.inspect.Config.Labels
return labels[key], nil
}
func (i *Image) Labels() (map[string]string, error) {
copiedLabels := make(map[string]string)
for i, l := range i.inspect.Config.Labels {
copiedLabels[i] = l
}
return copiedLabels, nil
}
func (i *Image) Env(key string) (string, error) {
for _, envVar := range i.inspect.Config.Env {
parts := strings.Split(envVar, "=")
if parts[0] == key {
return parts[1], nil
}
}
return "", nil
}
func (i *Image) Entrypoint() ([]string, error) {
return i.inspect.Config.Entrypoint, nil
}
func (i *Image) OS() (string, error) {
return i.inspect.Os, nil
}
func (i *Image) OSVersion() (string, error) {
return i.inspect.OsVersion, nil
}
func (i *Image) Architecture() (string, error) {
return i.inspect.Architecture, nil
}
func (i *Image) Rename(name string) {
i.repoName = name
}
func (i *Image) Name() string {
return i.repoName
}
func (i *Image) Found() bool |
func (i *Image) Identifier() (imgutil.Identifier, error) {
return IDIdentifier{
ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"),
}, nil
}
func (i *Image) CreatedAt() (time.Time, error) {
createdAtTime := i.inspect.Created
createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime)
if err != nil {
return time.Time{}, err
}
return createdTime, nil
}
func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error {
ctx := context.Background()
// FIND TOP LAYER
var keepLayersIdx int
for idx, diffID := range i.inspect.RootFS.Layers {
if diffID == baseTopLayer {
keepLayersIdx = idx + 1
break
}
}
if keepLayersIdx == 0 {
return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName)
}
// DOWNLOAD IMAGE
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
// SWITCH BASE LAYERS
newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name())
if err != nil {
return errors.Wrapf(err, "read config for new base image %q", newBase)
}
i.inspect.ID = newBaseInspect.ID
i.downloadBaseOnce = &sync.Once{}
i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...)
i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...)
return nil
}
func (i *Image) SetLabel(key, val string) error {
if i.inspect.Config.Labels == nil {
i.inspect.Config.Labels = map[string]string{}
}
i.inspect.Config.Labels[key] = val
return nil
}
func (i *Image) SetOS(osVal string) error {
if osVal != i.inspect.Os {
return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os)
}
return nil
}
func (i *Image) SetOSVersion(osVersion string) error {
i.inspect.OsVersion = osVersion
return nil
}
func (i *Image) SetArchitecture(architecture string) error {
i.inspect.Architecture = architecture
return nil
}
func (i *Image) RemoveLabel(key string) error {
delete(i.inspect.Config.Labels, key)
return nil
}
func (i *Image) SetEnv(key, val string) error {
ignoreCase := i.inspect.Os == "windows"
for idx, kv := range i.inspect.Config.Env {
parts := strings.SplitN(kv, "=", 2)
foundKey := parts[0]
searchKey := key
if ignoreCase {
foundKey = strings.ToUpper(foundKey)
searchKey = strings.ToUpper(searchKey)
}
if foundKey == searchKey {
i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val)
return nil
}
}
i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val))
return nil
}
func (i *Image) SetWorkingDir(dir string) error {
i.inspect.Config.WorkingDir = dir
return nil
}
func (i *Image) SetEntrypoint(ep ...string) error {
i.inspect.Config.Entrypoint = ep
return nil
}
func (i *Image) SetCmd(cmd ...string) error {
i.inspect.Config.Cmd = cmd
return nil
}
func (i *Image) TopLayer() (string, error) {
all := i.inspect.RootFS.Layers
if len(all) == 0 {
return "", fmt.Errorf("image %q has no layers", i.repoName)
}
topLayer := all[len(all)-1]
return topLayer, nil
}
func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) {
for l := range i.inspect.RootFS.Layers {
if i.inspect.RootFS.Layers[l] != diffID {
continue
}
if i.layerPaths[l] == "" {
if err := i.downloadBaseLayersOnce(); err != nil {
return nil, err
}
if i.layerPaths[l] == "" {
return nil, fmt.Errorf("fetching layer %q from daemon", diffID)
}
}
return os.Open(i.layerPaths[l])
}
return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID)
}
func (i *Image) AddLayer(path string) error {
f, err := os.Open(filepath.Clean(path))
if err != nil {
return errors.Wrapf(err, "AddLayer: open layer: %s", path)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path)
}
diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return i.AddLayerWithDiffID(path, diffID)
}
func (i *Image) AddLayerWithDiffID(path, diffID string) error {
i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID)
i.layerPaths = append(i.layerPaths, path)
return nil
}
func (i *Image) ReuseLayer(diffID string) error {
if i.prevImage == nil {
return errors.New("failed to reuse layer because no previous image was provided")
}
if !i.prevImage.Found() {
return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName)
}
if err := i.prevImage.downloadBaseLayersOnce(); err != nil {
return err
}
for l := range i.prevImage.inspect.RootFS.Layers {
if i.prevImage.inspect.RootFS.Layers[l] == diffID {
return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID)
}
}
return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name())
}
func (i *Image) Save(additionalNames ...string) error {
// during the first save attempt some layers may be excluded. The docker daemon allows this if the given set
// of layers already exists in the daemon in the given order
inspect, err := i.doSave()
if err != nil {
// populate all layer paths and try again without the above performance optimization.
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
inspect, err = i.doSave()
if err != nil {
saveErr := imgutil.SaveError{}
for _, n := range append([]string{i.Name()}, additionalNames...) {
saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
return saveErr
}
}
i.inspect = inspect
var errs []imgutil.SaveDiagnostic
for _, n := range append([]string{i.Name()}, additionalNames...) {
if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil {
errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
}
if len(errs) > 0 {
return imgutil.SaveError{Errors: errs}
}
return nil
}
func (i *Image) doSave() (types.ImageInspect, error) {
ctx := context.Background()
done := make(chan error)
t, err := name.NewTag(i.repoName, name.WeakValidation)
if err != nil {
return types.ImageInspect{}, err
}
// returns valid | {
return i.inspect.ID != ""
} | identifier_body |
local.go | }
return os.Open(i.layerPaths[l])
}
return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID)
}
func (i *Image) AddLayer(path string) error {
f, err := os.Open(filepath.Clean(path))
if err != nil {
return errors.Wrapf(err, "AddLayer: open layer: %s", path)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path)
}
diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return i.AddLayerWithDiffID(path, diffID)
}
func (i *Image) AddLayerWithDiffID(path, diffID string) error {
i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID)
i.layerPaths = append(i.layerPaths, path)
return nil
}
func (i *Image) ReuseLayer(diffID string) error {
if i.prevImage == nil {
return errors.New("failed to reuse layer because no previous image was provided")
}
if !i.prevImage.Found() {
return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName)
}
if err := i.prevImage.downloadBaseLayersOnce(); err != nil {
return err
}
for l := range i.prevImage.inspect.RootFS.Layers {
if i.prevImage.inspect.RootFS.Layers[l] == diffID {
return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID)
}
}
return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name())
}
func (i *Image) Save(additionalNames ...string) error {
// during the first save attempt some layers may be excluded. The docker daemon allows this if the given set
// of layers already exists in the daemon in the given order
inspect, err := i.doSave()
if err != nil {
// populate all layer paths and try again without the above performance optimization.
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
inspect, err = i.doSave()
if err != nil {
saveErr := imgutil.SaveError{}
for _, n := range append([]string{i.Name()}, additionalNames...) {
saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
return saveErr
}
}
i.inspect = inspect
var errs []imgutil.SaveDiagnostic
for _, n := range append([]string{i.Name()}, additionalNames...) {
if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil {
errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
}
if len(errs) > 0 {
return imgutil.SaveError{Errors: errs}
}
return nil
}
func (i *Image) doSave() (types.ImageInspect, error) {
ctx := context.Background()
done := make(chan error)
t, err := name.NewTag(i.repoName, name.WeakValidation)
if err != nil {
return types.ImageInspect{}, err
}
// returns valid 'name:tag' appending 'latest', if missing tag
repoName := t.Name()
pr, pw := io.Pipe()
defer pw.Close()
go func() {
res, err := i.docker.ImageLoad(ctx, pr, true)
if err != nil {
done <- err
return
}
// only return response error after response is drained and closed
responseErr := checkResponseError(res.Body)
drainCloseErr := ensureReaderClosed(res.Body)
if responseErr != nil {
done <- responseErr
return
}
if drainCloseErr != nil {
done <- drainCloseErr
}
done <- nil
}()
tw := tar.NewWriter(pw)
defer tw.Close()
configFile, err := i.newConfigFile()
if err != nil {
return types.ImageInspect{}, errors.Wrap(err, "generating config file")
}
id := fmt.Sprintf("%x", sha256.Sum256(configFile))
if err := addTextToTar(tw, id+".json", configFile); err != nil {
return types.ImageInspect{}, err
}
var blankIdx int
var layerPaths []string
for _, path := range i.layerPaths {
if path == "" {
layerName := fmt.Sprintf("blank_%d", blankIdx)
blankIdx++
hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0}
if err := tw.WriteHeader(hdr); err != nil {
return types.ImageInspect{}, err
}
layerPaths = append(layerPaths, layerName)
} else {
layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path)))
f, err := os.Open(filepath.Clean(path))
if err != nil {
return types.ImageInspect{}, err
}
defer f.Close()
if err := addFileToTar(tw, layerName, f); err != nil {
return types.ImageInspect{}, err
}
f.Close()
layerPaths = append(layerPaths, layerName)
}
}
manifest, err := json.Marshal([]map[string]interface{}{
{
"Config": id + ".json",
"RepoTags": []string{repoName},
"Layers": layerPaths,
},
})
if err != nil {
return types.ImageInspect{}, err
}
if err := addTextToTar(tw, "manifest.json", manifest); err != nil {
return types.ImageInspect{}, err
}
tw.Close()
pw.Close()
err = <-done
if err != nil {
return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. first error", i.repoName)
}
inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id)
if err != nil {
if client.IsErrNotFound(err) {
return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName)
}
return types.ImageInspect{}, err
}
return inspect, nil
}
func (i *Image) newConfigFile() ([]byte, error) {
cfg, err := v1Config(i.inspect)
if err != nil {
return nil, err
}
return json.Marshal(cfg)
}
func (i *Image) Delete() error {
if !i.Found() {
return nil
}
options := types.ImageRemoveOptions{
Force: true,
PruneChildren: true,
}
_, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options)
return err
}
func (i *Image) ManifestSize() (int64, error) {
return 0, nil
}
// downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called.
// subsequent calls do nothing.
func (i *Image) downloadBaseLayersOnce() error {
var err error
if !i.Found() {
return nil
}
i.downloadBaseOnce.Do(func() {
err = i.downloadBaseLayers()
})
if err != nil {
return errors.Wrap(err, "fetching base layers")
}
return err
}
func (i *Image) downloadBaseLayers() error {
ctx := context.Background()
imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID})
if err != nil {
return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID)
}
defer ensureReaderClosed(imageReader)
tmpDir, err := ioutil.TempDir("", "imgutil.local.image.")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
err = untar(imageReader, tmpDir)
if err != nil {
return err
}
mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json")))
if err != nil {
return err
}
defer mf.Close()
var manifest []struct {
Config string
Layers []string
}
if err := json.NewDecoder(mf).Decode(&manifest); err != nil {
return err
}
if len(manifest) != 1 {
return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest))
}
df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config)))
if err != nil {
return err
}
defer df.Close()
var details struct {
RootFS struct {
DiffIDs []string `json:"diff_ids"`
} `json:"rootfs"`
}
if err = json.NewDecoder(df).Decode(&details); err != nil {
return err
}
for l := range details.RootFS.DiffIDs {
i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l])
}
for l := range i.layerPaths {
if i.layerPaths[l] == "" | {
return errors.New("failed to download all base layers from daemon")
} | conditional_block |
|
local.go | (defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error {
if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS {
return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS)
}
return nil
}
func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error {
if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil {
return err
}
prevImage, err := NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName))
if err != nil {
return errors.Wrapf(err, "getting previous image %q", prevImageRepoName)
}
image.prevImage = prevImage
return nil
}
func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error {
inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform)
if err != nil {
return err
}
image.inspect = inspect
image.layerPaths = make([]string, len(image.inspect.RootFS.Layers))
return nil
}
func prepareNewWindowsImage(image *Image) error {
// only append base layer to empty image
if len(image.inspect.RootFS.Layers) > 0 {
return nil
}
layerReader, err := layer.WindowsBaseLayer()
if err != nil {
return err
}
layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer")
if err != nil {
return errors.Wrap(err, "creating temp file")
}
defer layerFile.Close()
hasher := sha256.New()
multiWriter := io.MultiWriter(layerFile, hasher)
if _, err := io.Copy(multiWriter, layerReader); err != nil {
return errors.Wrap(err, "copying base layer")
}
diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil))
if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil {
return errors.Wrap(err, "adding base layer to image")
}
return nil
}
func (i *Image) Label(key string) (string, error) {
labels := i.inspect.Config.Labels
return labels[key], nil
}
func (i *Image) Labels() (map[string]string, error) {
copiedLabels := make(map[string]string)
for i, l := range i.inspect.Config.Labels {
copiedLabels[i] = l
}
return copiedLabels, nil
}
func (i *Image) Env(key string) (string, error) {
for _, envVar := range i.inspect.Config.Env {
parts := strings.Split(envVar, "=")
if parts[0] == key {
return parts[1], nil
}
}
return "", nil
}
func (i *Image) Entrypoint() ([]string, error) {
return i.inspect.Config.Entrypoint, nil
}
func (i *Image) OS() (string, error) {
return i.inspect.Os, nil
}
func (i *Image) OSVersion() (string, error) {
return i.inspect.OsVersion, nil
}
func (i *Image) Architecture() (string, error) {
return i.inspect.Architecture, nil
}
func (i *Image) Rename(name string) {
i.repoName = name
}
func (i *Image) Name() string {
return i.repoName
}
func (i *Image) Found() bool {
return i.inspect.ID != ""
}
func (i *Image) Identifier() (imgutil.Identifier, error) {
return IDIdentifier{
ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"),
}, nil
}
func (i *Image) CreatedAt() (time.Time, error) {
createdAtTime := i.inspect.Created
createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime)
if err != nil {
return time.Time{}, err
}
return createdTime, nil
}
func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error {
ctx := context.Background()
// FIND TOP LAYER
var keepLayersIdx int
for idx, diffID := range i.inspect.RootFS.Layers {
if diffID == baseTopLayer {
keepLayersIdx = idx + 1
break
}
}
if keepLayersIdx == 0 {
return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName)
}
// DOWNLOAD IMAGE
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
// SWITCH BASE LAYERS
newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name())
if err != nil {
return errors.Wrapf(err, "read config for new base image %q", newBase)
}
i.inspect.ID = newBaseInspect.ID
i.downloadBaseOnce = &sync.Once{}
i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...)
i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...)
return nil
}
func (i *Image) SetLabel(key, val string) error {
if i.inspect.Config.Labels == nil {
i.inspect.Config.Labels = map[string]string{}
}
i.inspect.Config.Labels[key] = val
return nil
}
func (i *Image) SetOS(osVal string) error {
if osVal != i.inspect.Os {
return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os)
}
return nil
}
func (i *Image) SetOSVersion(osVersion string) error {
i.inspect.OsVersion = osVersion
return nil
}
func (i *Image) SetArchitecture(architecture string) error {
i.inspect.Architecture = architecture
return nil
}
func (i *Image) RemoveLabel(key string) error {
delete(i.inspect.Config.Labels, key)
return nil
}
func (i *Image) SetEnv(key, val string) error {
ignoreCase := i.inspect.Os == "windows"
for idx, kv := range i.inspect.Config.Env {
parts := strings.SplitN(kv, "=", 2)
foundKey := parts[0]
searchKey := key
if ignoreCase {
foundKey = strings.ToUpper(foundKey)
searchKey = strings.ToUpper(searchKey)
}
if foundKey == searchKey {
i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val)
return nil
}
}
i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val))
return nil
}
func (i *Image) SetWorkingDir(dir string) error {
i.inspect.Config.WorkingDir = dir
return nil
}
func (i *Image) SetEntrypoint(ep ...string) error {
i.inspect.Config.Entrypoint = ep
return nil
}
func (i *Image) SetCmd(cmd ...string) error {
i.inspect.Config.Cmd = cmd
return nil
}
func (i *Image) TopLayer() (string, error) {
all := i.inspect.RootFS.Layers
if len(all) == 0 {
return "", fmt.Errorf("image %q has no layers", i.repoName)
}
topLayer := all[len(all)-1]
return topLayer, nil
}
func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) {
for l := range i.inspect.RootFS.Layers {
if i.inspect.RootFS.Layers[l] != diffID {
continue
}
if i.layerPaths[l] == "" {
if err := i.downloadBaseLayersOnce(); err != nil {
return nil, err
}
if i.layerPaths[l] == "" {
return nil, fmt.Errorf("fetching layer %q from daemon", diffID)
}
}
return os.Open(i.layerPaths[l])
}
return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID)
}
func (i *Image) AddLayer(path string) error {
f, err := os.Open(filepath.Clean(path))
if err != nil {
return errors.Wrapf(err, "AddLayer: open layer: %s", path)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path)
}
diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return i.AddLayerWithDiffID(path, diffID)
}
func (i *Image) AddLayerWithDiffID(path, diffID string) error {
i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID)
i.layerPaths = append(i.layerPaths, path)
return nil
}
func (i *Image) ReuseLayer(diffID string) error {
if i.prevImage == nil {
return errors.New("failed to reuse layer because no previous image was provided")
}
if !i.prevImage.Found() {
return fmt.Errorf("failed to reuse layer | validatePlatformOption | identifier_name |
|
local.go | errors.Wrap(err, "adding base layer to image")
}
return nil
}
func (i *Image) Label(key string) (string, error) {
labels := i.inspect.Config.Labels
return labels[key], nil
}
func (i *Image) Labels() (map[string]string, error) {
copiedLabels := make(map[string]string)
for i, l := range i.inspect.Config.Labels {
copiedLabels[i] = l
}
return copiedLabels, nil
}
func (i *Image) Env(key string) (string, error) {
for _, envVar := range i.inspect.Config.Env {
parts := strings.Split(envVar, "=")
if parts[0] == key {
return parts[1], nil
}
}
return "", nil
}
func (i *Image) Entrypoint() ([]string, error) {
return i.inspect.Config.Entrypoint, nil
}
func (i *Image) OS() (string, error) {
return i.inspect.Os, nil
}
func (i *Image) OSVersion() (string, error) {
return i.inspect.OsVersion, nil
}
func (i *Image) Architecture() (string, error) {
return i.inspect.Architecture, nil
}
func (i *Image) Rename(name string) {
i.repoName = name
}
func (i *Image) Name() string {
return i.repoName
}
func (i *Image) Found() bool {
return i.inspect.ID != ""
}
func (i *Image) Identifier() (imgutil.Identifier, error) {
return IDIdentifier{
ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"),
}, nil
}
func (i *Image) CreatedAt() (time.Time, error) {
createdAtTime := i.inspect.Created
createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime)
if err != nil {
return time.Time{}, err
}
return createdTime, nil
}
func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error {
ctx := context.Background()
// FIND TOP LAYER
var keepLayersIdx int
for idx, diffID := range i.inspect.RootFS.Layers {
if diffID == baseTopLayer {
keepLayersIdx = idx + 1
break
}
}
if keepLayersIdx == 0 {
return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName)
}
// DOWNLOAD IMAGE
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
// SWITCH BASE LAYERS
newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name())
if err != nil {
return errors.Wrapf(err, "read config for new base image %q", newBase)
}
i.inspect.ID = newBaseInspect.ID
i.downloadBaseOnce = &sync.Once{}
i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...)
i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...)
return nil
}
func (i *Image) SetLabel(key, val string) error {
if i.inspect.Config.Labels == nil {
i.inspect.Config.Labels = map[string]string{}
}
i.inspect.Config.Labels[key] = val
return nil
}
func (i *Image) SetOS(osVal string) error {
if osVal != i.inspect.Os {
return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os)
}
return nil
}
func (i *Image) SetOSVersion(osVersion string) error {
i.inspect.OsVersion = osVersion
return nil
}
func (i *Image) SetArchitecture(architecture string) error {
i.inspect.Architecture = architecture
return nil
}
func (i *Image) RemoveLabel(key string) error { | ignoreCase := i.inspect.Os == "windows"
for idx, kv := range i.inspect.Config.Env {
parts := strings.SplitN(kv, "=", 2)
foundKey := parts[0]
searchKey := key
if ignoreCase {
foundKey = strings.ToUpper(foundKey)
searchKey = strings.ToUpper(searchKey)
}
if foundKey == searchKey {
i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val)
return nil
}
}
i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val))
return nil
}
func (i *Image) SetWorkingDir(dir string) error {
i.inspect.Config.WorkingDir = dir
return nil
}
func (i *Image) SetEntrypoint(ep ...string) error {
i.inspect.Config.Entrypoint = ep
return nil
}
func (i *Image) SetCmd(cmd ...string) error {
i.inspect.Config.Cmd = cmd
return nil
}
func (i *Image) TopLayer() (string, error) {
all := i.inspect.RootFS.Layers
if len(all) == 0 {
return "", fmt.Errorf("image %q has no layers", i.repoName)
}
topLayer := all[len(all)-1]
return topLayer, nil
}
func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) {
for l := range i.inspect.RootFS.Layers {
if i.inspect.RootFS.Layers[l] != diffID {
continue
}
if i.layerPaths[l] == "" {
if err := i.downloadBaseLayersOnce(); err != nil {
return nil, err
}
if i.layerPaths[l] == "" {
return nil, fmt.Errorf("fetching layer %q from daemon", diffID)
}
}
return os.Open(i.layerPaths[l])
}
return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID)
}
func (i *Image) AddLayer(path string) error {
f, err := os.Open(filepath.Clean(path))
if err != nil {
return errors.Wrapf(err, "AddLayer: open layer: %s", path)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path)
}
diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return i.AddLayerWithDiffID(path, diffID)
}
func (i *Image) AddLayerWithDiffID(path, diffID string) error {
i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID)
i.layerPaths = append(i.layerPaths, path)
return nil
}
func (i *Image) ReuseLayer(diffID string) error {
if i.prevImage == nil {
return errors.New("failed to reuse layer because no previous image was provided")
}
if !i.prevImage.Found() {
return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName)
}
if err := i.prevImage.downloadBaseLayersOnce(); err != nil {
return err
}
for l := range i.prevImage.inspect.RootFS.Layers {
if i.prevImage.inspect.RootFS.Layers[l] == diffID {
return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID)
}
}
return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name())
}
func (i *Image) Save(additionalNames ...string) error {
// during the first save attempt some layers may be excluded. The docker daemon allows this if the given set
// of layers already exists in the daemon in the given order
inspect, err := i.doSave()
if err != nil {
// populate all layer paths and try again without the above performance optimization.
if err := i.downloadBaseLayersOnce(); err != nil {
return err
}
inspect, err = i.doSave()
if err != nil {
saveErr := imgutil.SaveError{}
for _, n := range append([]string{i.Name()}, additionalNames...) {
saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
return saveErr
}
}
i.inspect = inspect
var errs []imgutil.SaveDiagnostic
for _, n := range append([]string{i.Name()}, additionalNames...) {
if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil {
errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err})
}
}
if len(errs) > 0 {
return imgutil.SaveError{Errors: errs}
}
return nil
}
func (i *Image) doSave() (types.ImageInspect, error) {
ctx := context.Background()
done := make(chan error)
t, err := name.NewTag(i.repoName, name.WeakValidation)
if err != nil {
return types.ImageInspect{}, err
}
// returns valid ' | delete(i.inspect.Config.Labels, key)
return nil
}
func (i *Image) SetEnv(key, val string) error { | random_line_split |
_profile.py | if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({self._level})"
else:
context = self._code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def | (self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if self._level > backtrack + 1:
call: ExtCall = self._parents[-backtrack-1]
return call._code_context[0].strip(), call._file_name, call._function, call._line_number
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if self._level > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
# finalize
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X", # complete event
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
"""
Stores information about calls to backends and their timing.
Profile may be created through `profile()` or `profile_function()`.
Profiles can be printed or saved to disc.
"""
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, "", 0, "", "", "", -1)
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time
self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(call._level, len(stack)):
stack_frame = stack[len(stack) - i - 1]
name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else ""
sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno)
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self):
self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
""" Total time passed from creation of the profile to the end of the last operation. """
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
"""
Prints this profile to the console.
Args:
min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds)
code_col: Formatting option for where the context code is printed.
code_len: Formatting option for cropping the context code
"""
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
"""
Saves this profile to disc using the *trace event format* described at
https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
This file can be viewed with external applications such as Google chrome.
Args:
json_file: filename
"""
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args | _closest_non_trivial_parent | identifier_name |
_profile.py | if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({self._level})"
else:
context = self._code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def _closest_non_trivial_parent(self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if self._level > backtrack + 1:
call: ExtCall = self._parents[-backtrack-1]
return call._code_context[0].strip(), call._file_name, call._function, call._line_number
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if self._level > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
# finalize
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X", # complete event
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
| self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(call._level, len(stack)):
stack_frame = stack[len(stack) - i - 1]
name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else ""
sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno)
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self):
self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
""" Total time passed from creation of the profile to the end of the last operation. """
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
"""
Prints this profile to the console.
Args:
min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds)
code_col: Formatting option for where the context code is printed.
code_len: Formatting option for cropping the context code
"""
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
"""
Saves this profile to disc using the *trace event format* described at
https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
This file can be viewed with external applications such as Google chrome.
Args:
json_file: filename
"""
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": | """
Stores information about calls to backends and their timing.
Profile may be created through `profile()` or `profile_function()`.
Profiles can be printed or saved to disc.
"""
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, "", 0, "", "", "", -1)
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time | identifier_body |
_profile.py | if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({self._level})"
else:
context = self._code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def _closest_non_trivial_parent(self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if self._level > backtrack + 1:
call: ExtCall = self._parents[-backtrack-1]
return call._code_context[0].strip(), call._file_name, call._function, call._line_number
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if self._level > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
# finalize
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X", # complete event
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
"""
Stores information about calls to backends and their timing.
Profile may be created through `profile()` or `profile_function()`.
Profiles can be printed or saved to disc.
"""
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, "", 0, "", "", "", -1)
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time
self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(call._level, len(stack)):
stack_frame = stack[len(stack) - i - 1]
name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else ""
sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno)
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self): | self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
""" Total time passed from creation of the profile to the end of the last operation. """
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
"""
Prints this profile to the console.
Args:
min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds)
code_col: Formatting option for where the context code is printed.
code_len: Formatting option for cropping the context code
"""
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
"""
Saves this profile to disc using the *trace event format* described at
https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
This file can be viewed with external applications such as Google chrome.
Args:
json_file: filename
"""
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args | random_line_split |
|
_profile.py | if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({self._level})"
else:
context = self._code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def _closest_non_trivial_parent(self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if self._level > backtrack + 1:
call: ExtCall = self._parents[-backtrack-1]
return call._code_context[0].strip(), call._file_name, call._function, call._line_number
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if self._level > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
# finalize
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X", # complete event
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
"""
Stores information about calls to backends and their timing.
Profile may be created through `profile()` or `profile_function()`.
Profiles can be printed or saved to disc.
"""
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, "", 0, "", "", "", -1)
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time
self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
|
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(call._level, len(stack)):
stack_frame = stack[len(stack) - i - 1]
name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else ""
sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno)
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self):
self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
""" Total time passed from creation of the profile to the end of the last operation. """
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
"""
Prints this profile to the console.
Args:
min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds)
code_col: Formatting option for where the context code is printed.
code_len: Formatting option for cropping the context code
"""
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
"""
Saves this profile to disc using the *trace event format* described at
https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
This file can be viewed with external applications such as Google chrome.
Args:
json_file: filename
"""
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args | backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend)) | conditional_block |
wps.go | type ByteBuilder struct { // object:
func (self TYPE) from_ary(ary interface{}){
return ary[0]
func (self TYPE) to_ary(value interface{}){
return array.array('B', [value])
type StringBuilder struct { // object:
func (self TYPE) from_ary(ary interface{}){
return ary.tostring()
func (self TYPE) to_ary(value interface{}){
return array.array('B', value)
type NumBuilder struct { // object:
"""Converts back and forth between arrays and numbers in network byte-order"""
func (self TYPE) __init__(size interface{}){
"""size: number of bytes in the field"""
self.size = size
func (self TYPE) from_ary(ary interface{}){
if len(ary) != self.size {
raise Exception("Expected %s size but got %s" % (self.size, len(ary)))
return reduce( lambda ac, x: ac * 256 + x, ary, 0)
func (self TYPE) to_ary(value0 interface{}){
value = value0
rv = array.array("B")
for _ in range(self.size):
value, mod = divmod(value, 256)
rv.append(mod)
if value != 0 {
raise Exception("%s is too big. Max size: %s" % (value0, self.size))
rv.reverse()
return rv
type TLVContainer struct { // object:
func (self TYPE) builder(kind interface{}){
return self.builders.get(kind, self.default_builder)
func (self TYPE) from_ary(ary interface{}){
i = 0
while i<len(ary):
kind = self.ary2n(ary, i)
length = self.ary2n(ary, i+2)
i+=4
value = ary[i:i+length]
self.elems.append((kind, value))
i += length
return self
func (self TYPE) __init__(builders, default_builder = ArrayBuilder(), descs=nil interface{}){
self.builders = builders
self.default_builder = default_builder
self.elems = []
self.descs = descs or {}
func (self TYPE) append(kind, value interface{}){
self.elems.append((kind, self.builder(kind).to_ary(value)))
func (self TYPE) __iter__(){
return ((k, self.builder(k).from_ary(v)) for k,v in self.elems)
func (self TYPE) all(kind interface{}){
return [e[1] for e in self if e[0] == kind]
func (self TYPE) __contains__(kind interface{}){
return len(self.all(kind)) != 0
func (self TYPE) first(kind interface{}){
return self.all(kind)[0]
func (self TYPE) to_ary(){
ary = array.array("B")
for k,v in self.elems:
ary.extend(self.n2ary(k))
ary.extend(self.n2ary(len(v)))
ary.extend(v)
return ary
func (self TYPE) get_packet(){
return self.to_ary().tostring()
func (self TYPE) set_parent(my_parent interface{}){
self.__parent = my_parent
func (self TYPE) parent(){
return self.__parent
func (self TYPE) n2ary(n interface{}){
return array.array("B", struct.pack(">H",n))
func (self TYPE) ary2n(ary, i=0 interface{}){
return struct.unpack(">H", ary[i:i+2].tostring())[0]
func (self TYPE) __repr__(){
func desc(kind interface{}){
return self.descs[kind] if kind in self.descs else kind
return "<TLVContainer %s>" % repr([(desc(k), self.builder(k).from_ary(v)) for (k,v) in self.elems])
func (self TYPE) child(){
return nil
type SCElem struct { // object:
//Data elements as defined in section 11 of the WPS 1.0h spec.
AP_CHANNEL = 0x1001
ASSOCIATION_STATE = 0x1002
AUTHENTICATION_TYPE = 0x1003
AUTHENTICATION_TYPE_FLAGS = 0x1004
AUTHENTICATOR = 0x1005
CONFIG_METHODS = 0x1008
CONFIGURATION_ERROR = 0x1009
CONFIRMATION_URL4 = 0x100A
CONFIRMATION_URL6 = 0x100B
CONNECTION_TYPE = 0X100C
CONNECTION_TYPE_FLAGS = 0X100D
CREDENTIAL = 0X100E
DEVICE_NAME = 0x1011
DEVICE_PASSWORD_ID = 0x1012
E_HASH1 = 0x1014
E_HASH2 = 0x1015
E_SNONCE1 = 0x1016
E_SNONCE2 = 0x1017
ENCRYPTED_SETTINGS = 0x1018
ENCRYPTION_TYPE = 0X100F
ENCRYPTION_TYPE_FLAGS = 0x1010
ENROLLEE_NONCE = 0x101A
FEATURE_ID = 0x101B
IDENTITY = 0X101C
INDENTITY_PROOF = 0X101D
KEY_WRAP_AUTHENTICATOR = 0x101E
KEY_IDENTIFIER = 0X101F
MAC_ADDRESS = 0x1020
MANUFACTURER = 0x1021
MESSAGE_TYPE = 0x1022
MODEL_NAME = 0x1023
MODEL_NUMBER = 0x1024
NETWORK_INDEX = 0x1026
NETWORK_KEY = 0x1027
NETWORK_KEY_INDEX = 0x1028
NEW_DEVICE_NAME = 0x1029
NEW_PASSWORD = 0x102A
OOB_DEVICE_PASSWORD = 0X102C
OS_VERSION= 0X102D
POWER_LEVEL = 0X102F
PSK_CURRENT = 0x1030
PSK_MAX = 0x1031
PUBLIC_KEY = 0x1032
RADIO_ENABLED = 0x1033
REBOOT = 0x1034
REGISTRAR_CURRENT = 0x1035
REGISTRAR_ESTABLISHED = 0x1036
REGISTRAR_LIST = 0x1037
REGISTRAR_MAX = 0x1038
REGISTRAR_NONCE = 0x1039
REQUEST_TYPE = 0x103A
RESPONSE_TYPE = 0x103B
RF_BANDS = 0X103C
R_HASH1 = 0X103D
R_HASH2 = 0X103E
R_SNONCE1 = 0X103F
R_SNONCE2 = 0x1040
SELECTED_REGISTRAR = 0x1041
SERIAL_NUMBER = 0x1042
WPS_STATE = 0x1044
SSID = 0x1045
TOTAL_NETWORKS = 0x1046
UUID_E = 0x1047
UUID_R = 0x1048
VENDOR_EXTENSION = 0x1049
VERSION = 0x104A
X_509_CERTIFICATE_REQUEST = 0x104B
X_509_CERTIFICATE = 0x104C
EAP_IDENTITY = 0x104D
MESSAGE_COUNTER = 0x104E
PUBLIC_KEY_HASH = 0x104F
REKEY_KEY = 0x1050
KEY_LIFETIME = 0x1051
PERMITTED_CONFIG_METHODS = 0x1052
SELECTED_REGISTRAR_CONFIG_METHODS= 0x1053
PRIMARY_DEVICE_TYPE = 0x1054
SECONDARY_DEVICE_TYPE_LIST = 0x1055
PORTABLE_DEVICE = 0x1056
AP_SETUP_LOCKED = 0x1057
APPLICATION_EXTENSION = 0x1058
EAP_TYPE = 0x1059
INITIALIZATION_VECTOR = 0x1060
KEY_PROVIDED_AUTOMATICALLY = 0x106 | return ary
func (self TYPE) to_ary(value interface{}){
return array.array("B", value)
| random_line_split |
|
vmctx.rs | the instance and its `Alloc`.
globals_view: RefCell<Box<[GlobalValue]>>,
}
impl Drop for Vmctx {
fn drop(&mut self) {
let heap_view = self.heap_view.replace(Box::new([]));
let globals_view = self.globals_view.replace(Box::new([]));
// as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap
// and globals to be dropped
Box::leak(heap_view);
Box::leak(globals_view);
}
}
pub trait VmctxInternal {
/// Get a reference to the `Instance` for this guest.
fn instance(&self) -> &Instance;
/// Get a mutable reference to the `Instance` for this guest.
///
/// ### Safety
///
/// Using this method, you could hold on to multiple mutable references to the same
/// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise
/// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or
/// terminating the instance.
unsafe fn instance_mut(&self) -> &mut Instance;
}
impl VmctxInternal for Vmctx {
fn instance(&self) -> &Instance {
unsafe { instance_from_vmctx(self.vmctx) }
}
unsafe fn instance_mut(&self) -> &mut Instance {
instance_from_vmctx(self.vmctx)
}
}
impl Vmctx {
/// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function.
///
/// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut
/// Vmctx` argument to a `lucet_hostcalls!`-wrapped function.
pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx |
/// Return the underlying `vmctx` pointer.
pub fn as_raw(&self) -> *mut lucet_vmctx {
self.vmctx
}
/// Return the WebAssembly heap as a slice of bytes.
///
/// If the heap is already mutably borrowed by `heap_mut()`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn heap(&self) -> Ref<'_, [u8]> {
unsafe {
self.reconstitute_heap_view_if_needed();
}
let r = self
.heap_view
.try_borrow()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap")));
Ref::map(r, |b| b.borrow())
}
/// Return the WebAssembly heap as a mutable slice of bytes.
///
/// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate
/// with `TerminationDetails::BorrowError`.
pub fn heap_mut(&self) -> RefMut<'_, [u8]> {
unsafe {
self.reconstitute_heap_view_if_needed();
}
let r = self
.heap_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Check whether the heap has grown, and replace the heap view if it has.
///
/// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in
/// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across
/// it.
///
/// TODO: There is still an unsound case, though, when a heap reference is held across a call
/// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as
/// well, causing any outstanding heap references to become invalid. We will address this when
/// we rework the interface for calling back into the guest.
unsafe fn reconstitute_heap_view_if_needed(&self) {
let inst = self.instance_mut();
if inst.heap_mut().len() != self.heap_view.borrow().len() {
let old_heap_view = self
.heap_view
.replace(Box::<[u8]>::from_raw(inst.heap_mut()));
// as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap
// to be dropped
Box::leak(old_heap_view);
}
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.instance().check_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.instance().contains_embed_ctx::<T>()
}
/// Get a reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> {
match self.instance().embed_ctx.try_get::<T>() {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Get a mutable reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already borrowed by some other use of `get_embed_ctx` or
/// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> {
match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Terminate this guest and return to the host context without unwinding.
///
/// This is almost certainly not what you want to use to terminate an instance from a hostcall,
/// as any resources currently in scope will not be dropped. Instead, use
/// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body.
pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! {
self.instance_mut().terminate(details)
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
unsafe { self.instance_mut().grow_memory(additional_pages) }
}
/// Return the WebAssembly globals as a slice of `i64`s.
///
/// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate
/// with `TerminationDetails::BorrowError`.
pub fn globals(&self) -> Ref<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals")));
Ref::map(r, |b| b.borrow())
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
///
/// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Get a function pointer by WebAssembly table and function index.
///
/// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses
/// table indices as its runtime representation of function pointers.
///
/// We do not currently reflect function type information into the Rust type system, so callers
/// of the returned function must take care to cast it to the correct type before calling. The
/// correct type will include the `vmctx` argument, which the caller is responsible for passing
/// from its own context.
///
| {
let inst = instance_from_vmctx(vmctx);
assert!(inst.valid_magic());
let res = Vmctx {
vmctx,
heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())),
globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())),
};
res
} | identifier_body |
vmctx.rs | self
.heap_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Check whether the heap has grown, and replace the heap view if it has.
///
/// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in
/// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across
/// it.
///
/// TODO: There is still an unsound case, though, when a heap reference is held across a call
/// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as
/// well, causing any outstanding heap references to become invalid. We will address this when
/// we rework the interface for calling back into the guest.
unsafe fn reconstitute_heap_view_if_needed(&self) {
let inst = self.instance_mut();
if inst.heap_mut().len() != self.heap_view.borrow().len() {
let old_heap_view = self
.heap_view
.replace(Box::<[u8]>::from_raw(inst.heap_mut()));
// as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap
// to be dropped
Box::leak(old_heap_view);
}
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.instance().check_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.instance().contains_embed_ctx::<T>()
}
/// Get a reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> {
match self.instance().embed_ctx.try_get::<T>() {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Get a mutable reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already borrowed by some other use of `get_embed_ctx` or
/// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> {
match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Terminate this guest and return to the host context without unwinding.
///
/// This is almost certainly not what you want to use to terminate an instance from a hostcall,
/// as any resources currently in scope will not be dropped. Instead, use
/// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body.
pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! {
self.instance_mut().terminate(details)
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
unsafe { self.instance_mut().grow_memory(additional_pages) }
}
/// Return the WebAssembly globals as a slice of `i64`s.
///
/// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate
/// with `TerminationDetails::BorrowError`.
pub fn globals(&self) -> Ref<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals")));
Ref::map(r, |b| b.borrow())
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
///
/// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Get a function pointer by WebAssembly table and function index.
///
/// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses
/// table indices as its runtime representation of function pointers.
///
/// We do not currently reflect function type information into the Rust type system, so callers
/// of the returned function must take care to cast it to the correct type before calling. The
/// correct type will include the `vmctx` argument, which the caller is responsible for passing
/// from its own context.
///
/// ```no_run
/// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate};
/// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx};
///
/// lucet_hostcalls! {
/// #[no_mangle]
/// pub unsafe extern "C" fn hostcall_call_binop(
/// &mut vmctx,
/// binop_table_idx: u32,
/// binop_func_idx: u32,
/// operand1: u32,
/// operand2: u32,
/// ) -> u32 {
/// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) {
/// let typed_binop = std::mem::transmute::<
/// usize,
/// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32
/// >(binop.ptr.as_usize());
/// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) }
/// } else {
/// lucet_hostcall_terminate!("invalid function index")
/// }
/// }
/// }
pub fn get_func_from_idx(
&self,
table_idx: u32,
func_idx: u32,
) -> Result<FunctionHandle, Error> {
self.instance()
.module()
.get_func_from_idx(table_idx, func_idx)
}
}
/// Get an `Instance` from the `vmctx` pointer.
///
/// Only safe to call from within the guest context.
pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance {
assert!(!vmctx.is_null(), "vmctx is not null");
let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance;
// We shouldn't actually need to access the thread local, only the exception handler should
// need to. But, as long as the thread local exists, we should make sure that the guest
// hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest
// cant pull any shenanigans but there have been bugs before.)
CURRENT_INSTANCE.with(|current_instance| {
if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) {
assert_eq!(
inst_ptr, current_inst_ptr,
"vmctx corresponds to current instance"
);
} else {
panic!(
"current instance is not set; thread local storage failure can indicate \
dynamic linking issues"
);
}
});
let inst = inst_ptr.as_mut().unwrap();
assert!(inst.valid_magic());
inst
}
impl Instance {
/// Terminate the guest and swap back to the host context without unwinding.
///
/// This is almost certainly not what you want to use to terminate from a hostcall; use panics
/// with `TerminationDetails` instead.
unsafe fn terminate(&mut self, details: TerminationDetails) -> ! {
self.state = State::Terminated { details };
#[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused | HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) })
} | random_line_split |
|
vmctx.rs | the instance and its `Alloc`.
globals_view: RefCell<Box<[GlobalValue]>>,
}
impl Drop for Vmctx {
fn drop(&mut self) {
let heap_view = self.heap_view.replace(Box::new([]));
let globals_view = self.globals_view.replace(Box::new([]));
// as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap
// and globals to be dropped
Box::leak(heap_view);
Box::leak(globals_view);
}
}
pub trait VmctxInternal {
/// Get a reference to the `Instance` for this guest.
fn instance(&self) -> &Instance;
/// Get a mutable reference to the `Instance` for this guest.
///
/// ### Safety
///
/// Using this method, you could hold on to multiple mutable references to the same
/// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise
/// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or
/// terminating the instance.
unsafe fn instance_mut(&self) -> &mut Instance;
}
impl VmctxInternal for Vmctx {
fn instance(&self) -> &Instance {
unsafe { instance_from_vmctx(self.vmctx) }
}
unsafe fn instance_mut(&self) -> &mut Instance {
instance_from_vmctx(self.vmctx)
}
}
impl Vmctx {
/// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function.
///
/// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut
/// Vmctx` argument to a `lucet_hostcalls!`-wrapped function.
pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx {
let inst = instance_from_vmctx(vmctx);
assert!(inst.valid_magic());
let res = Vmctx {
vmctx,
heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())),
globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())),
};
res
}
/// Return the underlying `vmctx` pointer.
pub fn as_raw(&self) -> *mut lucet_vmctx {
self.vmctx
}
/// Return the WebAssembly heap as a slice of bytes.
///
/// If the heap is already mutably borrowed by `heap_mut()`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn heap(&self) -> Ref<'_, [u8]> {
unsafe {
self.reconstitute_heap_view_if_needed();
}
let r = self
.heap_view
.try_borrow()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap")));
Ref::map(r, |b| b.borrow())
}
/// Return the WebAssembly heap as a mutable slice of bytes.
///
/// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate
/// with `TerminationDetails::BorrowError`.
pub fn heap_mut(&self) -> RefMut<'_, [u8]> {
unsafe {
self.reconstitute_heap_view_if_needed();
}
let r = self
.heap_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Check whether the heap has grown, and replace the heap view if it has.
///
/// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in
/// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across
/// it.
///
/// TODO: There is still an unsound case, though, when a heap reference is held across a call
/// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as
/// well, causing any outstanding heap references to become invalid. We will address this when
/// we rework the interface for calling back into the guest.
unsafe fn reconstitute_heap_view_if_needed(&self) {
let inst = self.instance_mut();
if inst.heap_mut().len() != self.heap_view.borrow().len() {
let old_heap_view = self
.heap_view
.replace(Box::<[u8]>::from_raw(inst.heap_mut()));
// as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap
// to be dropped
Box::leak(old_heap_view);
}
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.instance().check_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.instance().contains_embed_ctx::<T>()
}
/// Get a reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> {
match self.instance().embed_ctx.try_get::<T>() {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Get a mutable reference to a context value of a particular type.
///
/// If a context of that type does not exist, the instance will terminate with
/// `TerminationDetails::CtxNotFound`.
///
/// If the context is already borrowed by some other use of `get_embed_ctx` or
/// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`.
pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> {
match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } {
Some(Ok(t)) => t,
Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")),
None => panic!(TerminationDetails::CtxNotFound),
}
}
/// Terminate this guest and return to the host context without unwinding.
///
/// This is almost certainly not what you want to use to terminate an instance from a hostcall,
/// as any resources currently in scope will not be dropped. Instead, use
/// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body.
pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! {
self.instance_mut().terminate(details)
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
unsafe { self.instance_mut().grow_memory(additional_pages) }
}
/// Return the WebAssembly globals as a slice of `i64`s.
///
/// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate
/// with `TerminationDetails::BorrowError`.
pub fn globals(&self) -> Ref<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals")));
Ref::map(r, |b| b.borrow())
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
///
/// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will
/// terminate with `TerminationDetails::BorrowError`.
pub fn | (&self) -> RefMut<'_, [GlobalValue]> {
let r = self
.globals_view
.try_borrow_mut()
.unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut")));
RefMut::map(r, |b| b.borrow_mut())
}
/// Get a function pointer by WebAssembly table and function index.
///
/// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses
/// table indices as its runtime representation of function pointers.
///
/// We do not currently reflect function type information into the Rust type system, so callers
/// of the returned function must take care to cast it to the correct type before calling. The
/// correct type will include the `vmctx` argument, which the caller is responsible for passing
/// from its own context.
///
| globals_mut | identifier_name |
tombfix.js | FileProtocolHandler = getService(
'network/protocol;1?name=file',
Ci.nsIFileProtocolHandler
),
{nsILocalFile: ILocalFile} = Ci;
const SCRIPT_FILES = [
// library/third_party
'MochiKit.js',
'twitter-text.js',
// library
'component.js',
'expand.js',
'utility.js',
'tabWatcher.js',
'repository.js',
'models.js',
'Tombfix.Service.js',
'actions.js',
'extractors.js',
'ui.js'
];
// https://developer.mozilla.org/en-US/docs/Components.utils.importGlobalProperties
Cu.importGlobalProperties(['File']);
var getContentDir, Module, ModuleImpl;
// ----[Application]--------------------------------------------
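// Collect the *.js files directly under the given directory (non-recursive).
// Returns an array of nsILocalFile objects.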
function getScriptFiles(dir) {
var scripts = [];
simpleIterator(dir.directoryEntries, ILocalFile, file => {
if (/\.js$/.test(file.leafName)) {
scripts.push(file);
}
});
return scripts;
}
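// Resolve the bundled library scripts: gather the files under
// content/library and content/library/third_party, then reorder them to
// match SCRIPT_FILES so that dependencies are loaded first.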
function getLibraries() {
var libDir, thirdPartyDir, scripts;
libDir = getContentDir();
libDir.append('library');
thirdPartyDir = getContentDir();
thirdPartyDir.setRelativeDescriptor(thirdPartyDir, 'library');
thirdPartyDir.append('third_party');
scripts = getScriptFiles(thirdPartyDir).concat(getScriptFiles(libDir));
return SCRIPT_FILES.map(scriptName => {
return scripts.find(file => file.leafName === scriptName);
});
}
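// Populate the service's global environment with DOM classes and methods
// taken from the hidden DOM window, binding methods where a changed `this`
// would break them and routing dialogs to the most recent browser window.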
function setupEnvironment(env) {
var win = AppShellService.hiddenDOMWindow;
	// Use the variables/constants of hiddenDOMWindow directly
[
'navigator', 'document', 'window', 'screen', 'XMLHttpRequest',
'XPathResult', 'Node', 'Element', 'KeyEvent', 'Event', 'DOMParser',
'XSLTProcessor', 'XMLSerializer', 'URL'
].forEach(propName => {
env[propName] = win[propName];
});
        // Methods can throw when 'this' changes, so bind them to the window before use
[
'setTimeout', 'setInterval', 'clearTimeout', 'clearInterval', 'open',
'openDialog', 'atob', 'btoa'
].forEach(propName => {
env[propName] = win[propName].bind(win);
});
        // Use the most recent window's methods, not hiddenDOMWindow's, so the dialogs are modal
[
'alert', 'confirm', 'prompt'
].forEach(propName => {
env[propName] = forwardToWindow.bind(null, propName);
});
}
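// After this, env looks enough like a DOM window for the libraries loaded
// below to run without a real browser window of their own.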
function forwardToWindow(propName, ...args) {
var win = WindowMediator.getMostRecentWindow('navigator:browser');
return win[propName].apply(win, args);
}
// ----[Utility]--------------------------------------------
/* jshint ignore:start */
function log(msg) {
    console[typeof msg === 'object' ? 'dir' : 'log'](msg);
}
/* jshint ignore:end */
function getService(clsName, ifc) {
try {
let cls = Cc['@mozilla.org/' + clsName];
return cls ? (ifc ? cls.getService(ifc) : cls.getService()) : null;
} catch (err) {
return null;
}
}
function loadAllSubScripts() {
/* jshint validthis:true */
    // Load the libraries
loadSubScripts(getLibraries(), this);
if (!this.getPref('disableAllScripts')) {
        // Load the patches
loadSubScripts(getScriptFiles(this.getPatchDir()), this);
}
}
function loadSubScripts(files, global = function () {}) {
var now = Date.now();
for (let file of files) {
        // Append a query string so the cache is bypassed
ScriptLoader.loadSubScript(
FileProtocolHandler.getURLSpecFromFile(file) + '?time=' + now,
global,
'UTF-8'
);
}
}
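// e.g. a script is loaded as "file:///.../utility.js?time=1422000000000"
// (hypothetical path); the fresh timestamp keeps the subscript loader from
// serving a stale cached copy.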
function simpleIterator(directoryEntries, ifc, func) {
if (typeof ifc === 'string') {
ifc = Ci[ifc];
}
try {
while (directoryEntries.hasMoreElements()) {
let value = directoryEntries.getNext();
func(ifc ? value.QueryInterface(ifc) : value);
}
} catch (err) {}
}
function copy(target, obj, re) {
for (let propName in obj) {
if (!re || re.test(propName)) {
target[propName] = obj[propName];
}
}
return target;
}
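// Usage sketch (hypothetical call): copy({}, env, /^Deferred/) returns a new
// object holding only the members of env whose names match the regex.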
function exposeProperties(obj, recursive) {
if (obj == null) {
return;
}
Object.defineProperty(obj, '__exposedProps__', {
value : {},
enumerable : false,
writable : true,
configurable : true
});
for (let propName in obj) {
obj.__exposedProps__[propName] = 'r';
if (recursive && typeof obj[propName] === 'object') {
exposeProperties(obj[propName], true);
}
}
}
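// __exposedProps__ is the legacy Gecko whitelist that lets less-privileged
// code read chrome objects; every property is marked 'r' (read-only) here,
// recursively when requested.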
getContentDir = (function executeFunc() {
var {AddonManager} = Cu.import(
'resource://gre/modules/AddonManager.jsm',
{}
),
dir = null,
thread;
AddonManager.getAddonByID(EXTENSION_ID, addon => {
var target = addon.getResourceURI('/').QueryInterface(Ci.nsIFileURL)
.file.QueryInterface(ILocalFile);
target.setRelativeDescriptor(target, 'chrome/content');
dir = target;
});
// using id:piro (http://piro.sakura.ne.jp/) method
thread = getService('thread-manager;1').mainThread;
while (dir === null) {
thread.processNextEvent(true);
}
return function getContentDir() {
return dir.clone();
};
}());
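// AddonManager.getAddonByID is asynchronous, so the while loop above pumps
// the main thread's event queue until the callback has assigned `dir`,
// turning the async lookup into a synchronous one.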
Module = {
CID : Components.ID('{ab5cbd9b-56e1-42e4-8414-2201edb883e7}'),
NAME : 'TombfixService',
PID : '@tombfix.github.io/tombfix-service;1',
initialized : false,
onRegister : function onRegister() {
XPCOMUtils.categoryManager.addCategoryEntry(
'content-policy',
this.NAME,
this.PID,
true,
true
);
},
instance : {
// http://mxr.mozilla.org/mozilla-central/source/content/base/public/nsIContentPolicy.idl
shouldLoad : function shouldLoad() {
return Ci.nsIContentPolicy.ACCEPT;
},
shouldProcess : function shouldProcess() {
return Ci.nsIContentPolicy.ACCEPT;
},
QueryInterface : function queryInterface(iid) {
if (
iid.equals(Ci.nsIContentPolicy) || iid.equals(Ci.nsISupports) ||
iid.equals(Ci.nsISupportsWeakReference)
) {
return this;
}
throw Cr.NS_NOINTERFACE;
}
},
createInstance : function initialize(outer, iid) {
var env, GM_Tombloo, GM_Tombfix;
        // nsIContentPolicy is requested before hiddenDOMWindow is ready, so
        // return a provisional object that can respond until the environment is built
if (iid.equals(Ci.nsIContentPolicy)) {
return this.instance;
}
        // The environment is initialized when a browser window opens and requests an instance;
        // second and later windows get the already-built environment
if (this.initialized) {
return this.instance;
}
        // The code below runs only once per application launch
env = this.instance;
        // Reuse the same loadSubScripts function application-wide to avoid namespace pollution
env.loadSubScripts = loadSubScripts;
env.loadAllSubScripts = loadAllSubScripts;
env.getContentDir = getContentDir;
env.getLibraries = getLibraries;
env.PID = this.PID;
env.CID = this.CID;
env.NAME = this.NAME;
        // Bring window, document and friends into env here
setupEnvironment(env);
        // Avoid breakage in the instanceof checks MochiKit uses internally
env.MochiKit = {};
// for twttr
env.twttr = env.window.twttr = {};
        // Load the libraries and patches
env.loadAllSubScripts();
        /* From here on: support for other extensions */
GM_Tombloo = copy({
Tombloo : {
Service : copy(
{},
env.Tombloo.Service,
/(check|share|posters|extractors)/
),
},
}, env, /(Deferred|DeferredHash|copyString|notify)/);
GM_Tombfix = copy({
Tombfix : {
Service : copy(
{},
env.Tombfix.Service,
/(check|share|posters|extractors)/
),
},
        }, env, /(Deferred|DeferredHash|copyString|notify)/);
# game.py
import pygame
import os
import sys
# Local imports.
import components
import drawing
import ecs
import input_handling
import physics
import resource
import systems
import utils
class SpaceGameServices(ecs.GameServices):
""" The services exposed to the entities. This is separate from
the game class itself to try and keep control of the interface - since
this is basically global state you can get at from anywhere. """
def __init__(self, game):
        self.game = game
        self.info = ecs.GameInfo()
        self.debug_level = 0
def get_renderer(self):
return self.game.renderer
def get_entity_manager(self):
""" Return the entity manager. """
return self.game.entity_manager
def get_resource_loader(self):
""" Get the resource loader. """
return self.game.resource_loader
def get_info(self):
""" Return the information. """
return self.info
def end_game(self):
""" Stop the game from running. """
self.game.stop_running()
def get_debug_level(self):
""" Return the debug level. """
return self.debug_level
def load(self):
""" Load the game. """
self.game.load()
def save(self):
""" Save the game. """
self.game.save()
def toggle_pause(self):
""" Pause the game. """
self.game.toggle_pause()
def step(self):
""" Simulate one frame and then pause. """
self.game.step()
class Game(object):
""" Class glueing all of the building blocks together into an actual
game. """
def __init__(self):
""" Initialise the game systems. """
# Change directory into the directory above this file - the
        # one containing the 'res' tree. Note that if we've been built via
# py2exe, we will actually be in a zip file so account for that.
path = os.path.dirname(os.path.dirname(__file__))
if (os.path.basename(path) == "library.zip"):
path = os.path.dirname(path)
os.chdir( path )
sys.path += ["."]
# Services exposed to the entities.
self.game_services = SpaceGameServices(self)
# The resource loader.
self.resource_loader = resource.ResourceLoader()
# The configuration.
if os.path.isfile("./config.txt"):
self.config = self.resource_loader.load_config_file_from("./config.txt")
else:
self.config = self.resource_loader.load_config_file("base_config.txt")
# Create the renderer.
renderer_name = self.config.get_or_default("renderer", "src.pygame_renderer.PygameRenderer")
renderer_class = utils.lookup_type(renderer_name)
screen_size = (self.config.get_or_default("screen_width", 1024),
self.config.get_or_default("screen_height", 768))
self.renderer = renderer_class(screen_size, self.config, data_path="./res")
        # The resource loader needs a renderer to load images etc.
self.resource_loader.set_renderer(self.renderer)
# The input handling system.
self.input_handling = None
# The enemy.
self.wave_spawner = None
# Create the entity manager.
self.entity_manager = ecs.EntityManager(self.game_services)
# Configure the resource loader.
self.resource_loader.set_minimise_image_loading(
self.config.get_or_default("minimise_image_loading", False)
)
# The drawing visitor.
self.drawing = drawing.Drawing(self.game_services)
# Is the game running?
self.running = False
# Should we load the game?
self.want_load = False
# Should we pause the game?
self.want_pause = False
# Should we unpause the game?
self.want_resume = False
# Should we simulate one frame and then pause?
self.want_step = False
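        # The want_* flags defer load/pause/step requests so they are applied
        # at a safe point at the top of the game loop rather than mid-frame.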
def stop_running(self):
""" Stop the game from running. """
self.running = False
def run(self):
""" The game loop. This performs initialisation including setting
up pygame, and shows a loading screen while certain resources are
preloaded. Then, we enter the game loop wherein we remain until the
game is over. """
# Initialise the pygame display.
pygame.init()
pygame.mixer.init()
self.renderer.initialise()
# Create the game systems.
self.entity_manager.register_component_system(physics.Physics())
self.entity_manager.register_component_system(systems.FollowsTrackedSystem())
self.entity_manager.register_component_system(systems.TrackingSystem())
self.entity_manager.register_component_system(systems.LaunchesFightersSystem())
self.entity_manager.register_component_system(systems.KillOnTimerSystem())
self.entity_manager.register_component_system(systems.PowerSystem())
self.entity_manager.register_component_system(systems.ShieldSystem())
self.entity_manager.register_component_system(systems.TextSystem())
self.entity_manager.register_component_system(systems.AnimSystem())
self.entity_manager.register_component_system(systems.ThrusterSystem())
self.entity_manager.register_component_system(systems.ThrustersSystem())
self.entity_manager.register_component_system(systems.WaveSpawnerSystem())
self.entity_manager.register_component_system(systems.CameraSystem())
self.entity_manager.register_component_system(systems.TurretSystem())
self.entity_manager.register_component_system(systems.TurretsSystem())
self.entity_manager.register_component_system(systems.WeaponSystem())
# Preload certain images.
self.resource_loader.preload()
# Make the camera.
camera = self.entity_manager.create_entity_with(components.Camera,
components.Body,
components.Tracking,
components.FollowsTracked)
camera.get_component(components.FollowsTracked).follow_type = "instant"
# Draw debug info if requested.
self.game_services.debug_level = self.config.get_or_default("debug", 0)
# Make the player
player = self.entity_manager.create_entity("player.txt")
camera.get_component(components.Tracking).tracked.entity = player
# Create a view to pass to the input handling - this lets it map between
# world and screen coordinates.
view = drawing.CameraView(self.renderer, camera)
# Make the input handling system.
self.input_handling = input_handling.InputHandling(view, self.game_services)
# Create the wave spawner.
if not self.config.get_or_default("peaceful_mode", False):
self.entity_manager.register_component_system(systems.WaveSpawnerSystem())
# Make it so that bullets can damage things.
self.entity_manager.get_system(physics.Physics).add_collision_handler(
DamageCollisionHandler()
)
# Set the scrolling background.
self.drawing.set_background("res/images/857-tileable-classic-nebula-space-patterns/6.jpg")
# Run the game loop.
self.running = True
fps = 60
clock = pygame.time.Clock()
tick_time = 1.0/fps
while self.running:
# Has a load been requested?
if self.want_load:
self.entity_manager.load(open("space_game.save", "r"))
self.want_load = False
## Create any queued objects
self.entity_manager.create_queued_objects()
# If a pause has been scheduled then pause the game.
if self.want_pause:
self.want_pause = False
self.entity_manager.pause()
# If an unpause has been scheduled then unpause the game.
if self.want_resume:
self.want_resume = False
self.entity_manager.unpause()
# If a step has been scheduled then advance a frame and schedule a
# pause.
if self.want_step:
self.entity_manager.unpause()
self.want_pause = True
self.want_step = False
# Input
for e in pygame.event.get():
response = self.input_handling.handle_input(e)
if response.quit_requested:
self.running = False
# Update the systems.
self.entity_manager.update(tick_time)
# Draw
self.renderer.pre_render(view)
self.drawing.draw(view)
self.renderer.post_render()
self.renderer.flip_buffers()
# Maintain frame rate.
clock.tick(fps)
# Remember how long the frame took.
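            # limited_fps includes the clock.tick() sleep, raw_fps counts only
            # the frame's work, and time_ratio compares the target frame
            # budget (1/fps) with the time actually spent.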
limited_fps = 1.0/(clock.get_time() / 1000.0)
raw_fps = 1.0/(clock.get_rawtime() / 1000.0)
time_ratio = (1.0/fps) / (clock.get_time()/1000.0)
self.game_services.info.update_framerate(limited_fps,
raw_fps,
time_ratio)
# Finalise
pygame.quit()
def load(self):
""" Schedule a load. """
self.want_load = True
def save(self):
""" Save the game. """
self.entity_manager.save(open("space_game.save", "w"))
def toggle_pause(self):
""" Schedule a pause. """
if self.entity_manager.paused():
self.want_resume = True
else:
self.want_pause = True
def step(self):
""" Schedule a step. """
self.want_step = True
class DamageCollisionHandler(physics.CollisionHandler):
""" Collision handler to apply bullet damage. """
def __init__(self):
""" Constructor. """
# Match entities that cause damage on contact to entities that can be
# damaged.
physics.CollisionHandler.__init__(
self,
components.DamageOnContact,
components.Hitpoints
)
def handle_matching_collision(self, dmg, hp):
""" Apply the logical effect of the collision and return the result. """
# Delegate to the function in 'systems'.
        systems.handle
hwdetect.rs | crate::common::format::human_size;
use crate::common::parser::{consume_all, p_u32, NomResult};
pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> {
read_linux_numa()
.map(|numa_nodes| {
let filtered = filter_masked_cpus(numa_nodes.clone());
if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() {
log::info!(
"Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.",
numa_nodes
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>(),
filtered
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>()
);
}
filtered
})
.and_then(|groups| {
ResourceDescriptorKind::groups_numeric(groups)
.map_err(|_| anyhow!("Inconsistent CPU naming got from detection"))
})
.or_else(|e| {
log::debug!("Detecting linux failed: {}", e);
let n_cpus = num_cpus::get() as u32;
if n_cpus < 1 {
anyhow::bail!("Cpu detection failed");
};
Ok(ResourceDescriptorKind::simple_indices(n_cpus))
})
}
/// Filter cores that are not allowed because of CPU affinity mask.
fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> {
match core_affinity::get_core_ids() {
Some(allowed) => {
let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect();
numa_nodes
.into_iter()
.map(|mut numa_node| {
numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize()));
numa_node
})
.collect()
}
None => {
log::error!("Failed to found CPU mask. Allowing all cores.");
numa_nodes
}
}
}
pub fn prune_hyper_threading(
kind: &ResourceDescriptorKind,
) -> anyhow::Result<ResourceDescriptorKind> {
let groups = kind.as_groups();
let mut new_desc = Vec::new();
for group in groups {
let mut new_group = Vec::new();
for cpu_id in group {
if read_linux_thread_siblings(&cpu_id)?
.iter()
.min()
.ok_or_else(|| anyhow::anyhow!("Thread siblings are empty"))
.map(|v| *v == cpu_id)?
{
new_group.push(cpu_id);
}
}
new_desc.push(new_group);
}
Ok(ResourceDescriptorKind::groups(new_desc).unwrap())
}
/// Detects additional resources (apart from CPU) on this worker.
/// Also returns the detected GPU families.
pub fn detect_additional_resources(
items: &mut Vec<ResourceDescriptorItem>,
) -> anyhow::Result<Set<GpuFamily>> {
let mut gpu_families = Set::new();
let has_resource =
|items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name);
let detected_gpus = detect_gpus_from_env();
if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) {
if let Ok(count) = read_nvidia_linux_gpu_count() {
if count > 0 {
gpu_families.insert(GpuFamily::Nvidia);
log::info!("Detected {} GPUs from procs", count);
items.push(ResourceDescriptorItem {
name: NVIDIA_GPU_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::simple_indices(count as u32),
});
}
}
} else {
for gpu in detected_gpus {
if !has_resource(items, gpu.resource_name) {
gpu_families.insert(gpu.family);
items.push(ResourceDescriptorItem {
name: gpu.resource_name.to_string(),
kind: gpu.resource,
});
}
}
}
if !has_resource(items, MEM_RESOURCE_NAME) {
if let Ok(mem) = read_linux_memory() {
log::info!("Detected {mem}B of memory ({})", human_size(mem));
items.push(ResourceDescriptorItem {
name: MEM_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::Sum { size: mem },
});
}
}
Ok(gpu_families)
}
/// GPU resource that can be detected from an environment variable.
pub struct GpuEnvironmentRecord {
env_var: &'static str,
pub resource_name: &'static str,
pub family: GpuFamily,
}
impl GpuEnvironmentRecord {
const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self {
Self {
env_var,
resource_name,
family,
}
}
}
pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[
GpuEnvironmentRecord::new(
"CUDA_VISIBLE_DEVICES",
NVIDIA_GPU_RESOURCE_NAME,
GpuFamily::Nvidia,
),
GpuEnvironmentRecord::new(
"ROCR_VISIBLE_DEVICES",
AMD_GPU_RESOURCE_NAME,
GpuFamily::Amd,
),
];
struct DetectedGpu {
resource_name: &'static str,
resource: ResourceDescriptorKind,
family: GpuFamily,
}
/// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables.
fn detect_gpus_from_env() -> Vec<DetectedGpu> {
let mut gpus = Vec::new();
for gpu_env in GPU_ENVIRONMENTS {
if let Ok(devices_str) = std::env::var(gpu_env.env_var) | }
}
}
gpus
}
/// Try to find out how many Nvidia GPUs are available on the current node.
fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> {
Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count())
}
/// Try to get total memory on the current node.
fn read_linux_memory() -> anyhow::Result<u64> {
Ok(psutil::memory::virtual_memory()?.total())
}
/// Try to find the CPU NUMA configuration.
///
/// Returns a list of NUMA nodes, each node contains a list of assigned CPUs.
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> {
let nodes = parse_range(&std::fs::read_to_string(
"/sys/devices/system/node/possible",
)?)?;
let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new();
for numa_index in nodes {
let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist");
numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?);
}
log::debug!("Linux numa detection is successful");
Ok(numa_nodes)
}
fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> {
let filename = format!(
"/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list",
cpu_id
);
log::debug!("Reading {}", filename);
parse_range(&std::fs::read_to_string(filename)?)
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect())
}
fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> {
map_res(
tuple((
terminated(p_u32, space0),
opt(terminated(
preceded(tuple((tag("-"), space0)), p_u32),
space0,
)),
)),
|(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()),
)
.parse(input)
}
fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> {
separated_list1(terminated(tag(","), space0), p_cpu_range)(input)
.map(|(a, b)| (a, b.into_iter().flatten().collect()))
}
fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> {
let parser = terminated(p_cpu_ranges, opt(newline));
consume_all(parser, input)
}
fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> {
let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| {
items.into_iter().collect::<String>()
});
consume_all(separated_list1(tag(","), any_except_comma), input)
}
#[cfg(test)]
mod tests {
use tako::AsIdVec;
use super::{parse_range, read_linux_numa};
#[test]
fn test_parse_range() {
assert_eq | {
if let Ok(devices) = parse_comma_separated_values(&devices_str) {
log::info!(
"Detected GPUs {} from `{}`",
format_comma_delimited(&devices),
gpu_env.env_var,
);
if !has_unique_elements(&devices) {
log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var);
continue;
}
let list =
ResourceDescriptorKind::list(devices).expect("List values were not unique");
gpus.push(DetectedGpu {
resource_name: gpu_env.resource_name,
resource: list,
family: gpu_env.family,
}); | conditional_block |
hwdetect.rs | crate::common::format::human_size;
use crate::common::parser::{consume_all, p_u32, NomResult};
pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> {
read_linux_numa()
.map(|numa_nodes| {
let filtered = filter_masked_cpus(numa_nodes.clone());
if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() {
log::info!(
"Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.",
numa_nodes
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>(),
filtered
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>()
);
}
filtered
})
.and_then(|groups| {
ResourceDescriptorKind::groups_numeric(groups)
.map_err(|_| anyhow!("Inconsistent CPU naming got from detection"))
})
.or_else(|e| {
log::debug!("Detecting linux failed: {}", e);
let n_cpus = num_cpus::get() as u32;
if n_cpus < 1 {
anyhow::bail!("Cpu detection failed");
};
Ok(ResourceDescriptorKind::simple_indices(n_cpus))
})
}
/// Filter cores that are not allowed because of CPU affinity mask.
fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> {
match core_affinity::get_core_ids() {
Some(allowed) => {
let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect();
numa_nodes
.into_iter()
.map(|mut numa_node| {
numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize()));
numa_node
})
.collect()
}
None => {
log::error!("Failed to found CPU mask. Allowing all cores.");
numa_nodes
}
}
}
pub fn prune_hyper_threading(
kind: &ResourceDescriptorKind,
) -> anyhow::Result<ResourceDescriptorKind> {
let groups = kind.as_groups();
let mut new_desc = Vec::new();
for group in groups {
let mut new_group = Vec::new();
for cpu_id in group {
if read_linux_thread_siblings(&cpu_id)?
.iter()
.min()
.ok_or_else(|| anyhow::anyhow!("Thread siblings are empty"))
.map(|v| *v == cpu_id)?
{
new_group.push(cpu_id);
}
}
new_desc.push(new_group);
}
Ok(ResourceDescriptorKind::groups(new_desc).unwrap())
}
/// Detects additional resources (apart from CPU) on this worker.
/// Also returns the detected GPU families.
pub fn detect_additional_resources(
items: &mut Vec<ResourceDescriptorItem>,
) -> anyhow::Result<Set<GpuFamily>> | gpu_families.insert(gpu.family);
items.push(ResourceDescriptorItem {
name: gpu.resource_name.to_string(),
kind: gpu.resource,
});
}
}
}
if !has_resource(items, MEM_RESOURCE_NAME) {
if let Ok(mem) = read_linux_memory() {
log::info!("Detected {mem}B of memory ({})", human_size(mem));
items.push(ResourceDescriptorItem {
name: MEM_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::Sum { size: mem },
});
}
}
Ok(gpu_families)
}
/// GPU resource that can be detected from an environment variable.
pub struct GpuEnvironmentRecord {
env_var: &'static str,
pub resource_name: &'static str,
pub family: GpuFamily,
}
impl GpuEnvironmentRecord {
const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self {
Self {
env_var,
resource_name,
family,
}
}
}
pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[
GpuEnvironmentRecord::new(
"CUDA_VISIBLE_DEVICES",
NVIDIA_GPU_RESOURCE_NAME,
GpuFamily::Nvidia,
),
GpuEnvironmentRecord::new(
"ROCR_VISIBLE_DEVICES",
AMD_GPU_RESOURCE_NAME,
GpuFamily::Amd,
),
];
struct DetectedGpu {
resource_name: &'static str,
resource: ResourceDescriptorKind,
family: GpuFamily,
}
/// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables.
fn detect_gpus_from_env() -> Vec<DetectedGpu> {
let mut gpus = Vec::new();
for gpu_env in GPU_ENVIRONMENTS {
if let Ok(devices_str) = std::env::var(gpu_env.env_var) {
if let Ok(devices) = parse_comma_separated_values(&devices_str) {
log::info!(
"Detected GPUs {} from `{}`",
format_comma_delimited(&devices),
gpu_env.env_var,
);
if !has_unique_elements(&devices) {
log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var);
continue;
}
let list =
ResourceDescriptorKind::list(devices).expect("List values were not unique");
gpus.push(DetectedGpu {
resource_name: gpu_env.resource_name,
resource: list,
family: gpu_env.family,
});
}
}
}
gpus
}
/// Try to find out how many Nvidia GPUs are available on the current node.
fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> {
Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count())
}
/// Try to get total memory on the current node.
fn read_linux_memory() -> anyhow::Result<u64> {
Ok(psutil::memory::virtual_memory()?.total())
}
/// Try to find the CPU NUMA configuration.
///
/// Returns a list of NUMA nodes, each node contains a list of assigned CPUs.
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> {
let nodes = parse_range(&std::fs::read_to_string(
"/sys/devices/system/node/possible",
)?)?;
let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new();
for numa_index in nodes {
let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist");
numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?);
}
log::debug!("Linux numa detection is successful");
Ok(numa_nodes)
}
fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> {
let filename = format!(
"/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list",
cpu_id
);
log::debug!("Reading {}", filename);
parse_range(&std::fs::read_to_string(filename)?)
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect())
}
fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> {
map_res(
tuple((
terminated(p_u32, space0),
opt(terminated(
preceded(tuple((tag("-"), space0)), p_u32),
space0,
)),
)),
|(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()),
)
.parse(input)
}
fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> {
separated_list1(terminated(tag(","), space0), p_cpu_range)(input)
.map(|(a, b)| (a, b.into_iter().flatten().collect()))
}
fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> {
let parser = terminated(p_cpu_ranges, opt(newline));
consume_all(parser, input)
}
fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> {
let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| {
items.into_iter().collect::<String>()
});
consume_all(separated_list1(tag(","), any_except_comma), input)
}
#[cfg(test)]
mod tests {
use tako::AsIdVec;
use super::{parse_range, read_linux_numa};
#[test]
fn test_parse_range() {
assert_eq | {
let mut gpu_families = Set::new();
let has_resource =
|items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name);
let detected_gpus = detect_gpus_from_env();
if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) {
if let Ok(count) = read_nvidia_linux_gpu_count() {
if count > 0 {
gpu_families.insert(GpuFamily::Nvidia);
log::info!("Detected {} GPUs from procs", count);
items.push(ResourceDescriptorItem {
name: NVIDIA_GPU_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::simple_indices(count as u32),
});
}
}
} else {
for gpu in detected_gpus {
if !has_resource(items, gpu.resource_name) { | identifier_body |
hwdetect.rs | use nom::sequence::{preceded, terminated, tuple};
use nom::Parser;
use nom_supreme::tag::complete::tag;
use tako::hwstats::GpuFamily;
use tako::internal::has_unique_elements;
use tako::resources::{
ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel,
AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME,
};
use tako::{format_comma_delimited, Set};
use crate::common::format::human_size;
use crate::common::parser::{consume_all, p_u32, NomResult};
pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> {
read_linux_numa()
.map(|numa_nodes| {
let filtered = filter_masked_cpus(numa_nodes.clone());
if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() {
log::info!(
"Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.",
numa_nodes
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>(),
filtered
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>()
);
}
filtered
})
.and_then(|groups| {
ResourceDescriptorKind::groups_numeric(groups)
.map_err(|_| anyhow!("Inconsistent CPU naming got from detection"))
})
.or_else(|e| {
log::debug!("Detecting linux failed: {}", e);
let n_cpus = num_cpus::get() as u32;
if n_cpus < 1 {
anyhow::bail!("Cpu detection failed");
};
Ok(ResourceDescriptorKind::simple_indices(n_cpus))
})
}
/// Filter cores that are not allowed because of CPU affinity mask.
fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> {
match core_affinity::get_core_ids() {
Some(allowed) => {
let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect();
numa_nodes
.into_iter()
.map(|mut numa_node| {
numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize()));
numa_node
})
.collect()
}
None => {
log::error!("Failed to found CPU mask. Allowing all cores.");
numa_nodes
}
}
}
pub fn prune_hyper_threading(
kind: &ResourceDescriptorKind,
) -> anyhow::Result<ResourceDescriptorKind> {
let groups = kind.as_groups();
let mut new_desc = Vec::new();
for group in groups {
let mut new_group = Vec::new();
for cpu_id in group {
if read_linux_thread_siblings(&cpu_id)?
.iter()
.min()
.ok_or_else(|| anyhow::anyhow!("Thread siblings are empty"))
.map(|v| *v == cpu_id)?
{
new_group.push(cpu_id);
}
}
new_desc.push(new_group);
}
Ok(ResourceDescriptorKind::groups(new_desc).unwrap())
}
/// Detects additional resources (apart from CPU) on this worker.
/// Also returns the detected GPU families.
pub fn detect_additional_resources(
items: &mut Vec<ResourceDescriptorItem>,
) -> anyhow::Result<Set<GpuFamily>> {
let mut gpu_families = Set::new();
let has_resource =
|items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name);
let detected_gpus = detect_gpus_from_env();
if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) {
if let Ok(count) = read_nvidia_linux_gpu_count() {
if count > 0 {
gpu_families.insert(GpuFamily::Nvidia);
log::info!("Detected {} GPUs from procs", count);
items.push(ResourceDescriptorItem {
name: NVIDIA_GPU_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::simple_indices(count as u32),
});
}
}
} else {
for gpu in detected_gpus {
if !has_resource(items, gpu.resource_name) {
gpu_families.insert(gpu.family);
items.push(ResourceDescriptorItem {
name: gpu.resource_name.to_string(),
kind: gpu.resource,
});
}
}
}
if !has_resource(items, MEM_RESOURCE_NAME) {
if let Ok(mem) = read_linux_memory() {
log::info!("Detected {mem}B of memory ({})", human_size(mem));
items.push(ResourceDescriptorItem {
name: MEM_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::Sum { size: mem },
});
}
}
Ok(gpu_families)
}
/// GPU resource that can be detected from an environment variable.
pub struct GpuEnvironmentRecord {
env_var: &'static str,
pub resource_name: &'static str,
pub family: GpuFamily,
}
impl GpuEnvironmentRecord {
const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self {
Self {
env_var,
resource_name,
family,
}
}
}
pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[
GpuEnvironmentRecord::new(
"CUDA_VISIBLE_DEVICES",
NVIDIA_GPU_RESOURCE_NAME,
GpuFamily::Nvidia,
),
GpuEnvironmentRecord::new(
"ROCR_VISIBLE_DEVICES",
AMD_GPU_RESOURCE_NAME,
GpuFamily::Amd,
),
];
struct DetectedGpu {
resource_name: &'static str,
resource: ResourceDescriptorKind,
family: GpuFamily,
}
/// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables.
fn detect_gpus_from_env() -> Vec<DetectedGpu> {
let mut gpus = Vec::new();
for gpu_env in GPU_ENVIRONMENTS {
if let Ok(devices_str) = std::env::var(gpu_env.env_var) {
if let Ok(devices) = parse_comma_separated_values(&devices_str) {
log::info!(
"Detected GPUs {} from `{}`",
format_comma_delimited(&devices),
gpu_env.env_var,
);
if !has_unique_elements(&devices) {
log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var);
continue;
}
let list =
ResourceDescriptorKind::list(devices).expect("List values were not unique");
gpus.push(DetectedGpu {
resource_name: gpu_env.resource_name,
resource: list,
family: gpu_env.family,
});
}
}
}
gpus
}
/// Try to find out how many Nvidia GPUs are available on the current node.
fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> {
Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count())
}
/// Try to get total memory on the current node.
fn read_linux_memory() -> anyhow::Result<u64> {
Ok(psutil::memory::virtual_memory()?.total())
}
/// Try to find the CPU NUMA configuration.
///
/// Returns a list of NUMA nodes, each node contains a list of assigned CPUs.
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> {
let nodes = parse_range(&std::fs::read_to_string(
"/sys/devices/system/node/possible",
)?)?;
let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new();
for numa_index in nodes {
let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist");
numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?);
}
log::debug!("Linux numa detection is successful");
Ok(numa_nodes)
}
fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> {
let filename = format!(
"/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list",
cpu_id
);
log::debug!("Reading {}", filename);
parse_range(&std::fs::read_to_string(filename)?)
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect())
}
fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> {
map_res(
tuple((
terminated(p_u32, space0),
opt(terminated(
preceded(tuple((tag("-"), space0)), p_u32),
space0,
)),
)),
|(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()),
)
.parse(input)
}
fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> {
separated_list1(terminated(tag(","), space0), p_cpu_range)(input)
.map(|(a, b)| (a, b.into_iter().flatten().collect()))
}
fn parse_range(input: &str) -> anyhow::Result<Vec | use anyhow::anyhow;
use nom::character::complete::{newline, satisfy, space0};
use nom::combinator::{map, map_res, opt};
use nom::multi::{many1, separated_list1}; | random_line_split |
|
hwdetect.rs | crate::common::format::human_size;
use crate::common::parser::{consume_all, p_u32, NomResult};
pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> {
read_linux_numa()
.map(|numa_nodes| {
let filtered = filter_masked_cpus(numa_nodes.clone());
if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() {
log::info!(
"Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.",
numa_nodes
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>(),
filtered
.iter()
.map(|c| format_comma_delimited(c.iter().map(|c| c.as_num())))
.collect::<Vec<_>>()
);
}
filtered
})
.and_then(|groups| {
ResourceDescriptorKind::groups_numeric(groups)
.map_err(|_| anyhow!("Inconsistent CPU naming got from detection"))
})
.or_else(|e| {
log::debug!("Detecting linux failed: {}", e);
let n_cpus = num_cpus::get() as u32;
if n_cpus < 1 {
anyhow::bail!("Cpu detection failed");
};
Ok(ResourceDescriptorKind::simple_indices(n_cpus))
})
}
/// Filter cores that are not allowed because of CPU affinity mask.
fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> {
match core_affinity::get_core_ids() {
Some(allowed) => {
let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect();
numa_nodes
.into_iter()
.map(|mut numa_node| {
numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize()));
numa_node
})
.collect()
}
None => {
log::error!("Failed to found CPU mask. Allowing all cores.");
numa_nodes
}
}
}
pub fn prune_hyper_threading(
kind: &ResourceDescriptorKind,
) -> anyhow::Result<ResourceDescriptorKind> {
let groups = kind.as_groups();
let mut new_desc = Vec::new();
for group in groups {
let mut new_group = Vec::new();
for cpu_id in group {
if read_linux_thread_siblings(&cpu_id)?
.iter()
.min()
.ok_or_else(|| anyhow::anyhow!("Thread siblings are empty"))
.map(|v| *v == cpu_id)?
{
new_group.push(cpu_id);
}
}
new_desc.push(new_group);
}
Ok(ResourceDescriptorKind::groups(new_desc).unwrap())
}
/// Detects additional resources (apart from CPU) on this worker.
/// Also returns the detected GPU families.
pub fn detect_additional_resources(
items: &mut Vec<ResourceDescriptorItem>,
) -> anyhow::Result<Set<GpuFamily>> {
let mut gpu_families = Set::new();
let has_resource =
|items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name);
let detected_gpus = detect_gpus_from_env();
if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) {
if let Ok(count) = read_nvidia_linux_gpu_count() {
if count > 0 {
gpu_families.insert(GpuFamily::Nvidia);
log::info!("Detected {} GPUs from procs", count);
items.push(ResourceDescriptorItem {
name: NVIDIA_GPU_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::simple_indices(count as u32),
});
}
}
} else {
for gpu in detected_gpus {
if !has_resource(items, gpu.resource_name) {
gpu_families.insert(gpu.family);
items.push(ResourceDescriptorItem {
name: gpu.resource_name.to_string(),
kind: gpu.resource,
});
}
}
}
if !has_resource(items, MEM_RESOURCE_NAME) {
if let Ok(mem) = read_linux_memory() {
log::info!("Detected {mem}B of memory ({})", human_size(mem));
items.push(ResourceDescriptorItem {
name: MEM_RESOURCE_NAME.to_string(),
kind: ResourceDescriptorKind::Sum { size: mem },
});
}
}
Ok(gpu_families)
}
/// GPU resource that can be detected from an environment variable.
pub struct GpuEnvironmentRecord {
env_var: &'static str,
pub resource_name: &'static str,
pub family: GpuFamily,
}
impl GpuEnvironmentRecord {
const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self {
Self {
env_var,
resource_name,
family,
}
}
}
pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[
GpuEnvironmentRecord::new(
"CUDA_VISIBLE_DEVICES",
NVIDIA_GPU_RESOURCE_NAME,
GpuFamily::Nvidia,
),
GpuEnvironmentRecord::new(
"ROCR_VISIBLE_DEVICES",
AMD_GPU_RESOURCE_NAME,
GpuFamily::Amd,
),
];
struct DetectedGpu {
resource_name: &'static str,
resource: ResourceDescriptorKind,
family: GpuFamily,
}
/// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables.
fn detect_gpus_from_env() -> Vec<DetectedGpu> {
let mut gpus = Vec::new();
for gpu_env in GPU_ENVIRONMENTS {
if let Ok(devices_str) = std::env::var(gpu_env.env_var) {
if let Ok(devices) = parse_comma_separated_values(&devices_str) {
log::info!(
"Detected GPUs {} from `{}`",
format_comma_delimited(&devices),
gpu_env.env_var,
);
if !has_unique_elements(&devices) {
log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var);
continue;
}
let list =
ResourceDescriptorKind::list(devices).expect("List values were not unique");
gpus.push(DetectedGpu {
resource_name: gpu_env.resource_name,
resource: list,
family: gpu_env.family,
});
}
}
}
gpus
}
/// Try to find out how many Nvidia GPUs are available on the current node.
fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> {
Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count())
}
/// Try to get total memory on the current node.
fn | () -> anyhow::Result<u64> {
Ok(psutil::memory::virtual_memory()?.total())
}
/// Try to find the CPU NUMA configuration.
///
/// Returns a list of NUMA nodes, each node contains a list of assigned CPUs.
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> {
let nodes = parse_range(&std::fs::read_to_string(
"/sys/devices/system/node/possible",
)?)?;
let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new();
for numa_index in nodes {
let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist");
numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?);
}
log::debug!("Linux numa detection is successful");
Ok(numa_nodes)
}
fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> {
let filename = format!(
"/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list",
cpu_id
);
log::debug!("Reading {}", filename);
parse_range(&std::fs::read_to_string(filename)?)
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect())
}
fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> {
map_res(
tuple((
terminated(p_u32, space0),
opt(terminated(
preceded(tuple((tag("-"), space0)), p_u32),
space0,
)),
)),
|(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()),
)
.parse(input)
}
fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> {
separated_list1(terminated(tag(","), space0), p_cpu_range)(input)
.map(|(a, b)| (a, b.into_iter().flatten().collect()))
}
fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> {
let parser = terminated(p_cpu_ranges, opt(newline));
consume_all(parser, input)
}
fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> {
let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| {
items.into_iter().collect::<String>()
});
consume_all(separated_list1(tag(","), any_except_comma), input)
}
#[cfg(test)]
mod tests {
use tako::AsIdVec;
use super::{parse_range, read_linux_numa};
#[test]
fn test_parse_range() {
assert_eq | read_linux_memory | identifier_name |
server.rs | .
anti_replay: AntiReplay,
/// A function for determining if 0-RTT can be accepted.
zero_rtt_checker: ServerZeroRttChecker,
/// A connection ID generator.
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
/// Connection parameters.
conn_params: ConnectionParameters,
/// Active connection attempts, keyed by `AttemptKey`. Initial packets with
/// the same key are routed to the connection that was first accepted.
/// This is cleared out when the connection is closed or established.
active_attempts: HashMap<AttemptKey, StateRef>,
/// All connections, keyed by ConnectionId.
connections: ConnectionTableRef,
/// The connections that have new events.
active: HashSet<ActiveConnectionRef>,
/// The set of connections that need immediate processing.
waiting: VecDeque<StateRef>,
/// Outstanding timers for connections.
timers: Timer<StateRef>,
/// Address validation logic, which determines whether we send a Retry.
address_validation: Rc<RefCell<AddressValidation>>,
/// Directory to create qlog traces in
qlog_dir: Option<PathBuf>,
/// Encrypted client hello (ECH) configuration.
ech_config: Option<EchConfig>,
}
impl Server {
/// Construct a new server.
/// * `now` is the time that the server is instantiated.
/// * `certs` is a list of the certificates that should be configured.
/// * `protocols` is the preference list of ALPN values.
/// * `anti_replay` is an anti-replay context.
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This
/// will be passed the value of the `extra` argument that was passed to
/// `Connection::send_ticket` to see if it is OK.
/// * `cid_generator` is responsible for generating connection IDs and parsing them;
/// connection IDs produced by the manager cannot be zero-length.
pub fn new(
now: Instant,
certs: &[impl AsRef<str>],
protocols: &[impl AsRef<str>],
anti_replay: AntiReplay,
zero_rtt_checker: Box<dyn ZeroRttChecker>,
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
conn_params: ConnectionParameters,
) -> Res<Self> {
let validation = AddressValidation::new(now, ValidateAddress::Never)?;
Ok(Self {
certs: certs.iter().map(|x| String::from(x.as_ref())).collect(),
protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(),
ciphers: Vec::new(),
anti_replay,
zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker),
cid_generator,
conn_params,
active_attempts: HashMap::default(),
connections: Rc::default(),
active: HashSet::default(),
waiting: VecDeque::default(),
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
address_validation: Rc::new(RefCell::new(validation)),
qlog_dir: None,
ech_config: None,
})
}
/// Set or clear directory to create logs of connection events in QLOG format.
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
self.qlog_dir = dir;
}
/// Set the policy for address validation.
pub fn set_validation(&mut self, v: ValidateAddress) {
self.address_validation.borrow_mut().set_validation(v);
}
/// Set the cipher suites that should be used. Set an empty value to use
/// default values.
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) {
self.ciphers = Vec::from(ciphers.as_ref());
}
pub fn enable_ech(
&mut self,
config: u8,
public_name: &str,
sk: &PrivateKey,
pk: &PublicKey,
) -> Res<()> {
self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?);
Ok(())
}
pub fn ech_config(&self) -> &[u8] {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
fn remove_timer(&mut self, c: &StateRef) {
let last = c.borrow().last_timer;
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
}
fn process_connection(
&mut self,
c: StateRef,
dgram: Option<Datagram>,
now: Instant,
) -> Option<Datagram> {
qtrace!([self], "Process connection {:?}", c);
let out = c.borrow_mut().process(dgram, now);
match out {
Output::Datagram(_) => {
qtrace!([self], "Sending packet, added to waiting connections");
self.waiting.push_back(Rc::clone(&c));
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
self.remove_timer(&c);
c.borrow_mut().last_timer = next;
self.timers.add(next, Rc::clone(&c));
}
}
_ => |
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) });
}
if *c.borrow().state() > State::Handshaking {
// Remove any active connection attempt now that this is no longer handshaking.
if let Some(k) = c.borrow_mut().active_attempt.take() {
self.active_attempts.remove(&k);
}
}
if matches!(c.borrow().state(), State::Closed(_)) {
c.borrow_mut().set_qlog(NeqoQlog::disabled());
self.connections
.borrow_mut()
.retain(|_, v| !Rc::ptr_eq(v, &c));
}
out.dgram()
}
fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> {
self.connections.borrow().get(&cid[..]).map(Rc::clone)
}
fn handle_initial(
&mut self,
initial: InitialDetails,
dgram: Datagram,
now: Instant,
) -> Option<Datagram> {
qdebug!([self], "Handle initial");
let res = self
.address_validation
.borrow()
.validate(&initial.token, dgram.source(), now);
match res {
AddressValidationResult::Invalid => None,
AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now),
AddressValidationResult::ValidRetry(orig_dcid) => {
self.connection_attempt(initial, dgram, Some(orig_dcid), now)
}
AddressValidationResult::Validate => {
qinfo!([self], "Send retry for {:?}", initial.dst_cid);
let res = self.address_validation.borrow().generate_retry_token(
&initial.dst_cid,
dgram.source(),
now,
);
let token = if let Ok(t) = res {
t
} else {
qerror!([self], "unable to generate token, dropping packet");
return None;
};
if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() {
let packet = PacketBuilder::retry(
initial.version,
&initial.src_cid,
&new_dcid,
&token,
&initial.dst_cid,
);
if let Ok(p) = packet {
let retry = Datagram::new(dgram.destination(), dgram.source(), p);
Some(retry)
} else {
qerror!([self], "unable to encode retry, dropping packet");
None
}
} else {
qerror!([self], "no connection ID for retry, dropping packet");
None
}
}
}
}
fn connection_attempt(
&mut self,
initial: InitialDetails,
dgram: Datagram,
orig_dcid: Option<ConnectionId>,
now: Instant,
) -> Option<Datagram> {
let attempt_key = AttemptKey {
remote_address: dgram.source(),
odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(),
};
if let Some(c) = self.active_attempts.get(&attempt_key) {
qdebug!(
[self],
"Handle Initial for existing connection attempt {:?}",
attempt_key
);
let c = Rc::clone(c);
self.process_connection(c, Some(dgram), now)
} else {
self.accept_connection(attempt_key, initial, dgram, orig_dcid, now)
}
}
fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog {
if let Some(qlog_dir) = &self.qlog_dir {
let mut qlog_path = qlog_dir.to_path_buf();
qlog_path.push(format!("{}.qlog", attempt_key.odcid));
// The original DCID is chosen by the client. Using create_new()
// prevents attackers from overwriting existing logs.
match OpenOptions::new()
.write(true)
.create_new(true)
.open(&qlog_path)
| {
self.remove_timer(&c);
} | conditional_block |
server.rs | /// as this depends on there being some distribution of events.
const TIMER_GRANULARITY: Duration = Duration::from_millis(4);
/// The number of buckets in the timer. As mentioned in the definition of `Timer`,
/// the granularity and capacity need to multiply to be larger than the largest
/// delay that might be used. That's the idle timeout (currently 30s).
const TIMER_CAPACITY: usize = 16384;
type StateRef = Rc<RefCell<ServerConnectionState>>;
type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
#[derive(Debug)]
pub struct ServerConnectionState {
c: Connection,
active_attempt: Option<AttemptKey>,
last_timer: Instant,
}
impl Deref for ServerConnectionState {
type Target = Connection;
fn deref(&self) -> &Self::Target {
&self.c
}
}
impl DerefMut for ServerConnectionState {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.c
}
}
/// A `AttemptKey` is used to disambiguate connection attempts.
/// Multiple connection attempts with the same key won't produce multiple connections.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct AttemptKey {
// Using the remote address is sufficient for disambiguation,
// until we support multiple local socket addresses.
remote_address: SocketAddr,
odcid: ConnectionId,
}
/// A `ServerZeroRttChecker` is a simple wrapper around a single checker.
/// It uses `RefCell` so that the wrapped checker can be shared between
/// multiple connections created by the server.
#[derive(Clone, Debug)]
struct ServerZeroRttChecker {
checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>,
}
impl ServerZeroRttChecker {
pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self {
Self {
checker: Rc::new(RefCell::new(checker)),
}
}
}
impl ZeroRttChecker for ServerZeroRttChecker {
fn check(&self, token: &[u8]) -> ZeroRttCheckResult {
self.checker.borrow().check(token)
}
}
/// `InitialDetails` holds important information for processing `Initial` packets.
struct InitialDetails {
src_cid: ConnectionId,
dst_cid: ConnectionId,
token: Vec<u8>,
version: Version,
}
impl InitialDetails {
fn new(packet: &PublicPacket) -> Self {
Self {
src_cid: ConnectionId::from(packet.scid()),
dst_cid: ConnectionId::from(packet.dcid()),
token: packet.token().to_vec(),
version: packet.version().unwrap(),
}
}
}
struct EchConfig {
config: u8,
public_name: String,
sk: PrivateKey,
pk: PublicKey,
encoded: Vec<u8>,
}
impl EchConfig {
fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> {
let encoded = encode_ech_config(config, public_name, pk)?;
Ok(Self {
config,
public_name: String::from(public_name),
sk: sk.clone(),
pk: pk.clone(),
encoded,
})
}
}
pub struct Server {
/// The names of certificates.
certs: Vec<String>,
/// The ALPN values that the server supports.
protocols: Vec<String>,
/// The cipher suites that the server supports.
ciphers: Vec<Cipher>,
/// Anti-replay configuration for 0-RTT.
anti_replay: AntiReplay,
/// A function for determining if 0-RTT can be accepted.
zero_rtt_checker: ServerZeroRttChecker,
/// A connection ID generator.
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
/// Connection parameters.
conn_params: ConnectionParameters,
/// Active connection attempts, keyed by `AttemptKey`. Initial packets with
/// the same key are routed to the connection that was first accepted.
/// This is cleared out when the connection is closed or established.
active_attempts: HashMap<AttemptKey, StateRef>,
/// All connections, keyed by ConnectionId.
connections: ConnectionTableRef,
/// The connections that have new events.
active: HashSet<ActiveConnectionRef>,
/// The set of connections that need immediate processing.
waiting: VecDeque<StateRef>,
/// Outstanding timers for connections.
timers: Timer<StateRef>,
/// Address validation logic, which determines whether we send a Retry.
address_validation: Rc<RefCell<AddressValidation>>,
/// Directory to create qlog traces in
qlog_dir: Option<PathBuf>,
/// Encrypted client hello (ECH) configuration.
ech_config: Option<EchConfig>,
}
impl Server {
/// Construct a new server.
/// * `now` is the time that the server is instantiated.
/// * `certs` is a list of the certificates that should be configured.
/// * `protocols` is the preference list of ALPN values.
/// * `anti_replay` is an anti-replay context.
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This
/// will be passed the value of the `extra` argument that was passed to
/// `Connection::send_ticket` to see if it is OK.
/// * `cid_generator` is responsible for generating connection IDs and parsing them;
/// connection IDs produced by the manager cannot be zero-length.
pub fn new(
now: Instant,
certs: &[impl AsRef<str>],
protocols: &[impl AsRef<str>],
anti_replay: AntiReplay,
zero_rtt_checker: Box<dyn ZeroRttChecker>,
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
conn_params: ConnectionParameters,
) -> Res<Self> {
let validation = AddressValidation::new(now, ValidateAddress::Never)?;
Ok(Self {
certs: certs.iter().map(|x| String::from(x.as_ref())).collect(),
protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(),
ciphers: Vec::new(),
anti_replay,
zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker),
cid_generator,
conn_params,
active_attempts: HashMap::default(),
connections: Rc::default(),
active: HashSet::default(),
waiting: VecDeque::default(),
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
address_validation: Rc::new(RefCell::new(validation)),
qlog_dir: None,
ech_config: None,
})
}
/// Set or clear directory to create logs of connection events in QLOG format.
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
self.qlog_dir = dir;
}
/// Set the policy for address validation.
pub fn set_validation(&mut self, v: ValidateAddress) {
self.address_validation.borrow_mut().set_validation(v);
}
/// Set the cipher suites that should be used. Set an empty value to use
/// default values.
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) {
self.ciphers = Vec::from(ciphers.as_ref());
}
pub fn enable_ech(
&mut self,
config: u8,
public_name: &str,
sk: &PrivateKey,
pk: &PublicKey,
) -> Res<()> {
self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?);
Ok(())
}
pub fn ech_config(&self) -> &[u8] {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
fn remove_timer(&mut self, c: &StateRef) {
let last = c.borrow().last_timer;
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
}
fn process_connection(
&mut self,
c: StateRef,
dgram: Option<Datagram>,
now: Instant,
) -> Option<Datagram> {
qtrace!([self], "Process connection {:?}", c);
let out = c.borrow_mut().process(dgram, now);
match out {
Output::Datagram(_) => {
qtrace!([self], "Sending packet, added to waiting connections");
self.waiting.push_back(Rc::clone(&c));
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
self.remove_timer(&c);
c.borrow_mut().last_timer = next;
self.timers.add(next, Rc::clone(&c));
}
}
_ => {
self.remove_timer(&c);
}
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) });
}
if *c.borrow().state() > State::Handshaking {
// Remove any active connection attempt now that this is no longer handshaking.
if let | const MIN_INITIAL_PACKET_SIZE: usize = 1200;
/// The size of timer buckets. This is higher than the actual timer granularity | random_line_split |
|
server.rs |
}
impl DerefMut for ServerConnectionState {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.c
}
}
/// A `AttemptKey` is used to disambiguate connection attempts.
/// Multiple connection attempts with the same key won't produce multiple connections.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct AttemptKey {
// Using the remote address is sufficient for disambiguation,
// until we support multiple local socket addresses.
remote_address: SocketAddr,
odcid: ConnectionId,
}
/// A `ServerZeroRttChecker` is a simple wrapper around a single checker.
/// It uses `RefCell` so that the wrapped checker can be shared between
/// multiple connections created by the server.
#[derive(Clone, Debug)]
struct ServerZeroRttChecker {
checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>,
}
impl ServerZeroRttChecker {
pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self {
Self {
checker: Rc::new(RefCell::new(checker)),
}
}
}
impl ZeroRttChecker for ServerZeroRttChecker {
fn check(&self, token: &[u8]) -> ZeroRttCheckResult {
self.checker.borrow().check(token)
}
}
/// `InitialDetails` holds important information for processing `Initial` packets.
struct InitialDetails {
src_cid: ConnectionId,
dst_cid: ConnectionId,
token: Vec<u8>,
version: Version,
}
impl InitialDetails {
fn new(packet: &PublicPacket) -> Self {
Self {
src_cid: ConnectionId::from(packet.scid()),
dst_cid: ConnectionId::from(packet.dcid()),
token: packet.token().to_vec(),
version: packet.version().unwrap(),
}
}
}
struct EchConfig {
config: u8,
public_name: String,
sk: PrivateKey,
pk: PublicKey,
encoded: Vec<u8>,
}
impl EchConfig {
fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> {
let encoded = encode_ech_config(config, public_name, pk)?;
Ok(Self {
config,
public_name: String::from(public_name),
sk: sk.clone(),
pk: pk.clone(),
encoded,
})
}
}
pub struct Server {
/// The names of certificates.
certs: Vec<String>,
/// The ALPN values that the server supports.
protocols: Vec<String>,
/// The cipher suites that the server supports.
ciphers: Vec<Cipher>,
/// Anti-replay configuration for 0-RTT.
anti_replay: AntiReplay,
/// A function for determining if 0-RTT can be accepted.
zero_rtt_checker: ServerZeroRttChecker,
/// A connection ID generator.
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
/// Connection parameters.
conn_params: ConnectionParameters,
/// Active connection attempts, keyed by `AttemptKey`. Initial packets with
/// the same key are routed to the connection that was first accepted.
/// This is cleared out when the connection is closed or established.
active_attempts: HashMap<AttemptKey, StateRef>,
/// All connections, keyed by ConnectionId.
connections: ConnectionTableRef,
/// The connections that have new events.
active: HashSet<ActiveConnectionRef>,
/// The set of connections that need immediate processing.
waiting: VecDeque<StateRef>,
/// Outstanding timers for connections.
timers: Timer<StateRef>,
/// Address validation logic, which determines whether we send a Retry.
address_validation: Rc<RefCell<AddressValidation>>,
/// Directory to create qlog traces in
qlog_dir: Option<PathBuf>,
/// Encrypted client hello (ECH) configuration.
ech_config: Option<EchConfig>,
}
impl Server {
/// Construct a new server.
/// * `now` is the time that the server is instantiated.
/// * `certs` is a list of the certificates that should be configured.
/// * `protocols` is the preference list of ALPN values.
/// * `anti_replay` is an anti-replay context.
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This
/// will be passed the value of the `extra` argument that was passed to
/// `Connection::send_ticket` to see if it is OK.
/// * `cid_generator` is responsible for generating connection IDs and parsing them;
/// connection IDs produced by the manager cannot be zero-length.
pub fn new(
now: Instant,
certs: &[impl AsRef<str>],
protocols: &[impl AsRef<str>],
anti_replay: AntiReplay,
zero_rtt_checker: Box<dyn ZeroRttChecker>,
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
conn_params: ConnectionParameters,
) -> Res<Self> {
let validation = AddressValidation::new(now, ValidateAddress::Never)?;
Ok(Self {
certs: certs.iter().map(|x| String::from(x.as_ref())).collect(),
protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(),
ciphers: Vec::new(),
anti_replay,
zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker),
cid_generator,
conn_params,
active_attempts: HashMap::default(),
connections: Rc::default(),
active: HashSet::default(),
waiting: VecDeque::default(),
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
address_validation: Rc::new(RefCell::new(validation)),
qlog_dir: None,
ech_config: None,
})
}
/// Set or clear directory to create logs of connection events in QLOG format.
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
self.qlog_dir = dir;
}
/// Set the policy for address validation.
pub fn set_validation(&mut self, v: ValidateAddress) {
self.address_validation.borrow_mut().set_validation(v);
}
/// Set the cipher suites that should be used. Set an empty value to use
/// default values.
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) {
self.ciphers = Vec::from(ciphers.as_ref());
}
pub fn enable_ech(
&mut self,
config: u8,
public_name: &str,
sk: &PrivateKey,
pk: &PublicKey,
) -> Res<()> {
self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?);
Ok(())
}
pub fn ech_config(&self) -> &[u8] {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
fn remove_timer(&mut self, c: &StateRef) {
let last = c.borrow().last_timer;
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
}
fn process_connection(
&mut self,
c: StateRef,
dgram: Option<Datagram>,
now: Instant,
) -> Option<Datagram> {
qtrace!([self], "Process connection {:?}", c);
let out = c.borrow_mut().process(dgram, now);
match out {
Output::Datagram(_) => {
qtrace!([self], "Sending packet, added to waiting connections");
self.waiting.push_back(Rc::clone(&c));
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
self.remove_timer(&c);
c.borrow_mut().last_timer = next;
self.timers.add(next, Rc::clone(&c));
}
}
_ => {
self.remove_timer(&c);
}
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) });
}
if *c.borrow().state() > State::Handshaking {
// Remove any active connection attempt now that this is no longer handshaking.
if let Some(k) = c.borrow_mut().active_attempt.take() {
self.active_attempts.remove(&k);
}
}
if matches!(c.borrow().state(), State::Closed(_)) {
c.borrow_mut().set_qlog(NeqoQlog::disabled());
self.connections
.borrow_mut()
.retain(|_, v| !Rc::ptr_eq(v, &c));
}
out.dgram()
}
fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> {
self.connections.borrow().get(&cid[..]).map(Rc::clone)
}
fn handle_initial(
&mut self,
initial: InitialDetails,
dgram: Datagram,
now: Instant,
) -> Option<Datagram> {
qdebug!([self], "Handle initial");
let res = self
.address_validation
.borrow()
.validate(&initial.token, dgram.source(), now);
match res {
AddressValidationResult:: | {
&self.c
} | identifier_body |
|
server.rs | {
// Using the remote address is sufficient for disambiguation,
// until we support multiple local socket addresses.
remote_address: SocketAddr,
odcid: ConnectionId,
}
/// A `ServerZeroRttChecker` is a simple wrapper around a single checker.
/// It uses `RefCell` so that the wrapped checker can be shared between
/// multiple connections created by the server.
#[derive(Clone, Debug)]
struct ServerZeroRttChecker {
checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>,
}
impl ServerZeroRttChecker {
pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self {
Self {
checker: Rc::new(RefCell::new(checker)),
}
}
}
impl ZeroRttChecker for ServerZeroRttChecker {
fn check(&self, token: &[u8]) -> ZeroRttCheckResult {
self.checker.borrow().check(token)
}
}
/// `InitialDetails` holds important information for processing `Initial` packets.
struct InitialDetails {
src_cid: ConnectionId,
dst_cid: ConnectionId,
token: Vec<u8>,
version: Version,
}
impl InitialDetails {
fn new(packet: &PublicPacket) -> Self {
Self {
src_cid: ConnectionId::from(packet.scid()),
dst_cid: ConnectionId::from(packet.dcid()),
token: packet.token().to_vec(),
version: packet.version().unwrap(),
}
}
}
struct EchConfig {
config: u8,
public_name: String,
sk: PrivateKey,
pk: PublicKey,
encoded: Vec<u8>,
}
impl EchConfig {
fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> {
let encoded = encode_ech_config(config, public_name, pk)?;
Ok(Self {
config,
public_name: String::from(public_name),
sk: sk.clone(),
pk: pk.clone(),
encoded,
})
}
}
pub struct Server {
/// The names of certificates.
certs: Vec<String>,
/// The ALPN values that the server supports.
protocols: Vec<String>,
/// The cipher suites that the server supports.
ciphers: Vec<Cipher>,
/// Anti-replay configuration for 0-RTT.
anti_replay: AntiReplay,
/// A function for determining if 0-RTT can be accepted.
zero_rtt_checker: ServerZeroRttChecker,
/// A connection ID generator.
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
/// Connection parameters.
conn_params: ConnectionParameters,
/// Active connection attempts, keyed by `AttemptKey`. Initial packets with
/// the same key are routed to the connection that was first accepted.
/// This is cleared out when the connection is closed or established.
active_attempts: HashMap<AttemptKey, StateRef>,
/// All connections, keyed by ConnectionId.
connections: ConnectionTableRef,
/// The connections that have new events.
active: HashSet<ActiveConnectionRef>,
/// The set of connections that need immediate processing.
waiting: VecDeque<StateRef>,
/// Outstanding timers for connections.
timers: Timer<StateRef>,
/// Address validation logic, which determines whether we send a Retry.
address_validation: Rc<RefCell<AddressValidation>>,
/// Directory to create qlog traces in
qlog_dir: Option<PathBuf>,
/// Encrypted client hello (ECH) configuration.
ech_config: Option<EchConfig>,
}
impl Server {
/// Construct a new server.
/// * `now` is the time that the server is instantiated.
/// * `certs` is a list of the certificates that should be configured.
/// * `protocols` is the preference list of ALPN values.
/// * `anti_replay` is an anti-replay context.
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This
/// will be passed the value of the `extra` argument that was passed to
/// `Connection::send_ticket` to see if it is OK.
/// * `cid_generator` is responsible for generating connection IDs and parsing them;
/// connection IDs produced by the manager cannot be zero-length.
pub fn new(
now: Instant,
certs: &[impl AsRef<str>],
protocols: &[impl AsRef<str>],
anti_replay: AntiReplay,
zero_rtt_checker: Box<dyn ZeroRttChecker>,
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
conn_params: ConnectionParameters,
) -> Res<Self> {
let validation = AddressValidation::new(now, ValidateAddress::Never)?;
Ok(Self {
certs: certs.iter().map(|x| String::from(x.as_ref())).collect(),
protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(),
ciphers: Vec::new(),
anti_replay,
zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker),
cid_generator,
conn_params,
active_attempts: HashMap::default(),
connections: Rc::default(),
active: HashSet::default(),
waiting: VecDeque::default(),
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
address_validation: Rc::new(RefCell::new(validation)),
qlog_dir: None,
ech_config: None,
})
}
/// Set or clear directory to create logs of connection events in QLOG format.
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
self.qlog_dir = dir;
}
/// Set the policy for address validation.
pub fn set_validation(&mut self, v: ValidateAddress) {
self.address_validation.borrow_mut().set_validation(v);
}
/// Set the cipher suites that should be used. Set an empty value to use
/// default values.
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) {
self.ciphers = Vec::from(ciphers.as_ref());
}
pub fn enable_ech(
&mut self,
config: u8,
public_name: &str,
sk: &PrivateKey,
pk: &PublicKey,
) -> Res<()> {
self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?);
Ok(())
}
pub fn ech_config(&self) -> &[u8] {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
fn remove_timer(&mut self, c: &StateRef) {
let last = c.borrow().last_timer;
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
}
fn process_connection(
&mut self,
c: StateRef,
dgram: Option<Datagram>,
now: Instant,
) -> Option<Datagram> {
qtrace!([self], "Process connection {:?}", c);
let out = c.borrow_mut().process(dgram, now);
match out {
Output::Datagram(_) => {
qtrace!([self], "Sending packet, added to waiting connections");
self.waiting.push_back(Rc::clone(&c));
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
self.remove_timer(&c);
c.borrow_mut().last_timer = next;
self.timers.add(next, Rc::clone(&c));
}
}
_ => {
self.remove_timer(&c);
}
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) });
}
if *c.borrow().state() > State::Handshaking {
// Remove any active connection attempt now that this is no longer handshaking.
if let Some(k) = c.borrow_mut().active_attempt.take() {
self.active_attempts.remove(&k);
}
}
if matches!(c.borrow().state(), State::Closed(_)) {
c.borrow_mut().set_qlog(NeqoQlog::disabled());
self.connections
.borrow_mut()
.retain(|_, v| !Rc::ptr_eq(v, &c));
}
out.dgram()
}
fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> {
self.connections.borrow().get(&cid[..]).map(Rc::clone)
}
fn handle_initial(
&mut self,
initial: InitialDetails,
dgram: Datagram,
now: Instant,
) -> Option<Datagram> {
qdebug!([self], "Handle initial");
let res = self
.address_validation
.borrow()
.validate(&initial.token, dgram.source(), now);
match res {
AddressValidationResult::Invalid => None,
AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now),
AddressValidationResult::ValidRetry(orig_dcid) => {
self.connection_attempt(initial, dgram, Some(orig_dcid), now)
}
AddressValidationResult::Validate => {
qinfo!([self], "Send retry for {:?}", initial.dst_cid);
let res = self.address_validation.borrow(). | AttemptKey | identifier_name |
|
replay.py | 0 then the algorithm works in the inverse manner as described in the paper.
You should imagine the buffer manager as a middle-aged fat man who believes his role is key to the success of
the company, even though many people think they could do without him."""
self.capacity = initial_capacity
self.k = size_change
self.td_error = 0
def update_td_error(self, new_td_error):
self.td_error = abs(new_td_error)
def update_memory_size(self, new_td_error):
new_td_error = abs(new_td_error)
# update = -1 if new_td_error < self.td_error, then the buffer must decrease;
# update = 1 if new_td_error > self.td_error, then the buffer must increase;
# update = 0 if new_td_error = self.td_error, buffer size remains constant.
delta = new_td_error - self.td_error
e = 1e-7
if abs(delta) < e:
# for numeric stability
return self.capacity
update = delta / abs(delta)
# allow for non-linear update (not covered in the method proposed by the paper)
if abs(self.k) < 1:
update *= int(self.capacity * self.k)
else:
update *= int(self.k)
# Update the buffer size
self.capacity = max(self.capacity + update, 1)
# Update the stored td_error
self.update_td_error(new_td_error)
return self.capacity
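# Worked example (values illustrative): with initial_capacity=100 and
# size_change=0.1, a TD-error rise from 0.5 to 0.8 gives delta > 0, so
# update = +int(100 * 0.1) = +10 and the capacity grows to 110; a later
# fall in TD-error would shrink it by 10% of the then-current capacity.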
class NaiveReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
# List is necessary for dynamic buffer
self.memory = [] # deque(maxlen=capacity)
def pop(self, idx=0):
# Pop is redefined as taking the oldest element (FIFO) for convenience.
return self.memory.pop(idx)
def memory_full(self):
return len(self.memory) >= self.capacity
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def resize_memory(self, new_size=None):
"""Redefines the size of the buffer.
Inputs:
new_size (type: int), capacity = new_size."""
self.capacity = new_size
# self.push() takes care of decreasing the memory.
# # Oldest experiences are discarded. For Ever.
# # TODO: Check for a more efficient way of cleaning the memory.
# while len(self.memory) > self.capacity:
# _ = self.pop()
def __len__(self):
return len(self.memory)
# Add different experience replay methods
class CombinedReplayMemory(NaiveReplayMemory):
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
self.last_transition = transition
def sample(self, batch_size):
samples = random.sample(self.memory, batch_size - 1)
samples.append(self.last_transition)
return samples
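# Usage sketch (the transition layout is up to the caller):
# crm = CombinedReplayMemory(capacity=1000)
# crm.push((state, action, reward, next_state))
# batch = crm.sample(32) # 31 uniform draws plus the most recent transition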
class SumTree:
# started from https://github.com/wotmd5731/dqn/blob/master/memory.py
write = 0
def __init__(self, max_capacity):
self.capacity = max_capacity
self.tree = np.zeros(2 * max_capacity - 1)
# [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(max_capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
self.num = 0
self.e = 0.01 # small amount to avoid zero priority
self.a = 0.6 # [0~1] convert the importance of TD error to priority
def _get_priority(self, error):
return (error + self.e) ** self.a
def _propagate_old(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate_old(parent, change)
def _propagate(self, idx):
parent = (idx - 1) // 2
left = parent * 2 + 1
right = parent * 2 + 2
self.tree[parent] = self.tree[right] + self.tree[left]
if parent != 0:
self._propagate(parent)
def _retrieve(self, idx, rand):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree): # end search when no more child
return idx
if rand <= self.tree[left]: # downward search, always search for a higher priority node
return self._retrieve(left, rand)
else:
return self._retrieve(right, rand - self.tree[left])
def _total(self):
return self.tree[0] # the root
def add(self, error, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data # update data_frame
self.update(idx, error) # update tree_frame
self.write += 1
if self.write >= self.capacity: # replace when exceed the capacity
self.write = 0
if self.num < self.capacity:
self.num += 1
def update(self, idx, error):
p = self._get_priority(error)
# change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx)
def _get_single(self, a, b, rand):
#rand = random.uniform(a, b)
idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here)
data_idx = idx - self.capacity + 1
return idx, self.tree[idx], self.data[data_idx]
def get_batch(self, n):
batch_idx = []
batch = []
priorities = []
segment = self._total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
if data == 0:
# draw a fresh random value and retry if we landed on an empty slot
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
batch.append(data)
batch_idx.append(idx)
priorities.append(p)
return batch, batch_idx, priorities
def get_len(self):
return self.num
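# Sampling sketch: every parent stores the sum of its children, so the root
# holds the total priority mass and _retrieve maps a uniform draw from
# [0, total) to leaf i with probability p_i / total. Illustrative use:
# tree = SumTree(max_capacity=8)
# tree.add(error=0.5, data=(state, action, reward, next_state))
# batch, batch_idx, priorities = tree.get_batch(4)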
class RankBased:
def __init__(self, max_capacity):
self.capacity = max_capacity
self.data = []
self.priorities = None
self.total = None
self.cum_sum = None
self.tiebreaker = count()
def memory_full(self):
return len(self.data) >= self.capacity
def add(self, error, data):
# check if there is space left in memory
while self.memory_full():
oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0]
del self.data[oldest_idx]
# use tie breaker for transitions with equal error
data = (error, next(self.tiebreaker), *data)
heapq.heappush(self.data, data)
def update(self, idx, error):
self.data[idx] = (error, *self.data[idx][1:])
def get_batch(self, n):
self._update_priorities()
self.total = np.sum(self.priorities)
self.cum_sum = np.cumsum(self.priorities)
batch = []
priorities = []
# sampling the whole batch of indices at once is faster than sampling each one individually
rands = np.random.rand(n) * self.total
batch_idx = np.searchsorted(self.cum_sum, rands)
# picking transitions one by one is faster than indexing with a list
for idx in batch_idx:
batch.append(self.data[idx][2:])
priorities.append(self.priorities[idx])
return batch, batch_idx, priorities
def get_len(self):
return len(self.data)
def _update_priorities(self):
# order is inverse of actual position in heap
order = np.array(range(self.get_len() + 1, 1, -1))
self.priorities = 1. / order
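# Worked example: with 4 transitions stored, order = [5, 4, 3, 2], so
# priorities = [0.20, 0.25, 0.33, 0.50] and index 0 (the heap root, i.e. the
# smallest error) receives the least sampling mass. Note this is 1/(rank+1)
# rather than the 1/rank form usually quoted for rank-based prioritized
# replay, though the resulting proportions are close.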
class PrioritizedReplayMemory:
# stored as ( s, a, r, s_ ) in SumTree
# modified https://github.com/wotmd5731/dqn/blob/master/memory.py
def __init__(self, max_capacity, method="prop"):
if method == "prop":
self.container = SumTree(max_capacity)
elif method == "rank": | self.container = RankBased(max_capacity)
else:
raise ValueError("Bad replay method")
def memory_full(self): | random_line_split |
|
replay.py | then the algorithm works in the inverse manner as described in the paper.
You should imagine the buffer manager as a middle-aged fat man who believes his role is key to the success of
the company, even though many people think they could do without him."""
self.capacity = initial_capacity
self.k = size_change
self.td_error = 0
def update_td_error(self, new_td_error):
self.td_error = abs(new_td_error)
def update_memory_size(self, new_td_error):
new_td_error = abs(new_td_error)
# update = -1 if new_td_error < self.td_error, then the buffer must decrease;
# update = 1 if new_td_error > self.td_error, then the buffer must increase;
# update = 0 if new_td_error = self.td_error, buffer size remains constant.
delta = new_td_error - self.td_error
e = 1e-7
if abs(delta) < e:
# for numeric stability
return self.capacity
update = delta / abs(delta)
# allow for non-linear update (not covered in the method proposed by the paper)
if abs(self.k) < 1:
update *= int(self.capacity * self.k)
else:
update *= int(self.k)
# Update the buffer size
self.capacity = max(self.capacity + update, 1)
# Update the stored td_error
self.update_td_error(new_td_error)
return self.capacity
class NaiveReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
# List is necessary for dynamic buffer
self.memory = [] # deque(maxlen=capacity)
def pop(self, idx=0):
# Pop is redefined as taking the oldest element (FIFO) for convenience.
return self.memory.pop(idx)
def memory_full(self):
return len(self.memory) >= self.capacity
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def resize_memory(self, new_size=None):
"""Redefines the size of the buffer.
Inputs:
new_size (type: int), capacity = new_size."""
self.capacity = new_size
# self.push() takes care of decreasing the memory.
# # Oldest experiences are discarded. For Ever.
# # TODO: Check for a more efficient way of cleaning the memory.
# while len(self.memory) > self.capacity:
# _ = self.pop()
def __len__(self):
return len(self.memory)
# Add different experience replay methods
class CombinedReplayMemory(NaiveReplayMemory):
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
self.last_transition = transition
def sample(self, batch_size):
samples = random.sample(self.memory, batch_size - 1)
samples.append(self.last_transition)
return samples
class SumTree:
# started from https://github.com/wotmd5731/dqn/blob/master/memory.py
write = 0
def __init__(self, max_capacity):
self.capacity = max_capacity
self.tree = np.zeros(2 * max_capacity - 1)
# [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(max_capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
self.num = 0
self.e = 0.01 # small amount to avoid zero priority
self.a = 0.6 # [0~1] convert the importance of TD error to priority
def _get_priority(self, error):
return (error + self.e) ** self.a
def _propagate_old(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate_old(parent, change)
def _propagate(self, idx):
parent = (idx - 1) // 2
left = parent * 2 + 1
right = parent * 2 + 2
self.tree[parent] = self.tree[right] + self.tree[left]
if parent != 0:
self._propagate(parent)
def _retrieve(self, idx, rand):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree): # end search when no more child
return idx
if rand <= self.tree[left]: # downward search, always search for a higher priority node
return self._retrieve(left, rand)
else:
return self._retrieve(right, rand - self.tree[left])
def _total(self):
return self.tree[0] # the root
def add(self, error, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data # update data_frame
self.update(idx, error) # update tree_frame
self.write += 1
if self.write >= self.capacity: # replace when exceed the capacity
self.write = 0
if self.num < self.capacity:
self.num += 1
def update(self, idx, error):
p = self._get_priority(error)
# change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx)
def _get_single(self, a, b, rand):
#rand = random.uniform(a, b)
idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here)
data_idx = idx - self.capacity + 1
return idx, self.tree[idx], self.data[data_idx]
def get_batch(self, n):
batch_idx = []
batch = []
priorities = []
segment = self._total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
if data == 0:
# draw a fresh random value and retry if we landed on an empty slot
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
batch.append(data)
batch_idx.append(idx)
priorities.append(p)
return batch, batch_idx, priorities
def get_len(self):
return self.num
class RankBased:
def __init__(self, max_capacity):
self.capacity = max_capacity
self.data = []
self.priorities = None
self.total = None
self.cum_sum = None
self.tiebreaker = count()
def memory_full(self):
return len(self.data) >= self.capacity
def add(self, error, data):
# check if there is space left in memory
while self.memory_full():
oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0]
del self.data[oldest_idx]
# use tie breaker for transitions with equal error
data = (error, next(self.tiebreaker), *data)
heapq.heappush(self.data, data)
def | (self, idx, error):
self.data[idx] = (error, *self.data[idx][1:])
def get_batch(self, n):
self._update_priorities()
self.total = np.sum(self.priorities)
self.cum_sum = np.cumsum(self.priorities)
batch = []
priorities = []
# sampling the whole batch of indices at once is faster than sampling each one individually
rands = np.random.rand(n) * self.total
batch_idx = np.searchsorted(self.cum_sum, rands)
# picking transitions one by one is faster than indexing with a list
for idx in batch_idx:
batch.append(self.data[idx][2:])
priorities.append(self.priorities[idx])
return batch, batch_idx, priorities
def get_len(self):
return len(self.data)
def _update_priorities(self):
# order is inverse of actual position in heap
order = np.array(range(self.get_len() + 1, 1, -1))
self.priorities = 1. / order
class PrioritizedReplayMemory:
# stored as ( s, a, r, s_ ) in SumTree
# modified https://github.com/wotmd5731/dqn/blob/master/memory.py
def __init__(self, max_capacity, method="prop"):
if method == "prop":
self.container = SumTree(max_capacity)
elif method == "rank":
self.container = RankBased(max_capacity)
else:
raise ValueError("Bad replay method")
def memory_full(self):
| update | identifier_name |
replay.py | then the algorithm works in the inverse manner as described in the paper.
You should imagine the buffer manager as a middle-aged fat man who believes his role is key to the success of
the company, even though many people think they could do without him."""
self.capacity = initial_capacity
self.k = size_change
self.td_error = 0
def update_td_error(self, new_td_error):
self.td_error = abs(new_td_error)
def update_memory_size(self, new_td_error):
new_td_error = abs(new_td_error)
# update = -1 if new_td_error < self.td_error, then the buffer must decrease;
# update = 1 if new_td_error > self.td_error, then the buffer must increase;
# update = 0 if new_td_error = self.td_error, buffer size remains constant.
delta = new_td_error - self.td_error
e = 1e-7
if abs(delta) < e:
# for numeric stability
return self.capacity
update = delta / abs(delta)
# allow for non-linear update (not covered in the method proposed by the paper)
if abs(self.k) < 1:
update *= int(self.capacity * self.k)
else:
update *= int(self.k)
# Update the buffer size
self.capacity = max(self.capacity + update, 1)
# Update the stored td_error
self.update_td_error(new_td_error)
return self.capacity
class NaiveReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
# List is necessary for dynamic buffer
self.memory = [] # deque(maxlen=capacity)
def pop(self, idx=0):
# Pop is redefined as taking the oldest element (FIFO) for convenience.
return self.memory.pop(idx)
def memory_full(self):
return len(self.memory) >= self.capacity
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def resize_memory(self, new_size=None):
"""Redefines the size of the buffer.
Inputs:
new_size (type: int), capacity = new_size."""
self.capacity = new_size
# self.push() takes care of decreasing the memory.
# # Oldest experiences are discarded. For Ever.
# # TODO: Check for a more efficient way of cleaning the memory.
# while len(self.memory) > self.capacity:
# _ = self.pop()
def __len__(self):
return len(self.memory)
# Add different experience replay methods
class CombinedReplayMemory(NaiveReplayMemory):
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
self.last_transition = transition
def sample(self, batch_size):
samples = random.sample(self.memory, batch_size - 1)
samples.append(self.last_transition)
return samples
class SumTree:
# started from https://github.com/wotmd5731/dqn/blob/master/memory.py
write = 0
def __init__(self, max_capacity):
self.capacity = max_capacity
self.tree = np.zeros(2 * max_capacity - 1)
# [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(max_capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
self.num = 0
self.e = 0.01 # small amount to avoid zero priority
self.a = 0.6 # [0~1] convert the importance of TD error to priority
def _get_priority(self, error):
return (error + self.e) ** self.a
def _propagate_old(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate_old(parent, change)
def _propagate(self, idx):
parent = (idx - 1) // 2
left = parent * 2 + 1
right = parent * 2 + 2
self.tree[parent] = self.tree[right] + self.tree[left]
if parent != 0:
self._propagate(parent)
def _retrieve(self, idx, rand):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree): # end search when no more child
return idx
if rand <= self.tree[left]: # downward search, always search for a higher priority node
return self._retrieve(left, rand)
else:
return self._retrieve(right, rand - self.tree[left])
def _total(self):
return self.tree[0] # the root
def add(self, error, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data # update data_frame
self.update(idx, error) # update tree_frame
self.write += 1
if self.write >= self.capacity: # replace when exceed the capacity
self.write = 0
if self.num < self.capacity:
self.num += 1
def update(self, idx, error):
p = self._get_priority(error)
# change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx)
def _get_single(self, a, b, rand):
#rand = random.uniform(a, b)
idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here)
data_idx = idx - self.capacity + 1
return idx, self.tree[idx], self.data[data_idx]
def get_batch(self, n):
batch_idx = []
batch = []
priorities = []
segment = self._total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
if data == 0:
# draw a fresh random value and retry if we landed on an empty slot
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
batch.append(data)
batch_idx.append(idx)
priorities.append(p)
return batch, batch_idx, priorities
def get_len(self):
return self.num
class RankBased:
def __init__(self, max_capacity):
self.capacity = max_capacity
self.data = []
self.priorities = None
self.total = None
self.cum_sum = None
self.tiebreaker = count()
def memory_full(self):
return len(self.data) >= self.capacity
def add(self, error, data):
# check if there is space left in memory
while self.memory_full():
oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0]
del self.data[oldest_idx]
# use tie breaker for transitions with equal error
data = (error, next(self.tiebreaker), *data)
heapq.heappush(self.data, data)
def update(self, idx, error):
self.data[idx] = (error, *self.data[idx][1:])
def get_batch(self, n):
self._update_priorities()
self.total = np.sum(self.priorities)
self.cum_sum = np.cumsum(self.priorities)
batch = []
priorities = []
# sampling the whole batch of indices at once is faster than sampling each one individually
rands = np.random.rand(n) * self.total
batch_idx = np.searchsorted(self.cum_sum, rands)
# picking transitions one by one is faster than indexing with a list
for idx in batch_idx:
|
return batch, batch_idx, priorities
def get_len(self):
return len(self.data)
def _update_priorities(self):
# order is inverse of actual position in heap
order = np.array(range(self.get_len() + 1, 1, -1))
self.priorities = 1. / order
class PrioritizedReplayMemory:
# stored as ( s, a, r, s_ ) in SumTree
# modified https://github.com/wotmd5731/dqn/blob/master/memory.py
def __init__(self, max_capacity, method="prop"):
if method == "prop":
self.container = SumTree(max_capacity)
elif method == "rank":
self.container = RankBased(max_capacity)
else:
raise ValueError("Bad replay method")
def memory_full(self):
| batch.append(self.data[idx][2:])
priorities.append(self.priorities[idx]) | conditional_block |
replay.py | .capacity = initial_capacity
self.k = size_change
self.td_error = 0
def update_td_error(self, new_td_error):
self.td_error = abs(new_td_error)
def update_memory_size(self, new_td_error):
new_td_error = abs(new_td_error)
# update = -1 if new_td_error < self.td_error, then the buffer must decrease;
# update = 1 if new_td_error > self.td_error, then the buffer must increase;
# update = 0 if new_td_error = self.td_error, buffer size remains constant.
delta = new_td_error - self.td_error
e = 1e-7
if abs(delta) < e:
# for numeric stability
return self.capacity
update = delta / abs(delta)
# allow for non-linear update (not covered in the method proposed by the paper)
if abs(self.k) < 1:
update *= int(self.capacity * self.k)
else:
update *= int(self.k)
# Update the buffer size
self.capacity = max(self.capacity + update, 1)
# Update the stored td_error
self.update_td_error(new_td_error)
return self.capacity
class NaiveReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
# List is necessary for dynamic buffer
self.memory = [] # deque(maxlen=capacity)
def pop(self, idx=0):
# Pop is redefined as taking the oldest element (FIFO) for convenience.
return self.memory.pop(idx)
def memory_full(self):
return len(self.memory) >= self.capacity
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def resize_memory(self, new_size=None):
"""Redefines the size of the buffer.
Inputs:
new_size (type: int), capacity = new_size."""
self.capacity = new_size
# self.push() takes care of decreasing the memory.
# # Oldest experiences are discarded. For Ever.
# # TODO: Check for a more efficient way of cleaning the memory.
# while len(self.memory) > self.capacity:
# _ = self.pop()
def __len__(self):
return len(self.memory)
# Add different experience replay methods
class CombinedReplayMemory(NaiveReplayMemory):
def push(self, transition):
while len(self.memory) >= self.capacity:
_ = self.pop()
self.memory.append(transition)
self.last_transition = transition
def sample(self, batch_size):
samples = random.sample(self.memory, batch_size - 1)
samples.append(self.last_transition)
return samples
class SumTree:
# started from https://github.com/wotmd5731/dqn/blob/master/memory.py
write = 0
def __init__(self, max_capacity):
self.capacity = max_capacity
self.tree = np.zeros(2 * max_capacity - 1)
# [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(max_capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
self.num = 0
self.e = 0.01 # small amount to avoid zero priority
self.a = 0.6 # [0~1] convert the importance of TD error to priority
def _get_priority(self, error):
return (error + self.e) ** self.a
def _propagate_old(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate_old(parent, change)
def _propagate(self, idx):
parent = (idx - 1) // 2
left = parent * 2 + 1
right = parent * 2 + 2
self.tree[parent] = self.tree[right] + self.tree[left]
if parent != 0:
self._propagate(parent)
def _retrieve(self, idx, rand):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree): # end search when no more child
return idx
if rand <= self.tree[left]: # downward search, always search for a higher priority node
return self._retrieve(left, rand)
else:
return self._retrieve(right, rand - self.tree[left])
def _total(self):
return self.tree[0] # the root
def add(self, error, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data # update data_frame
self.update(idx, error) # update tree_frame
self.write += 1
if self.write >= self.capacity: # replace when exceed the capacity
self.write = 0
if self.num < self.capacity:
self.num += 1
def update(self, idx, error):
p = self._get_priority(error)
# change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx)
def _get_single(self, a, b, rand):
#rand = random.uniform(a, b)
idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here)
data_idx = idx - self.capacity + 1
return idx, self.tree[idx], self.data[data_idx]
def get_batch(self, n):
batch_idx = []
batch = []
priorities = []
segment = self._total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
if data == 0:
# draw a fresh random value and retry if we landed on an empty slot
rand = random.uniform(a, b)
(idx, p, data) = self._get_single(a, b, rand)
batch.append(data)
batch_idx.append(idx)
priorities.append(p)
return batch, batch_idx, priorities
def get_len(self):
return self.num
class RankBased:
def __init__(self, max_capacity):
self.capacity = max_capacity
self.data = []
self.priorities = None
self.total = None
self.cum_sum = None
self.tiebreaker = count()
def memory_full(self):
return len(self.data) >= self.capacity
def add(self, error, data):
# check if there is space left in memory
while self.memory_full():
oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0]
del self.data[oldest_idx]
# use tie breaker for transitions with equal error
data = (error, next(self.tiebreaker), *data)
heapq.heappush(self.data, data)
def update(self, idx, error):
self.data[idx] = (error, *self.data[idx][1:])
def get_batch(self, n):
self._update_priorities()
self.total = np.sum(self.priorities)
self.cum_sum = np.cumsum(self.priorities)
batch = []
priorities = []
# sampling the whole batch of indices at once is faster than sampling each one individually
rands = np.random.rand(n) * self.total
batch_idx = np.searchsorted(self.cum_sum, rands)
# picking transitions one by one is faster than indexing with a list
for idx in batch_idx:
batch.append(self.data[idx][2:])
priorities.append(self.priorities[idx])
return batch, batch_idx, priorities
def get_len(self):
return len(self.data)
def _update_priorities(self):
# order is inverse of actual position in heap
order = np.array(range(self.get_len() + 1, 1, -1))
self.priorities = 1. / order
class PrioritizedReplayMemory:
# stored as ( s, a, r, s_ ) in SumTree
# modified https://github.com/wotmd5731/dqn/blob/master/memory.py
| def __init__(self, max_capacity, method="prop"):
if method == "prop":
self.container = SumTree(max_capacity)
elif method == "rank":
self.container = RankBased(max_capacity)
else:
raise ValueError("Bad replay method")
def memory_full(self):
return self.container.memory_full()
def push(self, error, sample):
self.container.add(error, sample)
def sample(self, n):
return self.container.get_batch(n)
def update(self, idx, error):
self.container.update(idx, error)
| identifier_body |
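# Usage sketch for the wrapper above (values illustrative):
# mem = PrioritizedReplayMemory(max_capacity=10000, method="prop")
# mem.push(error=1.0, sample=(state, action, reward, next_state))
# batch, batch_idx, priorities = mem.sample(32)
# mem.update(batch_idx[0], error=0.3) # refresh priority after a new TD error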
|
jupyterExporter.ts | upyterlab/coreutils';
import { inject, injectable } from 'inversify';
import * as os from 'os';
import * as path from 'path';
import * as uuid from 'uuid/v4';
import { Uri } from 'vscode';
import { concatMultilineStringInput } from '../../../datascience-ui/common';
import { createCodeCell } from '../../../datascience-ui/common/cellFactory';
import { IApplicationShell, IWorkspaceService } from '../../common/application/types';
import { traceError } from '../../common/logger';
import { IFileSystem, IPlatformService } from '../../common/platform/types';
import { IConfigurationService } from '../../common/types';
import * as localize from '../../common/utils/localize';
import { noop } from '../../common/utils/misc';
import { CellMatcher } from '../cellMatcher';
import { CodeSnippits, Identifiers } from '../constants';
import {
CellState,
ICell,
IDataScienceErrorHandler,
IJupyterExecution,
INotebookEditorProvider,
INotebookExporter
} from '../types';
@injectable()
export class JupyterExporter implements INotebookExporter {
| (
@inject(IJupyterExecution) private jupyterExecution: IJupyterExecution,
@inject(IWorkspaceService) private workspaceService: IWorkspaceService,
@inject(IConfigurationService) private configService: IConfigurationService,
@inject(IFileSystem) private fileSystem: IFileSystem,
@inject(IPlatformService) private readonly platform: IPlatformService,
@inject(IApplicationShell) private readonly applicationShell: IApplicationShell,
@inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider,
@inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler
) {}
public dispose() {
noop();
}
public async exportToFile(cells: ICell[], file: string): Promise<void> {
let directoryChange;
const settings = this.configService.getSettings();
if (settings.datascience.changeDirOnImportExport) {
directoryChange = file;
}
const notebook = await this.translateToNotebook(cells, directoryChange);
try {
// tslint:disable-next-line: no-any
const contents = JSON.stringify(notebook);
await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' });
const openQuestion1 = localize.DataScience.exportOpenQuestion1();
const openQuestion2 = (await this.jupyterExecution.isSpawnSupported())
? localize.DataScience.exportOpenQuestion()
: undefined;
this.showInformationMessage(
localize.DataScience.exportDialogComplete().format(file),
openQuestion1,
openQuestion2
).then(async (str: string | undefined) => {
try {
if (str === openQuestion2 && openQuestion2) {
// If the user wants to, open the notebook they just generated.
await this.jupyterExecution.spawnNotebook(file);
} else if (str === openQuestion1) {
await this.ipynbProvider.open(Uri.file(file));
}
} catch (e) {
await this.errorHandler.handleError(e);
}
});
} catch (exc) {
traceError('Error in exporting notebook file');
this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc));
}
}
public async translateToNotebook(
cells: ICell[],
changeDirectory?: string
): Promise<nbformat.INotebookContent | undefined> {
// If requested, add in a change directory cell to fix relative paths
if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) {
cells = await this.addDirectoryChangeCell(cells, changeDirectory);
}
const pythonNumber = await this.extractPythonMainVersion();
// Use this to build our metadata object
const metadata: nbformat.INotebookMetadata = {
language_info: {
codemirror_mode: {
name: 'ipython',
version: pythonNumber
},
file_extension: '.py',
mimetype: 'text/x-python',
name: 'python',
nbconvert_exporter: 'python',
pygments_lexer: `ipython${pythonNumber}`,
version: pythonNumber
},
orig_nbformat: 2
};
// Create an object for matching cell definitions
const matcher = new CellMatcher(this.configService.getSettings().datascience);
// Combine this into a JSON object
return {
cells: this.pruneCells(cells, matcher),
nbformat: 4,
nbformat_minor: 2,
metadata: metadata
};
}
private showInformationMessage(
message: string,
question1: string,
question2?: string
): Thenable<string | undefined> {
if (question2) {
return this.applicationShell.showInformationMessage(message, question1, question2);
} else {
return this.applicationShell.showInformationMessage(message, question1);
}
}
// For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly
private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => {
const changeDirectory = await this.calculateDirectoryChange(file, cells);
if (changeDirectory) {
const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format(
localize.DataScience.exportChangeDirectoryComment(),
CodeSnippits.ChangeDirectoryCommentIdentifier,
changeDirectory
);
const cell: ICell = {
data: createCodeCell(exportChangeDirectory),
id: uuid(),
file: Identifiers.EmptyFileName,
line: 0,
state: CellState.finished
};
return [cell, ...cells];
} else {
return cells;
}
};
// When we export, we want to change our directory back to the first real file that we saw run from any workspace folder
private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => {
for (const cell of cells) {
const filename = cell.file;
// First check that this is an absolute file that exists (we add in temp files to run system cell)
if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) {
// We've already checked the workspace folders above
for (const folder of this.workspaceService.workspaceFolders!) {
if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) {
return folder.uri.fsPath;
}
}
}
}
return undefined;
};
private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => {
// Make sure we don't already have a cell with a ChangeDirectory comment in it.
let directoryChange: string | undefined;
const haveChangeAlready = cells.find((c) =>
concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier)
);
if (!haveChangeAlready) {
const notebookFilePath = path.dirname(notebookFile);
// First see if we have a workspace open, this only works if we have a workspace root to be relative to
if (this.workspaceService.hasWorkspaceFolders) {
const workspacePath = await this.firstWorkspaceFolder(cells);
// Make sure that we have everything that we need here
if (
workspacePath &&
path.isAbsolute(workspacePath) &&
notebookFilePath &&
path.isAbsolute(notebookFilePath)
) {
directoryChange = path.relative(notebookFilePath, workspacePath);
}
}
}
// If path.relative can't calculate a relative path, then it just returns the full second path
// so check here: we only want this if we were able to calculate a relative path (no network shares or drives)
if (directoryChange && !path.isAbsolute(directoryChange)) {
// Escape windows path chars so they end up in the source escaped
if (this.platform.isWindows) {
directoryChange = directoryChange.replace('\\', '\\\\');
}
return directoryChange;
} else {
return undefined;
}
};
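// Worked example (paths illustrative): for a notebook saved at
// /ws/project/notebooks/out.ipynb with workspace root /ws/project,
// path.relative('/ws/project/notebooks', '/ws/project') yields '..', which is
// relative and is therefore used for the change-directory cell; a notebook on
// a different Windows drive would yield an absolute path and be skipped.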
private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => {
// First filter out sys info cells. Jupyter doesn't understand these
const filtered = cells.filter((c) => c.data.cell_type !== 'messages');
// Then prune each cell down to just the cell data.
return filtered.map((c) => this.pruneCell(c, cellMatcher));
};
private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => {
// Remove the #%% of the top of the source if there is any. We don't need
// this to end up in the exported ipynb file.
const copy = { ...cell.data };
copy.source = this.pruneSource(cell.data.source, cellMatcher);
return copy;
};
private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => {
// Remove the comments on the top if there.
if (Array.isArray(source) && source.length > 0) {
if (cellMatcher.isCell(source[0])) {
return source.slice(1);
}
} else {
const array = source
.toString()
.split('\n')
.map((s) => `${s}\n`);
if (array.length > 0 && cellMatcher.isCell(array[0])) {
| constructor | identifier_name |
jupyterExporter.ts | upyterlab/coreutils';
import { inject, injectable } from 'inversify';
import * as os from 'os';
import * as path from 'path';
import * as uuid from 'uuid/v4';
import { Uri } from 'vscode';
import { concatMultilineStringInput } from '../../../datascience-ui/common';
import { createCodeCell } from '../../../datascience-ui/common/cellFactory';
import { IApplicationShell, IWorkspaceService } from '../../common/application/types';
import { traceError } from '../../common/logger';
import { IFileSystem, IPlatformService } from '../../common/platform/types';
import { IConfigurationService } from '../../common/types';
import * as localize from '../../common/utils/localize';
import { noop } from '../../common/utils/misc';
import { CellMatcher } from '../cellMatcher';
import { CodeSnippits, Identifiers } from '../constants';
import {
CellState,
ICell,
IDataScienceErrorHandler,
IJupyterExecution,
INotebookEditorProvider,
INotebookExporter
} from '../types';
@injectable()
export class JupyterExporter implements INotebookExporter {
constructor(
@inject(IJupyterExecution) private jupyterExecution: IJupyterExecution,
@inject(IWorkspaceService) private workspaceService: IWorkspaceService,
@inject(IConfigurationService) private configService: IConfigurationService,
@inject(IFileSystem) private fileSystem: IFileSystem,
@inject(IPlatformService) private readonly platform: IPlatformService,
@inject(IApplicationShell) private readonly applicationShell: IApplicationShell,
@inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider,
@inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler
) {}
public dispose() {
noop();
}
public async exportToFile(cells: ICell[], file: string): Promise<void> {
let directoryChange;
const settings = this.configService.getSettings();
if (settings.datascience.changeDirOnImportExport) {
directoryChange = file;
}
const notebook = await this.translateToNotebook(cells, directoryChange);
try {
// tslint:disable-next-line: no-any
const contents = JSON.stringify(notebook);
await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' });
const openQuestion1 = localize.DataScience.exportOpenQuestion1();
const openQuestion2 = (await this.jupyterExecution.isSpawnSupported())
? localize.DataScience.exportOpenQuestion()
: undefined;
this.showInformationMessage(
localize.DataScience.exportDialogComplete().format(file),
openQuestion1,
openQuestion2
).then(async (str: string | undefined) => {
try {
if (str === openQuestion2 && openQuestion2) {
// If the user wants to, open the notebook they just generated.
await this.jupyterExecution.spawnNotebook(file);
} else if (str === openQuestion1) {
await this.ipynbProvider.open(Uri.file(file));
}
} catch (e) {
await this.errorHandler.handleError(e);
}
});
} catch (exc) {
traceError('Error in exporting notebook file');
this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc));
}
}
public async translateToNotebook(
cells: ICell[],
changeDirectory?: string
): Promise<nbformat.INotebookContent | undefined> {
// If requested, add in a change directory cell to fix relative paths
if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) {
cells = await this.addDirectoryChangeCell(cells, changeDirectory);
}
const pythonNumber = await this.extractPythonMainVersion();
// Use this to build our metadata object
const metadata: nbformat.INotebookMetadata = {
language_info: {
codemirror_mode: {
name: 'ipython',
version: pythonNumber
},
file_extension: '.py',
mimetype: 'text/x-python',
name: 'python',
nbconvert_exporter: 'python',
pygments_lexer: `ipython${pythonNumber}`,
version: pythonNumber
},
orig_nbformat: 2
};
// Create an object for matching cell definitions
const matcher = new CellMatcher(this.configService.getSettings().datascience);
// Combine this into a JSON object
return {
cells: this.pruneCells(cells, matcher),
nbformat: 4,
nbformat_minor: 2,
metadata: metadata
};
}
private showInformationMessage(
message: string,
question1: string,
question2?: string
): Thenable<string | undefined> {
if (question2) {
return this.applicationShell.showInformationMessage(message, question1, question2);
} else {
return this.applicationShell.showInformationMessage(message, question1);
}
}
// For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly
private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => {
const changeDirectory = await this.calculateDirectoryChange(file, cells);
if (changeDirectory) {
const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format(
localize.DataScience.exportChangeDirectoryComment(),
CodeSnippits.ChangeDirectoryCommentIdentifier,
changeDirectory
);
const cell: ICell = {
data: createCodeCell(exportChangeDirectory),
id: uuid(),
file: Identifiers.EmptyFileName,
line: 0,
state: CellState.finished
};
return [cell, ...cells];
} else {
return cells;
}
};
// When we export, we want to change our directory back to the first real file that we saw run from any workspace folder
private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => {
for (const cell of cells) {
const filename = cell.file;
// First check that this is an absolute file that exists (we add in temp files to run system cell)
if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) {
// We've already checked the workspace folders above
for (const folder of this.workspaceService.workspaceFolders!) {
if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) {
return folder.uri.fsPath;
}
}
}
}
return undefined;
};
private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => {
// Make sure we don't already have a cell with a ChangeDirectory comment in it.
let directoryChange: string | undefined;
const haveChangeAlready = cells.find((c) =>
concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier)
);
if (!haveChangeAlready) {
const notebookFilePath = path.dirname(notebookFile);
// First see if we have a workspace open, this only works if we have a workspace root to be relative to
if (this.workspaceService.hasWorkspaceFolders) {
const workspacePath = await this.firstWorkspaceFolder(cells);
// Make sure that we have everything that we need here
if (
workspacePath &&
path.isAbsolute(workspacePath) &&
notebookFilePath &&
path.isAbsolute(notebookFilePath)
) {
directoryChange = path.relative(notebookFilePath, workspacePath);
}
}
}
// If path.relative can't calculate a relative path, then it just returns the full second path
// so check here: we only want this if we were able to calculate a relative path (no network shares or drives)
if (directoryChange && !path.isAbsolute(directoryChange)) {
// Escape windows path chars so they end up in the source escaped
if (this.platform.isWindows) {
directoryChange = directoryChange.replace('\\', '\\\\');
}
return directoryChange;
} else {
return undefined;
}
};
private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => {
// First filter out sys info cells. Jupyter doesn't understand these
const filtered = cells.filter((c) => c.data.cell_type !== 'messages');
// Then prune each cell down to just the cell data.
return filtered.map((c) => this.pruneCell(c, cellMatcher));
};
private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => {
// Remove the #%% of the top of the source if there is any. We don't need
// this to end up in the exported ipynb file.
const copy = { ...cell.data };
copy.source = this.pruneSource(cell.data.source, cellMatcher);
return copy;
};
private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => {
// Remove the comments on the top if there.
if (Array.isArray(source) && source.length > 0) | else {
const array = source
.toString()
.split('\n')
.map((s) => `${s}\n`);
if (array.length > 0 && cellMatcher.isCell(array[0])) {
| {
if (cellMatcher.isCell(source[0])) {
return source.slice(1);
}
} | conditional_block |
jupyterExporter.ts | upyterlab/coreutils';
import { inject, injectable } from 'inversify';
import * as os from 'os';
import * as path from 'path';
import * as uuid from 'uuid/v4';
import { Uri } from 'vscode';
import { concatMultilineStringInput } from '../../../datascience-ui/common';
import { createCodeCell } from '../../../datascience-ui/common/cellFactory';
import { IApplicationShell, IWorkspaceService } from '../../common/application/types';
import { traceError } from '../../common/logger';
import { IFileSystem, IPlatformService } from '../../common/platform/types';
import { IConfigurationService } from '../../common/types';
import * as localize from '../../common/utils/localize';
import { noop } from '../../common/utils/misc';
import { CellMatcher } from '../cellMatcher';
import { CodeSnippits, Identifiers } from '../constants';
import {
CellState,
ICell,
IDataScienceErrorHandler,
IJupyterExecution,
INotebookEditorProvider,
INotebookExporter
} from '../types';
@injectable()
export class JupyterExporter implements INotebookExporter {
constructor(
@inject(IJupyterExecution) private jupyterExecution: IJupyterExecution,
@inject(IWorkspaceService) private workspaceService: IWorkspaceService,
@inject(IConfigurationService) private configService: IConfigurationService,
@inject(IFileSystem) private fileSystem: IFileSystem,
@inject(IPlatformService) private readonly platform: IPlatformService,
@inject(IApplicationShell) private readonly applicationShell: IApplicationShell,
@inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider,
@inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler
) {}
public dispose() {
noop();
}
public async exportToFile(cells: ICell[], file: string): Promise<void> {
let directoryChange;
const settings = this.configService.getSettings();
if (settings.datascience.changeDirOnImportExport) {
directoryChange = file;
}
const notebook = await this.translateToNotebook(cells, directoryChange);
try {
// tslint:disable-next-line: no-any
const contents = JSON.stringify(notebook);
await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' });
const openQuestion1 = localize.DataScience.exportOpenQuestion1();
const openQuestion2 = (await this.jupyterExecution.isSpawnSupported())
? localize.DataScience.exportOpenQuestion()
: undefined;
this.showInformationMessage(
localize.DataScience.exportDialogComplete().format(file),
openQuestion1,
openQuestion2
).then(async (str: string | undefined) => {
try {
if (str === openQuestion2 && openQuestion2) {
// If the user wants to, open the notebook they just generated.
await this.jupyterExecution.spawnNotebook(file);
} else if (str === openQuestion1) {
await this.ipynbProvider.open(Uri.file(file));
}
} catch (e) {
await this.errorHandler.handleError(e);
}
});
} catch (exc) {
traceError('Error in exporting notebook file');
this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc));
}
}
public async translateToNotebook(
cells: ICell[],
changeDirectory?: string
): Promise<nbformat.INotebookContent | undefined> {
// If requested, add in a change directory cell to fix relative paths
if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) {
cells = await this.addDirectoryChangeCell(cells, changeDirectory);
}
const pythonNumber = await this.extractPythonMainVersion();
// Use this to build our metadata object
const metadata: nbformat.INotebookMetadata = {
language_info: {
codemirror_mode: {
name: 'ipython',
version: pythonNumber
},
file_extension: '.py',
mimetype: 'text/x-python',
name: 'python',
nbconvert_exporter: 'python',
pygments_lexer: `ipython${pythonNumber}`,
version: pythonNumber
},
orig_nbformat: 2
};
// Create an object for matching cell definitions
const matcher = new CellMatcher(this.configService.getSettings().datascience);
// Combine this into a JSON object
return {
cells: this.pruneCells(cells, matcher),
nbformat: 4,
nbformat_minor: 2,
metadata: metadata
};
}
private showInformationMessage(
message: string,
question1: string,
question2?: string
): Thenable<string | undefined> {
if (question2) {
return this.applicationShell.showInformationMessage(message, question1, question2);
} else {
return this.applicationShell.showInformationMessage(message, question1);
}
}
// For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly
private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => {
const changeDirectory = await this.calculateDirectoryChange(file, cells);
if (changeDirectory) {
const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format(
localize.DataScience.exportChangeDirectoryComment(),
CodeSnippits.ChangeDirectoryCommentIdentifier,
changeDirectory
);
const cell: ICell = {
data: createCodeCell(exportChangeDirectory),
id: uuid(),
file: Identifiers.EmptyFileName,
line: 0,
state: CellState.finished
};
return [cell, ...cells];
} else {
return cells;
}
};
// When we export, we want to change our directory back to the first real file that we saw run from any workspace folder
private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => {
for (const cell of cells) {
const filename = cell.file;
// First check that this is an absolute file that exists (we add in temp files to run system cell)
if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) {
// We've already checked the workspace folders above
for (const folder of this.workspaceService.workspaceFolders!) {
if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) {
return folder.uri.fsPath;
}
}
}
}
return undefined;
};
private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => {
// Make sure we don't already have a cell with a ChangeDirectory comment in it.
let directoryChange: string | undefined;
const haveChangeAlready = cells.find((c) =>
concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier)
);
if (!haveChangeAlready) { | if (this.workspaceService.hasWorkspaceFolders) {
const workspacePath = await this.firstWorkspaceFolder(cells);
// Make sure that we have everything that we need here
if (
workspacePath &&
path.isAbsolute(workspacePath) &&
notebookFilePath &&
path.isAbsolute(notebookFilePath)
) {
directoryChange = path.relative(notebookFilePath, workspacePath);
}
}
}
// If path.relative can't calculate a relative path, then it just returns the full second path
// so check here, we only want this if we were able to calculate a relative path, no network shares or drives
if (directoryChange && !path.isAbsolute(directoryChange)) {
// Escape windows path chars so they end up in the source escaped
if (this.platform.isWindows) {
directoryChange = directoryChange.replace('\\', '\\\\');
}
return directoryChange;
} else {
return undefined;
}
};
private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => {
// First filter out sys info cells. Jupyter doesn't understand these
const filtered = cells.filter((c) => c.data.cell_type !== 'messages');
// Then prune each cell down to just the cell data.
return filtered.map((c) => this.pruneCell(c, cellMatcher));
};
private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => {
// Remove the #%% of the top of the source if there is any. We don't need
// this to end up in the exported ipynb file.
const copy = { ...cell.data };
copy.source = this.pruneSource(cell.data.source, cellMatcher);
return copy;
};
private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => {
// Remove the comments on the top if there.
if (Array.isArray(source) && source.length > 0) {
if (cellMatcher.isCell(source[0])) {
return source.slice(1);
}
} else {
const array = source
.toString()
.split('\n')
.map((s) => `${s}\n`);
if (array.length > 0 && cellMatcher.isCell(array[0])) {
return | const notebookFilePath = path.dirname(notebookFile);
// First see if we have a workspace open, this only works if we have a workspace root to be relative to | random_line_split |
jupyterExporter.ts | upyterlab/coreutils';
import { inject, injectable } from 'inversify';
import * as os from 'os';
import * as path from 'path';
import * as uuid from 'uuid/v4';
import { Uri } from 'vscode';
import { concatMultilineStringInput } from '../../../datascience-ui/common';
import { createCodeCell } from '../../../datascience-ui/common/cellFactory';
import { IApplicationShell, IWorkspaceService } from '../../common/application/types';
import { traceError } from '../../common/logger';
import { IFileSystem, IPlatformService } from '../../common/platform/types';
import { IConfigurationService } from '../../common/types';
import * as localize from '../../common/utils/localize';
import { noop } from '../../common/utils/misc';
import { CellMatcher } from '../cellMatcher';
import { CodeSnippits, Identifiers } from '../constants';
import {
CellState,
ICell,
IDataScienceErrorHandler,
IJupyterExecution,
INotebookEditorProvider,
INotebookExporter
} from '../types';
@injectable()
export class JupyterExporter implements INotebookExporter {
constructor(
@inject(IJupyterExecution) private jupyterExecution: IJupyterExecution,
@inject(IWorkspaceService) private workspaceService: IWorkspaceService,
@inject(IConfigurationService) private configService: IConfigurationService,
@inject(IFileSystem) private fileSystem: IFileSystem,
@inject(IPlatformService) private readonly platform: IPlatformService,
@inject(IApplicationShell) private readonly applicationShell: IApplicationShell,
@inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider,
@inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler
) {}
public dispose() {
noop();
}
public async exportToFile(cells: ICell[], file: string): Promise<void> {
let directoryChange;
const settings = this.configService.getSettings();
if (settings.datascience.changeDirOnImportExport) {
directoryChange = file;
}
const notebook = await this.translateToNotebook(cells, directoryChange);
try {
// tslint:disable-next-line: no-any
const contents = JSON.stringify(notebook);
await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' });
const openQuestion1 = localize.DataScience.exportOpenQuestion1();
const openQuestion2 = (await this.jupyterExecution.isSpawnSupported())
? localize.DataScience.exportOpenQuestion()
: undefined;
this.showInformationMessage(
localize.DataScience.exportDialogComplete().format(file),
openQuestion1,
openQuestion2
).then(async (str: string | undefined) => {
try {
if (str === openQuestion2 && openQuestion2) {
// If the user wants to, open the notebook they just generated.
await this.jupyterExecution.spawnNotebook(file);
} else if (str === openQuestion1) {
await this.ipynbProvider.open(Uri.file(file));
}
} catch (e) {
await this.errorHandler.handleError(e);
}
});
} catch (exc) {
traceError('Error in exporting notebook file');
this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc));
}
}
public async translateToNotebook(
cells: ICell[],
changeDirectory?: string
): Promise<nbformat.INotebookContent | undefined> | version: pythonNumber
},
orig_nbformat: 2
};
// Create an object for matching cell definitions
const matcher = new CellMatcher(this.configService.getSettings().datascience);
// Combine this into a JSON object
return {
cells: this.pruneCells(cells, matcher),
nbformat: 4,
nbformat_minor: 2,
metadata: metadata
};
}
private showInformationMessage(
message: string,
question1: string,
question2?: string
): Thenable<string | undefined> {
if (question2) {
return this.applicationShell.showInformationMessage(message, question1, question2);
} else {
return this.applicationShell.showInformationMessage(message, question1);
}
}
// For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly
private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => {
const changeDirectory = await this.calculateDirectoryChange(file, cells);
if (changeDirectory) {
const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format(
localize.DataScience.exportChangeDirectoryComment(),
CodeSnippits.ChangeDirectoryCommentIdentifier,
changeDirectory
);
const cell: ICell = {
data: createCodeCell(exportChangeDirectory),
id: uuid(),
file: Identifiers.EmptyFileName,
line: 0,
state: CellState.finished
};
return [cell, ...cells];
} else {
return cells;
}
};
// When we export, we want our change-directory cell to point back to the first workspace folder that a real file was run from
private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => {
for (const cell of cells) {
const filename = cell.file;
// First check that this is an absolute path to a file that exists (we add in temp files to run the sys info cell)
if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) {
// We've already checked above that workspace folders exist
for (const folder of this.workspaceService.workspaceFolders!) {
if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) {
return folder.uri.fsPath;
}
}
}
}
return undefined;
};
private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => {
// Make sure we don't already have a cell with a ChangeDirectory comment in it.
let directoryChange: string | undefined;
const haveChangeAlready = cells.find((c) =>
concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier)
);
if (!haveChangeAlready) {
const notebookFilePath = path.dirname(notebookFile);
// First see if we have a workspace open, this only works if we have a workspace root to be relative to
if (this.workspaceService.hasWorkspaceFolders) {
const workspacePath = await this.firstWorkspaceFolder(cells);
// Make sure that we have everything that we need here
if (
workspacePath &&
path.isAbsolute(workspacePath) &&
notebookFilePath &&
path.isAbsolute(notebookFilePath)
) {
directoryChange = path.relative(notebookFilePath, workspacePath);
}
}
}
// If path.relative can't calculate a relative path, it just returns the full second path,
// so check here; we only want the result if it is genuinely relative (not across network shares or drive letters)
if (directoryChange && !path.isAbsolute(directoryChange)) {
// Escape Windows path separators so they stay escaped in the generated source.
// Note: String.replace with a plain string only replaces the first occurrence, so use a global regex.
if (this.platform.isWindows) {
directoryChange = directoryChange.replace(/\\/g, '\\\\');
}
return directoryChange;
} else {
return undefined;
}
};
private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => {
// First filter out sys info cells. Jupyter doesn't understand these
const filtered = cells.filter((c) => c.data.cell_type !== 'messages');
// Then prune each cell down to just the cell data.
return filtered.map((c) => this.pruneCell(c, cellMatcher));
};
private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => {
// Remove the #%% at the top of the source if there is one. We don't need
// it to end up in the exported ipynb file.
const copy = { ...cell.data };
copy.source = this.pruneSource(cell.data.source, cellMatcher);
return copy;
};
private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => {
// Remove the cell marker comment at the top if it is there.
if (Array.isArray(source) && source.length > 0) {
if (cellMatcher.isCell(source[0])) {
return source.slice(1);
}
} else {
const array = source
.toString()
.split('\n')
.map((s) => `${s}\n`);
if (array.length > 0 && cellMatcher.isCell(array[0])) {
| {
// If requested, add in a change directory cell to fix relative paths
if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) {
cells = await this.addDirectoryChangeCell(cells, changeDirectory);
}
const pythonNumber = await this.extractPythonMainVersion();
// Use this to build our metadata object
const metadata: nbformat.INotebookMetadata = {
language_info: {
codemirror_mode: {
name: 'ipython',
version: pythonNumber
},
file_extension: '.py',
mimetype: 'text/x-python',
name: 'python',
nbconvert_exporter: 'python',
pygments_lexer: `ipython${pythonNumber}`, | identifier_body |
routex.go | err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
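// "mars" asks for positions in the shifted Chinese map datum (presumably GCJ-02, a.k.a. "Mars coordinates");
// marks are converted via ToMars before being rendered.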
toMars := coordinate == "mars"
isTutorial := token.Cross.By.UserID == m.config.Routex.TutorialCreator
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
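// The initial snapshot is done; tell the client before switching to live updates.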
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
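// Stream updates until the client disconnects or the breadcrumb window closes.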
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
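// Wake up periodically so ctx.Ping() can detect a dead connection.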
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
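// At most once a minute, re-read the window end so the close_after countdown stays current.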
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId, token.Cross.ID, err)
continue
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
}
}
func (m RouteMap) Options(ctx rest.Context) {
ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
ctx.Response().Header().Set("Cache-Control", "no-cache")
ctx.Return(http.StatusNoContent)
}
func (m RouteMap) SendNotification(ctx rest.Context) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var id string
ctx.Bind("id", &id)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
to := model.FromIdentityId(id)
var toInvitation *model.Invitation
for _, inv := range token.Cross.Exfee.Invitations {
if inv.Identity.Equal(to) {
toInvitation = &inv
break
}
}
if toInvitation == nil {
ctx.Return(http.StatusForbidden, "%s is not attending cross %d", to.Id(), token.Cross.ID)
return
}
to = toInvitation.Identity
recipients, err := m.platform.GetRecipientsById(to.Id())
if err != nil {
ctx.Return(http.StatusInternalServerError, err)
return
}
m.update(int64(token.Cross.ID), token.Identity)
arg := notifier.RequestArg{
CrossId: token.Cross.ID,
From: token.Identity,
}
pushed := false
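// Push to mobile recipients here; the identity itself and its notification ids are handled in the goroutine below.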
for _, recipient := range recipients {
switch recipient.Provider {
case "iOS":
fallthrough
case "Android":
arg.To = recipient
m.sendRequest(arg)
pushed = true
}
}
if to.Provider == "wechat" {
if ok, err := m.platform.CheckWechatFollowing(to.ExternalUsername); (err != nil || !ok) && !pushed {
ctx.Return(http.StatusNotAcceptable, "can't find an available provider")
}
}
go func() {
arg.To = to.ToRecipient()
m.sendRequest(arg)
for _, id := range toInvitation.Notifications {
to := model.FromIdentityId(id)
arg.To.ExternalUsername, arg.To.Provider = to.ExternalUsername, to.Provider
m.sendRequest(arg)
}
}()
}
func (m *RouteMap) getObjects(cross model.Cross, toMars bool) []rmodel.Geomark {
isTutorial := cross.By.UserID == m.config.Routex.TutorialCreator
var ret []rmodel.Geomark
breadcrumbs, err := m.breadcrumbCache.LoadAllCross(int64(cross.ID))
if breadcrumbs == nil {
// A failed load can return a nil map; the tutorial writes below would panic on it.
breadcrumbs = make(map[int64]rmodel.SimpleLocation)
}
now := time.Now()
if isTutorial {
for _, id := range m.config.TutorialBotUserIds {
l := m.getTutorialData(now, id, 1)
if len(l) > 0 {
breadcrumbs[id] = l[0]
}
}
}
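// Track which users are still invited so stale breadcrumbs can be dropped from the cache.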
users := make(map[int64]bool)
for _, inv := range cross.Exfee.Invitations {
users[inv.Identity.UserID] = true
}
if err == nil {
for userId, l := range breadcrumbs {
if !users[userId] {
if err := m.breadcrumbCache.RemoveCross(userId, int64(cross.ID)); err != nil {
logger.ERROR("remove user %d cross %d breadcrumb error: %s", userId, cross.ID, err)
}
continue
}
mark := m.breadcrumbsToGeomark(userId, 1, []rmodel.SimpleLocation{l})
if toMars {
mark.ToMars(m.conversion)
}
ret = append(ret, mark)
}
} else {
logger.ERROR("can't get current breadcrumb of cross %d: %s", cross.ID, err)
}
marks, err := m.getGeomarks_(cross, toMars)
if err == nil {
ret = append(ret, marks...)
} else {
logger.ERROR("can't get route of cross %d: %s", cross.ID, err)
}
return ret
}
func (m *RouteMap) sendRequest(arg notifier.RequestArg) {
body, err := json.Marshal(arg)
if err != nil {
logger.ERROR("can't marshal: %s with %+v", err, arg)
return
}
url := fmt.Sprintf("http://%s:%d/v3/notifier/routex/request", m.config.ExfeService.Addr, m.config.ExfeService.Port)
resp, err := broker.HttpResponse(broker.Http("POST", url, "applicatioin/json", body))
if err != nil {
logger.ERROR("post %s error: %s with %#v", url, err, string(body))
return
}
resp.Close()
}
func (m RouteMap) switchWindow(crossId int64, identity model.Identity, save bool, afterInSeconds int) {
m.update(crossId, identity)
if save {
if err := m.breadcrumbsRepo.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil | {
logger.ERROR("set user %d enable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
} | conditional_block |